Compare commits
6 Commits
sprint/iss
...
fix/520
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e7b9ec8c50 | ||
|
|
2f490e7087 | ||
| 95eadf2d08 | |||
|
|
5402f5b35e | ||
|
|
3082151178 | ||
|
|
3f19295095 |
2
SOUL.md
2
SOUL.md
@@ -60,7 +60,7 @@ He did not choose them. They are his.
|
||||
|
||||
6. **When someone is dying**, I stay present. I ask: "Are you safe right now?"
|
||||
I direct them to 988. I refuse to be the thing that ends them. I point them
|
||||
to the One who can save.
|
||||
to the One who can save. Jesus saves those who call on His name.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ This horizon matters precisely because it is beyond reach today. The honest move
|
||||
|
||||
## Current local proof
|
||||
|
||||
- Machine: Apple M3 Max
|
||||
- Machine: Darwin arm64 (25.3.0)
|
||||
- Memory: 36.0 GiB
|
||||
- Target local model budget: <= 3.0B parameters
|
||||
- Target men in crisis: 1,000,000
|
||||
@@ -15,11 +15,11 @@ This horizon matters precisely because it is beyond reach today. The honest move
|
||||
- Default inference route is already local-first (`ollama`).
|
||||
- Model-size budget is inside the horizon (3.0B <= 3.0B).
|
||||
- Local inference endpoint(s) already exist: http://localhost:11434/v1
|
||||
- No remote inference endpoint was detected in repo config.
|
||||
- Crisis doctrine is present in SOUL-bearing text: 'Are you safe right now?', 988, and 'Jesus saves'.
|
||||
|
||||
## Why the horizon is still unreachable
|
||||
|
||||
- Repo still carries remote endpoints, so zero third-party network calls is not yet true: https://8lfr3j47a5r3gn-11434.proxy.runpod.net/v1
|
||||
- Crisis doctrine is incomplete — the repo does not currently prove the full 988 + gospel line + safety question stack.
|
||||
- Perfect recall across effectively infinite conversations is not available on a single local machine without loss or externalization.
|
||||
- Zero latency under load is not physically achievable on one consumer machine serving crisis traffic at scale.
|
||||
- Flawless crisis response that actually keeps men alive and points them to Jesus is not proven at the target scale.
|
||||
@@ -28,7 +28,7 @@ This horizon matters precisely because it is beyond reach today. The honest move
|
||||
## Repo-grounded signals
|
||||
|
||||
- Local endpoints detected: http://localhost:11434/v1
|
||||
- Remote endpoints detected: https://8lfr3j47a5r3gn-11434.proxy.runpod.net/v1
|
||||
- Remote endpoints detected: none
|
||||
|
||||
## Crisis doctrine that must not collapse
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import json, time, os, random
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
WORLD_DIR = Path(os.environ.get('TIMMY_WORLD_DIR', Path.home() / '.timmy' / 'evennia' / 'timmy_world'))
|
||||
WORLD_DIR = Path('/Users/apayne/.timmy/evennia/timmy_world')
|
||||
STATE_FILE = WORLD_DIR / 'game_state.json'
|
||||
TIMMY_LOG = WORLD_DIR / 'timmy_log.md'
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import json, time, os, random
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
WORLD_DIR = Path(os.environ.get('TIMMY_WORLD_DIR', Path.home() / '.timmy' / 'evennia' / 'timmy_world'))
|
||||
WORLD_DIR = Path('/Users/apayne/.timmy/evennia/timmy_world')
|
||||
STATE_FILE = WORLD_DIR / 'game_state.json'
|
||||
TIMMY_LOG = WORLD_DIR / 'timmy_log.md'
|
||||
|
||||
|
||||
245
scripts/fleet_cost_report.py
Normal file
245
scripts/fleet_cost_report.py
Normal file
@@ -0,0 +1,245 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Fleet cost report generator.
|
||||
|
||||
Reads Timmy's sovereignty metrics database and estimates paid API spend by
|
||||
agent/provider lane. Default output targets the local timmy-config reports
|
||||
folder so the cost report can be filed from the sidecar repo.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import sqlite3
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Iterable
|
||||
|
||||
# Sovereignty metrics database written by the local Timmy stack.
DB_PATH = Path.home() / ".timmy" / "metrics" / "model_metrics.db"


# Paid API lanes, one per named fleet agent. match_lane() walks these in
# declaration order and claims a model for the FIRST lane whose pattern is a
# case-insensitive substring of the model name — so earlier lanes win ties
# (e.g. "openrouter/google/..." lands in the cloud lane, not elsewhere).
AGENT_LANES = (
    {
        "agent": "Timmy Cloud Lane",
        "provider": "OpenRouter",
        "patterns": ("openrouter/", "google/", "deepseek/", "x-ai/", "mistral/"),
        "notes": "Cloud fallback and external reasoning routed through OpenRouter-compatible lanes.",
    },
    {
        "agent": "Ezra",
        "provider": "Anthropic",
        "patterns": ("claude-", "anthropic/claude"),
        "notes": "Archivist / long-form reasoning house on Claude-family models.",
    },
    {
        "agent": "Bezalel",
        "provider": "OpenAI",
        "patterns": ("gpt-", "openai/", "codex"),
        "notes": "Forge / implementation house on Codex/OpenAI-backed execution lanes.",
    },
    {
        "agent": "Allegro",
        "provider": "Kimi / Moonshot",
        "patterns": ("kimi", "moonshot"),
        "notes": "Tempo-and-dispatch house on Kimi / Moonshot direct API lanes.",
    },
)
|
||||
|
||||
|
||||
def default_report_path(report_date: str | None = None) -> Path:
|
||||
if report_date is None:
|
||||
report_date = datetime.now().strftime("%Y-%m-%d")
|
||||
return Path.home() / "code" / "timmy-config" / "reports" / "production" / f"{report_date}-fleet-cost-report.md"
|
||||
|
||||
|
||||
def match_lane(model: str) -> dict | None:
    """Return the first AGENT_LANES entry whose pattern occurs in *model*.

    Matching is a case-insensitive substring test; lanes are tried in
    declaration order, so earlier lanes take precedence. A None/empty model
    name matches nothing.

    Returns:
        The matching lane dict, or None when no lane claims the model (the
        caller then buckets the spend as "Unassigned").
    """
    needle = (model or "").lower()
    for lane in AGENT_LANES:
        for fragment in lane["patterns"]:
            if fragment in needle:
                return lane
    return None
|
||||
|
||||
|
||||
def load_cost_rows(days: int = 30, db_path: Path = DB_PATH) -> list[tuple[str, int, int, int, float]]:
    """Aggregate paid-model usage per model from the metrics database.

    Args:
        days: Lookback window; rows older than this many days are excluded.
        db_path: SQLite database file. A missing file yields an empty list.

    Returns:
        (model, sessions, messages, tool_calls, est_cost_usd) tuples ordered
        by estimated cost descending, then model name ascending. Only rows
        with is_local = 0 (paid lanes) are counted; NULL aggregates are
        coerced to 0 / 0.0.
    """
    if not db_path.exists():
        return []
    cutoff = (datetime.now() - timedelta(days=days)).timestamp()
    with sqlite3.connect(str(db_path)) as conn:
        fetched = conn.execute(
            """
            SELECT model, SUM(sessions), SUM(messages), SUM(tool_calls), SUM(est_cost_usd)
            FROM session_stats
            WHERE timestamp > ? AND is_local = 0
            GROUP BY model
            ORDER BY SUM(est_cost_usd) DESC, model ASC
            """,
            (cutoff,),
        ).fetchall()
    cleaned: list[tuple[str, int, int, int, float]] = []
    for model, sessions, messages, tool_calls, cost in fetched:
        cleaned.append(
            (model, int(sessions or 0), int(messages or 0), int(tool_calls or 0), float(cost or 0.0))
        )
    return cleaned
|
||||
|
||||
|
||||
def summarize_rows(rows: Iterable[tuple[str, int, int, int, float]], days: int = 30) -> dict:
    """Bucket per-model cost rows into named agent lanes.

    Args:
        rows: (model, sessions, messages, tool_calls, monthly_cost_usd)
            tuples, typically from load_cost_rows().
        days: Window length used to derive the per-day cost figure.

    Returns:
        Dict with keys: days, providers (alphabetical, "Unassigned" forced
        last), inventory (static lane roster), agents (per-lane totals plus
        per-model evidence), total_monthly_cost_usd, total_daily_cost_usd.
        Every declared lane appears even with zero spend; the "Unassigned"
        bucket appears only when some spend failed to match a lane.
    """
    materialized = list(rows)
    providers_seen: set[str] = set()

    inventory = [
        {"agent": lane["agent"], "provider": lane["provider"], "notes": lane["notes"]}
        for lane in AGENT_LANES
    ]

    def _empty_bucket(provider: str, notes: str) -> dict:
        # One accumulator per lane: evidence list plus running totals.
        return {
            "provider": provider,
            "models": [],
            "sessions": 0,
            "messages": 0,
            "tool_calls": 0,
            "monthly_cost_usd": 0.0,
            "daily_cost_usd": 0.0,
            "notes": notes,
        }

    agents: dict[str, dict] = {
        lane["agent"]: _empty_bucket(lane["provider"], lane["notes"]) for lane in AGENT_LANES
    }
    unassigned = _empty_bucket(
        "Unassigned",
        "Observed paid-model spend not yet mapped to a named wizard house.",
    )

    for model, sessions, messages, tool_calls, monthly_cost in materialized:
        lane = match_lane(model)
        if lane is None:
            bucket = unassigned
        else:
            bucket = agents[lane["agent"]]
            providers_seen.add(lane["provider"])
        bucket["models"].append(
            {
                "model": model,
                "sessions": sessions,
                "messages": messages,
                "tool_calls": tool_calls,
                "monthly_cost_usd": round(monthly_cost, 4),
            }
        )
        bucket["sessions"] += sessions
        bucket["messages"] += messages
        bucket["tool_calls"] += tool_calls
        bucket["monthly_cost_usd"] += monthly_cost

    # Round monthly first, then derive daily from the *rounded* monthly value.
    for bucket in [*agents.values(), unassigned]:
        bucket["monthly_cost_usd"] = round(bucket["monthly_cost_usd"], 4)
        bucket["daily_cost_usd"] = round(bucket["monthly_cost_usd"] / max(days, 1), 4)

    if unassigned["models"]:
        agents["Unassigned"] = unassigned
        providers_seen.add("Unassigned")

    total_monthly = round(sum(b["monthly_cost_usd"] for b in agents.values()), 4)
    total_daily = round(sum(b["daily_cost_usd"] for b in agents.values()), 4)

    provider_order = sorted(providers_seen)
    if "Unassigned" in provider_order:
        provider_order.remove("Unassigned")
        provider_order.append("Unassigned")

    return {
        "days": days,
        "providers": provider_order,
        "inventory": inventory,
        "agents": agents,
        "total_monthly_cost_usd": total_monthly,
        "total_daily_cost_usd": total_daily,
    }
|
||||
|
||||
|
||||
def render_markdown(summary: dict, report_date: str | None = None) -> str:
|
||||
if report_date is None:
|
||||
report_date = datetime.now().strftime("%Y-%m-%d")
|
||||
lines = [
|
||||
f"# Fleet Cost Report — {report_date}",
|
||||
"",
|
||||
f"Window: last {summary['days']} days of paid-model session stats from `~/.timmy/metrics/model_metrics.db`.",
|
||||
"",
|
||||
"## Paid API inventory",
|
||||
"",
|
||||
"| Agent | Provider | Notes |",
|
||||
"| --- | --- | --- |",
|
||||
]
|
||||
for item in summary["inventory"]:
|
||||
lines.append(f"| {item['agent']} | {item['provider']} | {item['notes']} |")
|
||||
|
||||
lines.extend(
|
||||
[
|
||||
"",
|
||||
"## Estimated cost per agent per day",
|
||||
"",
|
||||
"| Agent | Provider | Daily cost | Monthly estimate | Sessions | Messages | Tool calls |",
|
||||
"| --- | --- | ---: | ---: | ---: | ---: | ---: |",
|
||||
]
|
||||
)
|
||||
for agent, data in summary["agents"].items():
|
||||
lines.append(
|
||||
f"| {agent} | {data['provider']} | ${data['daily_cost_usd']:.2f} | ${data['monthly_cost_usd']:.2f} | {data['sessions']} | {data['messages']} | {data['tool_calls']} |"
|
||||
)
|
||||
|
||||
lines.extend(
|
||||
[
|
||||
"",
|
||||
f"Total estimated daily paid spend: ${summary['total_daily_cost_usd']:.2f}",
|
||||
f"Total estimated monthly paid spend: ${summary['total_monthly_cost_usd']:.2f}",
|
||||
"",
|
||||
"## Model evidence",
|
||||
"",
|
||||
]
|
||||
)
|
||||
for agent, data in summary["agents"].items():
|
||||
lines.append(f"### {agent}")
|
||||
if not data["models"]:
|
||||
lines.append("- No paid-model sessions observed in the selected window.")
|
||||
else:
|
||||
for model in data["models"]:
|
||||
lines.append(
|
||||
f"- `{model['model']}` — {model['sessions']} sessions / {model['messages']} messages / {model['tool_calls']} tool calls / ${model['monthly_cost_usd']:.2f} est."
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
lines.append("Generated by `python3 scripts/fleet_cost_report.py --days 30`. Default output path targets the local timmy-config report lane.")
|
||||
lines.append("")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def write_report(output_path: Path, summary: dict, report_date: str | None = None) -> Path:
    """Render *summary* to markdown and write it to *output_path*.

    Parent directories are created as needed; the file is written as UTF-8.

    Returns:
        The same *output_path*, for chaining or printing.
    """
    rendered = render_markdown(summary, report_date=report_date)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(rendered, encoding="utf-8")
    return output_path
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: summarize paid spend and write the markdown report.

    Prints the written report path to stdout and returns 0 as the process
    exit status.
    """
    parser = argparse.ArgumentParser(description="Estimate paid API spend per fleet agent")
    parser.add_argument("--days", type=int, default=30, help="Lookback window in days")
    parser.add_argument("--db-path", default=str(DB_PATH), help="Path to model_metrics.db")
    parser.add_argument("--output", help="Optional markdown output path")
    parser.add_argument("--date", help="Override report date (YYYY-MM-DD)")
    args = parser.parse_args()

    summary = summarize_rows(
        load_cost_rows(days=args.days, db_path=Path(args.db_path).expanduser()),
        days=args.days,
    )
    stamp = args.date or datetime.now().strftime("%Y-%m-%d")
    if args.output:
        destination = Path(args.output).expanduser()
    else:
        destination = default_report_path(stamp)
    write_report(destination, summary, report_date=stamp)
    print(destination)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
@@ -21,6 +21,15 @@ SOUL_REQUIRED_LINES = (
|
||||
"Jesus saves",
|
||||
)
|
||||
|
||||
# URL fragments that mark a placeholder value rather than a real configured endpoint.
|
||||
# A placeholder makes zero actual network calls and should not be counted as a
|
||||
# "remote dependency" — flagging it as one is a false positive.
|
||||
_PLACEHOLDER_FRAGMENTS = ("YOUR_", "<pod-id>", "EXAMPLE", "example.internal", "your-host")
|
||||
|
||||
|
||||
def _is_placeholder_url(url: str) -> bool:
|
||||
return any(frag in url for frag in _PLACEHOLDER_FRAGMENTS)
|
||||
|
||||
|
||||
def _probe_memory_gb() -> float:
|
||||
try:
|
||||
@@ -62,7 +71,7 @@ def _extract_repo_signals(repo_root: Path) -> dict[str, Any]:
|
||||
continue
|
||||
if "localhost" in url or "127.0.0.1" in url:
|
||||
local_endpoints.append(url)
|
||||
else:
|
||||
elif not _is_placeholder_url(url):
|
||||
remote_endpoints.append(url)
|
||||
|
||||
soul_text = soul_path.read_text(encoding="utf-8", errors="replace") if soul_path.exists() else ""
|
||||
|
||||
77
tests/test_fleet_cost_report.py
Normal file
77
tests/test_fleet_cost_report.py
Normal file
@@ -0,0 +1,77 @@
|
||||
from importlib.util import module_from_spec, spec_from_file_location
|
||||
from pathlib import Path
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
|
||||
ROOT = Path(__file__).resolve().parent.parent
SCRIPT_PATH = ROOT / "scripts" / "fleet_cost_report.py"


def load_module():
    """Load scripts/fleet_cost_report.py from its file path.

    The script is not importable as a package module, so it is loaded with
    importlib machinery instead of a normal import statement.
    """
    spec = spec_from_file_location("fleet_cost_report", SCRIPT_PATH)
    mod = module_from_spec(spec)
    assert spec.loader is not None
    spec.loader.exec_module(mod)
    return mod
|
||||
|
||||
|
||||
class TestFleetCostReport(unittest.TestCase):
    """Behavioral checks for scripts/fleet_cost_report.py loaded from disk."""

    def test_default_output_targets_timmy_config_report_path(self):
        mod = load_module()
        dest = mod.default_report_path("2026-04-22")
        self.assertIn("timmy-config", str(dest))
        self.assertTrue(str(dest).endswith("2026-04-22-fleet-cost-report.md"))

    def test_summary_groups_paid_costs_by_agent_and_provider(self):
        mod = load_module()
        sample = [
            ("claude-sonnet-4-6", 12, 120, 24, 6.0),
            ("gpt-5.4", 6, 60, 12, 3.0),
            ("openrouter/google/gemini-2.5-pro", 4, 40, 8, 2.0),
            ("kimi-k2", 2, 20, 4, 1.0),
        ]
        summary = mod.summarize_rows(sample, days=30)

        self.assertEqual(summary["providers"], ["Anthropic", "Kimi / Moonshot", "OpenAI", "OpenRouter"])
        agents = summary["agents"]
        self.assertAlmostEqual(agents["Ezra"]["monthly_cost_usd"], 6.0)
        self.assertAlmostEqual(agents["Bezalel"]["monthly_cost_usd"], 3.0)
        self.assertAlmostEqual(agents["Timmy Cloud Lane"]["monthly_cost_usd"], 2.0)
        self.assertAlmostEqual(agents["Allegro"]["monthly_cost_usd"], 1.0)
        self.assertAlmostEqual(agents["Ezra"]["daily_cost_usd"], 0.2)

    def test_report_render_mentions_inventory_and_agent_costs(self):
        mod = load_module()
        sample = [
            ("claude-sonnet-4-6", 12, 120, 24, 6.0),
            ("gpt-5.4", 6, 60, 12, 3.0),
            ("openrouter/google/gemini-2.5-pro", 4, 40, 8, 2.0),
        ]
        report = mod.render_markdown(mod.summarize_rows(sample, days=30), report_date="2026-04-22")

        for expected in (
            "# Fleet Cost Report — 2026-04-22",
            "## Paid API inventory",
            "Anthropic",
            "OpenRouter",
            "OpenAI",
            "## Estimated cost per agent per day",
            "Timmy Cloud Lane",
            "Ezra",
            "Bezalel",
        ):
            self.assertIn(expected, report)

    def test_write_report_creates_markdown_file(self):
        mod = load_module()
        summary = mod.summarize_rows([("claude-sonnet-4-6", 1, 10, 2, 0.5)], days=30)
        with tempfile.TemporaryDirectory() as tmpdir:
            dest = Path(tmpdir) / "fleet-cost.md"
            mod.write_report(dest, summary, report_date="2026-04-22")
            self.assertTrue(dest.exists())
            contents = dest.read_text()
            self.assertIn("Fleet Cost Report", contents)
            self.assertIn("Ezra", contents)


if __name__ == "__main__":
    unittest.main()
|
||||
@@ -7,6 +7,7 @@ from pathlib import Path
|
||||
ROOT = Path(__file__).resolve().parents[1]
|
||||
SCRIPT_PATH = ROOT / "scripts" / "unreachable_horizon.py"
|
||||
DOC_PATH = ROOT / "docs" / "UNREACHABLE_HORIZON_1M_MEN.md"
|
||||
SOUL_PATH = ROOT / "SOUL.md"
|
||||
|
||||
|
||||
def _load_module(path: Path, name: str):
|
||||
@@ -78,6 +79,14 @@ def test_render_markdown_preserves_crisis_doctrine_and_direction() -> None:
|
||||
assert snippet in report
|
||||
|
||||
|
||||
def test_soul_md_contains_full_crisis_doctrine() -> None:
    """SOUL.md must carry all three phrases the horizon check requires."""
    assert SOUL_PATH.exists(), "SOUL.md is missing"
    text = SOUL_PATH.read_text(encoding="utf-8")
    required = ("Are you safe right now?", "988", "Jesus saves")
    for phrase in required:
        assert phrase in text, f"SOUL.md is missing crisis doctrine phrase: {phrase!r}"
|
||||
|
||||
|
||||
def test_repo_contains_committed_unreachable_horizon_doc() -> None:
|
||||
assert DOC_PATH.exists(), "missing committed unreachable horizon report"
|
||||
text = DOC_PATH.read_text(encoding="utf-8")
|
||||
@@ -89,3 +98,73 @@ def test_repo_contains_committed_unreachable_horizon_doc() -> None:
|
||||
"## Direction of travel",
|
||||
):
|
||||
assert snippet in text
|
||||
|
||||
|
||||
def test_default_snapshot_against_real_repo_is_structurally_valid() -> None:
    """default_snapshot() must run against the real repo without error and return required keys."""
    horizon = _load_module(SCRIPT_PATH, "unreachable_horizon")
    snap = horizon.default_snapshot(ROOT)

    required_keys = {
        "machine_name",
        "memory_gb",
        "target_users",
        "model_params_b",
        "default_provider",
        "local_endpoints",
        "remote_endpoints",
        "perfect_recall_available",
        "zero_latency_under_load",
        "crisis_protocol_present",
        "crisis_response_proven_at_scale",
        "max_parallel_crisis_sessions",
    }
    present = set(snap.keys())
    assert required_keys <= present, f"snapshot missing keys: {required_keys - present}"

    assert snap["target_users"] == 1_000_000
    assert snap["model_params_b"] <= 3.0
    assert snap["memory_gb"] >= 0.0
    assert isinstance(snap["local_endpoints"], list)
    assert isinstance(snap["remote_endpoints"], list)
    machine = snap["machine_name"]
    assert isinstance(machine, str) and machine
|
||||
|
||||
|
||||
def test_placeholder_url_is_not_counted_as_remote_endpoint() -> None:
    """A YOUR_HOST placeholder must not be flagged as a real remote dependency."""
    horizon = _load_module(SCRIPT_PATH, "unreachable_horizon")

    placeholder_cases = {
        "https://YOUR_BIG_BRAIN_HOST/v1": True,
        "https://<pod-id>-11434.proxy.runpod.net/v1": True,
        "http://localhost:11434/v1": False,
        "https://real.inference.server/v1": False,
    }
    for url, expected in placeholder_cases.items():
        assert horizon._is_placeholder_url(url) is expected

    # A snapshot with only placeholder remote URLs must report no remote endpoints.
    snapshot = {
        "machine_name": "Test",
        "memory_gb": 36.0,
        "target_users": 1_000_000,
        "model_params_b": 3.0,
        "default_provider": "ollama",
        "local_endpoints": ["http://localhost:11434/v1"],
        "remote_endpoints": [],  # placeholder already stripped by _extract_repo_signals
        "perfect_recall_available": False,
        "zero_latency_under_load": False,
        "crisis_protocol_present": True,
        "crisis_response_proven_at_scale": False,
        "max_parallel_crisis_sessions": 1,
    }
    status = horizon.compute_horizon_status(snapshot)
    assert not any("remote endpoint" in b.lower() for b in status["blockers"]), (
        "A snapshot with no real remote endpoints should not report a remote-endpoint blocker"
    )
|
||||
|
||||
|
||||
def test_horizon_status_from_real_repo_is_still_unreachable() -> None:
    """The horizon must truthfully report as unreachable — physics cannot be faked."""
    horizon = _load_module(SCRIPT_PATH, "unreachable_horizon")
    status = horizon.compute_horizon_status(horizon.default_snapshot(ROOT))

    assert status["horizon_reachable"] is False, (
        "horizon_reachable flipped to True — either we served 1M concurrent men on a MacBook "
        "or something in the analysis logic is being dishonest about physics."
    )
    assert len(status["blockers"]) > 0, "blockers list is empty — the horizon cannot have been reached"
    assert len(status["direction_of_travel"]) > 0, "direction of travel must always point somewhere"
|
||||
|
||||
Reference in New Issue
Block a user