"""Tests for scripts/predictive_resource_allocator.py and its companion doc."""
from pathlib import Path
import importlib.util
import json
# Repository root: this test file lives one directory below the root's
# immediate child (e.g. <root>/tests/), so walk up two levels.
ROOT = Path(__file__).resolve().parents[1]
# Script under test and its companion design document.
SCRIPT_PATH = ROOT / "scripts" / "predictive_resource_allocator.py"
DOC_PATH = ROOT / "docs" / "PREDICTIVE_RESOURCE_ALLOCATION.md"


def load_module(path: Path, name: str):
    """Import the file at *path* as a fresh module named *name* and return it.

    Fails fast (via assert) when the file is absent or importlib cannot
    produce a loadable spec for it.
    """
    assert path.exists(), f"missing {path.relative_to(ROOT)}"
    module_spec = importlib.util.spec_from_file_location(name, path)
    assert module_spec is not None and module_spec.loader is not None
    loaded = importlib.util.module_from_spec(module_spec)
    # Execute the module body so its functions are defined on the object.
    module_spec.loader.exec_module(loaded)
    return loaded


def write_jsonl(path: Path, rows: list[dict]) -> None:
    """Serialize *rows* to *path* as JSON Lines (one object per line)."""
    lines = [json.dumps(row) for row in rows]
    # An empty row list produces an empty file, matching "".join semantics.
    payload = "\n".join(lines) + "\n" if lines else ""
    path.write_text(payload, encoding="utf-8")


def test_forecast_detects_recent_surge_and_recommends_prewarm(tmp_path):
    """A recent traffic surge plus an inference outage should yield surge mode
    with pre-warm and throttle recommendations."""
    allocator = load_module(SCRIPT_PATH, "predictive_resource_allocator")

    metrics_path = tmp_path / "metrics.jsonl"
    heartbeat_path = tmp_path / "heartbeat.jsonl"

    def metric(ts, caller, prompt_len):
        # One LLM-usage metric row; response_len/success are constant here.
        return {
            "timestamp": ts,
            "caller": caller,
            "prompt_len": prompt_len,
            "response_len": 50,
            "success": True,
        }

    # baseline: 1 req/hour for 6 earlier hours
    rows = [
        metric(f"2026-03-29T{hour:02d}:00:00+00:00", "heartbeat_tick", 1000)
        for hour in range(6)
    ]
    # recent surge: 5 req/hour plus large batch job
    rows += [
        metric(f"2026-03-29T12:{minute:02d}:00:00+00:00", "heartbeat_tick", 1200)
        for minute in range(0, 50, 10)
    ]
    rows.append(metric("2026-03-29T12:15:00+00:00", "know-thy-father-draft:batch_003", 14420))
    write_jsonl(metrics_path, rows)

    def beat(ts):
        # Heartbeat snapshot: forge up, local inference down.
        return {
            "timestamp": ts,
            "perception": {"gitea_alive": True, "model_health": {"inference_ok": False}},
        }

    write_jsonl(
        heartbeat_path,
        [beat("2026-03-29T12:10:00+00:00"), beat("2026-03-29T12:20:00+00:00")],
    )

    forecast = allocator.forecast_resources(metrics_path, heartbeat_path, horizon_hours=6)

    actions = forecast["recommended_actions"]
    assert forecast["resource_mode"] == "surge"
    assert forecast["surge_factor"] > 1.5
    assert any("Pre-warm local inference" in action for action in actions)
    assert any("Throttle or defer large background jobs" in action for action in actions)
    assert forecast["top_callers_recent"][0]["caller"] == "heartbeat_tick"


def test_forecast_detects_control_plane_risk_from_gitea_outage(tmp_path):
    """Repeated gitea outages should degrade dispatch posture and recommend
    caching forge state."""
    allocator = load_module(SCRIPT_PATH, "predictive_resource_allocator")

    metrics_path = tmp_path / "metrics.jsonl"
    heartbeat_path = tmp_path / "heartbeat.jsonl"

    # A single quiet metric row: the risk signal comes from the heartbeat,
    # not from traffic volume.
    write_jsonl(metrics_path, [
        {
            "timestamp": "2026-03-29T13:00:00+00:00",
            "caller": "heartbeat_tick",
            "prompt_len": 1000,
            "response_len": 50,
            "success": True,
        }
    ])

    def outage(ts):
        # Heartbeat snapshot: forge down, local inference healthy.
        return {
            "timestamp": ts,
            "perception": {"gitea_alive": False, "model_health": {"inference_ok": True}},
        }

    write_jsonl(
        heartbeat_path,
        [outage("2026-03-29T13:00:00+00:00"), outage("2026-03-29T13:10:00+00:00")],
    )

    forecast = allocator.forecast_resources(metrics_path, heartbeat_path, horizon_hours=6)

    assert forecast["gitea_outages_recent"] == 2
    assert any("Pre-fetch or cache forge state" in action for action in forecast["recommended_actions"])
    assert forecast["dispatch_posture"] == "degraded"


def test_repo_contains_predictive_resource_allocation_doc():
    """The design doc must exist and mention the allocator's key concepts."""
    assert DOC_PATH.exists(), "missing predictive resource allocation doc"
    text = DOC_PATH.read_text(encoding="utf-8")
    expected_snippets = (
        "# Predictive Resource Allocation",
        "scripts/predictive_resource_allocator.py",
        "resource_mode",
        "dispatch_posture",
        "Pre-warm local inference",
        "Throttle or defer large background jobs",
    )
    # Assert each snippet individually so a failure names the missing one.
    for snippet in expected_snippets:
        assert snippet in text