1
0

Compare commits

...

9 Commits

11 changed files with 2312 additions and 46 deletions

View File

@@ -71,6 +71,50 @@ class GitHand:
return True
return False
async def _exec_subprocess(
    self, args: str, timeout: int,
) -> tuple[bytes, bytes, int]:
    """Run git as a subprocess, return (stdout, stderr, returncode).

    Args:
        args: Space-separated git arguments (split naively on whitespace,
            so quoted arguments containing spaces are not supported).
        timeout: Maximum seconds to wait for the process to finish.

    Raises:
        asyncio.TimeoutError: if the process exceeds *timeout* seconds
            (on Python 3.11+ this is the builtin ``TimeoutError``).
        FileNotFoundError: if the ``git`` binary is not on PATH.
    """
    proc = await asyncio.create_subprocess_exec(
        "git",
        *args.split(),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        cwd=self._repo_dir,
    )
    try:
        stdout, stderr = await asyncio.wait_for(
            proc.communicate(), timeout=timeout,
        )
    except asyncio.TimeoutError:
        # Catch asyncio.TimeoutError rather than the bare builtin so the
        # kill/cleanup path also runs on Python < 3.11, where wait_for
        # raises the asyncio-specific exception, not builtin TimeoutError.
        # On 3.11+ the two names are the same class, so behavior is
        # unchanged there.
        proc.kill()
        await proc.wait()
        raise
    # After communicate() the process has exited, so returncode should be
    # set; the `or 0` guard keeps the declared int return type regardless.
    return stdout, stderr, proc.returncode or 0
@staticmethod
def _parse_output(
    command: str,
    stdout_bytes: bytes,
    stderr_bytes: bytes,
    returncode: int | None,
    latency_ms: float,
) -> GitResult:
    """Turn raw subprocess output into a GitResult.

    A ``None`` return code is treated as exit code 0; stderr is only
    surfaced as an error when the command actually failed.
    """
    code = 0 if returncode is None else returncode
    ok = code == 0
    out_text = stdout_bytes.decode("utf-8", errors="replace").strip()
    err_text = stderr_bytes.decode("utf-8", errors="replace").strip()
    return GitResult(
        operation=command,
        success=ok,
        output=out_text,
        error="" if ok else err_text,
        latency_ms=latency_ms,
    )
async def run(
self,
args: str,
@@ -88,14 +132,15 @@ class GitHand:
GitResult with output or error details.
"""
start = time.time()
command = f"git {args}"
# Gate destructive operations
if self._is_destructive(args) and not allow_destructive:
return GitResult(
operation=f"git {args}",
operation=command,
success=False,
error=(
f"Destructive operation blocked: 'git {args}'. "
f"Destructive operation blocked: '{command}'. "
"Set allow_destructive=True to override."
),
requires_confirmation=True,
@@ -103,46 +148,20 @@ class GitHand:
)
effective_timeout = timeout or self._timeout
command = f"git {args}"
try:
proc = await asyncio.create_subprocess_exec(
"git",
*args.split(),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
cwd=self._repo_dir,
stdout_bytes, stderr_bytes, returncode = await self._exec_subprocess(
args, effective_timeout,
)
try:
stdout_bytes, stderr_bytes = await asyncio.wait_for(
proc.communicate(), timeout=effective_timeout
)
except TimeoutError:
proc.kill()
await proc.wait()
latency = (time.time() - start) * 1000
logger.warning("Git command timed out after %ds: %s", effective_timeout, command)
return GitResult(
operation=command,
success=False,
error=f"Command timed out after {effective_timeout}s",
latency_ms=latency,
)
except TimeoutError:
latency = (time.time() - start) * 1000
exit_code = proc.returncode or 0
stdout = stdout_bytes.decode("utf-8", errors="replace").strip()
stderr = stderr_bytes.decode("utf-8", errors="replace").strip()
logger.warning("Git command timed out after %ds: %s", effective_timeout, command)
return GitResult(
operation=command,
success=exit_code == 0,
output=stdout,
error=stderr if exit_code != 0 else "",
success=False,
error=f"Command timed out after {effective_timeout}s",
latency_ms=latency,
)
except FileNotFoundError:
latency = (time.time() - start) * 1000
logger.warning("git binary not found")
@@ -162,6 +181,12 @@ class GitHand:
latency_ms=latency,
)
return self._parse_output(
command, stdout_bytes, stderr_bytes,
returncode=returncode,
latency_ms=(time.time() - start) * 1000,
)
# ── Convenience wrappers ─────────────────────────────────────────────────
async def status(self) -> GitResult:

View File

@@ -86,6 +86,19 @@
<p>Your task has been added to the queue. Timmy will review it shortly.</p>
<button type="button" id="submit-another-btn" class="btn-primary">Submit Another</button>
</div>
<div id="submit-job-queued" class="submit-job-queued hidden">
<div class="queued-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
<circle cx="12" cy="12" r="10"></circle>
<polyline points="12 6 12 12 16 14"></polyline>
</svg>
</div>
<h3>Job Queued</h3>
<p>The server is unreachable right now. Your job has been saved locally and will be submitted automatically when the connection is restored.</p>
<div id="queue-count-display" class="queue-count-display"></div>
<button type="button" id="submit-another-queued-btn" class="btn-primary">Submit Another</button>
</div>
</div>
<div id="submit-job-backdrop" class="submit-job-backdrop"></div>
</div>
@@ -142,6 +155,7 @@
import { createFamiliar } from "./familiar.js";
import { setupControls } from "./controls.js";
import { StateReader } from "./state.js";
import { messageQueue } from "./queue.js";
// --- Renderer ---
const renderer = new THREE.WebGLRenderer({ antialias: true });
@@ -182,8 +196,60 @@
moodEl.textContent = state.timmyState.mood;
}
});
// Replay queued jobs whenever the server comes back online.
stateReader.onConnectionChange(async (online) => {
  if (!online) return;
  const pending = messageQueue.getPending();
  if (pending.length === 0) return;
  console.log(`[queue] Online — replaying ${pending.length} queued job(s)`);
  // Deliver sequentially (await in the loop) so a connectivity failure
  // can abort the whole replay instead of firing off doomed requests.
  for (const item of pending) {
    try {
      const response = await fetch("/api/tasks", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(item.payload),
      });
      if (response.ok) {
        messageQueue.markDelivered(item.id);
        console.log(`[queue] Delivered queued job ${item.id}`);
      } else {
        // An HTTP error means the server saw the job and rejected it —
        // mark FAILED so it is not retried on the next reconnect.
        messageQueue.markFailed(item.id);
        console.warn(`[queue] Failed to deliver job ${item.id}: ${response.status}`);
      }
    } catch (err) {
      // Still offline — leave as QUEUED, will retry next cycle.
      console.warn(`[queue] Replay aborted (still offline): ${err}`);
      break;
    }
  }
  // Drop expired FAILED items, then refresh the badge to reflect what
  // remains queued after this replay pass.
  messageQueue.prune();
  _updateQueueBadge();
});
stateReader.connect();
// --- Queue badge (top-right indicator for pending jobs) ---
// Render (or remove) the small pill showing how many jobs are queued offline.
function _updateQueueBadge() {
  const pending = messageQueue.pendingCount();
  const existing = document.getElementById("queue-badge");

  // Nothing queued: make sure no stale badge lingers.
  if (pending === 0) {
    if (existing) existing.remove();
    return;
  }

  let badge = existing;
  if (!badge) {
    badge = document.createElement("div");
    badge.id = "queue-badge";
    badge.className = "queue-badge";
    badge.title = "Jobs queued offline — will submit on reconnect";
    document.getElementById("overlay").appendChild(badge);
  }
  badge.textContent = `${pending} queued`;
}
// Show badge on load if there are already queued messages.
messageQueue.prune();
_updateQueueBadge();
// --- About Panel ---
const infoBtn = document.getElementById("info-btn");
const aboutPanel = document.getElementById("about-panel");
@@ -228,6 +294,9 @@
const descWarning = document.getElementById("desc-warning");
const submitJobSuccess = document.getElementById("submit-job-success");
const submitAnotherBtn = document.getElementById("submit-another-btn");
const submitJobQueued = document.getElementById("submit-job-queued");
const submitAnotherQueuedBtn = document.getElementById("submit-another-queued-btn");
const queueCountDisplay = document.getElementById("queue-count-display");
// Constants
const MAX_TITLE_LENGTH = 200;
@@ -255,6 +324,7 @@
submitJobForm.reset();
submitJobForm.classList.remove("hidden");
submitJobSuccess.classList.add("hidden");
submitJobQueued.classList.add("hidden");
updateCharCounts();
clearErrors();
validateForm();
@@ -363,6 +433,7 @@
submitJobBackdrop.addEventListener("click", closeSubmitJobModal);
cancelJobBtn.addEventListener("click", closeSubmitJobModal);
submitAnotherBtn.addEventListener("click", resetForm);
submitAnotherQueuedBtn.addEventListener("click", resetForm);
// Input event listeners for real-time validation
jobTitle.addEventListener("input", () => {
@@ -420,9 +491,10 @@
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify(formData)
body: JSON.stringify(formData),
signal: AbortSignal.timeout(8000),
});
if (response.ok) {
// Show success state
submitJobForm.classList.add("hidden");
@@ -433,9 +505,14 @@
descError.classList.add("visible");
}
} catch (error) {
// For demo/development, show success even if API fails
// Server unreachable — persist to localStorage queue.
messageQueue.enqueue(formData);
const count = messageQueue.pendingCount();
submitJobForm.classList.add("hidden");
submitJobSuccess.classList.remove("hidden");
submitJobQueued.classList.remove("hidden");
queueCountDisplay.textContent =
count > 1 ? `${count} jobs queued` : "1 job queued";
_updateQueueBadge();
} finally {
submitJobSubmit.disabled = false;
submitJobSubmit.textContent = "Submit Job";

90
static/world/queue.js Normal file
View File

@@ -0,0 +1,90 @@
/**
* Offline message queue for Workshop panel.
*
* Persists undelivered job submissions to localStorage so they survive
* page refreshes and are replayed when the server comes back online.
*/
// localStorage key under which the queue array is persisted.
const _QUEUE_KEY = "timmy_workshop_queue";
// Age threshold used by prune() when deciding which items to drop.
const _MAX_AGE_MS = 24 * 60 * 60 * 1000; // 24 hours — auto-expire old items

// Lifecycle states for a queued job submission.
export const STATUS = {
  QUEUED: "queued", // waiting to be (re)sent to the server
  DELIVERED: "delivered", // sent successfully
  FAILED: "failed", // server rejected it; kept for visibility until pruned
};
/** Read the persisted queue array; any storage or parse error yields []. */
function _load() {
  let raw;
  try {
    raw = localStorage.getItem(_QUEUE_KEY);
    if (!raw) return [];
    return JSON.parse(raw);
  } catch {
    return [];
  }
}
/** Persist the queue array; a storage failure is a silent no-op. */
function _save(items) {
  const serialized = JSON.stringify(items);
  try {
    localStorage.setItem(_QUEUE_KEY, serialized);
  } catch {
    /* localStorage unavailable — degrade silently */
  }
}
/** Build a unique-ish message id from the current time plus random suffix. */
function _uid() {
  const suffix = Math.random().toString(36).slice(2, 8);
  return `msg_${Date.now()}_${suffix}`;
}
/** LocalStorage-backed message queue for Workshop job submissions. */
export const messageQueue = {
  /** Add a payload. Returns the created item (with id and status). */
  enqueue(payload) {
    const entry = {
      id: _uid(),
      payload,
      queuedAt: new Date().toISOString(),
      status: STATUS.QUEUED,
    };
    const all = _load();
    all.push(entry);
    _save(all);
    return entry;
  },

  /** Mark a message as delivered and remove it from storage. */
  markDelivered(id) {
    const remaining = _load().filter((entry) => entry.id !== id);
    _save(remaining);
  },

  /** Mark a message as permanently failed (kept for 24h for visibility). */
  markFailed(id) {
    const updated = _load().map((entry) => {
      if (entry.id !== id) return entry;
      return { ...entry, status: STATUS.FAILED };
    });
    _save(updated);
  },

  /** All messages waiting to be delivered. */
  getPending() {
    return _load().filter((entry) => entry.status === STATUS.QUEUED);
  },

  /** Total queued (QUEUED status only) count. */
  pendingCount() {
    return this.getPending().length;
  },

  /** Drop expired failed items (> 24h old). */
  prune() {
    const cutoff = Date.now() - _MAX_AGE_MS;
    const shouldKeep = (entry) => {
      if (entry.status === STATUS.QUEUED) return true;
      if (entry.status !== STATUS.FAILED) return false;
      return new Date(entry.queuedAt).getTime() > cutoff;
    };
    _save(_load().filter(shouldKeep));
  },
};

View File

@@ -3,6 +3,10 @@
*
* Provides Timmy's current state to the scene. In Phase 2 this is a
* static default; the WebSocket path is stubbed for future use.
*
* Also manages connection health monitoring: pings /api/matrix/health
* every 30 seconds and notifies listeners when online/offline state
* changes so the Workshop can replay any queued messages.
*/
const DEFAULTS = {
@@ -20,11 +24,19 @@ const DEFAULTS = {
version: 1,
};
const _HEALTH_URL = "/api/matrix/health";
const _PING_INTERVAL_MS = 30_000;
const _WS_RECONNECT_DELAY_MS = 5_000;
export class StateReader {
constructor() {
this.state = { ...DEFAULTS };
this.listeners = [];
this.connectionListeners = [];
this._ws = null;
this._online = false;
this._pingTimer = null;
this._reconnectTimer = null;
}
/** Subscribe to state changes. */
@@ -32,7 +44,12 @@ export class StateReader {
this.listeners.push(fn);
}
/** Notify all listeners. */
/** Subscribe to online/offline transitions. Called with (isOnline: bool). */
onConnectionChange(fn) {
this.connectionListeners.push(fn);
}
/** Notify all state listeners. */
_notify() {
for (const fn of this.listeners) {
try {
@@ -43,8 +60,48 @@ export class StateReader {
}
}
/** Try to connect to the world WebSocket for live updates. */
connect() {
/** Fire connection listeners only when state actually changes. */
_notifyConnection(online) {
if (online === this._online) return;
this._online = online;
for (const fn of this.connectionListeners) {
try {
fn(online);
} catch (e) {
console.warn("Connection listener error:", e);
}
}
}
/** Ping the health endpoint once and update connection state. */
async _ping() {
try {
const r = await fetch(_HEALTH_URL, {
signal: AbortSignal.timeout(5000),
});
this._notifyConnection(r.ok);
} catch {
this._notifyConnection(false);
}
}
/** Start 30-second health-check loop (idempotent). */
_startHealthCheck() {
if (this._pingTimer) return;
this._pingTimer = setInterval(() => this._ping(), _PING_INTERVAL_MS);
}
/** Schedule a WebSocket reconnect attempt after a delay (idempotent). */
_scheduleReconnect() {
if (this._reconnectTimer) return;
this._reconnectTimer = setTimeout(() => {
this._reconnectTimer = null;
this._connectWS();
}, _WS_RECONNECT_DELAY_MS);
}
/** Open (or re-open) the WebSocket connection. */
_connectWS() {
const proto = location.protocol === "https:" ? "wss:" : "ws:";
const url = `${proto}//${location.host}/api/world/ws`;
try {
@@ -52,10 +109,13 @@ export class StateReader {
this._ws.onopen = () => {
const dot = document.getElementById("connection-dot");
if (dot) dot.classList.add("connected");
this._notifyConnection(true);
};
this._ws.onclose = () => {
const dot = document.getElementById("connection-dot");
if (dot) dot.classList.remove("connected");
this._notifyConnection(false);
this._scheduleReconnect();
};
this._ws.onmessage = (ev) => {
try {
@@ -75,9 +135,18 @@ export class StateReader {
};
} catch (e) {
console.warn("WebSocket unavailable — using static state");
this._scheduleReconnect();
}
}
/** Connect to the world WebSocket and start health-check polling. */
connect() {
this._connectWS();
this._startHealthCheck();
// Immediate ping so connection status is known before the first interval.
this._ping();
}
/** Current mood string. */
get mood() {
return this.state.timmyState.mood;
@@ -92,4 +161,9 @@ export class StateReader {
get energy() {
return this.state.timmyState.energy;
}
/** Whether the server is currently reachable. */
get isOnline() {
return this._online;
}
}

View File

@@ -604,6 +604,68 @@ canvas {
opacity: 1;
}
/* Queued State (offline buffer) */
.submit-job-queued {
text-align: center;
padding: 32px 16px;
}
.submit-job-queued.hidden {
display: none;
}
.queued-icon {
width: 64px;
height: 64px;
margin: 0 auto 20px;
color: #ffaa33;
}
.queued-icon svg {
width: 100%;
height: 100%;
}
.submit-job-queued h3 {
font-size: 20px;
color: #ffaa33;
margin: 0 0 12px 0;
}
.submit-job-queued p {
font-size: 14px;
color: #888;
margin: 0 0 16px 0;
line-height: 1.5;
}
.queue-count-display {
font-size: 12px;
color: #ffaa33;
margin-bottom: 24px;
opacity: 0.8;
}
/* Queue badge — shown in overlay corner when offline jobs are pending */
.queue-badge {
position: absolute;
bottom: 16px;
right: 16px;
padding: 4px 10px;
background: rgba(10, 10, 20, 0.85);
border: 1px solid rgba(255, 170, 51, 0.6);
border-radius: 12px;
color: #ffaa33;
font-size: 11px;
pointer-events: none;
animation: queue-pulse 2s ease-in-out infinite;
}
@keyframes queue-pulse {
0%, 100% { opacity: 0.8; }
50% { opacity: 1; }
}
/* Mobile adjustments */
@media (max-width: 480px) {
.about-panel-content {

View File

@@ -0,0 +1,530 @@
"""Unit tests for dashboard/routes/daily_run.py."""
from __future__ import annotations
import json
import os
from datetime import UTC, datetime, timedelta
from pathlib import Path
from unittest.mock import MagicMock, patch
from urllib.error import HTTPError, URLError
import pytest
from dashboard.routes.daily_run import (
DEFAULT_CONFIG,
LAYER_LABELS,
DailyRunMetrics,
GiteaClient,
LayerMetrics,
_extract_layer,
_fetch_layer_metrics,
_get_metrics,
_get_token,
_load_config,
_load_cycle_data,
)
# ---------------------------------------------------------------------------
# _load_config
# ---------------------------------------------------------------------------
def test_load_config_returns_defaults():
with patch("dashboard.routes.daily_run.CONFIG_PATH") as mock_path:
mock_path.exists.return_value = False
config = _load_config()
assert config["gitea_api"] == DEFAULT_CONFIG["gitea_api"]
assert config["repo_slug"] == DEFAULT_CONFIG["repo_slug"]
def test_load_config_merges_file_orchestrator_section(tmp_path):
config_file = tmp_path / "daily_run.json"
config_file.write_text(
json.dumps({"orchestrator": {"repo_slug": "custom/repo", "gitea_api": "http://custom:3000/api/v1"}})
)
with patch("dashboard.routes.daily_run.CONFIG_PATH", config_file):
config = _load_config()
assert config["repo_slug"] == "custom/repo"
assert config["gitea_api"] == "http://custom:3000/api/v1"
def test_load_config_ignores_invalid_json(tmp_path):
config_file = tmp_path / "daily_run.json"
config_file.write_text("not valid json{{")
with patch("dashboard.routes.daily_run.CONFIG_PATH", config_file):
config = _load_config()
assert config["repo_slug"] == DEFAULT_CONFIG["repo_slug"]
def test_load_config_env_overrides(monkeypatch):
monkeypatch.setenv("TIMMY_GITEA_API", "http://envapi:3000/api/v1")
monkeypatch.setenv("TIMMY_REPO_SLUG", "env/repo")
monkeypatch.setenv("TIMMY_GITEA_TOKEN", "env-token-123")
with patch("dashboard.routes.daily_run.CONFIG_PATH") as mock_path:
mock_path.exists.return_value = False
config = _load_config()
assert config["gitea_api"] == "http://envapi:3000/api/v1"
assert config["repo_slug"] == "env/repo"
assert config["token"] == "env-token-123"
def test_load_config_no_env_overrides_without_vars(monkeypatch):
monkeypatch.delenv("TIMMY_GITEA_API", raising=False)
monkeypatch.delenv("TIMMY_REPO_SLUG", raising=False)
monkeypatch.delenv("TIMMY_GITEA_TOKEN", raising=False)
with patch("dashboard.routes.daily_run.CONFIG_PATH") as mock_path:
mock_path.exists.return_value = False
config = _load_config()
assert "token" not in config
# ---------------------------------------------------------------------------
# _get_token
# ---------------------------------------------------------------------------
def test_get_token_from_config_dict():
config = {"token": "direct-token", "token_file": "~/.hermes/gitea_token"}
assert _get_token(config) == "direct-token"
def test_get_token_from_file(tmp_path):
token_file = tmp_path / "token.txt"
token_file.write_text(" file-token \n")
config = {"token_file": str(token_file)}
assert _get_token(config) == "file-token"
def test_get_token_returns_none_when_file_missing(tmp_path):
config = {"token_file": str(tmp_path / "nonexistent_token")}
assert _get_token(config) is None
# ---------------------------------------------------------------------------
# GiteaClient
# ---------------------------------------------------------------------------
def _make_client(**kwargs) -> GiteaClient:
    """Build a GiteaClient over DEFAULT_CONFIG (overridable) with a test token."""
    merged = dict(DEFAULT_CONFIG)
    merged.update(kwargs)
    return GiteaClient(merged, token="test-token")
def test_gitea_client_headers_include_auth():
client = _make_client()
headers = client._headers()
assert headers["Authorization"] == "token test-token"
assert headers["Accept"] == "application/json"
def test_gitea_client_headers_no_token():
config = {**DEFAULT_CONFIG}
client = GiteaClient(config, token=None)
headers = client._headers()
assert "Authorization" not in headers
def test_gitea_client_api_url():
client = _make_client()
url = client._api_url("issues")
assert url == f"{DEFAULT_CONFIG['gitea_api']}/repos/{DEFAULT_CONFIG['repo_slug']}/issues"
def test_gitea_client_api_url_strips_trailing_slash():
config = {**DEFAULT_CONFIG, "gitea_api": "http://localhost:3000/api/v1/"}
client = GiteaClient(config, token=None)
url = client._api_url("issues")
assert "//" not in url.replace("http://", "")
def test_gitea_client_is_available_true():
client = _make_client()
mock_resp = MagicMock()
mock_resp.status = 200
mock_resp.__enter__ = lambda s: mock_resp
mock_resp.__exit__ = MagicMock(return_value=False)
with patch("dashboard.routes.daily_run.urlopen", return_value=mock_resp):
assert client.is_available() is True
def test_gitea_client_is_available_cached():
client = _make_client()
client._available = True
# Should not call urlopen at all
with patch("dashboard.routes.daily_run.urlopen") as mock_urlopen:
assert client.is_available() is True
mock_urlopen.assert_not_called()
def test_gitea_client_is_available_false_on_url_error():
client = _make_client()
with patch("dashboard.routes.daily_run.urlopen", side_effect=URLError("refused")):
assert client.is_available() is False
def test_gitea_client_is_available_false_on_timeout():
client = _make_client()
with patch("dashboard.routes.daily_run.urlopen", side_effect=TimeoutError()):
assert client.is_available() is False
def test_gitea_client_get_paginated_single_page():
client = _make_client()
mock_resp = MagicMock()
mock_resp.read.return_value = json.dumps([{"id": 1}, {"id": 2}]).encode()
mock_resp.__enter__ = lambda s: mock_resp
mock_resp.__exit__ = MagicMock(return_value=False)
with patch("dashboard.routes.daily_run.urlopen", return_value=mock_resp):
result = client.get_paginated("issues")
assert len(result) == 2
assert result[0]["id"] == 1
def test_gitea_client_get_paginated_empty():
client = _make_client()
mock_resp = MagicMock()
mock_resp.read.return_value = b"[]"
mock_resp.__enter__ = lambda s: mock_resp
mock_resp.__exit__ = MagicMock(return_value=False)
with patch("dashboard.routes.daily_run.urlopen", return_value=mock_resp):
result = client.get_paginated("issues")
assert result == []
# ---------------------------------------------------------------------------
# LayerMetrics.trend
# ---------------------------------------------------------------------------
def test_layer_metrics_trend_no_previous_no_current():
lm = LayerMetrics(name="triage", label="layer:triage", current_count=0, previous_count=0)
assert lm.trend == ""
def test_layer_metrics_trend_no_previous_with_current():
lm = LayerMetrics(name="triage", label="layer:triage", current_count=5, previous_count=0)
assert lm.trend == ""
def test_layer_metrics_trend_big_increase():
    """A large jump over the previous count renders the double up-arrow."""
    metrics = LayerMetrics(
        name="triage",
        label="layer:triage",
        current_count=130,
        previous_count=100,
    )
    assert metrics.trend == "↑↑"
def test_layer_metrics_trend_small_increase():
lm = LayerMetrics(name="triage", label="layer:triage", current_count=108, previous_count=100)
assert lm.trend == ""
def test_layer_metrics_trend_stable():
lm = LayerMetrics(name="triage", label="layer:triage", current_count=100, previous_count=100)
assert lm.trend == ""
def test_layer_metrics_trend_small_decrease():
lm = LayerMetrics(name="triage", label="layer:triage", current_count=92, previous_count=100)
assert lm.trend == ""
def test_layer_metrics_trend_big_decrease():
lm = LayerMetrics(name="triage", label="layer:triage", current_count=70, previous_count=100)
assert lm.trend == "↓↓"
def test_layer_metrics_trend_color_up():
lm = LayerMetrics(name="triage", label="layer:triage", current_count=200, previous_count=100)
assert lm.trend_color == "var(--green)"
def test_layer_metrics_trend_color_down():
lm = LayerMetrics(name="triage", label="layer:triage", current_count=50, previous_count=100)
assert lm.trend_color == "var(--amber)"
def test_layer_metrics_trend_color_stable():
lm = LayerMetrics(name="triage", label="layer:triage", current_count=100, previous_count=100)
assert lm.trend_color == "var(--text-dim)"
# ---------------------------------------------------------------------------
# DailyRunMetrics.sessions_trend
# ---------------------------------------------------------------------------
def _make_daily_metrics(**kwargs) -> DailyRunMetrics:
    """Construct a DailyRunMetrics with sensible defaults, overridable per test."""
    params = {
        "sessions_completed": 10,
        "sessions_previous": 8,
        "layers": [],
        "total_touched_current": 20,
        "total_touched_previous": 15,
        "lookback_days": 7,
        "generated_at": datetime.now(UTC).isoformat(),
    }
    params.update(kwargs)
    return DailyRunMetrics(**params)
def test_daily_metrics_sessions_trend_big_increase():
m = _make_daily_metrics(sessions_completed=130, sessions_previous=100)
assert m.sessions_trend == "↑↑"
def test_daily_metrics_sessions_trend_stable():
m = _make_daily_metrics(sessions_completed=100, sessions_previous=100)
assert m.sessions_trend == ""
def test_daily_metrics_sessions_trend_no_previous_zero_completed():
m = _make_daily_metrics(sessions_completed=0, sessions_previous=0)
assert m.sessions_trend == ""
def test_daily_metrics_sessions_trend_no_previous_with_completed():
m = _make_daily_metrics(sessions_completed=5, sessions_previous=0)
assert m.sessions_trend == ""
def test_daily_metrics_sessions_trend_color_green():
m = _make_daily_metrics(sessions_completed=200, sessions_previous=100)
assert m.sessions_trend_color == "var(--green)"
def test_daily_metrics_sessions_trend_color_amber():
m = _make_daily_metrics(sessions_completed=50, sessions_previous=100)
assert m.sessions_trend_color == "var(--amber)"
# ---------------------------------------------------------------------------
# _extract_layer
# ---------------------------------------------------------------------------
def test_extract_layer_finds_layer_label():
labels = [{"name": "bug"}, {"name": "layer:triage"}, {"name": "urgent"}]
assert _extract_layer(labels) == "triage"
def test_extract_layer_returns_none_when_no_layer():
labels = [{"name": "bug"}, {"name": "feature"}]
assert _extract_layer(labels) is None
def test_extract_layer_empty_labels():
assert _extract_layer([]) is None
def test_extract_layer_first_match_wins():
labels = [{"name": "layer:micro-fix"}, {"name": "layer:tests"}]
assert _extract_layer(labels) == "micro-fix"
# ---------------------------------------------------------------------------
# _load_cycle_data
# ---------------------------------------------------------------------------
def test_load_cycle_data_missing_file(tmp_path):
with patch("dashboard.routes.daily_run.REPO_ROOT", tmp_path):
result = _load_cycle_data(days=14)
assert result == {"current": 0, "previous": 0}
def test_load_cycle_data_counts_successful_sessions(tmp_path):
retro_dir = tmp_path / ".loop" / "retro"
retro_dir.mkdir(parents=True)
retro_file = retro_dir / "cycles.jsonl"
now = datetime.now(UTC)
recent_ts = (now - timedelta(days=3)).isoformat()
older_ts = (now - timedelta(days=10)).isoformat()
old_ts = (now - timedelta(days=20)).isoformat()
lines = [
json.dumps({"timestamp": recent_ts, "success": True}),
json.dumps({"timestamp": recent_ts, "success": False}), # not counted
json.dumps({"timestamp": older_ts, "success": True}),
json.dumps({"timestamp": old_ts, "success": True}), # outside window
]
retro_file.write_text("\n".join(lines))
with patch("dashboard.routes.daily_run.REPO_ROOT", tmp_path):
result = _load_cycle_data(days=7)
assert result["current"] == 1
assert result["previous"] == 1
def test_load_cycle_data_skips_invalid_json_lines(tmp_path):
retro_dir = tmp_path / ".loop" / "retro"
retro_dir.mkdir(parents=True)
retro_file = retro_dir / "cycles.jsonl"
now = datetime.now(UTC)
recent_ts = (now - timedelta(days=1)).isoformat()
retro_file.write_text(
f'not valid json\n{json.dumps({"timestamp": recent_ts, "success": True})}\n'
)
with patch("dashboard.routes.daily_run.REPO_ROOT", tmp_path):
result = _load_cycle_data(days=7)
assert result["current"] == 1
def test_load_cycle_data_skips_entries_with_no_timestamp(tmp_path):
retro_dir = tmp_path / ".loop" / "retro"
retro_dir.mkdir(parents=True)
retro_file = retro_dir / "cycles.jsonl"
retro_file.write_text(json.dumps({"success": True}))
with patch("dashboard.routes.daily_run.REPO_ROOT", tmp_path):
result = _load_cycle_data(days=7)
assert result == {"current": 0, "previous": 0}
# ---------------------------------------------------------------------------
# _fetch_layer_metrics
# ---------------------------------------------------------------------------
def _make_issue(updated_offset_days: int) -> dict:
ts = (datetime.now(UTC) - timedelta(days=updated_offset_days)).isoformat()
return {"updated_at": ts, "labels": [{"name": "layer:triage"}]}
def test_fetch_layer_metrics_counts_current_and_previous():
client = _make_client()
client._available = True
recent_issue = _make_issue(updated_offset_days=3)
older_issue = _make_issue(updated_offset_days=10)
with patch.object(client, "get_paginated", return_value=[recent_issue, older_issue]):
layers, total_current, total_previous = _fetch_layer_metrics(client, lookback_days=7)
# Should have one entry per LAYER_LABELS
assert len(layers) == len(LAYER_LABELS)
triage = next(lm for lm in layers if lm.name == "triage")
assert triage.current_count == 1
assert triage.previous_count == 1
def test_fetch_layer_metrics_degrades_on_http_error():
client = _make_client()
client._available = True
with patch.object(client, "get_paginated", side_effect=URLError("network")):
layers, total_current, total_previous = _fetch_layer_metrics(client, lookback_days=7)
assert len(layers) == len(LAYER_LABELS)
for lm in layers:
assert lm.current_count == 0
assert lm.previous_count == 0
assert total_current == 0
assert total_previous == 0
# ---------------------------------------------------------------------------
# _get_metrics
# ---------------------------------------------------------------------------
def test_get_metrics_returns_none_when_gitea_unavailable():
with patch("dashboard.routes.daily_run._load_config", return_value=DEFAULT_CONFIG):
with patch("dashboard.routes.daily_run._get_token", return_value=None):
with patch.object(GiteaClient, "is_available", return_value=False):
result = _get_metrics()
assert result is None
def test_get_metrics_returns_daily_run_metrics():
mock_layers = [
LayerMetrics(name="triage", label="layer:triage", current_count=5, previous_count=3)
]
with patch("dashboard.routes.daily_run._load_config", return_value=DEFAULT_CONFIG):
with patch("dashboard.routes.daily_run._get_token", return_value="tok"):
with patch.object(GiteaClient, "is_available", return_value=True):
with patch(
"dashboard.routes.daily_run._fetch_layer_metrics",
return_value=(mock_layers, 5, 3),
):
with patch(
"dashboard.routes.daily_run._load_cycle_data",
return_value={"current": 10, "previous": 8},
):
result = _get_metrics(lookback_days=7)
assert result is not None
assert result.sessions_completed == 10
assert result.sessions_previous == 8
assert result.lookback_days == 7
assert result.layers == mock_layers
def test_get_metrics_returns_none_on_exception():
with patch("dashboard.routes.daily_run._load_config", return_value=DEFAULT_CONFIG):
with patch("dashboard.routes.daily_run._get_token", return_value="tok"):
with patch.object(GiteaClient, "is_available", return_value=True):
with patch(
"dashboard.routes.daily_run._fetch_layer_metrics",
side_effect=Exception("unexpected"),
):
result = _get_metrics()
assert result is None
# ---------------------------------------------------------------------------
# Route handlers (FastAPI)
# ---------------------------------------------------------------------------
def test_daily_run_metrics_api_unavailable(client):
with patch("dashboard.routes.daily_run._get_metrics", return_value=None):
resp = client.get("/daily-run/metrics")
assert resp.status_code == 503
data = resp.json()
assert data["status"] == "unavailable"
def test_daily_run_metrics_api_returns_json(client):
mock_metrics = _make_daily_metrics(
layers=[
LayerMetrics(name="triage", label="layer:triage", current_count=3, previous_count=2)
]
)
with patch("dashboard.routes.daily_run._get_metrics", return_value=mock_metrics):
with patch(
"dashboard.routes.quests.check_daily_run_quests",
return_value=[],
create=True,
):
resp = client.get("/daily-run/metrics?lookback_days=7")
assert resp.status_code == 200
data = resp.json()
assert data["status"] == "ok"
assert data["lookback_days"] == 7
assert "sessions" in data
assert "layers" in data
assert "totals" in data
assert len(data["layers"]) == 1
assert data["layers"][0]["name"] == "triage"
def test_daily_run_panel_returns_html(client):
mock_metrics = _make_daily_metrics()
with patch("dashboard.routes.daily_run._get_metrics", return_value=mock_metrics):
with patch("dashboard.routes.daily_run._load_config", return_value=DEFAULT_CONFIG):
resp = client.get("/daily-run/panel")
assert resp.status_code == 200
assert "text/html" in resp.headers["content-type"]
def test_daily_run_panel_when_unavailable(client):
with patch("dashboard.routes.daily_run._get_metrics", return_value=None):
with patch("dashboard.routes.daily_run._load_config", return_value=DEFAULT_CONFIG):
resp = client.get("/daily-run/panel")
assert resp.status_code == 200

View File

@@ -0,0 +1,513 @@
"""Unit tests for infrastructure.chat_store module."""
import threading
from pathlib import Path
import pytest
from infrastructure.chat_store import MAX_MESSAGES, Message, MessageLog, _get_conn
# ---------------------------------------------------------------------------
# Message dataclass
# ---------------------------------------------------------------------------
class TestMessageDataclass:
    """Tests for the Message dataclass."""

    def test_message_required_fields(self):
        """Message can be created with required fields only."""
        msg = Message(role="user", content="hello", timestamp="2024-01-01T00:00:00")
        assert (msg.role, msg.content, msg.timestamp) == (
            "user",
            "hello",
            "2024-01-01T00:00:00",
        )

    def test_message_default_source(self):
        """Message source defaults to 'browser'."""
        msg = Message(role="user", content="hi", timestamp="2024-01-01T00:00:00")
        assert msg.source == "browser"

    def test_message_custom_source(self):
        """Message source can be overridden."""
        msg = Message(
            role="agent", content="reply", timestamp="2024-01-01T00:00:00", source="api"
        )
        assert msg.source == "api"

    def test_message_equality(self):
        """Two Messages with the same fields are equal (dataclass default)."""
        assert Message(role="user", content="x", timestamp="t") == Message(
            role="user", content="x", timestamp="t"
        )

    def test_message_inequality(self):
        """Messages with different content are not equal."""
        assert Message(role="user", content="x", timestamp="t") != Message(
            role="user", content="y", timestamp="t"
        )
# ---------------------------------------------------------------------------
# _get_conn context manager
# ---------------------------------------------------------------------------
class TestGetConnContextManager:
    """Tests for the _get_conn context manager."""

    def test_creates_db_file(self, tmp_path):
        """_get_conn creates the database file on first use."""
        db = tmp_path / "chat.db"
        assert not db.exists()
        with _get_conn(db) as conn:
            assert conn is not None
        assert db.exists()

    def test_creates_parent_directories(self, tmp_path):
        """_get_conn creates any missing parent directories."""
        db = tmp_path / "nested" / "deep" / "chat.db"
        with _get_conn(db):
            pass
        assert db.exists()

    def test_creates_schema(self, tmp_path):
        """_get_conn creates the chat_messages table."""
        db = tmp_path / "chat.db"
        query = (
            "SELECT name FROM sqlite_master "
            "WHERE type='table' AND name='chat_messages'"
        )
        with _get_conn(db) as conn:
            rows = conn.execute(query).fetchall()
        assert len(rows) == 1

    def test_schema_has_expected_columns(self, tmp_path):
        """chat_messages table has the expected columns."""
        db = tmp_path / "chat.db"
        with _get_conn(db) as conn:
            rows = conn.execute("PRAGMA table_info(chat_messages)").fetchall()
        assert {row["name"] for row in rows} == {
            "id",
            "role",
            "content",
            "timestamp",
            "source",
        }

    def test_idempotent_schema_creation(self, tmp_path):
        """Calling _get_conn twice does not fail (CREATE TABLE IF NOT EXISTS)."""
        db = tmp_path / "chat.db"
        with _get_conn(db):
            pass
        with _get_conn(db) as conn:
            # Table still exists and is queryable after a second open.
            conn.execute("SELECT COUNT(*) FROM chat_messages")
# ---------------------------------------------------------------------------
# MessageLog — basic operations
# ---------------------------------------------------------------------------
class TestMessageLogAppend:
    """Tests for MessageLog.append()."""

    def test_append_single_message(self, tmp_path):
        """append() stores a message that can be retrieved."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "hello", "2024-01-01T00:00:00")
        (msg,) = store.all()
        assert msg.role == "user"
        assert msg.content == "hello"
        assert msg.timestamp == "2024-01-01T00:00:00"
        assert msg.source == "browser"
        store.close()

    def test_append_custom_source(self, tmp_path):
        """append() stores the source field correctly."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("agent", "reply", "2024-01-01T00:00:01", source="api")
        assert store.all()[0].source == "api"
        store.close()

    def test_append_multiple_messages_preserves_order(self, tmp_path):
        """append() preserves insertion order."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "first", "2024-01-01T00:00:00")
        store.append("agent", "second", "2024-01-01T00:00:01")
        store.append("user", "third", "2024-01-01T00:00:02")
        assert [m.content for m in store.all()] == ["first", "second", "third"]
        store.close()

    def test_append_persists_across_instances(self, tmp_path):
        """Messages appended by one instance are readable by another."""
        db = tmp_path / "chat.db"
        writer = MessageLog(db)
        writer.append("user", "persisted", "2024-01-01T00:00:00")
        writer.close()
        reader = MessageLog(db)
        stored = reader.all()
        assert len(stored) == 1
        assert stored[0].content == "persisted"
        reader.close()
class TestMessageLogAll:
    """Tests for MessageLog.all()."""

    def test_all_on_empty_store_returns_empty_list(self, tmp_path):
        """all() returns [] when there are no messages."""
        store = MessageLog(tmp_path / "chat.db")
        assert store.all() == []
        store.close()

    def test_all_returns_message_objects(self, tmp_path):
        """all() returns a list of Message dataclass instances."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "hi", "2024-01-01T00:00:00")
        for item in store.all():
            assert isinstance(item, Message)
        store.close()

    def test_all_returns_all_messages(self, tmp_path):
        """all() returns every stored message."""
        store = MessageLog(tmp_path / "chat.db")
        for i in range(5):
            store.append("user", f"msg{i}", f"2024-01-01T00:00:0{i}")
        assert len(store.all()) == 5
        store.close()
class TestMessageLogRecent:
    """Tests for MessageLog.recent()."""

    def test_recent_on_empty_store_returns_empty_list(self, tmp_path):
        """recent() returns [] when there are no messages."""
        store = MessageLog(tmp_path / "chat.db")
        assert store.recent() == []
        store.close()

    def test_recent_default_limit(self, tmp_path):
        """recent() with default limit returns up to 50 messages."""
        store = MessageLog(tmp_path / "chat.db")
        for i in range(60):
            store.append("user", f"msg{i}", f"2024-01-01T00:00:{i:02d}")
        assert len(store.recent()) == 50
        store.close()

    def test_recent_custom_limit(self, tmp_path):
        """recent() respects a custom limit."""
        store = MessageLog(tmp_path / "chat.db")
        for i in range(10):
            store.append("user", f"msg{i}", f"2024-01-01T00:00:0{i}")
        assert len(store.recent(limit=3)) == 3
        store.close()

    def test_recent_returns_newest_messages(self, tmp_path):
        """recent() returns the most-recently-inserted messages."""
        store = MessageLog(tmp_path / "chat.db")
        for i in range(10):
            store.append("user", f"msg{i}", f"2024-01-01T00:00:0{i}")
        # Expect the last three inserted, in oldest-first order.
        assert [m.content for m in store.recent(limit=3)] == ["msg7", "msg8", "msg9"]
        store.close()

    def test_recent_fewer_than_limit_returns_all(self, tmp_path):
        """recent() returns all messages when count < limit."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "only", "2024-01-01T00:00:00")
        assert len(store.recent(limit=10)) == 1
        store.close()

    def test_recent_returns_oldest_first(self, tmp_path):
        """recent() returns messages in oldest-first order."""
        store = MessageLog(tmp_path / "chat.db")
        for idx, text in enumerate(["a", "b", "c"]):
            store.append("user", text, f"2024-01-01T00:00:0{idx}")
        assert [m.content for m in store.recent(limit=2)] == ["b", "c"]
        store.close()
class TestMessageLogClear:
    """Tests for MessageLog.clear()."""

    def test_clear_empties_the_store(self, tmp_path):
        """clear() removes all messages."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "hello", "2024-01-01T00:00:00")
        store.clear()
        assert store.all() == []
        store.close()

    def test_clear_on_empty_store_is_safe(self, tmp_path):
        """clear() on an empty store does not raise."""
        store = MessageLog(tmp_path / "chat.db")
        store.clear()  # must be a harmless no-op
        assert store.all() == []
        store.close()

    def test_clear_allows_new_appends(self, tmp_path):
        """After clear(), new messages can be appended."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "old", "2024-01-01T00:00:00")
        store.clear()
        store.append("user", "new", "2024-01-01T00:00:01")
        assert [m.content for m in store.all()] == ["new"]
        store.close()

    def test_clear_resets_len_to_zero(self, tmp_path):
        """After clear(), __len__ returns 0."""
        store = MessageLog(tmp_path / "chat.db")
        for text in ("a", "b"):
            store.append("user", text, "t")
        store.clear()
        assert len(store) == 0
        store.close()
# ---------------------------------------------------------------------------
# MessageLog — __len__
# ---------------------------------------------------------------------------
class TestMessageLogLen:
    """Tests for MessageLog.__len__()."""

    def test_len_empty_store(self, tmp_path):
        """__len__ returns 0 for an empty store."""
        store = MessageLog(tmp_path / "chat.db")
        assert len(store) == 0
        store.close()

    def test_len_after_appends(self, tmp_path):
        """__len__ reflects the number of stored messages."""
        store = MessageLog(tmp_path / "chat.db")
        for i in range(7):
            store.append("user", f"msg{i}", "t")
        assert len(store) == 7
        store.close()

    def test_len_after_clear(self, tmp_path):
        """__len__ is 0 after clear()."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "x", "t")
        store.clear()
        assert len(store) == 0
        store.close()
# ---------------------------------------------------------------------------
# MessageLog — pruning
# ---------------------------------------------------------------------------
class TestMessageLogPrune:
    """Tests for automatic pruning via _prune().

    _prune() reads the module-level MAX_MESSAGES constant at call time, so
    each test patches it on the module. The pytest ``monkeypatch`` fixture is
    used instead of manual save/restore: it undoes the patch even if an
    assertion fails mid-test, which the previous try/finally did not fully
    guarantee (close() ran outside the finally).
    """

    def test_prune_keeps_at_most_max_messages(self, tmp_path, monkeypatch):
        """After exceeding MAX_MESSAGES, oldest messages are pruned."""
        import infrastructure.chat_store as cs

        monkeypatch.setattr(cs, "MAX_MESSAGES", 5)
        log = MessageLog(tmp_path / "chat.db")
        for i in range(8):
            log.append("user", f"msg{i}", f"t{i}")
        assert len(log) == 5
        log.close()

    def test_prune_keeps_newest_messages(self, tmp_path, monkeypatch):
        """Pruning removes oldest messages and keeps the newest ones."""
        import infrastructure.chat_store as cs

        monkeypatch.setattr(cs, "MAX_MESSAGES", 3)
        log = MessageLog(tmp_path / "chat.db")
        for i in range(5):
            log.append("user", f"msg{i}", f"t{i}")
        assert [m.content for m in log.all()] == ["msg2", "msg3", "msg4"]
        log.close()

    def test_no_prune_when_below_limit(self, tmp_path, monkeypatch):
        """No messages are pruned while count is at or below MAX_MESSAGES."""
        import infrastructure.chat_store as cs

        monkeypatch.setattr(cs, "MAX_MESSAGES", 10)
        log = MessageLog(tmp_path / "chat.db")
        for i in range(10):
            log.append("user", f"msg{i}", f"t{i}")
        assert len(log) == 10
        log.close()
# ---------------------------------------------------------------------------
# MessageLog — close / lifecycle
# ---------------------------------------------------------------------------
class TestMessageLogClose:
    """Tests for MessageLog.close()."""

    def test_close_is_safe_before_first_use(self, tmp_path):
        """close() on a fresh (never-used) instance does not raise."""
        MessageLog(tmp_path / "chat.db").close()

    def test_close_multiple_times_is_safe(self, tmp_path):
        """close() can be called multiple times without error."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "hi", "t")
        store.close()
        store.close()  # double-close must be harmless

    def test_close_sets_conn_to_none(self, tmp_path):
        """close() sets the internal _conn attribute to None."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "hi", "t")
        assert store._conn is not None
        store.close()
        assert store._conn is None
# ---------------------------------------------------------------------------
# Thread safety
# ---------------------------------------------------------------------------
class TestMessageLogThreadSafety:
    """Thread-safety tests for MessageLog."""

    def test_concurrent_appends(self, tmp_path):
        """Multiple threads can append messages without data loss or errors."""
        store = MessageLog(tmp_path / "chat.db")
        failures: list[Exception] = []

        def append_batch(worker_id: int) -> None:
            try:
                for seq in range(5):
                    store.append("user", f"t{worker_id}-{seq}", f"ts-{worker_id}-{seq}")
            except Exception as exc:  # noqa: BLE001
                failures.append(exc)

        workers = [
            threading.Thread(target=append_batch, args=(wid,)) for wid in range(4)
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
        assert failures == [], f"Concurrent append raised: {failures}"
        # 4 threads each appending 5 messages -> 20 rows expected.
        assert len(store) == 20
        store.close()

    def test_concurrent_reads_and_writes(self, tmp_path):
        """Concurrent reads and writes do not corrupt state."""
        store = MessageLog(tmp_path / "chat.db")
        failures: list[Exception] = []

        def write_messages() -> None:
            try:
                for seq in range(10):
                    store.append("user", f"msg{seq}", f"t{seq}")
            except Exception as exc:  # noqa: BLE001
                failures.append(exc)

        def read_messages() -> None:
            try:
                for _ in range(10):
                    store.all()
            except Exception as exc:  # noqa: BLE001
                failures.append(exc)

        workers = [threading.Thread(target=write_messages)]
        workers.extend(threading.Thread(target=read_messages) for _ in range(3))
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
        assert failures == [], f"Concurrent read/write raised: {failures}"
        store.close()
# ---------------------------------------------------------------------------
# Edge cases
# ---------------------------------------------------------------------------
class TestMessageLogEdgeCases:
    """Edge-case tests for MessageLog."""

    def test_empty_content_stored_and_retrieved(self, tmp_path):
        """Empty string content can be stored and retrieved."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "", "2024-01-01T00:00:00")
        assert store.all()[0].content == ""
        store.close()

    def test_unicode_content_stored_and_retrieved(self, tmp_path):
        """Unicode characters in content are stored and retrieved correctly."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "こんにちは 🌍", "2024-01-01T00:00:00")
        assert store.all()[0].content == "こんにちは 🌍"
        store.close()

    def test_newline_in_content(self, tmp_path):
        """Newlines in content are preserved."""
        store = MessageLog(tmp_path / "chat.db")
        text = "line1\nline2\nline3"
        store.append("agent", text, "2024-01-01T00:00:00")
        assert store.all()[0].content == text
        store.close()

    def test_default_db_path_attribute(self):
        """MessageLog without explicit path uses the module-level DB_PATH."""
        from infrastructure.chat_store import DB_PATH

        store = MessageLog()
        assert store._db_path == DB_PATH
        # Deliberately not closed: this path belongs to the global singleton.

    def test_custom_db_path_used(self, tmp_path):
        """MessageLog uses the provided db_path."""
        db = tmp_path / "custom.db"
        store = MessageLog(db)
        store.append("user", "test", "t")
        assert db.exists()
        store.close()

    def test_recent_limit_zero_returns_empty(self, tmp_path):
        """recent(limit=0) returns an empty list."""
        store = MessageLog(tmp_path / "chat.db")
        store.append("user", "msg", "t")
        assert store.recent(limit=0) == []
        store.close()

    def test_all_roles_stored_correctly(self, tmp_path):
        """Different role values are stored and retrieved correctly."""
        store = MessageLog(tmp_path / "chat.db")
        roles = ("user", "agent", "error", "system")
        for role in roles:
            store.append(role, f"{role} message", "t")
        assert [m.role for m in store.all()] == list(roles)
        store.close()

View File

@@ -1,10 +1,21 @@
"""Tests for the async event bus (infrastructure.events.bus)."""
import sqlite3
from pathlib import Path
from unittest.mock import patch
import pytest
from infrastructure.events.bus import Event, EventBus, emit, event_bus, on
import infrastructure.events.bus as bus_module
from infrastructure.events.bus import (
Event,
EventBus,
emit,
event_bus,
get_event_bus,
init_event_bus_persistence,
on,
)
class TestEvent:
@@ -349,3 +360,111 @@ class TestEventBusPersistence:
assert mode == "wal"
finally:
conn.close()
async def test_persist_event_exception_is_swallowed(self, tmp_path):
"""_persist_event must not propagate SQLite errors."""
from unittest.mock import MagicMock
bus = EventBus()
bus.enable_persistence(tmp_path / "events.db")
# Make the INSERT raise an OperationalError
mock_conn = MagicMock()
mock_conn.execute.side_effect = sqlite3.OperationalError("simulated failure")
from contextlib import contextmanager
@contextmanager
def fake_ctx():
yield mock_conn
with patch.object(bus, "_get_persistence_conn", fake_ctx):
# Should not raise
bus._persist_event(Event(type="x", source="s"))
async def test_replay_exception_returns_empty(self, tmp_path):
"""replay() must return [] when SQLite query fails."""
from unittest.mock import MagicMock
bus = EventBus()
bus.enable_persistence(tmp_path / "events.db")
mock_conn = MagicMock()
mock_conn.execute.side_effect = sqlite3.OperationalError("simulated failure")
from contextlib import contextmanager
@contextmanager
def fake_ctx():
yield mock_conn
with patch.object(bus, "_get_persistence_conn", fake_ctx):
result = bus.replay()
assert result == []
# ── Singleton helpers ─────────────────────────────────────────────────────────
class TestSingletonHelpers:
    """Test get_event_bus(), init_event_bus_persistence(), and module __getattr__."""

    def test_get_event_bus_returns_same_instance(self):
        """get_event_bus() is a true singleton."""
        assert get_event_bus() is get_event_bus()

    def test_module_event_bus_attr_is_singleton(self):
        """Accessing bus_module.event_bus via __getattr__ returns the singleton."""
        assert bus_module.event_bus is get_event_bus()

    def test_module_getattr_unknown_raises(self):
        """Accessing an unknown module attribute raises AttributeError."""
        with pytest.raises(AttributeError):
            _ = bus_module.no_such_attr  # type: ignore[attr-defined]

    def test_init_event_bus_persistence_sets_path(self, tmp_path):
        """init_event_bus_persistence() enables persistence on the singleton."""
        singleton = get_event_bus()
        saved_path = singleton._persistence_db_path
        try:
            singleton._persistence_db_path = None  # reset for the test
            target = tmp_path / "test_init.db"
            init_event_bus_persistence(target)
            assert singleton._persistence_db_path == target
        finally:
            singleton._persistence_db_path = saved_path

    def test_init_event_bus_persistence_is_idempotent(self, tmp_path):
        """Calling init_event_bus_persistence() twice keeps the first path."""
        singleton = get_event_bus()
        saved_path = singleton._persistence_db_path
        try:
            singleton._persistence_db_path = None
            first = tmp_path / "first.db"
            init_event_bus_persistence(first)
            init_event_bus_persistence(tmp_path / "second.db")  # must be ignored
            assert singleton._persistence_db_path == first
        finally:
            singleton._persistence_db_path = saved_path

    def test_init_event_bus_persistence_default_path(self):
        """init_event_bus_persistence() uses 'data/events.db' when no path given."""
        singleton = get_event_bus()
        saved_path = singleton._persistence_db_path
        try:
            singleton._persistence_db_path = None
            # Intercept enable_persistence to capture the path it receives.
            seen: dict[str, Path] = {}

            def record_path(path: Path) -> None:
                seen["path"] = path

            with patch.object(
                singleton, "enable_persistence", side_effect=record_path
            ):
                init_event_bus_persistence()
            assert seen["path"] == Path("data/events.db")
        finally:
            singleton._persistence_db_path = saved_path

View File

@@ -18,6 +18,10 @@ def _make_settings(**env_overrides):
"""Create a fresh Settings instance with isolated env vars."""
from config import Settings
# Prevent Pydantic from reading .env file (local .env pollutes defaults)
_orig_config = Settings.model_config.copy()
Settings.model_config["env_file"] = None
# Strip keys that might bleed in from the test environment
clean_env = {
k: v
@@ -82,7 +86,10 @@ def _make_settings(**env_overrides):
}
clean_env.update(env_overrides)
with patch.dict(os.environ, clean_env, clear=True):
return Settings()
try:
return Settings()
finally:
Settings.model_config.update(_orig_config)
# ── normalize_ollama_url ──────────────────────────────────────────────────────
@@ -692,12 +699,12 @@ class TestGetEffectiveOllamaModel:
"""get_effective_ollama_model walks fallback chain."""
def test_returns_primary_when_available(self):
from config import get_effective_ollama_model
from config import get_effective_ollama_model, settings
with patch("config.check_ollama_model_available", return_value=True):
result = get_effective_ollama_model()
# Default is qwen3:14b
assert result == "qwen3:14b"
# Should return whatever the user's configured model is
assert result == settings.ollama_model
def test_falls_back_when_primary_unavailable(self):
from config import get_effective_ollama_model, settings

View File

@@ -2,10 +2,15 @@
from __future__ import annotations
from datetime import UTC, datetime, timedelta
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from timmy.vassal.agent_health import AgentHealthReport, AgentStatus
pytestmark = pytest.mark.unit
# ---------------------------------------------------------------------------
# AgentStatus
# ---------------------------------------------------------------------------
@@ -35,6 +40,25 @@ def test_agent_status_stuck():
assert s.needs_reassignment is True
def test_agent_status_checked_at_is_iso_string():
    status = AgentStatus(agent="claude")
    # checked_at must parse as an ISO datetime carrying timezone info.
    parsed = datetime.fromisoformat(status.checked_at)
    assert parsed.tzinfo is not None


def test_agent_status_multiple_stuck_issues():
    status = AgentStatus(agent="kimi", stuck_issue_numbers=[1, 2, 3])
    assert status.is_stuck is True
    assert status.needs_reassignment is True


def test_agent_status_active_but_not_stuck():
    status = AgentStatus(agent="claude", active_issue_numbers=[5], is_idle=False)
    assert status.is_stuck is False
    assert status.needs_reassignment is False
# ---------------------------------------------------------------------------
# AgentHealthReport
# ---------------------------------------------------------------------------
@@ -47,11 +71,24 @@ def test_report_any_stuck():
assert report.any_stuck is True
def test_report_not_any_stuck():
    healthy = [AgentStatus(agent="claude"), AgentStatus(agent="kimi")]
    assert AgentHealthReport(agents=healthy).any_stuck is False


def test_report_all_idle():
    idle = [AgentStatus(agent="claude"), AgentStatus(agent="kimi")]
    assert AgentHealthReport(agents=idle).all_idle is True


def test_report_not_all_idle():
    busy = AgentStatus(agent="claude", active_issue_numbers=[1], is_idle=False)
    report = AgentHealthReport(agents=[busy, AgentStatus(agent="kimi")])
    assert report.all_idle is False
def test_report_for_agent_found():
kimi = AgentStatus(agent="kimi", active_issue_numbers=[42])
report = AgentHealthReport(agents=[AgentStatus(agent="claude"), kimi])
@@ -64,6 +101,233 @@ def test_report_for_agent_not_found():
assert report.for_agent("timmy") is None
def test_report_generated_at_is_iso_string():
    # generated_at must parse as a timezone-aware ISO datetime.
    parsed = datetime.fromisoformat(AgentHealthReport().generated_at)
    assert parsed.tzinfo is not None


def test_report_empty_agents():
    empty = AgentHealthReport(agents=[])
    assert empty.any_stuck is False
    assert empty.all_idle is True
# ---------------------------------------------------------------------------
# _issue_created_time
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_issue_created_time_valid():
    from timmy.vassal.agent_health import _issue_created_time

    created = await _issue_created_time({"created_at": "2024-01-15T10:30:00Z"})
    assert created is not None
    assert (created.year, created.month, created.day) == (2024, 1, 15)


@pytest.mark.asyncio
async def test_issue_created_time_missing_key():
    from timmy.vassal.agent_health import _issue_created_time

    assert await _issue_created_time({}) is None


@pytest.mark.asyncio
async def test_issue_created_time_invalid_format():
    from timmy.vassal.agent_health import _issue_created_time

    assert await _issue_created_time({"created_at": "not-a-date"}) is None


@pytest.mark.asyncio
async def test_issue_created_time_with_timezone():
    from timmy.vassal.agent_health import _issue_created_time

    created = await _issue_created_time({"created_at": "2024-06-01T12:00:00+00:00"})
    assert created is not None
    assert created.tzinfo is not None
# ---------------------------------------------------------------------------
# _fetch_labeled_issues — mocked HTTP client
# ---------------------------------------------------------------------------
def _stub_issue_client(status_code, payload):
    # Build an AsyncMock httpx-style client whose GET returns a canned response.
    response = MagicMock()
    response.status_code = status_code
    response.json.return_value = payload
    client = AsyncMock()
    client.get = AsyncMock(return_value=response)
    return client


@pytest.mark.asyncio
async def test_fetch_labeled_issues_success():
    from timmy.vassal.agent_health import _fetch_labeled_issues

    client = _stub_issue_client(
        200,
        [
            {"number": 1, "title": "Fix bug"},
            {"number": 2, "title": "Add feature", "pull_request": {"url": "..."}},
        ],
    )
    issues = await _fetch_labeled_issues(
        client, "http://gitea/api/v1", {}, "owner/repo", "claude-ready"
    )
    # Only non-PR issues are returned.
    assert [i["number"] for i in issues] == [1]


@pytest.mark.asyncio
async def test_fetch_labeled_issues_http_error():
    from timmy.vassal.agent_health import _fetch_labeled_issues

    client = _stub_issue_client(401, [])
    issues = await _fetch_labeled_issues(
        client, "http://gitea/api/v1", {}, "owner/repo", "claude-ready"
    )
    assert issues == []


@pytest.mark.asyncio
async def test_fetch_labeled_issues_exception():
    from timmy.vassal.agent_health import _fetch_labeled_issues

    client = AsyncMock()
    client.get = AsyncMock(side_effect=ConnectionError("network down"))
    issues = await _fetch_labeled_issues(
        client, "http://gitea/api/v1", {}, "owner/repo", "claude-ready"
    )
    assert issues == []


@pytest.mark.asyncio
async def test_fetch_labeled_issues_filters_pull_requests():
    from timmy.vassal.agent_health import _fetch_labeled_issues

    client = _stub_issue_client(
        200,
        [
            {"number": 10, "title": "Issue"},
            {"number": 11, "title": "PR", "pull_request": {"url": "http://gitea/pulls/11"}},
            {"number": 12, "title": "Another Issue"},
        ],
    )
    issues = await _fetch_labeled_issues(
        client, "http://gitea/api/v1", {}, "owner/repo", "claude-ready"
    )
    # Entries carrying a truthy pull_request field are excluded.
    assert sorted(i["number"] for i in issues) == [10, 12]
# ---------------------------------------------------------------------------
# _last_comment_time — mocked HTTP client
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_last_comment_time_with_comments():
    from timmy.vassal.agent_health import _last_comment_time

    response = MagicMock()
    response.status_code = 200
    response.json.return_value = [
        {"updated_at": "2024-03-10T14:00:00Z", "created_at": "2024-03-10T13:00:00Z"}
    ]
    client = AsyncMock()
    client.get = AsyncMock(return_value=response)
    last = await _last_comment_time(
        client, "http://gitea/api/v1", {}, "owner/repo", 42
    )
    assert last is not None
    assert (last.year, last.month) == (2024, 3)


@pytest.mark.asyncio
async def test_last_comment_time_uses_created_at_fallback():
    from timmy.vassal.agent_health import _last_comment_time

    response = MagicMock()
    response.status_code = 200
    # No updated_at present: created_at should be used instead.
    response.json.return_value = [{"created_at": "2024-03-10T13:00:00Z"}]
    client = AsyncMock()
    client.get = AsyncMock(return_value=response)
    last = await _last_comment_time(
        client, "http://gitea/api/v1", {}, "owner/repo", 42
    )
    assert last is not None


@pytest.mark.asyncio
async def test_last_comment_time_no_comments():
    from timmy.vassal.agent_health import _last_comment_time

    response = MagicMock()
    response.status_code = 200
    response.json.return_value = []
    client = AsyncMock()
    client.get = AsyncMock(return_value=response)
    last = await _last_comment_time(
        client, "http://gitea/api/v1", {}, "owner/repo", 99
    )
    assert last is None


@pytest.mark.asyncio
async def test_last_comment_time_http_error():
    from timmy.vassal.agent_health import _last_comment_time

    response = MagicMock()
    response.status_code = 404
    client = AsyncMock()
    client.get = AsyncMock(return_value=response)
    last = await _last_comment_time(
        client, "http://gitea/api/v1", {}, "owner/repo", 99
    )
    assert last is None


@pytest.mark.asyncio
async def test_last_comment_time_exception():
    from timmy.vassal.agent_health import _last_comment_time

    client = AsyncMock()
    client.get = AsyncMock(side_effect=TimeoutError("timed out"))
    last = await _last_comment_time(
        client, "http://gitea/api/v1", {}, "owner/repo", 7
    )
    assert last is None
# ---------------------------------------------------------------------------
# check_agent_health — no Gitea in unit tests
# ---------------------------------------------------------------------------
@@ -90,6 +354,140 @@ async def test_check_agent_health_no_token():
assert status.agent == "claude"
@pytest.mark.asyncio
async def test_check_agent_health_detects_stuck_issue(monkeypatch):
    """Issues with last activity before the cutoff are flagged as stuck."""
    import timmy.vassal.agent_health as ah

    # Last activity 200 minutes ago, well past the 120-minute threshold below.
    old_time = (datetime.now(UTC) - timedelta(minutes=200)).isoformat()

    async def _fake_fetch(client, base_url, headers, repo, label):
        return [{"number": 55, "created_at": old_time}]

    async def _fake_last_comment(client, base_url, headers, repo, issue_number):
        return datetime.now(UTC) - timedelta(minutes=200)

    monkeypatch.setattr(ah, "_fetch_labeled_issues", _fake_fetch)
    monkeypatch.setattr(ah, "_last_comment_time", _fake_last_comment)
    mock_settings = MagicMock()
    mock_settings.gitea_enabled = True
    mock_settings.gitea_token = "fake-token"
    mock_settings.gitea_url = "http://gitea"
    mock_settings.gitea_repo = "owner/repo"
    # (Removed an unused local `import httpx` that served no purpose here.)
    with patch("config.settings", mock_settings):
        status = await ah.check_agent_health("claude", stuck_threshold_minutes=120)
    assert 55 in status.active_issue_numbers
    assert 55 in status.stuck_issue_numbers
    assert status.is_stuck is True
@pytest.mark.asyncio
async def test_check_agent_health_active_not_stuck(monkeypatch):
    """Recent activity means issue is active but not stuck."""
    import timmy.vassal.agent_health as ah

    recent_iso = (datetime.now(UTC) - timedelta(minutes=5)).isoformat()

    async def fetch_stub(client, base_url, headers, repo, label):
        return [{"number": 77, "created_at": recent_iso}]

    async def comment_stub(client, base_url, headers, repo, issue_number):
        return datetime.now(UTC) - timedelta(minutes=5)

    monkeypatch.setattr(ah, "_fetch_labeled_issues", fetch_stub)
    monkeypatch.setattr(ah, "_last_comment_time", comment_stub)
    fake_settings = MagicMock()
    fake_settings.gitea_enabled = True
    fake_settings.gitea_token = "fake-token"
    fake_settings.gitea_url = "http://gitea"
    fake_settings.gitea_repo = "owner/repo"
    with patch("config.settings", fake_settings):
        status = await ah.check_agent_health("claude", stuck_threshold_minutes=120)
    assert 77 in status.active_issue_numbers
    assert 77 not in status.stuck_issue_numbers
    assert status.is_idle is False
@pytest.mark.asyncio
async def test_check_agent_health_uses_issue_created_when_no_comments(monkeypatch):
    """Falls back to issue created_at when no comment time is available."""
    import timmy.vassal.agent_health as ah

    stale_iso = (datetime.now(UTC) - timedelta(minutes=300)).isoformat()

    async def fetch_stub(client, base_url, headers, repo, label):
        return [{"number": 99, "created_at": stale_iso}]

    async def comment_stub(client, base_url, headers, repo, issue_number):
        return None  # simulate an issue with no comments at all

    monkeypatch.setattr(ah, "_fetch_labeled_issues", fetch_stub)
    monkeypatch.setattr(ah, "_last_comment_time", comment_stub)
    fake_settings = MagicMock()
    fake_settings.gitea_enabled = True
    fake_settings.gitea_token = "fake-token"
    fake_settings.gitea_url = "http://gitea"
    fake_settings.gitea_repo = "owner/repo"
    with patch("config.settings", fake_settings):
        status = await ah.check_agent_health("kimi", stuck_threshold_minutes=120)
    assert 99 in status.stuck_issue_numbers
@pytest.mark.asyncio
async def test_check_agent_health_gitea_disabled():
    """When gitea_enabled=False, returns idle status without querying.

    The unused ``monkeypatch`` fixture was removed: nothing in the check
    needs patching because the settings gate short-circuits before any
    HTTP helpers are reached.
    """
    import timmy.vassal.agent_health as ah

    mock_settings = MagicMock()
    mock_settings.gitea_enabled = False
    mock_settings.gitea_token = "fake-token"
    with patch("config.settings", mock_settings):
        status = await ah.check_agent_health("claude")
    assert status.is_idle is True
    assert status.active_issue_numbers == []
@pytest.mark.asyncio
async def test_check_agent_health_fetch_exception(monkeypatch):
    """HTTP exception during check is handled gracefully."""
    import timmy.vassal.agent_health as ah

    async def _exploding_fetch(client, base_url, headers, repo, label):
        raise RuntimeError("connection refused")

    monkeypatch.setattr(ah, "_fetch_labeled_issues", _exploding_fetch)
    cfg = MagicMock(
        gitea_enabled=True,
        gitea_token="fake-token",
        gitea_url="http://gitea",
        gitea_repo="owner/repo",
    )
    with patch("config.settings", cfg):
        status = await ah.check_agent_health("claude")
    # The failure must not propagate; we just get an idle status back.
    assert isinstance(status, AgentStatus)
    assert status.is_idle is True
# ---------------------------------------------------------------------------
# get_full_health_report
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_get_full_health_report_returns_both_agents():
from timmy.vassal.agent_health import get_full_health_report
@@ -98,3 +496,127 @@ async def test_get_full_health_report_returns_both_agents():
agent_names = {a.agent for a in report.agents}
assert "claude" in agent_names
assert "kimi" in agent_names
@pytest.mark.asyncio
async def test_get_full_health_report_structure():
    from timmy.vassal.agent_health import get_full_health_report

    # The report object should always cover exactly the two known agents.
    health_report = await get_full_health_report()
    assert isinstance(health_report, AgentHealthReport)
    assert len(health_report.agents) == 2
# ---------------------------------------------------------------------------
# nudge_stuck_agent
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_nudge_stuck_agent_no_token():
    """Returns False gracefully when Gitea is not configured."""
    from timmy.vassal.agent_health import nudge_stuck_agent

    cfg = MagicMock(gitea_enabled=False, gitea_token="")
    with patch("config.settings", cfg):
        assert await nudge_stuck_agent("claude", 123) is False
@pytest.mark.asyncio
async def test_nudge_stuck_agent_success():
    """Returns True when comment is posted successfully.

    Removed the unused ``monkeypatch`` fixture — all interception here is
    done with ``unittest.mock.patch``.
    """
    import timmy.vassal.agent_health as ah

    # 201 Created from the Gitea comment endpoint.
    mock_resp = MagicMock()
    mock_resp.status_code = 201
    # Async client usable both directly and as an async context manager.
    mock_client_instance = AsyncMock()
    mock_client_instance.post = AsyncMock(return_value=mock_resp)
    mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance)
    mock_client_instance.__aexit__ = AsyncMock(return_value=False)
    mock_settings = MagicMock()
    mock_settings.gitea_enabled = True
    mock_settings.gitea_token = "fake-token"
    mock_settings.gitea_url = "http://gitea"
    mock_settings.gitea_repo = "owner/repo"
    with (
        patch("config.settings", mock_settings),
        patch("httpx.AsyncClient", return_value=mock_client_instance),
    ):
        result = await ah.nudge_stuck_agent("claude", 55)
    assert result is True
@pytest.mark.asyncio
async def test_nudge_stuck_agent_http_failure():
    """Returns False when API returns non-2xx status.

    Removed the unused ``monkeypatch`` fixture — all interception here is
    done with ``unittest.mock.patch``.
    """
    import timmy.vassal.agent_health as ah

    # 500 from the Gitea comment endpoint → nudge reports failure.
    mock_resp = MagicMock()
    mock_resp.status_code = 500
    mock_client_instance = AsyncMock()
    mock_client_instance.post = AsyncMock(return_value=mock_resp)
    mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance)
    mock_client_instance.__aexit__ = AsyncMock(return_value=False)
    mock_settings = MagicMock()
    mock_settings.gitea_enabled = True
    mock_settings.gitea_token = "fake-token"
    mock_settings.gitea_url = "http://gitea"
    mock_settings.gitea_repo = "owner/repo"
    with (
        patch("config.settings", mock_settings),
        patch("httpx.AsyncClient", return_value=mock_client_instance),
    ):
        result = await ah.nudge_stuck_agent("kimi", 77)
    assert result is False
@pytest.mark.asyncio
async def test_nudge_stuck_agent_gitea_disabled():
    """Returns False when gitea_enabled=False.

    Removed the unused ``monkeypatch`` fixture — the settings gate makes
    the function return before any network call, so nothing needs patching
    beyond ``config.settings``.
    """
    import timmy.vassal.agent_health as ah

    mock_settings = MagicMock()
    mock_settings.gitea_enabled = False
    mock_settings.gitea_token = "fake-token"
    with patch("config.settings", mock_settings):
        result = await ah.nudge_stuck_agent("claude", 42)
    assert result is False
@pytest.mark.asyncio
async def test_nudge_stuck_agent_exception():
    """Returns False on network exception.

    Removed the unused ``monkeypatch`` fixture — all interception here is
    done with ``unittest.mock.patch``.
    """
    import timmy.vassal.agent_health as ah

    # The POST itself raises; the nudge must swallow it and report False.
    mock_client_instance = AsyncMock()
    mock_client_instance.post = AsyncMock(side_effect=ConnectionError("refused"))
    mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance)
    mock_client_instance.__aexit__ = AsyncMock(return_value=False)
    mock_settings = MagicMock()
    mock_settings.gitea_enabled = True
    mock_settings.gitea_token = "fake-token"
    mock_settings.gitea_url = "http://gitea"
    mock_settings.gitea_repo = "owner/repo"
    with (
        patch("config.settings", mock_settings),
        patch("httpx.AsyncClient", return_value=mock_client_instance),
    ):
        result = await ah.nudge_stuck_agent("claude", 10)
    assert result is False

View File

@@ -2,11 +2,17 @@
from __future__ import annotations
from types import SimpleNamespace
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from timmy.vassal.backlog import AgentTarget, TriagedIssue
from timmy.vassal.dispatch import (
DispatchRecord,
_apply_label_to_issue,
_get_or_create_label,
_post_dispatch_comment,
clear_dispatch_registry,
get_dispatch_registry,
)
@@ -112,3 +118,244 @@ def test_dispatch_record_defaults():
assert r.label_applied is False
assert r.comment_posted is False
assert r.dispatched_at # has a timestamp
# ---------------------------------------------------------------------------
# _get_or_create_label
# ---------------------------------------------------------------------------
_HEADERS = {"Authorization": "token x"}
_BASE_URL = "http://gitea"
_REPO = "org/repo"
def _mock_response(status_code: int, json_data=None):
resp = MagicMock()
resp.status_code = status_code
resp.json.return_value = json_data or {}
return resp
@pytest.mark.asyncio
async def test_get_or_create_label_finds_existing():
    """Returns the ID of an existing label without creating it."""
    gitea = AsyncMock()
    gitea.get.return_value = _mock_response(
        200, [{"name": "claude-ready", "id": 42}, {"name": "other", "id": 7}]
    )
    label_id = await _get_or_create_label(gitea, _BASE_URL, _HEADERS, _REPO, "claude-ready")
    assert label_id == 42
    gitea.post.assert_not_called()
@pytest.mark.asyncio
async def test_get_or_create_label_creates_when_missing():
    """Creates the label when it doesn't exist in the list."""
    gitea = AsyncMock()
    gitea.get.return_value = _mock_response(200, [])  # no labels exist yet
    gitea.post.return_value = _mock_response(201, {"id": 99})  # creation succeeds
    label_id = await _get_or_create_label(gitea, _BASE_URL, _HEADERS, _REPO, "claude-ready")
    assert label_id == 99
    gitea.post.assert_called_once()
@pytest.mark.asyncio
async def test_get_or_create_label_returns_none_on_get_error():
    """Returns None if the GET raises an exception."""
    gitea = AsyncMock()
    gitea.get.side_effect = Exception("network error")
    assert await _get_or_create_label(gitea, _BASE_URL, _HEADERS, _REPO, "claude-ready") is None
@pytest.mark.asyncio
async def test_get_or_create_label_returns_none_on_create_error():
    """Returns None if POST raises an exception."""
    gitea = AsyncMock()
    gitea.get.return_value = _mock_response(200, [])  # label missing → must create
    gitea.post.side_effect = Exception("post failed")
    assert await _get_or_create_label(gitea, _BASE_URL, _HEADERS, _REPO, "claude-ready") is None
@pytest.mark.asyncio
async def test_get_or_create_label_uses_default_color_for_unknown():
    """Unknown label name uses '#cccccc' fallback color."""
    gitea = AsyncMock()
    gitea.get.return_value = _mock_response(200, [])
    gitea.post.return_value = _mock_response(201, {"id": 5})
    await _get_or_create_label(gitea, _BASE_URL, _HEADERS, _REPO, "unknown-label")
    # Inspect the creation payload rather than the return value.
    payload = gitea.post.call_args.kwargs["json"]
    assert payload["color"] == "#cccccc"
# ---------------------------------------------------------------------------
# _apply_label_to_issue
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_apply_label_to_issue_success():
    """Returns True when label is found and applied."""
    gitea = AsyncMock()
    gitea.get.return_value = _mock_response(200, [{"name": "claude-ready", "id": 10}])
    gitea.post.return_value = _mock_response(201)
    applied = await _apply_label_to_issue(gitea, _BASE_URL, _HEADERS, _REPO, 42, "claude-ready")
    assert applied is True
@pytest.mark.asyncio
async def test_apply_label_to_issue_returns_false_when_no_label_id():
    """Returns False when label ID cannot be obtained."""
    gitea = AsyncMock()
    gitea.get.side_effect = Exception("unavailable")  # label lookup fails
    applied = await _apply_label_to_issue(gitea, _BASE_URL, _HEADERS, _REPO, 42, "claude-ready")
    assert applied is False
@pytest.mark.asyncio
async def test_apply_label_to_issue_returns_false_on_bad_status():
    """Returns False when the apply POST returns a non-2xx status."""
    gitea = AsyncMock()
    gitea.get.return_value = _mock_response(200, [{"name": "claude-ready", "id": 10}])
    gitea.post.return_value = _mock_response(403)  # forbidden → apply fails
    applied = await _apply_label_to_issue(gitea, _BASE_URL, _HEADERS, _REPO, 42, "claude-ready")
    assert applied is False
# ---------------------------------------------------------------------------
# _post_dispatch_comment
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_post_dispatch_comment_success():
    """Returns True on successful comment post."""
    gitea = AsyncMock()
    gitea.post.return_value = _mock_response(201)
    issue = _make_triaged(7, "Some issue", AgentTarget.CLAUDE, priority=75)
    posted = await _post_dispatch_comment(gitea, _BASE_URL, _HEADERS, _REPO, issue, "claude-ready")
    assert posted is True
    # The comment body must mention the agent, the label, and the priority.
    body = gitea.post.call_args.kwargs["json"]["body"]
    for fragment in ("Claude", "claude-ready", "75"):
        assert fragment in body
@pytest.mark.asyncio
async def test_post_dispatch_comment_failure():
    """Returns False when comment POST returns a non-2xx status."""
    gitea = AsyncMock()
    gitea.post.return_value = _mock_response(500)
    issue = _make_triaged(8, "Other issue", AgentTarget.KIMI)
    posted = await _post_dispatch_comment(gitea, _BASE_URL, _HEADERS, _REPO, issue, "kimi-ready")
    assert posted is False
# ---------------------------------------------------------------------------
# _perform_gitea_dispatch — settings-level gate
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_perform_gitea_dispatch_skips_when_disabled():
    """Does not call Gitea when gitea_enabled is False."""
    import config
    from timmy.vassal.dispatch import _perform_gitea_dispatch

    disabled_settings = SimpleNamespace(gitea_enabled=False, gitea_token="tok")
    with patch.object(config, "settings", disabled_settings):
        issue = _make_triaged(9, "Disabled", AgentTarget.CLAUDE)
        record = DispatchRecord(
            issue_number=9,
            issue_title="Disabled",
            agent=AgentTarget.CLAUDE,
            rationale="r",
        )
        await _perform_gitea_dispatch(issue, record)
    # Neither side effect should have been recorded.
    assert record.label_applied is False
    assert record.comment_posted is False
@pytest.mark.asyncio
async def test_perform_gitea_dispatch_skips_when_no_token():
    """Does not call Gitea when gitea_token is empty."""
    import config
    from timmy.vassal.dispatch import _perform_gitea_dispatch

    tokenless_settings = SimpleNamespace(gitea_enabled=True, gitea_token="")
    with patch.object(config, "settings", tokenless_settings):
        issue = _make_triaged(10, "No token", AgentTarget.CLAUDE)
        record = DispatchRecord(
            issue_number=10,
            issue_title="No token",
            agent=AgentTarget.CLAUDE,
            rationale="r",
        )
        await _perform_gitea_dispatch(issue, record)
    assert record.label_applied is False
@pytest.mark.asyncio
async def test_perform_gitea_dispatch_updates_record():
    """Record is mutated to reflect label/comment success."""
    import config
    from timmy.vassal.dispatch import _perform_gitea_dispatch

    full_settings = SimpleNamespace(
        gitea_enabled=True,
        gitea_token="tok",
        gitea_url="http://gitea",
        gitea_repo="org/repo",
    )
    gitea = AsyncMock()
    # Label listing is empty, so the dispatcher must create the label,
    # then apply it, then post the dispatch comment — three POSTs in order.
    gitea.get.return_value = _mock_response(200, [])
    gitea.post.side_effect = [
        _mock_response(201, {"id": 1}),  # create label
        _mock_response(201),             # apply label
        _mock_response(201),             # post comment
    ]
    with (
        patch.object(config, "settings", full_settings),
        patch("httpx.AsyncClient") as client_cls,
    ):
        client_cls.return_value.__aenter__ = AsyncMock(return_value=gitea)
        client_cls.return_value.__aexit__ = AsyncMock(return_value=False)
        issue = _make_triaged(11, "Full dispatch", AgentTarget.CLAUDE)
        record = DispatchRecord(
            issue_number=11,
            issue_title="Full dispatch",
            agent=AgentTarget.CLAUDE,
            rationale="r",
        )
        await _perform_gitea_dispatch(issue, record)
    assert record.label_applied is True
    assert record.comment_posted is True