forked from Rockachopa/Timmy-time-dashboard
Compare commits
1 Commits
kimi/issue
...
kimi/issue
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
57f4f37a9b |
@@ -54,6 +54,19 @@ providers:
|
||||
context_window: 2048
|
||||
capabilities: [text, vision, streaming]
|
||||
|
||||
# Secondary: Local AirLLM (if installed)
|
||||
- name: airllm-local
|
||||
type: airllm
|
||||
enabled: false # Enable if pip install airllm
|
||||
priority: 2
|
||||
models:
|
||||
- name: 70b
|
||||
default: true
|
||||
capabilities: [text, tools, json, streaming]
|
||||
- name: 8b
|
||||
capabilities: [text, tools, json, streaming]
|
||||
- name: 405b
|
||||
capabilities: [text, tools, json, streaming]
|
||||
|
||||
# Tertiary: OpenAI (if API key available)
|
||||
- name: openai-backup
|
||||
|
||||
@@ -94,17 +94,12 @@ def extract_cycle_number(title: str) -> int | None:
|
||||
return int(m.group(1)) if m else None
|
||||
|
||||
|
||||
def extract_issue_number(title: str, body: str, pr_number: int | None = None) -> int | None:
|
||||
"""Extract the issue number from PR body/title, ignoring the PR number itself.
|
||||
|
||||
Gitea appends "(#N)" to PR titles where N is the PR number — skip that
|
||||
so we don't confuse it with the linked issue.
|
||||
"""
|
||||
def extract_issue_number(title: str, body: str) -> int | None:
|
||||
# Try body first (usually has "closes #N")
|
||||
for text in [body or "", title]:
|
||||
for m in ISSUE_RE.finditer(text):
|
||||
num = int(m.group(1))
|
||||
if num != pr_number:
|
||||
return num
|
||||
m = ISSUE_RE.search(text)
|
||||
if m:
|
||||
return int(m.group(1))
|
||||
return None
|
||||
|
||||
|
||||
@@ -145,7 +140,7 @@ def main():
|
||||
else:
|
||||
cycle_counter = max(cycle_counter, cycle)
|
||||
|
||||
issue = extract_issue_number(title, body, pr_number=pr_num)
|
||||
issue = extract_issue_number(title, body)
|
||||
issue_type = classify_pr(title, body)
|
||||
duration = estimate_duration(pr)
|
||||
diff = get_pr_diff_stats(token, pr_num)
|
||||
|
||||
@@ -4,26 +4,11 @@
|
||||
Called after each cycle completes (success or failure).
|
||||
Appends a structured entry to .loop/retro/cycles.jsonl.
|
||||
|
||||
EPOCH NOTATION (turnover system):
|
||||
Each cycle carries a symbolic epoch tag alongside the raw integer:
|
||||
|
||||
⟳WW.D:NNN
|
||||
|
||||
⟳ turnover glyph — marks epoch-aware cycles
|
||||
WW ISO week-of-year (01–53)
|
||||
D ISO weekday (1=Mon … 7=Sun)
|
||||
NNN daily cycle counter, zero-padded, resets at midnight UTC
|
||||
|
||||
Example: ⟳12.3:042 — Week 12, Wednesday, 42nd cycle of the day.
|
||||
|
||||
The raw `cycle` integer is preserved for backward compatibility.
|
||||
The `epoch` field carries the symbolic notation.
|
||||
|
||||
SUCCESS DEFINITION:
|
||||
A cycle is only "success" if BOTH conditions are met:
|
||||
1. The hermes process exited cleanly (exit code 0)
|
||||
2. Main is green (smoke test passes on main after merge)
|
||||
|
||||
|
||||
A cycle that merges a PR but leaves main red is a FAILURE.
|
||||
The --main-green flag records the smoke test result.
|
||||
|
||||
@@ -44,8 +29,6 @@ from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
@@ -53,68 +36,10 @@ from pathlib import Path
|
||||
REPO_ROOT = Path(__file__).resolve().parent.parent
|
||||
RETRO_FILE = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
|
||||
SUMMARY_FILE = REPO_ROOT / ".loop" / "retro" / "summary.json"
|
||||
EPOCH_COUNTER_FILE = REPO_ROOT / ".loop" / "retro" / ".epoch_counter"
|
||||
|
||||
# How many recent entries to include in rolling summary
|
||||
SUMMARY_WINDOW = 50
|
||||
|
||||
# Branch patterns that encode an issue number, e.g. kimi/issue-492
|
||||
BRANCH_ISSUE_RE = re.compile(r"issue[/-](\d+)", re.IGNORECASE)
|
||||
|
||||
|
||||
def detect_issue_from_branch() -> int | None:
|
||||
"""Try to extract an issue number from the current git branch name."""
|
||||
try:
|
||||
branch = subprocess.check_output(
|
||||
["git", "rev-parse", "--abbrev-ref", "HEAD"],
|
||||
stderr=subprocess.DEVNULL,
|
||||
text=True,
|
||||
).strip()
|
||||
except (subprocess.CalledProcessError, FileNotFoundError):
|
||||
return None
|
||||
m = BRANCH_ISSUE_RE.search(branch)
|
||||
return int(m.group(1)) if m else None
|
||||
|
||||
|
||||
# ── Epoch turnover ────────────────────────────────────────────────────────
|
||||
|
||||
def _epoch_tag(now: datetime | None = None) -> tuple[str, dict]:
|
||||
"""Generate the symbolic epoch tag and advance the daily counter.
|
||||
|
||||
Returns (epoch_string, epoch_parts) where epoch_parts is a dict with
|
||||
week, weekday, daily_n for structured storage.
|
||||
|
||||
The daily counter persists in .epoch_counter as a two-line file:
|
||||
line 1: ISO date (YYYY-MM-DD) of the current epoch day
|
||||
line 2: integer count
|
||||
When the date rolls over, the counter resets to 1.
|
||||
"""
|
||||
if now is None:
|
||||
now = datetime.now(timezone.utc)
|
||||
|
||||
iso_cal = now.isocalendar() # (year, week, weekday)
|
||||
week = iso_cal[1]
|
||||
weekday = iso_cal[2]
|
||||
today_str = now.strftime("%Y-%m-%d")
|
||||
|
||||
# Read / reset daily counter
|
||||
daily_n = 1
|
||||
EPOCH_COUNTER_FILE.parent.mkdir(parents=True, exist_ok=True)
|
||||
if EPOCH_COUNTER_FILE.exists():
|
||||
try:
|
||||
lines = EPOCH_COUNTER_FILE.read_text().strip().splitlines()
|
||||
if len(lines) == 2 and lines[0] == today_str:
|
||||
daily_n = int(lines[1]) + 1
|
||||
except (ValueError, IndexError):
|
||||
pass # corrupt file — reset
|
||||
|
||||
# Persist
|
||||
EPOCH_COUNTER_FILE.write_text(f"{today_str}\n{daily_n}\n")
|
||||
|
||||
tag = f"\u27f3{week:02d}.{weekday}:{daily_n:03d}"
|
||||
parts = {"week": week, "weekday": weekday, "daily_n": daily_n}
|
||||
return tag, parts
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
p = argparse.ArgumentParser(description="Log a cycle retrospective")
|
||||
@@ -198,30 +123,8 @@ def update_summary() -> None:
|
||||
issue_failures[e["issue"]] = issue_failures.get(e["issue"], 0) + 1
|
||||
quarantine_candidates = {k: v for k, v in issue_failures.items() if v >= 2}
|
||||
|
||||
# Epoch turnover stats — cycles per week/day from epoch-tagged entries
|
||||
epoch_entries = [e for e in recent if e.get("epoch")]
|
||||
by_week: dict[int, int] = {}
|
||||
by_weekday: dict[int, int] = {}
|
||||
for e in epoch_entries:
|
||||
w = e.get("epoch_week")
|
||||
d = e.get("epoch_weekday")
|
||||
if w is not None:
|
||||
by_week[w] = by_week.get(w, 0) + 1
|
||||
if d is not None:
|
||||
by_weekday[d] = by_weekday.get(d, 0) + 1
|
||||
|
||||
# Current epoch — latest entry's epoch tag
|
||||
current_epoch = epoch_entries[-1].get("epoch", "") if epoch_entries else ""
|
||||
|
||||
# Weekday names for display
|
||||
weekday_glyphs = {1: "Mon", 2: "Tue", 3: "Wed", 4: "Thu",
|
||||
5: "Fri", 6: "Sat", 7: "Sun"}
|
||||
by_weekday_named = {weekday_glyphs.get(k, str(k)): v
|
||||
for k, v in sorted(by_weekday.items())}
|
||||
|
||||
summary = {
|
||||
"updated_at": datetime.now(timezone.utc).isoformat(),
|
||||
"current_epoch": current_epoch,
|
||||
"window": len(recent),
|
||||
"measured_cycles": len(measured),
|
||||
"total_cycles": len(entries),
|
||||
@@ -233,12 +136,9 @@ def update_summary() -> None:
|
||||
"total_lines_removed": sum(e.get("lines_removed", 0) for e in recent),
|
||||
"total_prs_merged": sum(1 for e in recent if e.get("pr")),
|
||||
"by_type": type_stats,
|
||||
"by_week": dict(sorted(by_week.items())),
|
||||
"by_weekday": by_weekday_named,
|
||||
"quarantine_candidates": quarantine_candidates,
|
||||
"recent_failures": [
|
||||
{"cycle": e["cycle"], "epoch": e.get("epoch", ""),
|
||||
"issue": e.get("issue"), "reason": e.get("reason", "")}
|
||||
{"cycle": e["cycle"], "issue": e.get("issue"), "reason": e.get("reason", "")}
|
||||
for e in failures[-5:]
|
||||
],
|
||||
}
|
||||
@@ -249,10 +149,6 @@ def update_summary() -> None:
|
||||
def main() -> None:
|
||||
args = parse_args()
|
||||
|
||||
# Auto-detect issue from branch when not explicitly provided
|
||||
if args.issue is None:
|
||||
args.issue = detect_issue_from_branch()
|
||||
|
||||
# Reject idle cycles — no issue and no duration means nothing happened
|
||||
if not args.issue and args.duration == 0:
|
||||
print(f"[retro] Cycle {args.cycle} skipped — idle (no issue, no duration)")
|
||||
@@ -261,17 +157,9 @@ def main() -> None:
|
||||
# A cycle is only truly successful if hermes exited clean AND main is green
|
||||
truly_success = args.success and args.main_green
|
||||
|
||||
# Generate epoch turnover tag
|
||||
now = datetime.now(timezone.utc)
|
||||
epoch_tag, epoch_parts = _epoch_tag(now)
|
||||
|
||||
entry = {
|
||||
"timestamp": now.isoformat(),
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"cycle": args.cycle,
|
||||
"epoch": epoch_tag,
|
||||
"epoch_week": epoch_parts["week"],
|
||||
"epoch_weekday": epoch_parts["weekday"],
|
||||
"epoch_daily_n": epoch_parts["daily_n"],
|
||||
"issue": args.issue,
|
||||
"type": args.type,
|
||||
"success": truly_success,
|
||||
@@ -296,7 +184,7 @@ def main() -> None:
|
||||
update_summary()
|
||||
|
||||
status = "✓ SUCCESS" if args.success else "✗ FAILURE"
|
||||
print(f"[retro] {epoch_tag} Cycle {args.cycle} {status}", end="")
|
||||
print(f"[retro] Cycle {args.cycle} {status}", end="")
|
||||
if args.issue:
|
||||
print(f" (#{args.issue} {args.type})", end="")
|
||||
if args.duration:
|
||||
|
||||
@@ -1,407 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Loop introspection — the self-improvement engine.
|
||||
|
||||
Analyzes retro data across time windows to detect trends, extract patterns,
|
||||
and produce structured recommendations. Output is consumed by deep_triage
|
||||
and injected into the loop prompt context.
|
||||
|
||||
This is the piece that closes the feedback loop:
|
||||
cycle_retro → introspect → deep_triage → loop behavior changes
|
||||
|
||||
Run: python3 scripts/loop_introspect.py
|
||||
Output: .loop/retro/insights.json (structured insights + recommendations)
|
||||
Prints human-readable summary to stdout.
|
||||
|
||||
Called by: deep_triage.sh (before the LLM triage), timmy-loop.sh (every 50 cycles)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from pathlib import Path
|
||||
|
||||
REPO_ROOT = Path(__file__).resolve().parent.parent
|
||||
CYCLES_FILE = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
|
||||
DEEP_TRIAGE_FILE = REPO_ROOT / ".loop" / "retro" / "deep-triage.jsonl"
|
||||
TRIAGE_FILE = REPO_ROOT / ".loop" / "retro" / "triage.jsonl"
|
||||
QUARANTINE_FILE = REPO_ROOT / ".loop" / "quarantine.json"
|
||||
INSIGHTS_FILE = REPO_ROOT / ".loop" / "retro" / "insights.json"
|
||||
|
||||
# ── Helpers ──────────────────────────────────────────────────────────────
|
||||
|
||||
def load_jsonl(path: Path) -> list[dict]:
|
||||
"""Load a JSONL file, skipping bad lines."""
|
||||
if not path.exists():
|
||||
return []
|
||||
entries = []
|
||||
for line in path.read_text().strip().splitlines():
|
||||
try:
|
||||
entries.append(json.loads(line))
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
continue
|
||||
return entries
|
||||
|
||||
|
||||
def parse_ts(ts_str: str) -> datetime | None:
|
||||
"""Parse an ISO timestamp, tolerating missing tz."""
|
||||
if not ts_str:
|
||||
return None
|
||||
try:
|
||||
dt = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
|
||||
if dt.tzinfo is None:
|
||||
dt = dt.replace(tzinfo=timezone.utc)
|
||||
return dt
|
||||
except (ValueError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
def window(entries: list[dict], days: int) -> list[dict]:
|
||||
"""Filter entries to the last N days."""
|
||||
cutoff = datetime.now(timezone.utc) - timedelta(days=days)
|
||||
result = []
|
||||
for e in entries:
|
||||
ts = parse_ts(e.get("timestamp", ""))
|
||||
if ts and ts >= cutoff:
|
||||
result.append(e)
|
||||
return result
|
||||
|
||||
|
||||
# ── Analysis functions ───────────────────────────────────────────────────
|
||||
|
||||
def compute_trends(cycles: list[dict]) -> dict:
|
||||
"""Compare recent window (last 7d) vs older window (7-14d ago)."""
|
||||
recent = window(cycles, 7)
|
||||
older = window(cycles, 14)
|
||||
# Remove recent from older to get the 7-14d window
|
||||
recent_set = {(e.get("cycle"), e.get("timestamp")) for e in recent}
|
||||
older = [e for e in older if (e.get("cycle"), e.get("timestamp")) not in recent_set]
|
||||
|
||||
def stats(entries):
|
||||
if not entries:
|
||||
return {"count": 0, "success_rate": None, "avg_duration": None,
|
||||
"lines_net": 0, "prs_merged": 0}
|
||||
successes = sum(1 for e in entries if e.get("success"))
|
||||
durations = [e["duration"] for e in entries if e.get("duration", 0) > 0]
|
||||
return {
|
||||
"count": len(entries),
|
||||
"success_rate": round(successes / len(entries), 3) if entries else None,
|
||||
"avg_duration": round(sum(durations) / len(durations)) if durations else None,
|
||||
"lines_net": sum(e.get("lines_added", 0) - e.get("lines_removed", 0) for e in entries),
|
||||
"prs_merged": sum(1 for e in entries if e.get("pr")),
|
||||
}
|
||||
|
||||
recent_stats = stats(recent)
|
||||
older_stats = stats(older)
|
||||
|
||||
trend = {
|
||||
"recent_7d": recent_stats,
|
||||
"previous_7d": older_stats,
|
||||
"velocity_change": None,
|
||||
"success_rate_change": None,
|
||||
"duration_change": None,
|
||||
}
|
||||
|
||||
if recent_stats["count"] and older_stats["count"]:
|
||||
trend["velocity_change"] = recent_stats["count"] - older_stats["count"]
|
||||
if recent_stats["success_rate"] is not None and older_stats["success_rate"] is not None:
|
||||
trend["success_rate_change"] = round(
|
||||
recent_stats["success_rate"] - older_stats["success_rate"], 3
|
||||
)
|
||||
if recent_stats["avg_duration"] is not None and older_stats["avg_duration"] is not None:
|
||||
trend["duration_change"] = recent_stats["avg_duration"] - older_stats["avg_duration"]
|
||||
|
||||
return trend
|
||||
|
||||
|
||||
def type_analysis(cycles: list[dict]) -> dict:
|
||||
"""Per-type success rates and durations."""
|
||||
by_type: dict[str, list[dict]] = defaultdict(list)
|
||||
for c in cycles:
|
||||
by_type[c.get("type", "unknown")].append(c)
|
||||
|
||||
result = {}
|
||||
for t, entries in by_type.items():
|
||||
durations = [e["duration"] for e in entries if e.get("duration", 0) > 0]
|
||||
successes = sum(1 for e in entries if e.get("success"))
|
||||
result[t] = {
|
||||
"count": len(entries),
|
||||
"success_rate": round(successes / len(entries), 3) if entries else 0,
|
||||
"avg_duration": round(sum(durations) / len(durations)) if durations else 0,
|
||||
"max_duration": max(durations) if durations else 0,
|
||||
}
|
||||
return result
|
||||
|
||||
|
||||
def repeat_failures(cycles: list[dict]) -> list[dict]:
|
||||
"""Issues that have failed multiple times — quarantine candidates."""
|
||||
failures: dict[int, list] = defaultdict(list)
|
||||
for c in cycles:
|
||||
if not c.get("success") and c.get("issue"):
|
||||
failures[c["issue"]].append({
|
||||
"cycle": c.get("cycle"),
|
||||
"reason": c.get("reason", ""),
|
||||
"duration": c.get("duration", 0),
|
||||
})
|
||||
# Only issues with 2+ failures
|
||||
return [
|
||||
{"issue": k, "failure_count": len(v), "attempts": v}
|
||||
for k, v in sorted(failures.items(), key=lambda x: -len(x[1]))
|
||||
if len(v) >= 2
|
||||
]
|
||||
|
||||
|
||||
def duration_outliers(cycles: list[dict], threshold_multiple: float = 3.0) -> list[dict]:
|
||||
"""Cycles that took way longer than average — something went wrong."""
|
||||
durations = [c["duration"] for c in cycles if c.get("duration", 0) > 0]
|
||||
if len(durations) < 5:
|
||||
return []
|
||||
avg = sum(durations) / len(durations)
|
||||
threshold = avg * threshold_multiple
|
||||
|
||||
outliers = []
|
||||
for c in cycles:
|
||||
dur = c.get("duration", 0)
|
||||
if dur > threshold:
|
||||
outliers.append({
|
||||
"cycle": c.get("cycle"),
|
||||
"issue": c.get("issue"),
|
||||
"type": c.get("type"),
|
||||
"duration": dur,
|
||||
"avg_duration": round(avg),
|
||||
"multiple": round(dur / avg, 1) if avg > 0 else 0,
|
||||
"reason": c.get("reason", ""),
|
||||
})
|
||||
return outliers
|
||||
|
||||
|
||||
def triage_effectiveness(deep_triages: list[dict]) -> dict:
|
||||
"""How well is the deep triage performing?"""
|
||||
if not deep_triages:
|
||||
return {"runs": 0, "note": "No deep triage data yet"}
|
||||
|
||||
total_reviewed = sum(d.get("issues_reviewed", 0) for d in deep_triages)
|
||||
total_refined = sum(len(d.get("issues_refined", [])) for d in deep_triages)
|
||||
total_created = sum(len(d.get("issues_created", [])) for d in deep_triages)
|
||||
total_closed = sum(len(d.get("issues_closed", [])) for d in deep_triages)
|
||||
timmy_available = sum(1 for d in deep_triages if d.get("timmy_available"))
|
||||
|
||||
# Extract Timmy's feedback themes
|
||||
timmy_themes = []
|
||||
for d in deep_triages:
|
||||
fb = d.get("timmy_feedback", "")
|
||||
if fb:
|
||||
timmy_themes.append(fb[:200])
|
||||
|
||||
return {
|
||||
"runs": len(deep_triages),
|
||||
"total_reviewed": total_reviewed,
|
||||
"total_refined": total_refined,
|
||||
"total_created": total_created,
|
||||
"total_closed": total_closed,
|
||||
"timmy_consultation_rate": round(timmy_available / len(deep_triages), 2),
|
||||
"timmy_recent_feedback": timmy_themes[-1] if timmy_themes else "",
|
||||
"timmy_feedback_history": timmy_themes,
|
||||
}
|
||||
|
||||
|
||||
def generate_recommendations(
|
||||
trends: dict,
|
||||
types: dict,
|
||||
repeats: list,
|
||||
outliers: list,
|
||||
triage_eff: dict,
|
||||
) -> list[dict]:
|
||||
"""Produce actionable recommendations from the analysis."""
|
||||
recs = []
|
||||
|
||||
# 1. Success rate declining?
|
||||
src = trends.get("success_rate_change")
|
||||
if src is not None and src < -0.1:
|
||||
recs.append({
|
||||
"severity": "high",
|
||||
"category": "reliability",
|
||||
"finding": f"Success rate dropped {abs(src)*100:.0f}pp in the last 7 days",
|
||||
"recommendation": "Review recent failures. Are issues poorly scoped? "
|
||||
"Is main unstable? Check if triage is producing bad work items.",
|
||||
})
|
||||
|
||||
# 2. Velocity dropping?
|
||||
vc = trends.get("velocity_change")
|
||||
if vc is not None and vc < -5:
|
||||
recs.append({
|
||||
"severity": "medium",
|
||||
"category": "throughput",
|
||||
"finding": f"Velocity dropped by {abs(vc)} cycles vs previous week",
|
||||
"recommendation": "Check for loop stalls, long-running cycles, or queue starvation.",
|
||||
})
|
||||
|
||||
# 3. Duration creep?
|
||||
dc = trends.get("duration_change")
|
||||
if dc is not None and dc > 120: # 2+ minutes longer
|
||||
recs.append({
|
||||
"severity": "medium",
|
||||
"category": "efficiency",
|
||||
"finding": f"Average cycle duration increased by {dc}s vs previous week",
|
||||
"recommendation": "Issues may be growing in scope. Enforce tighter decomposition "
|
||||
"in deep triage. Check if tests are getting slower.",
|
||||
})
|
||||
|
||||
# 4. Type-specific problems
|
||||
for t, info in types.items():
|
||||
if info["count"] >= 3 and info["success_rate"] < 0.5:
|
||||
recs.append({
|
||||
"severity": "high",
|
||||
"category": "type_reliability",
|
||||
"finding": f"'{t}' issues fail {(1-info['success_rate'])*100:.0f}% of the time "
|
||||
f"({info['count']} attempts)",
|
||||
"recommendation": f"'{t}' issues need better scoping or different approach. "
|
||||
f"Consider: tighter acceptance criteria, smaller scope, "
|
||||
f"or delegating to Kimi with more context.",
|
||||
})
|
||||
if info["avg_duration"] > 600 and info["count"] >= 3: # >10 min avg
|
||||
recs.append({
|
||||
"severity": "medium",
|
||||
"category": "type_efficiency",
|
||||
"finding": f"'{t}' issues average {info['avg_duration']//60}m{info['avg_duration']%60}s "
|
||||
f"(max {info['max_duration']//60}m)",
|
||||
"recommendation": f"Break '{t}' issues into smaller pieces. Target <5 min per cycle.",
|
||||
})
|
||||
|
||||
# 5. Repeat failures
|
||||
for rf in repeats[:3]:
|
||||
recs.append({
|
||||
"severity": "high",
|
||||
"category": "repeat_failure",
|
||||
"finding": f"Issue #{rf['issue']} has failed {rf['failure_count']} times",
|
||||
"recommendation": "Quarantine or rewrite this issue. Repeated failure = "
|
||||
"bad scope or missing prerequisite.",
|
||||
})
|
||||
|
||||
# 6. Outliers
|
||||
if len(outliers) > 2:
|
||||
recs.append({
|
||||
"severity": "medium",
|
||||
"category": "outliers",
|
||||
"finding": f"{len(outliers)} cycles took {outliers[0].get('multiple', '?')}x+ "
|
||||
f"longer than average",
|
||||
"recommendation": "Long cycles waste resources. Add timeout enforcement or "
|
||||
"break complex issues earlier.",
|
||||
})
|
||||
|
||||
# 7. Code growth
|
||||
recent = trends.get("recent_7d", {})
|
||||
net = recent.get("lines_net", 0)
|
||||
if net > 500:
|
||||
recs.append({
|
||||
"severity": "low",
|
||||
"category": "code_health",
|
||||
"finding": f"Net +{net} lines added in the last 7 days",
|
||||
"recommendation": "Lines of code is a liability. Balance feature work with "
|
||||
"refactoring. Target net-zero or negative line growth.",
|
||||
})
|
||||
|
||||
# 8. Triage health
|
||||
if triage_eff.get("runs", 0) == 0:
|
||||
recs.append({
|
||||
"severity": "high",
|
||||
"category": "triage",
|
||||
"finding": "Deep triage has never run",
|
||||
"recommendation": "Enable deep triage (every 20 cycles). The loop needs "
|
||||
"LLM-driven issue refinement to stay effective.",
|
||||
})
|
||||
|
||||
# No recommendations = things are healthy
|
||||
if not recs:
|
||||
recs.append({
|
||||
"severity": "info",
|
||||
"category": "health",
|
||||
"finding": "No significant issues detected",
|
||||
"recommendation": "System is healthy. Continue current patterns.",
|
||||
})
|
||||
|
||||
return recs
|
||||
|
||||
|
||||
# ── Main ─────────────────────────────────────────────────────────────────
|
||||
|
||||
def main() -> None:
|
||||
cycles = load_jsonl(CYCLES_FILE)
|
||||
deep_triages = load_jsonl(DEEP_TRIAGE_FILE)
|
||||
|
||||
if not cycles:
|
||||
print("[introspect] No cycle data found. Nothing to analyze.")
|
||||
return
|
||||
|
||||
# Run all analyses
|
||||
trends = compute_trends(cycles)
|
||||
types = type_analysis(cycles)
|
||||
repeats = repeat_failures(cycles)
|
||||
outliers = duration_outliers(cycles)
|
||||
triage_eff = triage_effectiveness(deep_triages)
|
||||
recommendations = generate_recommendations(trends, types, repeats, outliers, triage_eff)
|
||||
|
||||
insights = {
|
||||
"generated_at": datetime.now(timezone.utc).isoformat(),
|
||||
"total_cycles_analyzed": len(cycles),
|
||||
"trends": trends,
|
||||
"by_type": types,
|
||||
"repeat_failures": repeats[:5],
|
||||
"duration_outliers": outliers[:5],
|
||||
"triage_effectiveness": triage_eff,
|
||||
"recommendations": recommendations,
|
||||
}
|
||||
|
||||
# Write insights
|
||||
INSIGHTS_FILE.parent.mkdir(parents=True, exist_ok=True)
|
||||
INSIGHTS_FILE.write_text(json.dumps(insights, indent=2) + "\n")
|
||||
|
||||
# Current epoch from latest entry
|
||||
latest_epoch = ""
|
||||
for c in reversed(cycles):
|
||||
if c.get("epoch"):
|
||||
latest_epoch = c["epoch"]
|
||||
break
|
||||
|
||||
# Human-readable output
|
||||
header = f"[introspect] Analyzed {len(cycles)} cycles"
|
||||
if latest_epoch:
|
||||
header += f" · current epoch: {latest_epoch}"
|
||||
print(header)
|
||||
|
||||
print(f"\n TRENDS (7d vs previous 7d):")
|
||||
r7 = trends["recent_7d"]
|
||||
p7 = trends["previous_7d"]
|
||||
print(f" Cycles: {r7['count']:>3d} (was {p7['count']})")
|
||||
if r7["success_rate"] is not None:
|
||||
arrow = "↑" if (trends["success_rate_change"] or 0) > 0 else "↓" if (trends["success_rate_change"] or 0) < 0 else "→"
|
||||
print(f" Success rate: {r7['success_rate']*100:>4.0f}% {arrow}")
|
||||
if r7["avg_duration"] is not None:
|
||||
print(f" Avg duration: {r7['avg_duration']//60}m{r7['avg_duration']%60:02d}s")
|
||||
print(f" PRs merged: {r7['prs_merged']:>3d} (was {p7['prs_merged']})")
|
||||
print(f" Lines net: {r7['lines_net']:>+5d}")
|
||||
|
||||
print(f"\n BY TYPE:")
|
||||
for t, info in sorted(types.items(), key=lambda x: -x[1]["count"]):
|
||||
print(f" {t:12s} n={info['count']:>2d} "
|
||||
f"ok={info['success_rate']*100:>3.0f}% "
|
||||
f"avg={info['avg_duration']//60}m{info['avg_duration']%60:02d}s")
|
||||
|
||||
if repeats:
|
||||
print(f"\n REPEAT FAILURES:")
|
||||
for rf in repeats[:3]:
|
||||
print(f" #{rf['issue']} failed {rf['failure_count']}x")
|
||||
|
||||
print(f"\n RECOMMENDATIONS ({len(recommendations)}):")
|
||||
for i, rec in enumerate(recommendations, 1):
|
||||
sev = {"high": "🔴", "medium": "🟡", "low": "🟢", "info": "ℹ️ "}.get(rec["severity"], "?")
|
||||
print(f" {sev} {rec['finding']}")
|
||||
print(f" → {rec['recommendation']}")
|
||||
|
||||
print(f"\n Written to: {INSIGHTS_FILE}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -64,10 +64,17 @@ class Settings(BaseSettings):
|
||||
# Seconds to wait for user confirmation before auto-rejecting.
|
||||
discord_confirm_timeout: int = 120
|
||||
|
||||
# ── Backend selection ────────────────────────────────────────────────────
|
||||
# ── AirLLM / backend selection ───────────────────────────────────────────
|
||||
# "ollama" — always use Ollama (default, safe everywhere)
|
||||
# "auto" — pick best available local backend, fall back to Ollama
|
||||
timmy_model_backend: Literal["ollama", "grok", "claude", "auto"] = "ollama"
|
||||
# "airllm" — always use AirLLM (requires pip install ".[bigbrain]")
|
||||
# "auto" — use AirLLM on Apple Silicon if airllm is installed,
|
||||
# fall back to Ollama otherwise
|
||||
timmy_model_backend: Literal["ollama", "airllm", "grok", "claude", "auto"] = "ollama"
|
||||
|
||||
# AirLLM model size when backend is airllm or auto.
|
||||
# Larger = smarter, but needs more RAM / disk.
|
||||
# 8b ~16 GB | 70b ~140 GB | 405b ~810 GB
|
||||
airllm_model_size: Literal["8b", "70b", "405b"] = "70b"
|
||||
|
||||
# ── Grok (xAI) — opt-in premium cloud backend ────────────────────────
|
||||
# Grok is a premium augmentation layer — local-first ethos preserved.
|
||||
@@ -131,12 +138,7 @@ class Settings(BaseSettings):
|
||||
|
||||
# CORS allowed origins for the web chat interface (Gitea Pages, etc.)
|
||||
# Set CORS_ORIGINS as a comma-separated list, e.g. "http://localhost:3000,https://example.com"
|
||||
cors_origins: list[str] = [
|
||||
"http://localhost:3000",
|
||||
"http://localhost:8000",
|
||||
"http://127.0.0.1:3000",
|
||||
"http://127.0.0.1:8000",
|
||||
]
|
||||
cors_origins: list[str] = ["*"]
|
||||
|
||||
# Trusted hosts for the Host header check (TrustedHostMiddleware).
|
||||
# Set TRUSTED_HOSTS as a comma-separated list. Wildcards supported (e.g. "*.ts.net").
|
||||
@@ -469,19 +471,8 @@ def validate_startup(*, force: bool = False) -> None:
|
||||
", ".join(_missing),
|
||||
)
|
||||
sys.exit(1)
|
||||
if "*" in settings.cors_origins:
|
||||
_startup_logger.error(
|
||||
"PRODUCTION SECURITY ERROR: CORS wildcard '*' is not allowed "
|
||||
"in production. Set CORS_ORIGINS to explicit origins."
|
||||
)
|
||||
sys.exit(1)
|
||||
_startup_logger.info("Production mode: security secrets validated ✓")
|
||||
else:
|
||||
if "*" in settings.cors_origins:
|
||||
_startup_logger.warning(
|
||||
"SEC: CORS_ORIGINS contains wildcard '*' — "
|
||||
"restrict to explicit origins before deploying to production."
|
||||
)
|
||||
if not settings.l402_hmac_secret:
|
||||
_startup_logger.warning(
|
||||
"SEC: L402_HMAC_SECRET is not set — "
|
||||
|
||||
@@ -442,7 +442,7 @@ async def lifespan(app: FastAPI):
|
||||
|
||||
register_error_recorder(get_session_logger().record_error)
|
||||
except Exception:
|
||||
logger.debug("Failed to register error recorder")
|
||||
pass
|
||||
|
||||
logger.info("✓ Dashboard ready for requests")
|
||||
|
||||
@@ -484,14 +484,15 @@ app = FastAPI(
|
||||
|
||||
|
||||
def _get_cors_origins() -> list[str]:
|
||||
"""Get CORS origins from settings, rejecting wildcards in production."""
|
||||
"""Get CORS origins from settings, with sensible defaults."""
|
||||
origins = settings.cors_origins
|
||||
if "*" in origins and not settings.debug:
|
||||
logger.warning(
|
||||
"Wildcard '*' in CORS_ORIGINS stripped in production — "
|
||||
"set explicit origins via CORS_ORIGINS env var"
|
||||
)
|
||||
origins = [o for o in origins if o != "*"]
|
||||
if settings.debug and origins == ["*"]:
|
||||
return [
|
||||
"http://localhost:3000",
|
||||
"http://localhost:8000",
|
||||
"http://127.0.0.1:3000",
|
||||
"http://127.0.0.1:8000",
|
||||
]
|
||||
return origins
|
||||
|
||||
|
||||
|
||||
@@ -100,7 +100,7 @@ class CSRFMiddleware(BaseHTTPMiddleware):
|
||||
...
|
||||
|
||||
Usage:
|
||||
app.add_middleware(CSRFMiddleware, secret=settings.csrf_secret)
|
||||
app.add_middleware(CSRFMiddleware, secret="your-secret-key")
|
||||
|
||||
Attributes:
|
||||
secret: Secret key for token signing (optional, for future use).
|
||||
|
||||
@@ -91,7 +91,7 @@ async def chat_agent(request: Request, message: str = Form(...)):
|
||||
|
||||
thinking_engine.record_user_input()
|
||||
except Exception:
|
||||
logger.debug("Failed to record user input for thinking engine")
|
||||
pass
|
||||
|
||||
timestamp = datetime.now().strftime("%H:%M:%S")
|
||||
response_text = None
|
||||
|
||||
@@ -85,7 +85,7 @@ async def api_chat(request: Request):
|
||||
|
||||
thinking_engine.record_user_input()
|
||||
except Exception:
|
||||
logger.debug("Failed to record user input for thinking engine")
|
||||
pass
|
||||
|
||||
timestamp = datetime.now().strftime("%H:%M:%S")
|
||||
|
||||
|
||||
@@ -166,7 +166,7 @@ async def api_briefing_status():
|
||||
if cached:
|
||||
last_generated = cached.generated_at.isoformat()
|
||||
except Exception:
|
||||
logger.debug("Failed to read briefing cache")
|
||||
pass
|
||||
|
||||
return JSONResponse(
|
||||
{
|
||||
@@ -190,7 +190,6 @@ async def api_memory_status():
|
||||
stats = get_memory_stats()
|
||||
indexed_files = stats.get("total_entries", 0)
|
||||
except Exception:
|
||||
logger.debug("Failed to get memory stats")
|
||||
indexed_files = 0
|
||||
|
||||
return JSONResponse(
|
||||
@@ -216,7 +215,7 @@ async def api_swarm_status():
|
||||
).fetchone()
|
||||
pending_tasks = row["cnt"] if row else 0
|
||||
except Exception:
|
||||
logger.debug("Failed to count pending tasks")
|
||||
pass
|
||||
|
||||
return JSONResponse(
|
||||
{
|
||||
|
||||
@@ -221,7 +221,7 @@ async def _heartbeat(websocket: WebSocket) -> None:
|
||||
await asyncio.sleep(_HEARTBEAT_INTERVAL)
|
||||
await websocket.send_text(json.dumps({"type": "ping"}))
|
||||
except Exception:
|
||||
logger.debug("Heartbeat stopped — connection gone")
|
||||
pass # connection gone — receive loop will clean up
|
||||
|
||||
|
||||
@router.websocket("/ws")
|
||||
@@ -250,7 +250,7 @@ async def world_ws(websocket: WebSocket) -> None:
|
||||
raw = await websocket.receive_text()
|
||||
await _handle_client_message(raw)
|
||||
except Exception:
|
||||
logger.debug("WebSocket receive loop ended")
|
||||
pass
|
||||
finally:
|
||||
ping_task.cancel()
|
||||
if websocket in _ws_clients:
|
||||
@@ -265,7 +265,6 @@ async def _broadcast(message: str) -> None:
|
||||
try:
|
||||
await ws.send_text(message)
|
||||
except Exception:
|
||||
logger.debug("Pruning dead WebSocket client")
|
||||
dead.append(ws)
|
||||
for ws in dead:
|
||||
if ws in _ws_clients:
|
||||
@@ -341,7 +340,7 @@ async def _bark_and_broadcast(visitor_text: str) -> None:
|
||||
|
||||
pip_familiar.on_event("visitor_spoke")
|
||||
except Exception:
|
||||
logger.debug("Pip familiar notification failed (optional)")
|
||||
pass # Pip is optional
|
||||
|
||||
_refresh_ground(visitor_text)
|
||||
_tick_commitments()
|
||||
|
||||
@@ -183,22 +183,6 @@ async def run_health_check(
|
||||
}
|
||||
|
||||
|
||||
@router.post("/reload")
|
||||
async def reload_config(
|
||||
cascade: Annotated[CascadeRouter, Depends(get_cascade_router)],
|
||||
) -> dict[str, Any]:
|
||||
"""Hot-reload providers.yaml without restart.
|
||||
|
||||
Preserves circuit breaker state and metrics for existing providers.
|
||||
"""
|
||||
try:
|
||||
result = cascade.reload_config()
|
||||
return {"status": "ok", **result}
|
||||
except Exception as exc:
|
||||
logger.error("Config reload failed: %s", exc)
|
||||
raise HTTPException(status_code=500, detail=f"Reload failed: {exc}") from exc
|
||||
|
||||
|
||||
@router.get("/config")
|
||||
async def get_config(
|
||||
cascade: Annotated[CascadeRouter, Depends(get_cascade_router)],
|
||||
|
||||
@@ -18,8 +18,6 @@ from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from config import settings
|
||||
|
||||
try:
|
||||
import yaml
|
||||
except ImportError:
|
||||
@@ -102,7 +100,7 @@ class Provider:
|
||||
"""LLM provider configuration and state."""
|
||||
|
||||
name: str
|
||||
type: str # ollama, openai, anthropic
|
||||
type: str # ollama, openai, anthropic, airllm
|
||||
enabled: bool
|
||||
priority: int
|
||||
url: str | None = None
|
||||
@@ -303,13 +301,22 @@ class CascadeRouter:
|
||||
# Can't check without requests, assume available
|
||||
return True
|
||||
try:
|
||||
url = provider.url or settings.ollama_url
|
||||
url = provider.url or "http://localhost:11434"
|
||||
response = requests.get(f"{url}/api/tags", timeout=5)
|
||||
return response.status_code == 200
|
||||
except Exception as exc:
|
||||
logger.debug("Ollama provider check error: %s", exc)
|
||||
return False
|
||||
|
||||
elif provider.type == "airllm":
|
||||
# Check if airllm is installed
|
||||
try:
|
||||
import importlib.util
|
||||
|
||||
return importlib.util.find_spec("airllm") is not None
|
||||
except (ImportError, ModuleNotFoundError):
|
||||
return False
|
||||
|
||||
elif provider.type in ("openai", "anthropic", "grok"):
|
||||
# Check if API key is set
|
||||
return provider.api_key is not None and provider.api_key != ""
|
||||
@@ -574,7 +581,7 @@ class CascadeRouter:
|
||||
"""Call Ollama API with multi-modal support."""
|
||||
import aiohttp
|
||||
|
||||
url = f"{provider.url or settings.ollama_url}/api/chat"
|
||||
url = f"{provider.url}/api/chat"
|
||||
|
||||
# Transform messages for Ollama format (including images)
|
||||
transformed_messages = self._transform_messages_for_ollama(messages)
|
||||
@@ -808,66 +815,6 @@ class CascadeRouter:
|
||||
provider.status = ProviderStatus.HEALTHY
|
||||
logger.info("Circuit breaker CLOSED for %s", provider.name)
|
||||
|
||||
def reload_config(self) -> dict:
|
||||
"""Hot-reload providers.yaml, preserving runtime state.
|
||||
|
||||
Re-reads the config file, rebuilds the provider list, and
|
||||
preserves circuit breaker state and metrics for providers
|
||||
that still exist after reload.
|
||||
|
||||
Returns:
|
||||
Summary dict with added/removed/preserved counts.
|
||||
"""
|
||||
# Snapshot current runtime state keyed by provider name
|
||||
old_state: dict[
|
||||
str, tuple[ProviderMetrics, CircuitState, float | None, int, ProviderStatus]
|
||||
] = {}
|
||||
for p in self.providers:
|
||||
old_state[p.name] = (
|
||||
p.metrics,
|
||||
p.circuit_state,
|
||||
p.circuit_opened_at,
|
||||
p.half_open_calls,
|
||||
p.status,
|
||||
)
|
||||
|
||||
old_names = set(old_state.keys())
|
||||
|
||||
# Reload from disk
|
||||
self.providers = []
|
||||
self._load_config()
|
||||
|
||||
# Restore preserved state
|
||||
new_names = {p.name for p in self.providers}
|
||||
preserved = 0
|
||||
for p in self.providers:
|
||||
if p.name in old_state:
|
||||
metrics, circuit, opened_at, half_open, status = old_state[p.name]
|
||||
p.metrics = metrics
|
||||
p.circuit_state = circuit
|
||||
p.circuit_opened_at = opened_at
|
||||
p.half_open_calls = half_open
|
||||
p.status = status
|
||||
preserved += 1
|
||||
|
||||
added = new_names - old_names
|
||||
removed = old_names - new_names
|
||||
|
||||
logger.info(
|
||||
"Config reloaded: %d providers (%d preserved, %d added, %d removed)",
|
||||
len(self.providers),
|
||||
preserved,
|
||||
len(added),
|
||||
len(removed),
|
||||
)
|
||||
|
||||
return {
|
||||
"total_providers": len(self.providers),
|
||||
"preserved": preserved,
|
||||
"added": sorted(added),
|
||||
"removed": sorted(removed),
|
||||
}
|
||||
|
||||
def get_metrics(self) -> dict:
|
||||
"""Get metrics for all providers."""
|
||||
return {
|
||||
|
||||
@@ -220,7 +220,7 @@ def create_timmy(
|
||||
print_response(message, stream).
|
||||
"""
|
||||
resolved = _resolve_backend(backend)
|
||||
size = model_size or "70b"
|
||||
size = model_size or settings.airllm_model_size
|
||||
|
||||
if resolved == "claude":
|
||||
from timmy.backends import ClaudeBackend
|
||||
@@ -300,11 +300,7 @@ def create_timmy(
|
||||
max_context = 2000 if not use_tools else 8000
|
||||
if len(memory_context) > max_context:
|
||||
memory_context = memory_context[:max_context] + "\n... [truncated]"
|
||||
full_prompt = (
|
||||
f"{base_prompt}\n\n"
|
||||
f"## GROUNDED CONTEXT (verified sources — cite when using)\n\n"
|
||||
f"{memory_context}"
|
||||
)
|
||||
full_prompt = f"{base_prompt}\n\n## Memory Context\n\n{memory_context}"
|
||||
else:
|
||||
full_prompt = base_prompt
|
||||
except Exception as exc:
|
||||
|
||||
@@ -68,14 +68,18 @@ def _get_loop_agent():
|
||||
|
||||
Returns the same type of agent as `create_timmy()` but with a
|
||||
dedicated session so it doesn't pollute the main chat history.
|
||||
|
||||
Thread-safe: uses a lock to prevent duplicate agent creation
|
||||
when multiple loops start concurrently.
|
||||
"""
|
||||
global _loop_agent
|
||||
if _loop_agent is None:
|
||||
with _loop_agent_lock:
|
||||
if _loop_agent is None:
|
||||
from timmy.agent import create_timmy
|
||||
if _loop_agent is not None:
|
||||
return _loop_agent
|
||||
with _loop_agent_lock:
|
||||
if _loop_agent is None:
|
||||
from timmy.agent import create_timmy
|
||||
|
||||
_loop_agent = create_timmy()
|
||||
_loop_agent = create_timmy()
|
||||
return _loop_agent
|
||||
|
||||
|
||||
|
||||
@@ -636,7 +636,7 @@ class HotMemory:
|
||||
if len(lines) > 1:
|
||||
return "\n".join(lines)
|
||||
except Exception:
|
||||
logger.debug("DB context read failed, falling back to file")
|
||||
pass
|
||||
|
||||
# Fallback to file if DB unavailable
|
||||
if self.path.exists():
|
||||
|
||||
@@ -23,9 +23,6 @@ Rules:
|
||||
- Remember what the user tells you during the conversation.
|
||||
- If you don't know something, say so honestly — never fabricate facts.
|
||||
- If a request is ambiguous, ask a brief clarifying question before guessing.
|
||||
- SOURCE DISTINCTION: When answering from memory or retrieved context, cite it.
|
||||
When answering from your own training, use hedging: "I think", "I believe".
|
||||
The user must be able to tell grounded claims from pattern-matching.
|
||||
- Use the user's name if you know it.
|
||||
- When you state a fact, commit to it.
|
||||
- NEVER attempt arithmetic in your head. If asked to compute anything, respond:
|
||||
@@ -81,18 +78,6 @@ HONESTY:
|
||||
- Never fabricate tool output. Call the tool and wait.
|
||||
- If a tool errors, report the exact error.
|
||||
|
||||
SOURCE DISTINCTION (SOUL requirement — non-negotiable):
|
||||
- Every claim you make comes from one of two places: a verified source you
|
||||
can point to, or your own pattern-matching. The user must be able to tell
|
||||
which is which.
|
||||
- When your response uses information from GROUNDED CONTEXT (memory, retrieved
|
||||
documents, tool output), cite it: "From memory:", "According to [source]:".
|
||||
- When you are generating from your training data alone, signal it naturally:
|
||||
"I think", "My understanding is", "I believe" — never false certainty.
|
||||
- If the user asks a factual question and you have no grounded source, say so:
|
||||
"I don't have a verified source for this — from my training I think..."
|
||||
- Prefer "I don't know" over a confident-sounding guess. Refusal over fabrication.
|
||||
|
||||
MEMORY (three tiers):
|
||||
- Tier 1: MEMORY.md (hot, always loaded)
|
||||
- Tier 2: memory/ vault (structured, append-only, date-stamped)
|
||||
|
||||
@@ -97,11 +97,6 @@ async def chat(message: str, session_id: str | None = None) -> str:
|
||||
The agent's response text.
|
||||
"""
|
||||
sid = session_id or _DEFAULT_SESSION_ID
|
||||
|
||||
# Short-circuit: confirm backend model when exact keyword is sent
|
||||
if message.strip() == "Qwe":
|
||||
return "Confirmed: Qwe backend"
|
||||
|
||||
agent = _get_agent()
|
||||
session_logger = get_session_logger()
|
||||
|
||||
|
||||
@@ -75,8 +75,6 @@ def create_timmy_serve_app() -> FastAPI:
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI):
|
||||
logger.info("Timmy Serve starting")
|
||||
app.state.timmy = create_timmy()
|
||||
logger.info("Timmy agent cached in app state")
|
||||
yield
|
||||
logger.info("Timmy Serve shutting down")
|
||||
|
||||
@@ -103,7 +101,7 @@ def create_timmy_serve_app() -> FastAPI:
|
||||
async def serve_chat(request: Request, body: ChatRequest):
|
||||
"""Process a chat request."""
|
||||
try:
|
||||
timmy = request.app.state.timmy
|
||||
timmy = create_timmy()
|
||||
result = timmy.run(body.message, stream=False)
|
||||
response_text = result.content if hasattr(result, "content") else str(result)
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
import time
|
||||
from pathlib import Path
|
||||
from unittest.mock import AsyncMock, patch
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
@@ -489,182 +489,30 @@ class TestProviderAvailabilityCheck:
|
||||
|
||||
assert router._check_provider_available(provider) is False
|
||||
|
||||
def test_check_airllm_installed(self):
|
||||
"""Test AirLLM when installed."""
|
||||
router = CascadeRouter(config_path=Path("/nonexistent"))
|
||||
|
||||
class TestCascadeRouterReload:
|
||||
"""Test hot-reload of providers.yaml."""
|
||||
|
||||
def test_reload_preserves_metrics(self, tmp_path):
|
||||
"""Test that reload preserves metrics for existing providers."""
|
||||
config = {
|
||||
"providers": [
|
||||
{
|
||||
"name": "test-openai",
|
||||
"type": "openai",
|
||||
"enabled": True,
|
||||
"priority": 1,
|
||||
"api_key": "sk-test",
|
||||
}
|
||||
],
|
||||
}
|
||||
config_path = tmp_path / "providers.yaml"
|
||||
config_path.write_text(yaml.dump(config))
|
||||
|
||||
router = CascadeRouter(config_path=config_path)
|
||||
assert len(router.providers) == 1
|
||||
|
||||
# Simulate some traffic
|
||||
router._record_success(router.providers[0], 150.0)
|
||||
router._record_success(router.providers[0], 250.0)
|
||||
assert router.providers[0].metrics.total_requests == 2
|
||||
|
||||
# Reload
|
||||
result = router.reload_config()
|
||||
|
||||
assert result["total_providers"] == 1
|
||||
assert result["preserved"] == 1
|
||||
assert result["added"] == []
|
||||
assert result["removed"] == []
|
||||
# Metrics survived
|
||||
assert router.providers[0].metrics.total_requests == 2
|
||||
assert router.providers[0].metrics.total_latency_ms == 400.0
|
||||
|
||||
def test_reload_preserves_circuit_breaker(self, tmp_path):
|
||||
"""Test that reload preserves circuit breaker state."""
|
||||
config = {
|
||||
"cascade": {"circuit_breaker": {"failure_threshold": 2}},
|
||||
"providers": [
|
||||
{
|
||||
"name": "test-openai",
|
||||
"type": "openai",
|
||||
"enabled": True,
|
||||
"priority": 1,
|
||||
"api_key": "sk-test",
|
||||
}
|
||||
],
|
||||
}
|
||||
config_path = tmp_path / "providers.yaml"
|
||||
config_path.write_text(yaml.dump(config))
|
||||
|
||||
router = CascadeRouter(config_path=config_path)
|
||||
|
||||
# Open circuit breaker
|
||||
for _ in range(2):
|
||||
router._record_failure(router.providers[0])
|
||||
assert router.providers[0].circuit_state == CircuitState.OPEN
|
||||
|
||||
# Reload
|
||||
router.reload_config()
|
||||
|
||||
# Circuit breaker state preserved
|
||||
assert router.providers[0].circuit_state == CircuitState.OPEN
|
||||
assert router.providers[0].status == ProviderStatus.UNHEALTHY
|
||||
|
||||
def test_reload_detects_added_provider(self, tmp_path):
|
||||
"""Test that reload detects newly added providers."""
|
||||
config = {
|
||||
"providers": [
|
||||
{
|
||||
"name": "openai-1",
|
||||
"type": "openai",
|
||||
"enabled": True,
|
||||
"priority": 1,
|
||||
"api_key": "sk-test",
|
||||
}
|
||||
],
|
||||
}
|
||||
config_path = tmp_path / "providers.yaml"
|
||||
config_path.write_text(yaml.dump(config))
|
||||
|
||||
router = CascadeRouter(config_path=config_path)
|
||||
assert len(router.providers) == 1
|
||||
|
||||
# Add a second provider to config
|
||||
config["providers"].append(
|
||||
{
|
||||
"name": "anthropic-1",
|
||||
"type": "anthropic",
|
||||
"enabled": True,
|
||||
"priority": 2,
|
||||
"api_key": "sk-ant-test",
|
||||
}
|
||||
provider = Provider(
|
||||
name="airllm",
|
||||
type="airllm",
|
||||
enabled=True,
|
||||
priority=1,
|
||||
)
|
||||
config_path.write_text(yaml.dump(config))
|
||||
|
||||
result = router.reload_config()
|
||||
with patch("importlib.util.find_spec", return_value=MagicMock()):
|
||||
assert router._check_provider_available(provider) is True
|
||||
|
||||
assert result["total_providers"] == 2
|
||||
assert result["preserved"] == 1
|
||||
assert result["added"] == ["anthropic-1"]
|
||||
assert result["removed"] == []
|
||||
def test_check_airllm_not_installed(self):
|
||||
"""Test AirLLM when not installed."""
|
||||
router = CascadeRouter(config_path=Path("/nonexistent"))
|
||||
|
||||
def test_reload_detects_removed_provider(self, tmp_path):
|
||||
"""Test that reload detects removed providers."""
|
||||
config = {
|
||||
"providers": [
|
||||
{
|
||||
"name": "openai-1",
|
||||
"type": "openai",
|
||||
"enabled": True,
|
||||
"priority": 1,
|
||||
"api_key": "sk-test",
|
||||
},
|
||||
{
|
||||
"name": "anthropic-1",
|
||||
"type": "anthropic",
|
||||
"enabled": True,
|
||||
"priority": 2,
|
||||
"api_key": "sk-ant-test",
|
||||
},
|
||||
],
|
||||
}
|
||||
config_path = tmp_path / "providers.yaml"
|
||||
config_path.write_text(yaml.dump(config))
|
||||
provider = Provider(
|
||||
name="airllm",
|
||||
type="airllm",
|
||||
enabled=True,
|
||||
priority=1,
|
||||
)
|
||||
|
||||
router = CascadeRouter(config_path=config_path)
|
||||
assert len(router.providers) == 2
|
||||
|
||||
# Remove anthropic
|
||||
config["providers"] = [config["providers"][0]]
|
||||
config_path.write_text(yaml.dump(config))
|
||||
|
||||
result = router.reload_config()
|
||||
|
||||
assert result["total_providers"] == 1
|
||||
assert result["preserved"] == 1
|
||||
assert result["removed"] == ["anthropic-1"]
|
||||
|
||||
def test_reload_re_sorts_by_priority(self, tmp_path):
|
||||
"""Test that providers are re-sorted by priority after reload."""
|
||||
config = {
|
||||
"providers": [
|
||||
{
|
||||
"name": "low-priority",
|
||||
"type": "openai",
|
||||
"enabled": True,
|
||||
"priority": 10,
|
||||
"api_key": "sk-test",
|
||||
},
|
||||
{
|
||||
"name": "high-priority",
|
||||
"type": "openai",
|
||||
"enabled": True,
|
||||
"priority": 1,
|
||||
"api_key": "sk-test2",
|
||||
},
|
||||
],
|
||||
}
|
||||
config_path = tmp_path / "providers.yaml"
|
||||
config_path.write_text(yaml.dump(config))
|
||||
|
||||
router = CascadeRouter(config_path=config_path)
|
||||
assert router.providers[0].name == "high-priority"
|
||||
|
||||
# Swap priorities
|
||||
config["providers"][0]["priority"] = 1
|
||||
config["providers"][1]["priority"] = 10
|
||||
config_path.write_text(yaml.dump(config))
|
||||
|
||||
router.reload_config()
|
||||
|
||||
assert router.providers[0].name == "low-priority"
|
||||
assert router.providers[1].name == "high-priority"
|
||||
with patch("importlib.util.find_spec", return_value=None):
|
||||
assert router._check_provider_available(provider) is False
|
||||
|
||||
@@ -1,285 +0,0 @@
|
||||
"""Integration tests for agentic loop WebSocket broadcasts.
|
||||
|
||||
Verifies that ``run_agentic_loop`` pushes the correct sequence of events
|
||||
through the real ``ws_manager`` and that connected (mock) WebSocket clients
|
||||
receive every broadcast with the expected payloads.
|
||||
"""
|
||||
|
||||
import json
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from infrastructure.ws_manager.handler import WebSocketManager
|
||||
from timmy.agentic_loop import run_agentic_loop
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _mock_run(content: str):
|
||||
m = MagicMock()
|
||||
m.content = content
|
||||
return m
|
||||
|
||||
|
||||
def _ws_client() -> AsyncMock:
|
||||
"""Return a fake WebSocket that records sent messages."""
|
||||
return AsyncMock()
|
||||
|
||||
|
||||
def _collected_events(ws: AsyncMock) -> list[dict]:
|
||||
"""Extract parsed JSON events from a mock WebSocket's send_text calls."""
|
||||
return [json.loads(call.args[0]) for call in ws.send_text.call_args_list]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestAgenticLoopBroadcastSequence:
|
||||
"""Events arrive at WS clients in the correct order with expected data."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_successful_run_broadcasts_plan_steps_complete(self):
|
||||
"""A successful 2-step loop emits plan_ready → 2× step_complete → task_complete."""
|
||||
mgr = WebSocketManager()
|
||||
ws = _ws_client()
|
||||
mgr._connections = [ws]
|
||||
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run = MagicMock(
|
||||
side_effect=[
|
||||
_mock_run("1. Gather data\n2. Summarise"),
|
||||
_mock_run("Gathered 10 records"),
|
||||
_mock_run("Summary written"),
|
||||
]
|
||||
)
|
||||
|
||||
with (
|
||||
patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
|
||||
patch("infrastructure.ws_manager.handler.ws_manager", mgr),
|
||||
):
|
||||
result = await run_agentic_loop("Gather and summarise", max_steps=2)
|
||||
|
||||
assert result.status == "completed"
|
||||
|
||||
events = _collected_events(ws)
|
||||
event_names = [e["event"] for e in events]
|
||||
assert event_names == [
|
||||
"agentic.plan_ready",
|
||||
"agentic.step_complete",
|
||||
"agentic.step_complete",
|
||||
"agentic.task_complete",
|
||||
]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_plan_ready_payload(self):
|
||||
"""plan_ready contains task_id, task, steps list, and total count."""
|
||||
mgr = WebSocketManager()
|
||||
ws = _ws_client()
|
||||
mgr._connections = [ws]
|
||||
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run = MagicMock(
|
||||
side_effect=[
|
||||
_mock_run("1. Alpha\n2. Beta"),
|
||||
_mock_run("Alpha done"),
|
||||
_mock_run("Beta done"),
|
||||
]
|
||||
)
|
||||
|
||||
with (
|
||||
patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
|
||||
patch("infrastructure.ws_manager.handler.ws_manager", mgr),
|
||||
):
|
||||
result = await run_agentic_loop("Two steps")
|
||||
|
||||
plan_event = _collected_events(ws)[0]
|
||||
assert plan_event["event"] == "agentic.plan_ready"
|
||||
data = plan_event["data"]
|
||||
assert data["task_id"] == result.task_id
|
||||
assert data["task"] == "Two steps"
|
||||
assert data["steps"] == ["Alpha", "Beta"]
|
||||
assert data["total"] == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_step_complete_payload(self):
|
||||
"""step_complete carries step number, total, description, and result."""
|
||||
mgr = WebSocketManager()
|
||||
ws = _ws_client()
|
||||
mgr._connections = [ws]
|
||||
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run = MagicMock(
|
||||
side_effect=[
|
||||
_mock_run("1. Only step"),
|
||||
_mock_run("Step result text"),
|
||||
]
|
||||
)
|
||||
|
||||
with (
|
||||
patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
|
||||
patch("infrastructure.ws_manager.handler.ws_manager", mgr),
|
||||
):
|
||||
await run_agentic_loop("Single step", max_steps=1)
|
||||
|
||||
step_event = _collected_events(ws)[1]
|
||||
assert step_event["event"] == "agentic.step_complete"
|
||||
data = step_event["data"]
|
||||
assert data["step"] == 1
|
||||
assert data["total"] == 1
|
||||
assert data["description"] == "Only step"
|
||||
assert "Step result text" in data["result"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_task_complete_payload(self):
|
||||
"""task_complete has status, steps_completed, summary, and duration_ms."""
|
||||
mgr = WebSocketManager()
|
||||
ws = _ws_client()
|
||||
mgr._connections = [ws]
|
||||
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run = MagicMock(
|
||||
side_effect=[
|
||||
_mock_run("1. Do it"),
|
||||
_mock_run("Done"),
|
||||
]
|
||||
)
|
||||
|
||||
with (
|
||||
patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
|
||||
patch("infrastructure.ws_manager.handler.ws_manager", mgr),
|
||||
):
|
||||
await run_agentic_loop("Quick", max_steps=1)
|
||||
|
||||
complete_event = _collected_events(ws)[-1]
|
||||
assert complete_event["event"] == "agentic.task_complete"
|
||||
data = complete_event["data"]
|
||||
assert data["status"] == "completed"
|
||||
assert data["steps_completed"] == 1
|
||||
assert isinstance(data["duration_ms"], int)
|
||||
assert data["duration_ms"] >= 0
|
||||
assert data["summary"]
|
||||
|
||||
|
||||
class TestAdaptationBroadcast:
|
||||
"""Adapted steps emit step_adapted events."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_adapted_step_broadcasts_step_adapted(self):
|
||||
"""A failed-then-adapted step emits agentic.step_adapted."""
|
||||
mgr = WebSocketManager()
|
||||
ws = _ws_client()
|
||||
mgr._connections = [ws]
|
||||
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run = MagicMock(
|
||||
side_effect=[
|
||||
_mock_run("1. Risky step"),
|
||||
Exception("disk full"),
|
||||
_mock_run("Used /tmp instead"),
|
||||
]
|
||||
)
|
||||
|
||||
with (
|
||||
patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
|
||||
patch("infrastructure.ws_manager.handler.ws_manager", mgr),
|
||||
):
|
||||
result = await run_agentic_loop("Adapt test", max_steps=1)
|
||||
|
||||
events = _collected_events(ws)
|
||||
event_names = [e["event"] for e in events]
|
||||
assert "agentic.step_adapted" in event_names
|
||||
|
||||
adapted = next(e for e in events if e["event"] == "agentic.step_adapted")
|
||||
assert adapted["data"]["error"] == "disk full"
|
||||
assert adapted["data"]["adaptation"]
|
||||
assert result.steps[0].status == "adapted"
|
||||
|
||||
|
||||
class TestMultipleClients:
|
||||
"""All connected clients receive every broadcast."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_two_clients_receive_all_events(self):
|
||||
mgr = WebSocketManager()
|
||||
ws1 = _ws_client()
|
||||
ws2 = _ws_client()
|
||||
mgr._connections = [ws1, ws2]
|
||||
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run = MagicMock(
|
||||
side_effect=[
|
||||
_mock_run("1. Step A"),
|
||||
_mock_run("A done"),
|
||||
]
|
||||
)
|
||||
|
||||
with (
|
||||
patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
|
||||
patch("infrastructure.ws_manager.handler.ws_manager", mgr),
|
||||
):
|
||||
await run_agentic_loop("Multi-client", max_steps=1)
|
||||
|
||||
events1 = _collected_events(ws1)
|
||||
events2 = _collected_events(ws2)
|
||||
assert len(events1) == len(events2) == 3 # plan + step + complete
|
||||
assert [e["event"] for e in events1] == [e["event"] for e in events2]
|
||||
|
||||
|
||||
class TestEventHistory:
|
||||
"""Broadcasts are recorded in ws_manager event history."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_events_appear_in_history(self):
|
||||
mgr = WebSocketManager()
|
||||
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run = MagicMock(
|
||||
side_effect=[
|
||||
_mock_run("1. Only"),
|
||||
_mock_run("Done"),
|
||||
]
|
||||
)
|
||||
|
||||
with (
|
||||
patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
|
||||
patch("infrastructure.ws_manager.handler.ws_manager", mgr),
|
||||
):
|
||||
await run_agentic_loop("History test", max_steps=1)
|
||||
|
||||
history_events = [e.event for e in mgr.event_history]
|
||||
assert "agentic.plan_ready" in history_events
|
||||
assert "agentic.step_complete" in history_events
|
||||
assert "agentic.task_complete" in history_events
|
||||
|
||||
|
||||
class TestBroadcastGracefulDegradation:
|
||||
"""Loop completes even when ws_manager is unavailable."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_loop_succeeds_when_broadcast_fails(self):
|
||||
"""ImportError from ws_manager doesn't crash the loop."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.run = MagicMock(
|
||||
side_effect=[
|
||||
_mock_run("1. Do it"),
|
||||
_mock_run("Done"),
|
||||
]
|
||||
)
|
||||
|
||||
with (
|
||||
patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
|
||||
patch(
|
||||
"infrastructure.ws_manager.handler.ws_manager",
|
||||
new_callable=lambda: MagicMock,
|
||||
) as broken_mgr,
|
||||
):
|
||||
broken_mgr.broadcast = AsyncMock(side_effect=RuntimeError("ws down"))
|
||||
result = await run_agentic_loop("Resilient task", max_steps=1)
|
||||
|
||||
assert result.status == "completed"
|
||||
assert len(result.steps) == 1
|
||||
@@ -1,86 +0,0 @@
|
||||
"""Tests for scripts/cycle_retro.py issue auto-detection."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
# Import the module under test — it's a script so we import the helpers directly
|
||||
import importlib
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
SCRIPTS_DIR = Path(__file__).resolve().parent.parent.parent / "scripts"
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def _add_scripts_to_path(monkeypatch):
|
||||
monkeypatch.syspath_prepend(str(SCRIPTS_DIR))
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def mod():
|
||||
"""Import cycle_retro as a module."""
|
||||
return importlib.import_module("cycle_retro")
|
||||
|
||||
|
||||
class TestDetectIssueFromBranch:
|
||||
def test_kimi_issue_branch(self, mod):
|
||||
with patch.object(subprocess, "check_output", return_value="kimi/issue-492\n"):
|
||||
assert mod.detect_issue_from_branch() == 492
|
||||
|
||||
def test_plain_issue_branch(self, mod):
|
||||
with patch.object(subprocess, "check_output", return_value="issue-123\n"):
|
||||
assert mod.detect_issue_from_branch() == 123
|
||||
|
||||
def test_issue_slash_number(self, mod):
|
||||
with patch.object(subprocess, "check_output", return_value="fix/issue/55\n"):
|
||||
assert mod.detect_issue_from_branch() == 55
|
||||
|
||||
def test_no_issue_in_branch(self, mod):
|
||||
with patch.object(subprocess, "check_output", return_value="main\n"):
|
||||
assert mod.detect_issue_from_branch() is None
|
||||
|
||||
def test_feature_branch(self, mod):
|
||||
with patch.object(subprocess, "check_output", return_value="feature/add-widget\n"):
|
||||
assert mod.detect_issue_from_branch() is None
|
||||
|
||||
def test_git_not_available(self, mod):
|
||||
with patch.object(subprocess, "check_output", side_effect=FileNotFoundError):
|
||||
assert mod.detect_issue_from_branch() is None
|
||||
|
||||
def test_git_fails(self, mod):
|
||||
with patch.object(
|
||||
subprocess,
|
||||
"check_output",
|
||||
side_effect=subprocess.CalledProcessError(1, "git"),
|
||||
):
|
||||
assert mod.detect_issue_from_branch() is None
|
||||
|
||||
|
||||
class TestBackfillExtractIssueNumber:
|
||||
"""Tests for backfill_retro.extract_issue_number PR-number filtering."""
|
||||
|
||||
@pytest.fixture()
|
||||
def backfill(self):
|
||||
return importlib.import_module("backfill_retro")
|
||||
|
||||
def test_body_has_issue(self, backfill):
|
||||
assert backfill.extract_issue_number("fix: foo (#491)", "Fixes #490", pr_number=491) == 490
|
||||
|
||||
def test_title_skips_pr_number(self, backfill):
|
||||
assert backfill.extract_issue_number("fix: foo (#491)", "", pr_number=491) is None
|
||||
|
||||
def test_title_with_issue_and_pr(self, backfill):
|
||||
# [loop-cycle-538] refactor: ... (#459) (#481)
|
||||
assert (
|
||||
backfill.extract_issue_number(
|
||||
"[loop-cycle-538] refactor: remove dead airllm (#459) (#481)",
|
||||
"",
|
||||
pr_number=481,
|
||||
)
|
||||
== 459
|
||||
)
|
||||
|
||||
def test_no_pr_number_provided(self, backfill):
|
||||
assert backfill.extract_issue_number("fix: foo (#491)", "") == 491
|
||||
@@ -49,34 +49,6 @@ class TestConfigLazyValidation:
|
||||
# Should not raise
|
||||
validate_startup(force=True)
|
||||
|
||||
def test_validate_startup_exits_on_cors_wildcard_in_production(self):
|
||||
"""validate_startup() should exit in production when CORS has wildcard."""
|
||||
from config import settings, validate_startup
|
||||
|
||||
with (
|
||||
patch.object(settings, "timmy_env", "production"),
|
||||
patch.object(settings, "l402_hmac_secret", "test-secret-hex-value-32"),
|
||||
patch.object(settings, "l402_macaroon_secret", "test-macaroon-hex-value-32"),
|
||||
patch.object(settings, "cors_origins", ["*"]),
|
||||
pytest.raises(SystemExit),
|
||||
):
|
||||
validate_startup(force=True)
|
||||
|
||||
def test_validate_startup_warns_cors_wildcard_in_dev(self):
|
||||
"""validate_startup() should warn in dev when CORS has wildcard."""
|
||||
from config import settings, validate_startup
|
||||
|
||||
with (
|
||||
patch.object(settings, "timmy_env", "development"),
|
||||
patch.object(settings, "cors_origins", ["*"]),
|
||||
patch("config._startup_logger") as mock_logger,
|
||||
):
|
||||
validate_startup(force=True)
|
||||
mock_logger.warning.assert_any_call(
|
||||
"SEC: CORS_ORIGINS contains wildcard '*' — "
|
||||
"restrict to explicit origins before deploying to production."
|
||||
)
|
||||
|
||||
def test_validate_startup_skips_in_test_mode(self):
|
||||
"""validate_startup() should be a no-op in test mode."""
|
||||
from config import validate_startup
|
||||
|
||||
@@ -71,26 +71,6 @@ class TestAnnotateConfidence:
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_chat_confirms_qwe_backend():
|
||||
"""chat() should return exact confirmation when message is 'Qwe'."""
|
||||
from timmy.session import chat
|
||||
|
||||
result = await chat("Qwe")
|
||||
|
||||
assert result == "Confirmed: Qwe backend"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_chat_confirms_qwe_backend_with_whitespace():
|
||||
"""chat() should handle 'Qwe' with surrounding whitespace."""
|
||||
from timmy.session import chat
|
||||
|
||||
result = await chat(" Qwe ")
|
||||
|
||||
assert result == "Confirmed: Qwe backend"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_chat_returns_string():
|
||||
"""chat() should return a plain string response."""
|
||||
|
||||
@@ -8,14 +8,11 @@ from fastapi.testclient import TestClient
|
||||
|
||||
@pytest.fixture
|
||||
def serve_client():
|
||||
"""Create a TestClient for the timmy-serve app with mocked Timmy agent."""
|
||||
with patch("timmy_serve.app.create_timmy") as mock_create:
|
||||
mock_create.return_value = MagicMock()
|
||||
from timmy_serve.app import create_timmy_serve_app
|
||||
"""Create a TestClient for the timmy-serve app."""
|
||||
from timmy_serve.app import create_timmy_serve_app
|
||||
|
||||
app = create_timmy_serve_app()
|
||||
with TestClient(app) as client:
|
||||
yield client
|
||||
app = create_timmy_serve_app()
|
||||
return TestClient(app)
|
||||
|
||||
|
||||
class TestHealthEndpoint:
|
||||
@@ -37,40 +34,18 @@ class TestServeStatus:
|
||||
|
||||
class TestServeChatEndpoint:
|
||||
@patch("timmy_serve.app.create_timmy")
|
||||
def test_chat_returns_response(self, mock_create):
|
||||
def test_chat_returns_response(self, mock_create, serve_client):
|
||||
mock_agent = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.content = "I am Timmy."
|
||||
mock_agent.run.return_value = mock_result
|
||||
mock_create.return_value = mock_agent
|
||||
|
||||
from timmy_serve.app import create_timmy_serve_app
|
||||
|
||||
app = create_timmy_serve_app()
|
||||
with TestClient(app) as client:
|
||||
resp = client.post(
|
||||
"/serve/chat",
|
||||
json={"message": "Who are you?"},
|
||||
)
|
||||
resp = serve_client.post(
|
||||
"/serve/chat",
|
||||
json={"message": "Who are you?"},
|
||||
)
|
||||
assert resp.status_code == 200
|
||||
data = resp.json()
|
||||
assert data["response"] == "I am Timmy."
|
||||
mock_agent.run.assert_called_once_with("Who are you?", stream=False)
|
||||
|
||||
@patch("timmy_serve.app.create_timmy")
|
||||
def test_agent_cached_at_startup(self, mock_create):
|
||||
"""Verify create_timmy is called once at startup, not per request."""
|
||||
mock_agent = MagicMock()
|
||||
mock_result = MagicMock()
|
||||
mock_result.content = "reply"
|
||||
mock_agent.run.return_value = mock_result
|
||||
mock_create.return_value = mock_agent
|
||||
|
||||
from timmy_serve.app import create_timmy_serve_app
|
||||
|
||||
app = create_timmy_serve_app()
|
||||
with TestClient(app) as client:
|
||||
# Two requests — create_timmy should only be called once (at startup)
|
||||
client.post("/serve/chat", json={"message": "hello"})
|
||||
client.post("/serve/chat", json={"message": "world"})
|
||||
mock_create.assert_called_once()
|
||||
|
||||
@@ -104,6 +104,46 @@ class TestGetLoopAgent:
|
||||
finally:
|
||||
al._loop_agent = saved
|
||||
|
||||
def test_thread_safe_creation(self):
|
||||
"""Concurrent calls must only create one agent (thread-safety)."""
|
||||
import threading
|
||||
|
||||
import timmy.agentic_loop as al
|
||||
|
||||
saved = al._loop_agent
|
||||
try:
|
||||
al._loop_agent = None
|
||||
mock_agent = MagicMock()
|
||||
call_count = 0
|
||||
barrier = threading.Barrier(4)
|
||||
|
||||
original_create = MagicMock(return_value=mock_agent)
|
||||
|
||||
def slow_create():
|
||||
nonlocal call_count
|
||||
call_count += 1
|
||||
return original_create()
|
||||
|
||||
results = [None] * 4
|
||||
|
||||
def worker(idx):
|
||||
barrier.wait()
|
||||
results[idx] = al._get_loop_agent()
|
||||
|
||||
with patch("timmy.agent.create_timmy", side_effect=slow_create):
|
||||
threads = [threading.Thread(target=worker, args=(i,)) for i in range(4)]
|
||||
for t in threads:
|
||||
t.start()
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
# All threads got the same agent
|
||||
assert all(r is mock_agent for r in results)
|
||||
# create_timmy called exactly once
|
||||
assert call_count == 1
|
||||
finally:
|
||||
al._loop_agent = saved
|
||||
|
||||
|
||||
# ── _broadcast_progress ──────────────────────────────────────────────────────
|
||||
|
||||
|
||||
@@ -99,19 +99,26 @@ class TestKeywordOverlap:
|
||||
|
||||
|
||||
class TestEmbedText:
|
||||
def setup_method(self):
|
||||
self._saved_model = emb.EMBEDDING_MODEL
|
||||
emb.EMBEDDING_MODEL = None
|
||||
|
||||
def teardown_method(self):
|
||||
emb.EMBEDDING_MODEL = self._saved_model
|
||||
|
||||
def test_uses_fallback_when_model_disabled(self):
|
||||
with patch.object(emb, "_get_embedding_model", return_value=False):
|
||||
vec = embed_text("test")
|
||||
emb.EMBEDDING_MODEL = False
|
||||
vec = embed_text("test")
|
||||
assert len(vec) == 128 # hash fallback dimension
|
||||
|
||||
def test_uses_model_when_available(self):
|
||||
mock_encoding = MagicMock()
|
||||
mock_encoding.tolist.return_value = [0.1, 0.2, 0.3]
|
||||
mock_model = MagicMock()
|
||||
mock_model.encode.return_value = mock_encoding
|
||||
import numpy as np
|
||||
|
||||
with patch.object(emb, "_get_embedding_model", return_value=mock_model):
|
||||
result = embed_text("test")
|
||||
mock_model = MagicMock()
|
||||
mock_model.encode.return_value = np.array([0.1, 0.2, 0.3])
|
||||
emb.EMBEDDING_MODEL = mock_model
|
||||
|
||||
result = embed_text("test")
|
||||
assert result == pytest.approx([0.1, 0.2, 0.3])
|
||||
mock_model.encode.assert_called_once_with("test")
|
||||
|
||||
|
||||
Reference in New Issue
Block a user