Compare commits

7 commits: fix/11-boo ... feat/93-de

| Author | SHA1 | Date |
|---|---|---|
|  | cbebd93cbb |  |
|  | 8d716ff03f |  |
|  | 920510996e |  |
|  | 1fafeaf5a4 |  |
|  | 36b440f998 |  |
|  | 9f3caabf42 |  |
|  | a21f3a44e1 |  |

scripts/dependency_graph.py (new file, 249 lines added)
@@ -0,0 +1,249 @@
#!/usr/bin/env python3
"""
Cross-Repo Dependency Graph Builder

Scans repos for import/require/reference patterns and builds a directed
dependency graph. Detects circular dependencies. Outputs DOT and Mermaid.

Usage:
    python3 scripts/dependency_graph.py /path/to/repos/
    python3 scripts/dependency_graph.py --repos repo1,repo2,repo3 --format mermaid
    python3 scripts/dependency_graph.py --repos-dir /path/to/ --format dot --output deps.dot

Patterns detected:
- Python: import X, from X import Y
- JavaScript: require("X"), import ... from "X"
- Go: import "X"
- Ansible: include_role, import_role
- Docker/Compose: image: X, depends_on
- Config references: repo-name in YAML/TOML/JSON
"""

import argparse
import json
import os
import re
import sys
from collections import defaultdict
from pathlib import Path


# Known repo names for matching
KNOWN_REPOS = [
    "hermes-agent", "timmy-config", "timmy-home", "the-nexus", "the-door",
    "the-beacon", "fleet-ops", "burn-fleet", "timmy-dispatch", "turboquant",
    "compounding-intelligence", "the-playground", "second-son-of-timmy",
    "ai-safety-review", "the-echo-pattern", "timmy-academy", "wolf",
    "the-testament",
]


def normalize_repo_name(name: str) -> str:
    """Normalize a repo name for comparison."""
    return name.lower().replace("_", "-").replace(".git", "").strip()


def scan_file_for_deps(filepath: str, content: str, own_repo: str) -> set:
    """Scan a file's content for references to other repos."""
    deps = set()
    own_norm = normalize_repo_name(own_repo)

    for repo in KNOWN_REPOS:
        repo_norm = normalize_repo_name(repo)
        if repo_norm == own_norm:
            continue

        # Direct name references
        patterns = [
            repo,  # exact name
            repo.replace("-", "_"),  # underscore variant
            repo.replace("-", ""),  # no separator
            f"/{repo}/",  # path reference
            f'"{repo}"',  # quoted
            f"'{repo}'",  # single quoted
            f"Timmy_Foundation/{repo}",  # full Gitea path
            f"Timmy_Foundation.{repo}",  # Python module path
        ]

        for pattern in patterns:
            if pattern in content:
                deps.add(repo)
                break

    return deps


def scan_repo(repo_path: str, repo_name: str = None) -> dict:
    """Scan a repo directory for dependencies."""
    path = Path(repo_path)
    if not path.is_dir():
        return {"error": f"Not a directory: {repo_path}"}

    if not repo_name:
        repo_name = path.name

    deps = set()
    files_scanned = 0
    exts = {".py", ".js", ".ts", ".go", ".yaml", ".yml", ".toml", ".json",
            ".md", ".sh", ".bash", ".Dockerfile", ".tf", ".hcl"}

    for fpath in path.rglob("*"):
        if not fpath.is_file():
            continue
        if fpath.suffix not in exts:
            continue
        # Skip common non-source dirs
        parts = fpath.parts
        if any(p in (".git", "node_modules", "__pycache__", ".venv", "venv",
                     "vendor", "dist", "build", ".tox") for p in parts):
            continue

        try:
            content = fpath.read_text(errors="ignore")
        except:
            continue

        file_deps = scan_file_for_deps(str(fpath), content, repo_name)
        deps.update(file_deps)
        files_scanned += 1

    return {
        "repo": repo_name,
        "dependencies": sorted(deps),
        "files_scanned": files_scanned,
    }


def detect_cycles(graph: dict) -> list:
    """Detect circular dependencies using DFS."""
    cycles = []
    visited = set()
    rec_stack = set()

    def dfs(node, path):
        visited.add(node)
        rec_stack.add(node)

        for neighbor in graph.get(node, {}).get("dependencies", []):
            if neighbor not in visited:
                result = dfs(neighbor, path + [neighbor])
                if result:
                    return result
            elif neighbor in rec_stack:
                cycle_start = path.index(neighbor)
                return path[cycle_start:] + [neighbor]

        rec_stack.remove(node)
        return None

    for node in graph:
        if node not in visited:
            cycle = dfs(node, [node])
            if cycle:
                cycles.append(cycle)

    return cycles


def to_dot(graph: dict) -> str:
    """Generate DOT format output."""
    lines = ["digraph dependencies {"]
    lines.append(" rankdir=LR;")
lines.append(" node [shape=box, style=filled, fillcolor="#1a1a2e", fontcolor="#e6edf3"];")
|
||||||
|
lines.append(" edge [color="#4a4a6a"];")
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
for repo, data in sorted(graph.items()):
|
||||||
|
dep_count = len(data.get("dependencies", []))
|
||||||
|
fill = "#2d1b69" if dep_count > 2 else "#16213e"
|
||||||
|
lines.append(f' "{repo}" [fillcolor="{fill}"];')
|
||||||
|
for dep in data.get("dependencies", []):
|
||||||
|
lines.append(f' "{repo}" -> "{dep}";')
|
||||||
|
|
||||||
|
lines.append("}")
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def to_mermaid(graph: dict) -> str:
|
||||||
|
"""Generate Mermaid format output."""
|
||||||
|
lines = ["graph LR"]
|
||||||
|
|
||||||
|
for repo, data in sorted(graph.items()):
|
||||||
|
for dep in data.get("dependencies", []):
|
||||||
|
lines.append(f" {repo.replace('-','_')} --> {dep.replace('-','_')}")
|
||||||
|
|
||||||
|
# Add node labels
|
||||||
|
lines.append("")
|
||||||
|
for repo in sorted(graph.keys()):
|
||||||
|
lines.append(f" {repo.replace('-','_')}[{repo}]")
|
||||||
|
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(description="Build cross-repo dependency graph")
|
||||||
|
parser.add_argument("repos_dir", nargs="?", help="Directory containing repos")
|
||||||
|
parser.add_argument("--repos", help="Comma-separated list of repo paths")
|
||||||
|
parser.add_argument("--format", choices=["dot", "mermaid", "json"], default="json")
|
||||||
|
parser.add_argument("--output", "-o", help="Output file (default: stdout)")
|
||||||
|
parser.add_argument("--cycles-only", action="store_true", help="Only report cycles")
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
results = {}
|
||||||
|
repo_paths = []
|
||||||
|
|
||||||
|
if args.repos:
|
||||||
|
repo_paths = [p.strip() for p in args.repos.split(",")]
|
||||||
|
elif args.repos_dir:
|
||||||
|
base = Path(args.repos_dir)
|
||||||
|
repo_paths = [str(p) for p in base.iterdir() if p.is_dir() and not p.name.startswith(".")]
|
||||||
|
else:
|
||||||
|
parser.print_help()
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
for rpath in repo_paths:
|
||||||
|
name = Path(rpath).name
|
||||||
|
print(f"Scanning {name}...", file=sys.stderr)
|
||||||
|
result = scan_repo(rpath, name)
|
||||||
|
if "error" not in result:
|
||||||
|
results[name] = result
|
||||||
|
|
||||||
|
# Detect cycles
|
||||||
|
cycles = detect_cycles(results)
|
||||||
|
|
||||||
|
if args.cycles_only:
|
||||||
|
if cycles:
|
||||||
|
print("CIRCULAR DEPENDENCIES DETECTED:")
|
||||||
|
for cycle in cycles:
|
||||||
|
print(f" {' -> '.join(cycle)}")
|
||||||
|
sys.exit(1)
|
||||||
|
else:
|
||||||
|
print("No circular dependencies found.")
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
# Output
|
||||||
|
output = {}
|
||||||
|
if args.format == "dot":
|
||||||
|
output = to_dot(results)
|
||||||
|
elif args.format == "mermaid":
|
||||||
|
output = to_mermaid(results)
|
||||||
|
else:
|
||||||
|
output = json.dumps({
|
||||||
|
"repos": results,
|
||||||
|
"cycles": cycles,
|
||||||
|
"summary": {
|
||||||
|
"total_repos": len(results),
|
||||||
|
"total_deps": sum(len(r["dependencies"]) for r in results.values()),
|
||||||
|
"cycles_found": len(cycles),
|
||||||
|
}
|
||||||
|
}, indent=2)
|
||||||
|
|
||||||
|
if args.output:
|
||||||
|
Path(args.output).write_text(output)
|
||||||
|
print(f"Written to {args.output}", file=sys.stderr)
|
||||||
|
else:
|
||||||
|
print(output)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
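
As a point of reference (not part of the change above), the same functions can also be driven programmatically rather than through the CLI. This is a minimal sketch assuming scripts/ is importable and a local checkout directory exists; the path and import style are illustrative assumptions, not something the script prescribes:

```python
# Hypothetical usage sketch for scan_repo / detect_cycles / to_mermaid above.
# Assumes scripts/ is on sys.path; the /path/to/repos location is a placeholder.
from pathlib import Path

from dependency_graph import scan_repo, detect_cycles, to_mermaid

graph = {}
for repo_dir in Path("/path/to/repos").iterdir():
    if repo_dir.is_dir() and not repo_dir.name.startswith("."):
        result = scan_repo(str(repo_dir))
        if "error" not in result:
            # result looks like {"repo": ..., "dependencies": [...], "files_scanned": N}
            graph[result["repo"]] = result

cycles = detect_cycles(graph)  # e.g. [["the-nexus", "the-door", "the-nexus"]] (illustrative)
print(to_mermaid(graph))       # "graph LR" edges plus node labels
if cycles:
    print("Cycles:", cycles)
```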

scripts/test_harvest_prompt_comprehensive.py (new file, 212 lines added)
@@ -0,0 +1,212 @@
#!/usr/bin/env python3
"""
Comprehensive test script for knowledge extraction prompt.
Validates prompt structure, requirements, and consistency.
"""

import json
import re
from pathlib import Path

def test_prompt_structure():
    """Test that the prompt has the required structure."""
    prompt_path = Path("templates/harvest-prompt.md")
    if not prompt_path.exists():
        return False, "harvest-prompt.md not found"

    content = prompt_path.read_text()

    # Check for required sections
    required_sections = [
        "System Prompt",
        "Instructions",
        "Categories",
        "Output Format",
        "Confidence Scoring",
        "Constraints",
        "Example"
    ]

    for section in required_sections:
        if section.lower() not in content.lower():
            return False, f"Missing required section: {section}"

    # Check for required categories
    required_categories = ["fact", "pitfall", "pattern", "tool-quirk", "question"]
    for category in required_categories:
        if category not in content:
            return False, f"Missing required category: {category}"

    # Check for required output fields
    required_fields = ["fact", "category", "repo", "confidence"]
    for field in required_fields:
        if field not in content:
            return False, f"Missing required output field: {field}"

    # Check prompt size (should be ~1k tokens, roughly 4k chars)
    if len(content) > 5000:
        return False, f"Prompt too large: {len(content)} chars (max ~5000)"

    if len(content) < 1000:
        return False, f"Prompt too small: {len(content)} chars (min ~1000)"

    return True, "Prompt structure is valid"

def test_confidence_scoring():
    """Test that confidence scoring is properly defined."""
    prompt_path = Path("templates/harvest-prompt.md")
    content = prompt_path.read_text()

    # Check for confidence scale definitions
    confidence_levels = [
        ("0.9-1.0", "explicitly stated"),
        ("0.7-0.8", "clearly implied"),
        ("0.5-0.6", "suggested"),
        ("0.3-0.4", "inferred"),
        ("0.1-0.2", "speculative")
    ]

    for level, description in confidence_levels:
        if level not in content:
            return False, f"Missing confidence level: {level}"
        if description.lower() not in content.lower():
            return False, f"Missing confidence description: {description}"

    return True, "Confidence scoring is properly defined"

def test_example_quality():
    """Test that examples are clear and complete."""
    prompt_path = Path("templates/harvest-prompt.md")
    content = prompt_path.read_text()

    # Check for example input/output
    if "example" not in content.lower():
        return False, "No examples provided"

    # Check that example includes all categories
    example_section = content[content.lower().find("example"):]

    # Look for JSON example
    json_match = re.search(r'\{[\s\S]*"knowledge"[\s\S]*\}', example_section)
    if not json_match:
        return False, "No JSON example found"

    example_json = json_match.group(0)

    # Check for all categories in example
    for category in ["fact", "pitfall", "pattern", "tool-quirk", "question"]:
        if category not in example_json:
            return False, f"Example missing category: {category}"

    return True, "Examples are clear and complete"

def test_constraint_coverage():
    """Test that constraints cover all requirements."""
    prompt_path = Path("templates/harvest-prompt.md")
    content = prompt_path.read_text()

    required_constraints = [
        "No hallucination",
        "only extract",
        "explicitly",
        "partial",
        "failed sessions",
        "1k tokens"
    ]

    for constraint in required_constraints:
        if constraint.lower() not in content.lower():
            return False, f"Missing constraint: {constraint}"

    return True, "Constraints cover all requirements"

def test_test_sessions():
    """Test that test sessions exist and are valid."""
    test_sessions_dir = Path("test_sessions")
    if not test_sessions_dir.exists():
        return False, "test_sessions directory not found"

    session_files = list(test_sessions_dir.glob("*.jsonl"))
    if len(session_files) < 5:
        return False, f"Only {len(session_files)} test sessions found, need 5"

    # Check each session file
    for session_file in session_files:
        content = session_file.read_text()
        lines = content.strip().split("\n")

        # Check that each line is valid JSON
        for i, line in enumerate(lines, 1):
            try:
                json.loads(line)
            except json.JSONDecodeError as e:
                return False, f"Invalid JSON in {session_file.name}, line {i}: {e}"

    return True, f"Found {len(session_files)} valid test sessions"

def run_all_tests():
    """Run all tests and return results."""
    tests = [
        ("Prompt Structure", test_prompt_structure),
        ("Confidence Scoring", test_confidence_scoring),
        ("Example Quality", test_example_quality),
        ("Constraint Coverage", test_constraint_coverage),
        ("Test Sessions", test_test_sessions)
    ]

    results = []
    all_passed = True

    for test_name, test_func in tests:
        try:
            passed, message = test_func()
            results.append({
                "test": test_name,
                "passed": passed,
                "message": message
            })
            if not passed:
                all_passed = False
        except Exception as e:
            results.append({
                "test": test_name,
                "passed": False,
                "message": f"Error: {str(e)}"
            })
            all_passed = False

    # Print results
    print("=" * 60)
    print("HARVEST PROMPT TEST RESULTS")
    print("=" * 60)

    for result in results:
        status = "✓ PASS" if result["passed"] else "✗ FAIL"
        print(f"{status}: {result['test']}")
        print(f" {result['message']}")
        print()

    print("=" * 60)
    if all_passed:
        print("ALL TESTS PASSED!")
    else:
        print("SOME TESTS FAILED!")
    print("=" * 60)

    return all_passed, results

if __name__ == "__main__":
    all_passed, results = run_all_tests()

    # Save results to file
    with open("test_results.json", "w") as f:
        json.dump({
            "all_passed": all_passed,
            "results": results,
            "timestamp": "2026-04-14T19:05:00Z"
        }, f, indent=2)

    print(f"Results saved to test_results.json")

    # Exit with appropriate code
    exit(0 if all_passed else 1)
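
For context, the shape of a single extracted knowledge item that these tests implicitly expect can be sketched as follows; the concrete values are invented purely for illustration, while the field names, categories, and confidence bands come from the checks above:

```python
# Hypothetical knowledge item matching the required fields and categories
# checked by test_prompt_structure(); the values themselves are illustrative only.
item = {
    "fact": "prod-server is unreachable over SSH; deployments have to go through the VPS",
    "category": "pitfall",  # one of: fact, pitfall, pattern, tool-quirk, question
    "repo": "compounding-intelligence",
    "confidence": 0.7,      # 0.7-0.8 band = "clearly implied" per test_confidence_scoring()
}

assert all(field in item for field in ["fact", "category", "repo", "confidence"])
assert item["category"] in ["fact", "pitfall", "pattern", "tool-quirk", "question"]
assert 0.0 <= item["confidence"] <= 1.0
```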

test_sessions/session_failure.jsonl (new file, 18 lines added)
@@ -0,0 +1,18 @@
{"role": "user", "content": "Deploy the application to production"}
|
||||||
|
{"role": "assistant", "content": "I'll deploy the application to production.", "tool_calls": [{"function": "terminal", "arguments": {"command": "ssh deploy@prod-server 'cd /app && git pull && docker-compose up -d'"}}]}
|
||||||
|
{"role": "tool", "content": "ssh: connect to host prod-server port 22: Connection refused"}
|
||||||
|
{"role": "assistant", "content": "Connection refused. Let me check the server status.", "tool_calls": [{"function": "terminal", "arguments": {"command": "ping -c 3 prod-server"}}]}
|
||||||
|
{"role": "tool", "content": "PING prod-server (192.168.1.100): 56 data bytes\nRequest timeout for icmp_seq 0\nRequest timeout for icmp_seq 1\nRequest timeout for icmp_seq 2\n--- prod-server ping statistics ---\n3 packets transmitted, 0 packets received, 100.0% packet loss"}
|
||||||
|
{"role": "assistant", "content": "The server appears to be down. Let me check if there's an alternative deployment method."}
|
||||||
|
{"role": "user", "content": "Try using the VPS instead"}
|
||||||
|
{"role": "assistant", "content": "I'll try deploying to the VPS.", "tool_calls": [{"function": "terminal", "arguments": {"command": "ssh deploy@vps-server 'cd /app && git pull && docker-compose up -d'"}}]}
|
||||||
|
{"role": "tool", "content": "Permission denied (publickey)."}
|
||||||
|
{"role": "assistant", "content": "Permission denied. The SSH key might not be set up correctly. Let me check the SSH configuration.", "tool_calls": [{"function": "terminal", "arguments": {"command": "cat ~/.ssh/config"}}]}
|
||||||
|
{"role": "tool", "content": "Host vps-server\n HostName 167.99.126.228\n User deploy\n IdentityFile ~/.ssh/id_rsa_vps"}
|
||||||
|
{"role": "assistant", "content": "The SSH config looks correct. Let me check if the key exists.", "tool_calls": [{"function": "terminal", "arguments": {"command": "ls -la ~/.ssh/id_rsa_vps"}}]}
|
||||||
|
{"role": "tool", "content": "ls: /Users/user/.ssh/id_rsa_vps: No such file or directory"}
|
||||||
|
{"role": "assistant", "content": "The SSH key doesn't exist. This is a pitfall - we need to generate the SSH key first."}
|
||||||
|
{"role": "user", "content": "Generate a new SSH key for the VPS"}
|
||||||
|
{"role": "assistant", "content": "I'll generate a new SSH key.", "tool_calls": [{"function": "terminal", "arguments": {"command": "ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa_vps -N ''"}}]}
|
||||||
|
{"role": "tool", "content": "Generating public/private rsa key pair.\nYour identification has been saved in /Users/user/.ssh/id_rsa_vps\nYour public key has been saved in /Users/user/.ssh/id_rsa_vps.pub\nThe key fingerprint is:\nSHA256:abc123... user@machine"}
|
||||||
|
{"role": "assistant", "content": "SSH key generated. Now I need to add it to the VPS."}
|
||||||

test_sessions/session_partial.jsonl (new file, 14 lines added)
@@ -0,0 +1,14 @@
{"role": "user", "content": "Check the Gitea API for open issues"}
|
||||||
|
{"role": "assistant", "content": "I'll check the Gitea API for open issues.", "tool_calls": [{"function": "terminal", "arguments": {"command": "curl -H 'Authorization: token $GITEA_TOKEN' https://forge.alexanderwhitestone.com/api/v1/repos/Timmy_Foundation/compounding-intelligence/issues?state=open"}}]}
|
||||||
|
{"role": "tool", "content": "curl: (6) Could not resolve host: forge.alexanderwhitestone.com"}
|
||||||
|
{"role": "assistant", "content": "DNS resolution failed. Let me check the network connectivity.", "tool_calls": [{"function": "terminal", "arguments": {"command": "nslookup forge.alexanderwhitestone.com"}}]}
|
||||||
|
{"role": "tool", "content": "Server: 8.8.8.8\nAddress: 8.8.8.8#53\n\n** server can't find forge.alexanderwhitestone.com: NXDOMAIN"}
|
||||||
|
{"role": "assistant", "content": "The domain doesn't resolve. This might be a DNS issue. Let me try using the IP address directly.", "tool_calls": [{"function": "terminal", "arguments": {"command": "curl -H 'Authorization: token $GITEA_TOKEN' https://167.99.126.228/api/v1/repos/Timmy_Foundation/compounding-intelligence/issues?state=open"}}]}
|
||||||
|
{"role": "tool", "content": "curl: (60) SSL certificate problem: self signed certificate"}
|
||||||
|
{"role": "assistant", "content": "SSL certificate issue. The server is using a self-signed certificate. Let me try with -k flag to skip SSL verification.", "tool_calls": [{"function": "terminal", "arguments": {"command": "curl -k -H 'Authorization: token $GITEA_TOKEN' https://167.99.126.228/api/v1/repos/Timmy_Foundation/compounding-intelligence/issues?state=open"}}]}
|
||||||
|
{"role": "tool", "content": "[]"}
|
||||||
|
{"role": "assistant", "content": "No open issues found. The API returned an empty array."}
|
||||||
|
{"role": "user", "content": "Check if there are any pull requests"}
|
||||||
|
{"role": "assistant", "content": "Let me check for pull requests.", "tool_calls": [{"function": "terminal", "arguments": {"command": "curl -k -H 'Authorization: token $GITEA_TOKEN' https://167.99.126.228/api/v1/repos/Timmy_Foundation/compounding-intelligence/pulls?state=open"}}]}
|
||||||
|
{"role": "tool", "content": "[{"id": 20, "title": "feat: add harvester.py + session_reader.py — session knowledge extractor (closes #8)", "state": "open", "user": {"login": "Rockachopa"}, "created_at": "2026-04-14T17:28:20Z"}]"}
|
||||||
|
{"role": "assistant", "content": "Found 1 open pull request: PR #20 - "feat: add harvester.py + session_reader.py — session knowledge extractor (closes #8)" by Rockachopa."}
|
||||||

test_sessions/session_patterns.jsonl (new file, 10 lines added)
@@ -0,0 +1,10 @@
{"role": "user", "content": "How do I deploy a new version of the application?"}
|
||||||
|
{"role": "assistant", "content": "Here's the deployment process:\n1. Run tests: pytest\n2. Build Docker image: docker build -t app:latest .\n3. Push to registry: docker push registry.example.com/app:latest\n4. Update production: ssh prod 'cd /app && docker-compose pull && docker-compose up -d'\n5. Verify: curl https://app.example.com/health"}
|
||||||
|
{"role": "user", "content": "What if tests fail?"}
|
||||||
|
{"role": "assistant", "content": "If tests fail:\n1. Check the error logs: pytest -v\n2. Fix the failing tests\n3. Re-run tests: pytest\n4. If still failing, check recent changes: git log --oneline -10\n5. Consider reverting: git revert HEAD"}
|
||||||
|
{"role": "user", "content": "What if the Docker build fails?"}
|
||||||
|
{"role": "assistant", "content": "If Docker build fails:\n1. Check Dockerfile syntax: docker build --no-cache -t app:latest .\n2. Check for missing dependencies: cat requirements.txt\n3. Check disk space: df -h\n4. Clean up Docker: docker system prune -f\n5. Try building with verbose output: docker build --progress=plain -t app:latest ."}
|
||||||
|
{"role": "user", "content": "What if the push to registry fails?"}
|
||||||
|
{"role": "assistant", "content": "If push to registry fails:\n1. Check registry credentials: docker login registry.example.com\n2. Check registry space: curl https://registry.example.com/v2/_catalog\n3. Check image size: docker images app:latest\n4. Try tagging with version: docker tag app:latest registry.example.com/app:v1.0.0\n5. Push with version tag: docker push registry.example.com/app:v1.0.0"}
|
||||||
|
{"role": "user", "content": "What if the production update fails?"}
|
||||||
|
{"role": "assistant", "content": "If production update fails:\n1. Check SSH connection: ssh prod 'echo connected'\n2. Check disk space on prod: ssh prod 'df -h'\n3. Check Docker status: ssh prod 'docker ps -a'\n4. Check logs: ssh prod 'docker-compose logs --tail=100'\n5. Rollback: ssh prod 'cd /app && git checkout HEAD~1 && docker-compose up -d'"}
|
||||||

test_sessions/session_questions.jsonl (new file, 12 lines added)
@@ -0,0 +1,12 @@
{"role": "user", "content": "I need to implement a new feature for the application"}
|
||||||
|
{"role": "assistant", "content": "I can help you implement a new feature. What feature would you like to add?"}
|
||||||
|
{"role": "user", "content": "I want to add user authentication"}
|
||||||
|
{"role": "assistant", "content": "User authentication is a good feature. I have a few questions:\n1. What authentication method do you prefer? (JWT, OAuth, Session-based)\n2. Do you need social login? (Google, GitHub, etc.)\n3. What user data do you need to store?\n4. Do you need role-based access control?\n5. What's your database? (PostgreSQL, MySQL, SQLite)"}
|
||||||
|
{"role": "user", "content": "I prefer JWT, no social login, just email and password, no roles, PostgreSQL"}
|
||||||
|
{"role": "assistant", "content": "Great choices. Here's the implementation plan:\n1. Create user model with email and password hash\n2. Create JWT token generation and validation\n3. Create login endpoint: POST /api/auth/login\n4. Create register endpoint: POST /api/auth/register\n5. Create middleware for protected routes\n6. Add password hashing with bcrypt\n7. Add rate limiting for login attempts\n8. Add password reset functionality\n9. Add email verification\n10. Test all endpoints"}
|
||||||
|
{"role": "user", "content": "What about password reset?"}
|
||||||
|
{"role": "assistant", "content": "For password reset:\n1. Create reset token table\n2. Create forgot password endpoint: POST /api/auth/forgot-password\n3. Send reset email with token\n4. Create reset password endpoint: POST /api/auth/reset-password\n5. Validate reset token and expiration\n6. Update password and delete token\n7. Send confirmation email"}
|
||||||
|
{"role": "user", "content": "What about email verification?"}
|
||||||
|
{"role": "assistant", "content": "For email verification:\n1. Add email_verified field to user model\n2. Create verification token table\n3. Send verification email on registration\n4. Create verify email endpoint: GET /api/auth/verify-email?token=...\n5. Validate token and update email_verified\n6. Add middleware to check email_verified for certain routes\n7. Add resend verification email endpoint"}
|
||||||
|
{"role": "user", "content": "What about rate limiting?"}
|
||||||
|
{"role": "assistant", "content": "For rate limiting:\n1. Use Redis for rate limit storage\n2. Create rate limit middleware\n3. Set limits: 5 login attempts per minute, 3 password resets per hour\n4. Return 429 Too Many Requests when limit exceeded\n5. Add X-RateLimit headers to responses\n6. Log rate limit violations\n7. Add whitelist for trusted IPs"}
|
||||||

test_sessions/session_success.jsonl (new file, 13 lines added)
@@ -0,0 +1,13 @@
{"role": "user", "content": "Clone the repo at forge.alexanderwhitestone.com/Timmy_Foundation/compounding-intelligence"}
|
||||||
|
{"role": "assistant", "content": "I'll clone the repository for you.", "tool_calls": [{"function": "terminal", "arguments": {"command": "git clone https://forge.alexanderwhitestone.com/Timmy_Foundation/compounding-intelligence.git"}}]}
|
||||||
|
{"role": "tool", "content": "Cloning into 'compounding-intelligence'...\nremote: Enumerating objects: 15, done.\nremote: Counting objects: 100% (15/15), done.\nremote: Compressing objects: 100% (12/12), done.\nremote: Total 15 (delta 2), reused 0 (delta 0), pack-reused 0\nReceiving objects: 100% (15/15), done.\nResolving deltas: 100% (2/2), done."}
|
||||||
|
{"role": "assistant", "content": "Repository cloned successfully. Let me check the structure.", "tool_calls": [{"function": "terminal", "arguments": {"command": "ls -la compounding-intelligence/"}}]}
|
||||||
|
{"role": "tool", "content": "total 8\ndrwxr-xr-x 5 user staff 160 Apr 14 17:21 .\ndrwxr-xr-x 12 user staff 384 Apr 14 17:21 ..\ndrwxr-xr-x 13 user staff 416 Apr 14 17:21 .git\n-rw-r--r-- 1 user staff 1234 Apr 14 17:21 README.md\ndrwxr-xr-x 3 user staff 96 Apr 14 17:21 knowledge\n-rw-r--r-- 1 user staff 456 Apr 14 17:21 metrics\ndrwxr-xr-x 4 user staff 128 Apr 14 17:21 scripts\ndrwxr-xr-x 3 user staff 96 Apr 14 17:21 templates"}
|
||||||
|
{"role": "user", "content": "What files are in the templates directory?"}
|
||||||
|
{"role": "assistant", "content": "Let me check the templates directory.", "tool_calls": [{"function": "terminal", "arguments": {"command": "ls -la compounding-intelligence/templates/"}}]}
|
||||||
|
{"role": "tool", "content": "total 0\ndrwxr-xr-x 3 user staff 96 Apr 14 17:21 .\ndrwxr-xr-x 5 user staff 160 Apr 14 17:21 ..\n-rw-r--r-- 1 user staff 0 Apr 14 17:21 .gitkeep"}
|
||||||
|
{"role": "assistant", "content": "The templates directory currently only contains a .gitkeep file. No templates have been added yet."}
|
||||||
|
{"role": "user", "content": "Create a harvest-prompt.md template in the templates directory."}
|
||||||
|
{"role": "assistant", "content": "I'll create the harvest-prompt.md template for you.", "tool_calls": [{"function": "terminal", "arguments": {"command": "cat > compounding-intelligence/templates/harvest-prompt.md << 'EOF'\n# Knowledge Extraction Prompt\n\nExtract knowledge from session transcripts.\nEOF"}}]}
|
||||||
|
{"role": "tool", "content": ""}
|
||||||
|
{"role": "assistant", "content": "Template created successfully. The harvest-prompt.md file is now in the templates directory."}
|
||||||
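
Finally, a minimal sketch of how a downstream harvester might load one of these transcripts, assuming only that each line is a single JSON message as in the files above; the chosen filename and the printing are illustrative, not an API these changes define:

```python
import json
from pathlib import Path

# Read a line-delimited session transcript; any of the test_sessions/*.jsonl
# files added above has this shape. The specific filename is just an example.
session_path = Path("test_sessions/session_failure.jsonl")
messages = [json.loads(line) for line in session_path.read_text().splitlines() if line.strip()]

# Each message carries a "role" (user/assistant/tool) and a "content" string;
# assistant turns may also carry "tool_calls".
for msg in messages:
    n_calls = len(msg.get("tool_calls", []))
    suffix = f" [{n_calls} tool call(s)]" if n_calls else ""
    print(f"{msg['role']}: {msg['content'][:60]}{suffix}")
```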