Compare commits
4 Commits
fix/11-boo ... fix/9-auto

| Author | SHA1 | Date |
|---|---|---|
|  | 7dc6461291 |  |
|  | ddc04765f7 |  |
|  | ddf5099b38 |  |
|  | 4b2bd334e2 |  |
10
knowledge/harvest_state.json
Normal file
@@ -0,0 +1,10 @@
{
  "last_harvest": "2026-04-14T18:04:45.484759+00:00",
  "harvested_sessions": [
    "20260413_175935_20cb44",
    "20260413_171106_62c276",
    "20260413_181734_aed35b"
  ],
  "total_sessions_processed": 3,
  "total_facts_extracted": 59
}
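
Reviewer note: this state file is what keeps the harvester idempotent — session IDs listed in `harvested_sessions` are skipped on the next run. A quick cross-check of the ledger against the index it guards, as a minimal sketch; it assumes both files sit under `knowledge/` as in this compare and that it runs from the repo root:

```python
import json
from pathlib import Path

# Cross-check the harvest ledger against the knowledge index it guards.
# Assumes execution from the repo root shown in this compare.
state = json.loads(Path("knowledge/harvest_state.json").read_text())
index = json.loads(Path("knowledge/index.json").read_text())

print(f"sessions harvested: {len(state['harvested_sessions'])}")   # 3
print(f"facts per ledger:   {state['total_facts_extracted']}")     # 59
print(f"facts in index:     {index['total_facts']}")               # 59

# On a fresh store these agree; they drift apart only if the index is
# edited or deduplicated outside the harvester.
assert state["total_facts_extracted"] == index["total_facts"]
```
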
knowledge/index.json
@@ -1,6 +1,597 @@
 {
   "version": 1,
-  "last_updated": "2026-04-13T20:00:00Z",
-  "total_facts": 0,
-  "facts": []
+  "last_updated": "2026-04-14T18:04:45.484238+00:00",
+  "total_facts": 59,
+  "facts": [
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_z8ielhro/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.477585+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479057+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: crons.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.477603+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479059+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: 300.07",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.477614+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479060+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox__3wxy21d/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.477622+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479061+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_dimnu9ba/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.477633+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479062+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: nhermes_cli/cron.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.477664+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479062+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: hermes_cli/cron.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.477793+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479063+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: config.yaml",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.477921+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479064+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: ~/.hermes",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478092+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479065+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: ncli.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478281+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479065+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: 300.17",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478293+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479066+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: 10.88",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478370+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479067+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: k2.5",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478386+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479067+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: 300.92",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478410+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479068+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Successful command pattern: python observatory.py --check ",
+      "category": "pattern",
+      "repo": "global",
+      "confidence": 0.6,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478498+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479069+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: devkit/health.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478571+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479069+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: CHANGELOG.md",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478608+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479070+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: 300.06",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478635+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479071+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: 300.03",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478658+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479072+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: crons.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478703+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479072+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: crons.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478757+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479073+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_1h5nj9lg/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478778+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479074+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: job.get",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478833+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479074+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: CreateIssueOption.Labels",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.478975+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479075+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Successful command pattern: git process seems to be running in this repository",
+      "category": "pattern",
+      "repo": "global",
+      "confidence": 0.6,
+      "session_id": "20260413_175935_20cb44",
+      "extracted_at": "2026-04-14T18:04:45.479018+00:00",
+      "harvested_at": "2026-04-14T18:04:45.479076+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_175935_20cb44.json"
+    },
+    {
+      "fact": "Error encountered with file: ~/.hermes",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.479242+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482379+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: pokayoke/hermes_constants.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.479346+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482380+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: Path.home",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.479565+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482380+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_5pwgex20/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.479901+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482381+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: 300.11",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.480675+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482382+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: AIAgent.__init__",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.480862+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482383+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: job.ge",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.481044+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482383+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: cron/scheduler.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.481254+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482384+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: __main__.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.481644+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482385+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: tests/test_prompt_injection_defense.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.481654+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482385+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_v2umc709/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.481666+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482386+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: pytest.mark",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.481733+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482387+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: ntests/test_prompt_injection_defense.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.481788+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482388+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: result.get",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.481979+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482388+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: concurrent.future",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.482228+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482389+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: 0.0",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.482252+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482390+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_mjbblg0z/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_171106_62c276",
+      "extracted_at": "2026-04-14T18:04:45.482315+00:00",
+      "harvested_at": "2026-04-14T18:04:45.482390+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_171106_62c276.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_u2ngkm60/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.482463+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484207+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_i63vbaem/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.482569+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484208+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: 3.12",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.482589+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484209+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Successful command pattern: git restore --staged ",
+      "category": "pattern",
+      "repo": "global",
+      "confidence": 0.6,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.482629+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484209+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: forge.alexanderwhitestone",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.482645+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484210+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Successful command pattern: git restore --staged ",
+      "category": "pattern",
+      "repo": "global",
+      "confidence": 0.6,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483301+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484211+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: ntests/test_repo_truth.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483472+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484211+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Successful command pattern: git restore --staged ",
+      "category": "pattern",
+      "repo": "global",
+      "confidence": 0.6,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483479+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484212+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: 300.02",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483596+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484213+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Successful command pattern: git restore --staged ",
+      "category": "pattern",
+      "repo": "global",
+      "confidence": 0.6,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483603+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484213+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Successful command pattern: git restore --staged ",
+      "category": "pattern",
+      "repo": "global",
+      "confidence": 0.6,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483697+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484214+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: 300.37",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483785+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484215+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_2k0n79t8/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483792+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484216+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: 300.19",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483864+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484216+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: /private/var/folders/9k/v07xkpp133v03yynn9nx80fr0000gn/T/hermes_sandbox_qxzsy_kv/script.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483919+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484217+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: CreateIssueOption.Labels",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483930+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484218+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    },
+    {
+      "fact": "Error encountered with file: verify_triage_status.py",
+      "category": "pitfall",
+      "repo": "global",
+      "confidence": 0.7,
+      "session_id": "20260413_181734_aed35b",
+      "extracted_at": "2026-04-14T18:04:45.483963+00:00",
+      "harvested_at": "2026-04-14T18:04:45.484218+00:00",
+      "session_path": "/Users/apayne/.hermes/sessions/session_20260413_181734_aed35b.json"
+    }
+  ]
 }
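
Reviewer note: many of the "pitfall" facts above ("300.07", "3.12", "k2.5", "0.0") are not files at all — they are version numbers and durations captured by the harvester's path regex `[~/.]?[\w/]+\.\w+` (introduced later in this diff), which matches any `digits.digits` token. A sketch of a stricter filter, purely illustrative and not part of this PR:

```python
import re

# Pattern shipped in scripts/harvester.py below; it matches "300.07" as
# readily as "crons.py", which is where the numeric "pitfalls" come from.
LOOSE_PATH_RE = re.compile(r'[~/.]?[\w/]+\.\w+')

# One possible tightening (illustrative only): require the extension to
# start with a letter, so bare decimals stop qualifying as file paths.
# Trade-off: it also rejects extension-less dotpaths such as "~/.hermes".
STRICT_PATH_RE = re.compile(r'[~/.]?[\w./-]*\w+\.[A-Za-z]\w*')

for token in ["300.07", "k2.5", "crons.py", "~/.hermes", "devkit/health.py"]:
    print(
        f"{token:20} loose={bool(LOOSE_PATH_RE.fullmatch(token))} "
        f"strict={bool(STRICT_PATH_RE.fullmatch(token))}"
    )
```
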
scripts/bootstrapper.py
@@ -1,359 +0,0 @@
#!/usr/bin/env python3
"""
Bootstrapper — assemble pre-session context from knowledge store.

Reads the knowledge store and produces a compact context block (2k tokens max)
that can be injected into a new session so it starts with situational awareness.

Usage:
    python3 bootstrapper.py --repo the-nexus --agent mimo-sprint
    python3 bootstrapper.py --repo timmy-home --global
    python3 bootstrapper.py --global
    python3 bootstrapper.py --repo the-nexus --max-tokens 1000
"""

import argparse
import json
import sys
from pathlib import Path
from typing import Optional

# Resolve knowledge root relative to this script's parent
SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
KNOWLEDGE_DIR = REPO_ROOT / "knowledge"
INDEX_PATH = KNOWLEDGE_DIR / "index.json"

# Approximate token count: ~4 chars per token for English text
CHARS_PER_TOKEN = 4

# Category sort priority (lower = shown first)
CATEGORY_PRIORITY = {
    "pitfall": 0,
    "tool-quirk": 1,
    "pattern": 2,
    "fact": 3,
    "question": 4,
}


def load_index(index_path: Path = INDEX_PATH) -> dict:
    """Load and validate the knowledge index."""
    if not index_path.exists():
        return {"version": 1, "total_facts": 0, "facts": []}

    with open(index_path) as f:
        data = json.load(f)

    if "facts" not in data:
        print(f"WARNING: index.json missing 'facts' key", file=sys.stderr)
        return {"version": 1, "total_facts": 0, "facts": []}

    return data


def filter_facts(
    facts: list[dict],
    repo: Optional[str] = None,
    agent: Optional[str] = None,
    include_global: bool = True,
) -> list[dict]:
    """Filter facts by repo, agent, and global scope."""
    filtered = []

    for fact in facts:
        fact_repo = fact.get("repo", "global")
        fact_agent = fact.get("agent", "")

        # Match by repo (regardless of agent)
        if repo and fact_repo == repo:
            filtered.append(fact)
            continue

        # Match by exact agent type
        if agent and fact_agent == agent:
            filtered.append(fact)
            continue

        # Include global facts without agent restriction (universal facts)
        if include_global and fact_repo == "global" and not fact_agent:
            filtered.append(fact)

    return filtered


def sort_facts(facts: list[dict]) -> list[dict]:
    """
    Sort facts by: confidence (desc), then category priority, then fact text.
    Most reliable and most dangerous facts come first.
    """

    def sort_key(f):
        confidence = f.get("confidence", 0.5)
        category = f.get("category", "fact")
        cat_priority = CATEGORY_PRIORITY.get(category, 5)
        return (-confidence, cat_priority, f.get("fact", ""))

    return sorted(facts, key=sort_key)


def load_repo_knowledge(repo: str) -> Optional[str]:
    """Load per-repo knowledge markdown if it exists."""
    repo_path = KNOWLEDGE_DIR / "repos" / f"{repo}.md"
    if repo_path.exists():
        return repo_path.read_text().strip()
    return None


def load_agent_knowledge(agent: str) -> Optional[str]:
    """Load per-agent knowledge markdown if it exists."""
    agent_path = KNOWLEDGE_DIR / "agents" / f"{agent}.md"
    if agent_path.exists():
        return agent_path.read_text().strip()
    return None


def load_global_knowledge() -> list[str]:
    """Load all global knowledge markdown files."""
    global_dir = KNOWLEDGE_DIR / "global"
    if not global_dir.exists():
        return []

    chunks = []
    for md_file in sorted(global_dir.glob("*.md")):
        content = md_file.read_text().strip()
        if content:
            chunks.append(content)
    return chunks


def render_facts_section(facts: list[dict], category: str, label: str) -> str:
    """Render a section of facts for a single category."""
    cat_facts = [f for f in facts if f.get("category") == category]
    if not cat_facts:
        return ""

    lines = [f"### {label}\n"]
    for f in cat_facts:
        conf = f.get("confidence", 0.5)
        fact_text = f.get("fact", "")
        repo_tag = f.get("repo", "")
        if repo_tag and repo_tag != "global":
            lines.append(f"- [{conf:.0%}] ({repo_tag}) {fact_text}")
        else:
            lines.append(f"- [{conf:.0%}] {fact_text}")

    return "\n".join(lines) + "\n"


def estimate_tokens(text: str) -> int:
    """Rough token estimate."""
    return len(text) // CHARS_PER_TOKEN


def truncate_to_tokens(text: str, max_tokens: int) -> str:
    """Truncate text to approximately max_tokens, cutting at line boundaries."""
    max_chars = max_tokens * CHARS_PER_TOKEN
    if len(text) <= max_chars:
        return text

    # Cut at last newline before the limit
    truncated = text[:max_chars]
    last_newline = truncated.rfind("\n")
    if last_newline > 0:
        truncated = truncated[:last_newline]

    return truncated + "\n\n[... truncated to fit context window ...]"


def build_bootstrap_context(
    repo: Optional[str] = None,
    agent: Optional[str] = None,
    include_global: bool = True,
    max_tokens: int = 2000,
    index_path: Path = INDEX_PATH,
) -> str:
    """
    Build the full bootstrap context block.

    Returns a markdown string suitable for injection into a session prompt.
    """
    index = load_index(index_path)
    facts = index.get("facts", [])

    # Filter
    filtered = filter_facts(facts, repo=repo, agent=agent, include_global=include_global)

    # Sort
    sorted_facts = sort_facts(filtered)

    # Build sections
    sections = ["## What You Know (bootstrapped)\n"]

    # Per-repo markdown knowledge
    if repo:
        repo_md = load_repo_knowledge(repo)
        if repo_md:
            sections.append(f"### Repo Notes: {repo}\n")
            sections.append(repo_md + "\n")

    # Structured facts by category
    if sorted_facts:
        # Group by source
        repo_facts = [f for f in sorted_facts if f.get("repo") == repo] if repo else []
        global_facts = [f for f in sorted_facts if f.get("repo") == "global"]
        agent_facts = [f for f in sorted_facts if f.get("agent") == agent] if agent else []

        if repo_facts:
            sections.append(f"### Repo: {repo}\n")
            for cat, label in [
                ("pitfall", "PITFALLS"),
                ("tool-quirk", "QUIRKS"),
                ("pattern", "PATTERNS"),
                ("fact", "FACTS"),
                ("question", "OPEN QUESTIONS"),
            ]:
                section = render_facts_section(repo_facts, cat, label)
                if section:
                    sections.append(section)

        if global_facts:
            sections.append("### Global\n")
            for cat, label in [
                ("pitfall", "PITFALLS"),
                ("tool-quirk", "QUIRKS"),
                ("pattern", "PATTERNS"),
                ("fact", "FACTS"),
            ]:
                section = render_facts_section(global_facts, cat, label)
                if section:
                    sections.append(section)

        if agent_facts:
            sections.append(f"### Agent Notes ({agent})\n")
            for cat, label in [
                ("pitfall", "PITFALLS"),
                ("tool-quirk", "QUIRKS"),
                ("pattern", "PATTERNS"),
                ("fact", "FACTS"),
            ]:
                section = render_facts_section(agent_facts, cat, label)
                if section:
                    sections.append(section)

    # Per-agent markdown knowledge
    if agent:
        agent_md = load_agent_knowledge(agent)
        if agent_md:
            sections.append(f"### Agent Profile: {agent}\n")
            sections.append(agent_md + "\n")

    # Global markdown knowledge
    global_chunks = load_global_knowledge()
    if global_chunks:
        sections.append("### Global Notes\n")
        sections.extend(chunk + "\n" for chunk in global_chunks)

    # If nothing was found
    if len(sections) == 1:
        sections.append("_No relevant knowledge found. Starting fresh._\n")
        if not facts:
            sections.append(
                "_Knowledge store is empty. Run the harvester to populate it._\n"
            )

    # Join and truncate
    context = "\n".join(sections)
    context = truncate_to_tokens(context, max_tokens)

    return context


def main():
    parser = argparse.ArgumentParser(
        description="Assemble pre-session context from knowledge store"
    )
    parser.add_argument(
        "--repo",
        type=str,
        default=None,
        help="Repository name to filter facts by",
    )
    parser.add_argument(
        "--agent",
        type=str,
        default=None,
        help="Agent type to filter facts by (e.g., mimo-sprint, groq-fast)",
    )
    parser.add_argument(
        "--global",
        dest="include_global",
        action="store_true",
        default=True,
        help="Include global facts (default: true)",
    )
    parser.add_argument(
        "--no-global",
        dest="include_global",
        action="store_false",
        help="Exclude global facts",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=2000,
        help="Maximum token count for output (default: 2000)",
    )
    parser.add_argument(
        "--index",
        type=str,
        default=None,
        help="Path to index.json (default: knowledge/index.json)",
    )
    parser.add_argument(
        "--json",
        dest="output_json",
        action="store_true",
        help="Output raw JSON instead of markdown",
    )

    args = parser.parse_args()

    index_path = Path(args.index) if args.index else INDEX_PATH

    if args.output_json:
        # JSON mode: return the filtered, sorted facts
        index = load_index(index_path)
        facts = index.get("facts", [])
        filtered = filter_facts(
            facts,
            repo=args.repo,
            agent=args.agent,
            include_global=args.include_global,
        )
        sorted_facts = sort_facts(filtered)
        output = {
            "repo": args.repo,
            "agent": args.agent,
            "include_global": args.include_global,
            "total_indexed": len(facts),
            "matched": len(sorted_facts),
            "facts": sorted_facts,
        }
        print(json.dumps(output, indent=2))
    else:
        # Markdown mode: full bootstrap context
        context = build_bootstrap_context(
            repo=args.repo,
            agent=args.agent,
            include_global=args.include_global,
            max_tokens=args.max_tokens,
            index_path=index_path,
        )
        print(context)

    return 0


if __name__ == "__main__":
    sys.exit(main())
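
Before moving to the new files, the deleted bootstrapper's budget arithmetic is worth one concrete line: with `CHARS_PER_TOKEN = 4` and the default `--max-tokens 2000`, `truncate_to_tokens` starts cutting at roughly 8,000 characters and then backs up to the previous newline. A standalone sketch of that math (re-implemented here rather than imported, since the module is gone after this change):

```python
CHARS_PER_TOKEN = 4  # the deleted bootstrapper's ~4-chars-per-token heuristic

def estimate_tokens(text: str) -> int:
    # Rough estimate only; a real tokenizer (e.g. tiktoken) would differ.
    return len(text) // CHARS_PER_TOKEN

max_tokens = 2000                      # the --max-tokens default
print(max_tokens * CHARS_PER_TOKEN)    # 8000 chars before truncation kicks in

context = "x" * 10_000
print(estimate_tokens(context))        # 2500 -> over budget by ~500 tokens
```
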
350
scripts/harvester.py
Normal file
@@ -0,0 +1,350 @@
#!/usr/bin/env python3
"""
Session Harvester for Compounding Intelligence.
Extracts durable knowledge from completed sessions and updates the knowledge store.
"""

import json
import os
import sys
import logging
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import List, Dict, Any, Optional

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))
from session_reader import SessionReader

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(Path(__file__).parent.parent / 'metrics' / 'harvester.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class KnowledgeHarvester:
    """Extracts knowledge from completed sessions."""

    def __init__(self, repo_root: str = None):
        """Initialize the harvester."""
        if repo_root is None:
            repo_root = str(Path(__file__).parent.parent)
        self.repo_root = Path(repo_root)
        self.knowledge_dir = self.repo_root / "knowledge"
        self.index_path = self.knowledge_dir / "index.json"
        self.prompt_path = self.repo_root / "templates" / "harvest-prompt.md"

        # Load or create knowledge index
        self.index = self._load_index()

        # Initialize session reader
        self.reader = SessionReader()

        # Harvest state file
        self.state_path = self.knowledge_dir / "harvest_state.json"
        self.state = self._load_state()

    def _load_index(self) -> Dict[str, Any]:
        """Load or create the knowledge index."""
        if self.index_path.exists():
            with open(self.index_path, 'r') as f:
                return json.load(f)
        else:
            return {
                "version": 1,
                "last_updated": datetime.now(timezone.utc).isoformat(),
                "total_facts": 0,
                "facts": []
            }

    def _save_index(self):
        """Save the knowledge index."""
        self.index["last_updated"] = datetime.now(timezone.utc).isoformat()
        with open(self.index_path, 'w') as f:
            json.dump(self.index, f, indent=2)

    def _load_state(self) -> Dict[str, Any]:
        """Load harvest state."""
        if self.state_path.exists():
            with open(self.state_path, 'r') as f:
                return json.load(f)
        else:
            return {
                "last_harvest": None,
                "harvested_sessions": [],
                "total_sessions_processed": 0,
                "total_facts_extracted": 0
            }

    def _save_state(self):
        """Save harvest state."""
        with open(self.state_path, 'w') as f:
            json.dump(self.state, f, indent=2)

    def get_sessions_to_harvest(self, max_age_hours: float = 24) -> List[Dict[str, Any]]:
        """
        Get sessions that need harvesting.

        Args:
            max_age_hours: Only harvest sessions modified within this many hours

        Returns:
            List of session data dictionaries
        """
        # Get sessions modified since last harvest
        since = None
        if self.state["last_harvest"]:
            try:
                since = datetime.fromisoformat(self.state["last_harvest"].replace('Z', '+00:00'))
            except (ValueError, AttributeError):
                pass

        # If no last harvest, use max_age_hours
        if since is None:
            since = datetime.now(timezone.utc) - timedelta(hours=max_age_hours)

        # Get recent sessions
        sessions = self.reader.list_sessions(since=since)

        # Filter out already harvested sessions
        harvested = set(self.state["harvested_sessions"])
        to_harvest = []

        for path in sessions:
            session = self.reader.read_session(path)
            if "error" in session:
                logger.warning(f"Error reading session {path}: {session['error']}")
                continue

            # Skip if already harvested
            if session["session_id"] in harvested:
                continue

            # Skip if session is still active
            if not self.reader.is_session_complete(session):
                continue

            to_harvest.append(session)

        return to_harvest

    def extract_knowledge_from_session(self, session: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Extract knowledge from a single session.

        This is a simplified extraction that looks for patterns in the session.
        In a full implementation, this would use an LLM with the harvest prompt.

        Args:
            session: Session data dictionary

        Returns:
            List of extracted knowledge items
        """
        knowledge_items = []

        # Get messages from session
        messages = session.get("messages", [])

        # Simple pattern-based extraction
        for i, msg in enumerate(messages):
            if not isinstance(msg, dict):
                continue

            role = msg.get("role", "")
            content = msg.get("content", "")

            if not content or not isinstance(content, str):
                continue

            # Look for error patterns
            if "error" in content.lower() or "Error" in content:
                # Extract error context
                context = content[:200]  # First 200 chars

                # Look for file paths
                import re
                file_paths = re.findall(r'[~/.]?[\w/]+\.\w+', context)

                if file_paths:
                    knowledge_items.append({
                        "fact": f"Error encountered with file: {file_paths[0]}",
                        "category": "pitfall",
                        "repo": "global",
                        "confidence": 0.7,
                        "session_id": session["session_id"],
                        "extracted_at": datetime.now(timezone.utc).isoformat()
                    })

            # Look for successful patterns
            if "success" in content.lower() or "Success" in content:
                # Extract success context
                context = content[:200]

                # Look for commands or actions
                import re
                commands = re.findall(r'(?:git|npm|pip|python|curl|ssh)\s+[\w\s\-\.]+', context)

                if commands:
                    knowledge_items.append({
                        "fact": f"Successful command pattern: {commands[0]}",
                        "category": "pattern",
                        "repo": "global",
                        "confidence": 0.6,
                        "session_id": session["session_id"],
                        "extracted_at": datetime.now(timezone.utc).isoformat()
                    })

        return knowledge_items

    def harvest_session(self, session: Dict[str, Any]) -> Dict[str, Any]:
        """
        Harvest knowledge from a single session.

        Args:
            session: Session data dictionary

        Returns:
            Harvest result dictionary
        """
        session_id = session["session_id"]
        logger.info(f"Harvesting session: {session_id}")

        try:
            # Extract knowledge
            knowledge_items = self.extract_knowledge_from_session(session)

            # Add to index
            for item in knowledge_items:
                # Add metadata
                item["harvested_at"] = datetime.now(timezone.utc).isoformat()
                item["session_path"] = session.get("path", "")

                # Add to facts
                self.index["facts"].append(item)

            # Update state
            self.state["harvested_sessions"].append(session_id)
            self.state["total_sessions_processed"] += 1
            self.state["total_facts_extracted"] += len(knowledge_items)

            result = {
                "session_id": session_id,
                "success": True,
                "facts_extracted": len(knowledge_items),
                "knowledge_items": knowledge_items
            }

            logger.info(f"Extracted {len(knowledge_items)} facts from session {session_id}")

        except Exception as e:
            logger.error(f"Error harvesting session {session_id}: {e}")
            result = {
                "session_id": session_id,
                "success": False,
                "error": str(e),
                "facts_extracted": 0
            }

        return result

    def harvest_batch(self, max_sessions: int = 10, max_age_hours: float = 24) -> Dict[str, Any]:
        """
        Harvest a batch of sessions.

        Args:
            max_sessions: Maximum number of sessions to harvest
            max_age_hours: Only harvest sessions modified within this many hours

        Returns:
            Batch harvest result
        """
        logger.info(f"Starting harvest batch (max {max_sessions} sessions, max age {max_age_hours}h)")

        # Get sessions to harvest
        sessions = self.get_sessions_to_harvest(max_age_hours)

        if not sessions:
            logger.info("No sessions to harvest")
            return {
                "success": True,
                "sessions_processed": 0,
                "facts_extracted": 0,
                "results": []
            }

        # Limit to max_sessions
        sessions = sessions[:max_sessions]

        results = []
        total_facts = 0

        for session in sessions:
            result = self.harvest_session(session)
            results.append(result)

            if result["success"]:
                total_facts += result["facts_extracted"]

        # Update index and state
        self.index["total_facts"] = len(self.index["facts"])
        self._save_index()

        self.state["last_harvest"] = datetime.now(timezone.utc).isoformat()
        self._save_state()

        batch_result = {
            "success": True,
            "sessions_processed": len(sessions),
            "facts_extracted": total_facts,
            "results": results,
            "timestamp": datetime.now(timezone.utc).isoformat()
        }

        logger.info(f"Harvest batch complete: {len(sessions)} sessions, {total_facts} facts")

        return batch_result


def main():
    """Main entry point for the harvester."""
    import argparse

    parser = argparse.ArgumentParser(description="Harvest knowledge from completed sessions")
    parser.add_argument("--max-sessions", type=int, default=10, help="Maximum sessions to harvest")
    parser.add_argument("--max-age-hours", type=float, default=24, help="Max age in hours")
    parser.add_argument("--dry-run", action="store_true", help="Don't save, just report")

    args = parser.parse_args()

    harvester = KnowledgeHarvester()

    if args.dry_run:
        sessions = harvester.get_sessions_to_harvest(args.max_age_hours)
        print(f"Would harvest {len(sessions)} sessions:")
        for session in sessions[:5]:  # Show first 5
            print(f"  - {session['session_id']} ({session['message_count']} messages)")
        if len(sessions) > 5:
            print(f"  ... and {len(sessions) - 5} more")
        return

    result = harvester.harvest_batch(
        max_sessions=args.max_sessions,
        max_age_hours=args.max_age_hours
    )

    if result["success"]:
        print(f"Harvest complete: {result['sessions_processed']} sessions, {result['facts_extracted']} facts")
    else:
        print(f"Harvest failed: {result.get('error', 'Unknown error')}")
        sys.exit(1)


if __name__ == "__main__":
    main()
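
Reviewer note: `harvest_session` appends every extracted item straight onto `index["facts"]`, so repeated errors across sessions land as duplicates — the index above already carries `crons.py` three times and `git restore --staged` five times. A hedged sketch of one possible dedupe pass; the key choice of (fact, category, repo) is my assumption, not something this PR implements:

```python
from typing import Any, Dict, List

def dedupe_facts(facts: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Drop repeat facts, keeping the first occurrence of each.

    Keying on (fact, category, repo) is an assumption for illustration;
    the harvester in this PR does no deduplication at all.
    """
    seen = set()
    unique = []
    for item in facts:
        key = (item.get("fact"), item.get("category"), item.get("repo"))
        if key in seen:
            continue
        seen.add(key)
        unique.append(item)
    return unique

# e.g. applied before _save_index():
#   self.index["facts"] = dedupe_facts(self.index["facts"])
```
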
194
scripts/session_reader.py
Normal file
@@ -0,0 +1,194 @@
#!/usr/bin/env python3
"""
Session reader for Compounding Intelligence.
Reads and parses Hermes session files from ~/.hermes/sessions/.
"""

import json
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import List, Dict, Any, Optional


class SessionReader:
    """Reads and parses Hermes session files."""

    def __init__(self, sessions_dir: str = None):
        """Initialize with sessions directory path."""
        if sessions_dir is None:
            sessions_dir = os.path.expanduser("~/.hermes/sessions")
        self.sessions_dir = Path(sessions_dir)
        self.supported_extensions = {'.json', '.jsonl'}

    def list_sessions(self, since: Optional[datetime] = None, limit: int = None) -> List[Path]:
        """
        List session files, optionally filtered by modification time.

        Args:
            since: Only return sessions modified after this datetime
            limit: Maximum number of sessions to return

        Returns:
            List of Path objects to session files
        """
        if not self.sessions_dir.exists():
            return []

        sessions = []
        for f in self.sessions_dir.iterdir():
            if f.suffix in self.supported_extensions:
                if since is not None:
                    mtime = datetime.fromtimestamp(f.stat().st_mtime, tz=timezone.utc)
                    if mtime <= since:
                        continue
                sessions.append(f)

        # Sort by modification time (newest first)
        sessions.sort(key=lambda p: p.stat().st_mtime, reverse=True)

        if limit:
            sessions = sessions[:limit]

        return sessions

    def read_session(self, path: Path) -> Dict[str, Any]:
        """
        Read a session file and return structured data.

        Args:
            path: Path to session file

        Returns:
            Dictionary with session data
        """
        try:
            if path.suffix == '.jsonl':
                return self._read_jsonl_session(path)
            elif path.suffix == '.json':
                return self._read_json_session(path)
            else:
                return {"error": f"Unsupported format: {path.suffix}"}
        except Exception as e:
            return {"error": str(e), "path": str(path)}

    def _read_json_session(self, path: Path) -> Dict[str, Any]:
        """Read a JSON format session file."""
        with open(path, 'r') as f:
            data = json.load(f)

        return {
            "session_id": data.get("session_id", path.stem),
            "model": data.get("model", "unknown"),
            "created_at": data.get("session_start"),
            "last_updated": data.get("last_updated"),
            "message_count": data.get("message_count", len(data.get("messages", []))),
            "messages": data.get("messages", []),
            "path": str(path),
            "format": "json"
        }

    def _read_jsonl_session(self, path: Path) -> Dict[str, Any]:
        """Read a JSONL format session file."""
        messages = []
        session_meta = None

        with open(path, 'r') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                try:
                    entry = json.loads(line)
                    if entry.get("role") == "session_meta":
                        session_meta = entry
                    else:
                        messages.append(entry)
                except json.JSONDecodeError:
                    continue

        session_id = path.stem
        if session_meta:
            session_id = session_meta.get("session_id", session_id)

        return {
            "session_id": session_id,
            "model": session_meta.get("model", "unknown") if session_meta else "unknown",
            "created_at": session_meta.get("timestamp") if session_meta else None,
            "last_updated": messages[-1].get("timestamp") if messages else None,
            "message_count": len(messages),
            "messages": messages,
            "path": str(path),
            "format": "jsonl",
            "meta": session_meta
        }

    def get_session_age_hours(self, session_data: Dict[str, Any]) -> float:
        """Get session age in hours."""
        last_updated = session_data.get("last_updated")
        if not last_updated:
            return float('inf')

        try:
            if isinstance(last_updated, str):
                # Handle various timestamp formats
                for fmt in [
                    "%Y-%m-%dT%H:%M:%S.%fZ",
                    "%Y-%m-%dT%H:%M:%SZ",
                    "%Y-%m-%dT%H:%M:%S.%f",
                    "%Y-%m-%dT%H:%M:%S"
                ]:
                    try:
                        dt = datetime.strptime(last_updated, fmt)
                        dt = dt.replace(tzinfo=timezone.utc)
                        break
                    except ValueError:
                        continue
                else:
                    # Try parsing with fromisoformat
                    dt = datetime.fromisoformat(last_updated.replace('Z', '+00:00'))
            else:
                dt = last_updated

            now = datetime.now(timezone.utc)
            age = now - dt
            return age.total_seconds() / 3600
        except Exception:
            return float('inf')

    def is_session_complete(self, session_data: Dict[str, Any]) -> bool:
        """
        Check if a session appears to be complete (not actively running).

        Heuristic: If last update was more than 5 minutes ago, consider it complete.
        """
        age_hours = self.get_session_age_hours(session_data)
        return age_hours > (5 / 60)  # 5 minutes


def main():
    """Test the session reader."""
    reader = SessionReader()

    # List recent sessions
    sessions = reader.list_sessions(limit=5)
    print(f"Found {len(sessions)} recent sessions")

    for path in sessions:
        session = reader.read_session(path)
        if "error" in session:
            print(f"Error reading {path}: {session['error']}")
            continue

        age_hours = reader.get_session_age_hours(session)
        complete = reader.is_session_complete(session)

        print(f"\nSession: {session['session_id']}")
        print(f"  Model: {session['model']}")
        print(f"  Messages: {session['message_count']}")
        print(f"  Age: {age_hours:.1f} hours")
        print(f"  Complete: {complete}")


if __name__ == "__main__":
    main()
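
The 5-minute completeness heuristic deserves a concrete reading: a session counts as complete once `get_session_age_hours` exceeds 5/60 ≈ 0.083 hours, and any unparseable timestamp returns `float('inf')`, which also counts as complete. A minimal sketch of that decision for a single ISO timestamp, mirroring (not importing) the reader's logic:

```python
from datetime import datetime, timedelta, timezone

COMPLETE_AFTER = timedelta(minutes=5)  # same threshold as is_session_complete

def looks_complete(last_updated_iso: str, now: datetime) -> bool:
    """Mirror of the reader's heuristic for one ISO timestamp.

    Note: the real method treats unparseable timestamps as infinitely old,
    so malformed sessions are harvested rather than skipped.
    """
    try:
        dt = datetime.fromisoformat(last_updated_iso.replace('Z', '+00:00'))
    except ValueError:
        return True  # age == inf in the reader
    return (now - dt) > COMPLETE_AFTER

now = datetime(2026, 4, 14, 18, 4, 45, tzinfo=timezone.utc)  # the harvest time above
print(looks_complete("2026-04-14T18:03:00Z", now))  # False: updated 105s ago
print(looks_complete("2026-04-14T17:00:00Z", now))  # True: idle for over an hour
```
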
@@ -1,239 +0,0 @@
#!/usr/bin/env python3
"""
Tests for bootstrapper.py — context assembly from knowledge store.
"""

import json
import sys
import tempfile
from pathlib import Path

# Add scripts dir to path for import
sys.path.insert(0, str(Path(__file__).resolve().parent))

from bootstrapper import (
    build_bootstrap_context,
    estimate_tokens,
    filter_facts,
    load_index,
    sort_facts,
    truncate_to_tokens,
)


def make_index(facts: list[dict], tmp_dir: Path) -> Path:
    """Create a temporary index.json with given facts."""
    index = {
        "version": 1,
        "last_updated": "2026-04-13T20:00:00Z",
        "total_facts": len(facts),
        "facts": facts,
    }
    path = tmp_dir / "index.json"
    with open(path, "w") as f:
        json.dump(index, f)
    return path


def test_empty_index():
    """Empty knowledge store produces graceful output."""
    with tempfile.TemporaryDirectory() as tmp:
        tmp_dir = Path(tmp)
        index_path = make_index([], tmp_dir)

        # Create empty knowledge dirs
        for sub in ["repos", "agents", "global"]:
            (tmp_dir / sub).mkdir(exist_ok=True)

        context = build_bootstrap_context(
            repo="the-nexus", index_path=index_path
        )
        assert "No relevant knowledge found" in context
        assert "Starting fresh" in context
        print("PASS: empty_index")


def test_filter_by_repo():
    """Filter facts by repository."""
    facts = [
        {"fact": "A", "category": "fact", "repo": "the-nexus", "confidence": 0.9},
        {"fact": "B", "category": "fact", "repo": "fleet-ops", "confidence": 0.8},
        {"fact": "C", "category": "fact", "repo": "global", "confidence": 0.7},
    ]
    filtered = filter_facts(facts, repo="the-nexus", include_global=True)
    texts = [f["fact"] for f in filtered]
    assert "A" in texts
    assert "B" not in texts
    assert "C" in texts
    print("PASS: filter_by_repo")


def test_filter_by_agent():
    """Filter facts by agent type."""
    facts = [
        {"fact": "A", "category": "pattern", "repo": "global", "agent": "mimo-sprint", "confidence": 0.8},
        {"fact": "B", "category": "pattern", "repo": "global", "agent": "groq-fast", "confidence": 0.7},
        {"fact": "C", "category": "fact", "repo": "global", "confidence": 0.9},
    ]
    filtered = filter_facts(facts, agent="mimo-sprint", include_global=True)
    texts = [f["fact"] for f in filtered]
    assert "A" in texts
    assert "B" not in texts
    assert "C" in texts  # global, no agent restriction
    print("PASS: filter_by_agent")


def test_no_global_flag():
    """Excluding global facts works."""
    facts = [
        {"fact": "A", "category": "fact", "repo": "the-nexus", "confidence": 0.9},
        {"fact": "B", "category": "fact", "repo": "global", "confidence": 0.8},
    ]
    filtered = filter_facts(facts, repo="the-nexus", include_global=False)
    texts = [f["fact"] for f in filtered]
    assert "A" in texts
    assert "B" not in texts
    print("PASS: no_global_flag")
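

# Reference sketch (hypothetical: bootstrapper.py itself is not shown in this
# diff). One minimal filter_facts consistent with the three filter tests above:
# keep facts whose repo matches, optionally include 'global' facts, and drop
# facts tagged for a different agent.
def _sketch_filter_facts(facts, repo=None, agent=None, include_global=True):
    result = []
    for fact in facts:
        fact_repo = fact.get("repo", "global")
        if fact_repo == "global":
            if not include_global:
                continue  # global facts excluded on request
        elif repo is not None and fact_repo != repo:
            continue  # wrong repo
        fact_agent = fact.get("agent")
        if agent is not None and fact_agent is not None and fact_agent != agent:
            continue  # tagged for a different agent
        result.append(fact)
    return result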


def test_sort_by_confidence():
    """Facts sort by confidence descending."""
    facts = [
        {"fact": "low", "category": "fact", "repo": "global", "confidence": 0.3},
        {"fact": "high", "category": "fact", "repo": "global", "confidence": 0.95},
        {"fact": "mid", "category": "fact", "repo": "global", "confidence": 0.7},
    ]
    sorted_f = sort_facts(facts)
    assert sorted_f[0]["fact"] == "high"
    assert sorted_f[1]["fact"] == "mid"
    assert sorted_f[2]["fact"] == "low"
    print("PASS: sort_by_confidence")


def test_sort_pitfalls_first():
    """Pitfalls sort before facts at same confidence."""
    facts = [
        {"fact": "regular fact", "category": "fact", "repo": "global", "confidence": 0.8},
        {"fact": "danger pitfall", "category": "pitfall", "repo": "global", "confidence": 0.8},
    ]
    sorted_f = sort_facts(facts)
    assert sorted_f[0]["category"] == "pitfall"
    print("PASS: sort_pitfalls_first")
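

# Reference sketch (hypothetical) of the ordering the two sort tests above
# assume: pitfalls before everything else, then confidence descending.
def _sketch_sort_facts(facts):
    return sorted(
        facts,
        key=lambda f: (
            0 if f.get("category") == "pitfall" else 1,  # pitfalls first
            -float(f.get("confidence", 0.0)),            # then highest confidence
        ),
    )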


def test_truncate_to_tokens():
    """Truncation cuts at line boundary."""
    text = "line1\nline2\nline3\nline4\nline5\n"
    truncated = truncate_to_tokens(text, max_tokens=2)  # ~8 chars
    assert "line1" in truncated
    assert "truncated" in truncated.lower()
    print("PASS: truncate_to_tokens")


def test_estimate_tokens():
    """Token estimation is reasonable."""
    text = "a" * 400
    tokens = estimate_tokens(text)
    assert 90 <= tokens <= 110  # ~100 tokens
    print("PASS: estimate_tokens")
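

# Reference sketches (hypothetical) matching the contracts asserted above:
# roughly 4 characters per token, and truncation that cuts at a line boundary
# and appends an explicit "[truncated]" marker.
def _sketch_estimate_tokens(text):
    return len(text) // 4


def _sketch_truncate_to_tokens(text, max_tokens):
    budget_chars = max_tokens * 4
    kept, used = [], 0
    for line in text.splitlines():
        if kept and used + len(line) + 1 > budget_chars:
            kept.append("... [truncated]")
            break
        kept.append(line)
        used += len(line) + 1
    return "\n".join(kept)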


def test_build_full_context():
    """Full context with facts renders correctly."""
    facts = [
        {"fact": "API merges fail with 405", "category": "pitfall", "repo": "the-nexus", "confidence": 0.95},
        {"fact": "Has 50+ open PRs", "category": "fact", "repo": "the-nexus", "confidence": 0.9},
        {"fact": "Token at ~/.config/gitea/token", "category": "tool-quirk", "repo": "global", "confidence": 0.9},
        {"fact": "Check git remote -v first", "category": "pattern", "repo": "global", "confidence": 0.8},
    ]

    with tempfile.TemporaryDirectory() as tmp:
        tmp_dir = Path(tmp)
        index_path = make_index(facts, tmp_dir)

        # Create knowledge dirs
        for sub in ["repos", "agents", "global"]:
            (tmp_dir / sub).mkdir(exist_ok=True)

        context = build_bootstrap_context(
            repo="the-nexus",
            agent="mimo-sprint",
            include_global=True,
            index_path=index_path,
        )

        assert "What You Know" in context
        assert "PITFALLS" in context
        assert "API merges fail with 405" in context
        assert "the-nexus" in context
        assert "Token at" in context  # global fact included
        print("PASS: build_full_context")


def test_max_tokens_respected():
    """Output respects max_tokens limit."""
    # Generate lots of facts
    facts = [
        {"fact": f"Fact number {i} with some detail about things", "category": "fact", "repo": "global", "confidence": 0.8}
        for i in range(100)
    ]

    with tempfile.TemporaryDirectory() as tmp:
        tmp_dir = Path(tmp)
        index_path = make_index(facts, tmp_dir)
        for sub in ["repos", "agents", "global"]:
            (tmp_dir / sub).mkdir(exist_ok=True)

        context = build_bootstrap_context(
            repo=None,
            max_tokens=500,
            index_path=index_path,
        )

        actual_tokens = estimate_tokens(context)
        # Allow 10% overshoot since we cut at line boundaries
        assert actual_tokens <= 550, f"Expected ~500 tokens, got {actual_tokens}"
        print(f"PASS: max_tokens_respected (got {actual_tokens} tokens)")


def test_missing_index_graceful():
    """Missing index.json doesn't crash."""
    with tempfile.TemporaryDirectory() as tmp:
        tmp_dir = Path(tmp)
        # Don't create index.json
        for sub in ["repos", "agents", "global"]:
            (tmp_dir / sub).mkdir(exist_ok=True)

        fake_index = tmp_dir / "nonexistent.json"
        context = build_bootstrap_context(repo="anything", index_path=fake_index)
        assert "No relevant knowledge found" in context
        print("PASS: missing_index_graceful")


if __name__ == "__main__":
    tests = [
        test_empty_index,
        test_filter_by_repo,
        test_filter_by_agent,
        test_no_global_flag,
        test_sort_by_confidence,
        test_sort_pitfalls_first,
        test_truncate_to_tokens,
        test_estimate_tokens,
        test_build_full_context,
        test_max_tokens_respected,
        test_missing_index_graceful,
    ]

    passed = 0
    failed = 0
    for test in tests:
        try:
            test()
            passed += 1
        except Exception as e:
            print(f"FAIL: {test.__name__} — {e}")
            failed += 1

    print(f"\n{passed} passed, {failed} failed")
    sys.exit(0 if failed == 0 else 1)
@@ -1,129 +1,41 @@
#!/usr/bin/env python3
"""
Test harness for knowledge extraction prompt.
Validates output structure, content quality, and hallucination resistance.

Usage:
    python3 scripts/test_harvest_prompt.py                     # Run all tests
    python3 scripts/test_harvest_prompt.py --transcript FILE   # Test against a real transcript
    python3 scripts/test_harvest_prompt.py --validate FILE     # Validate an existing extraction JSON
Test script for knowledge extraction prompt.
Validates that the prompt produces consistent, structured output.
"""

import json
import sys
import argparse
from pathlib import Path

VALID_CATEGORIES = {"fact", "pitfall", "pattern", "tool-quirk", "question"}
REQUIRED_FIELDS = {"fact", "category", "repo", "confidence", "evidence"}
REQUIRED_META = {"session_outcome", "tools_used", "repos_touched", "error_count", "knowledge_count"}


def validate_knowledge_item(item, idx):
    """Validate a single knowledge item. Returns list of errors."""
    errors = []
    if not isinstance(item, dict):
        return [f"Item {idx}: not a dict"]
    for field in REQUIRED_FIELDS:
def validate_knowledge_item(item):
    """Validate a single knowledge item."""
    required_fields = ["fact", "category", "repo", "confidence"]
    for field in required_fields:
        if field not in item:
            errors.append(f"Item {idx}: missing field '{field}'")
    if not isinstance(item.get("fact", ""), str) or len(item.get("fact", "").strip()) == 0:
        errors.append(f"Item {idx}: fact must be a non-empty string")
    if item.get("category") not in VALID_CATEGORIES:
        errors.append(f"Item {idx}: invalid category '{item.get('category')}'")
    if not isinstance(item.get("repo", ""), str) or len(item.get("repo", "").strip()) == 0:
        errors.append(f"Item {idx}: repo must be a non-empty string")
    conf = item.get("confidence")
    if not isinstance(conf, (int, float)) or not (0.0 <= conf <= 1.0):
        errors.append(f"Item {idx}: confidence must be a number 0.0-1.0, got {conf}")
    if not isinstance(item.get("evidence", ""), str) or len(item.get("evidence", "").strip()) == 0:
        errors.append(f"Item {idx}: evidence must be a non-empty string (hallucination check)")
    return errors
            return False, f"Missing field: {field}"

    if not isinstance(item["fact"], str) or len(item["fact"].strip()) == 0:
        return False, "Fact must be a non-empty string"

    valid_categories = ["fact", "pitfall", "pattern", "tool-quirk", "question"]
    if item["category"] not in valid_categories:
        return False, f"Invalid category: {item['category']}"

    if not isinstance(item["repo"], str):
        return False, "Repo must be a string"

    if not isinstance(item["confidence"], (int, float)):
        return False, "Confidence must be a number"

    if not (0.0 <= item["confidence"] <= 1.0):
        return False, "Confidence must be between 0.0 and 1.0"

    return True, "Valid"


def validate_extraction(data):
    """Validate a full extraction result. Returns (is_valid, errors, warnings)."""
    errors = []
    warnings = []

    if not isinstance(data, dict):
        return False, ["Root is not a JSON object"], []

    if "knowledge" not in data:
        return False, ["Missing 'knowledge' array"], []

    if not isinstance(data["knowledge"], list):
        return False, ["'knowledge' is not an array"], []

    for i, item in enumerate(data["knowledge"]):
        errors.extend(validate_knowledge_item(item, i))

    # Meta block validation
    if "meta" not in data:
        warnings.append("Missing 'meta' block (session_outcome, tools_used, etc.)")
    else:
        meta = data["meta"]
        for field in REQUIRED_META:
            if field not in meta:
                warnings.append(f"Meta missing field '{field}'")

    # Quality checks
    facts = data["knowledge"]
    if len(facts) == 0:
        warnings.append("No knowledge extracted (empty session or extraction failure)")

    # Check for near-duplicate facts
    seen_facts = set()
    for item in facts:
        normalized = item.get("fact", "").lower().strip()[:80]
        if normalized in seen_facts:
            warnings.append(f"Duplicate fact detected: '{normalized[:50]}...'")
        seen_facts.add(normalized)

    # Check confidence distribution
    confidences = [item.get("confidence", 0) for item in facts]
    if confidences:
        avg_conf = sum(confidences) / len(confidences)
        if avg_conf > 0.9:
            warnings.append(f"Average confidence {avg_conf:.2f} is suspiciously high (possible hallucination)")
        if avg_conf < 0.4:
            warnings.append(f"Average confidence {avg_conf:.2f} is very low (extraction may be too cautious)")

    return len(errors) == 0, errors, warnings


def validate_transcript_coverage(data, transcript):
    """
    Check that extracted facts are actually supported by the transcript.
    This is a heuristic — checks that key terms from facts appear in transcript.
    Returns list of potential hallucinations.
    """
    hallucinations = []
    transcript_lower = transcript.lower()

    for item in data.get("knowledge", []):
        fact = item.get("fact", "")
        evidence = item.get("evidence", "")

        # Check if evidence string appears in transcript
        if evidence and evidence.lower() not in transcript_lower:
            # Partial match — check if key terms are present
            evidence_words = set(evidence.lower().split())
            transcript_words = set(transcript_lower.split())
            overlap = evidence_words & transcript_words
            if len(overlap) < min(3, len(evidence_words) * 0.3):
                hallucinations.append({
                    "fact": fact[:80],
                    "evidence": evidence[:80],
                    "issue": "Evidence not found in transcript — possible hallucination"
                })

    return hallucinations
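
# Example (illustrative): an item whose evidence never appears in the
# transcript is flagged as a potential hallucination, mirroring Test 6 below.
#
#     extraction = {"knowledge": [{"fact": "Database port is 5433",
#                                  "category": "fact", "repo": "x",
#                                  "confidence": 0.9,
#                                  "evidence": "PostgreSQL listening on port 5433"}]}
#     validate_transcript_coverage(extraction, SAMPLE_TRANSCRIPT_SUCCESS)
#     # -> [{"fact": "Database port is 5433", "evidence": "PostgreSQL listening on port 5433",
#     #      "issue": "Evidence not found in transcript — possible hallucination"}]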


# --- Test transcripts ---

SAMPLE_TRANSCRIPT_SUCCESS = """
def test_sample_transcript():
    """Test with a sample transcript."""
    sample_transcript = """
User: Clone the repo at forge.alexanderwhitestone.com/Timmy_Foundation/compounding-intelligence
Agent: Trying git clone... Error: 401 Unauthorized
Agent: Checking for token... Found in ~/.config/gitea/token
@@ -132,253 +44,30 @@ Agent: Clone successful with token authentication
Agent: Now examining the repo structure...
Agent: Found knowledge/index.json with 0 facts
Agent: Creating harvest-prompt.md in templates/
Agent: Done. Files created: templates/harvest-prompt.md, scripts/test_harvest_prompt.py
"""

SAMPLE_TRANSCRIPT_FAILURE = """
User: Deploy the cron jobs to production
Agent: Connecting to VPS... SSH timeout after 30s
Agent: Retrying with verbose output... Connection refused on port 22
Agent: Checking firewall rules... iptables shows port 22 is open
Agent: Checking sshd config... sshd is not running
Agent: Attempting to start sshd... systemctl start sshd failed: unit not found
Agent: ERROR: OpenSSH server not installed on this VPS
Agent: Cannot proceed without SSH access. Deployment blocked.
"""

SAMPLE_TRANSCRIPT_API_QUIRK = """
User: List all open issues in hermes-agent
Agent: Querying Gitea API... endpoint: /repos/Timmy_Foundation/hermes-agent/issues?state=open&limit=50
Agent: Response: 0 items. That's odd.
Agent: Trying without state filter... endpoint: /repos/Timmy_Foundation/hermes-agent/issues?limit=50
Agent: Response: 50 items returned, but all have pull_request field. These are PRs, not issues.
Agent: Gitea issues API returns PRs alongside issues. Must filter by presence of pull_request key.
Agent: Also discovered: state=open parameter returns empty when used with limit=50, but works with limit=5.
Agent: This appears to be a Gitea API quirk — pagination interacts with state filter unexpectedly.
Agent: Workaround: fetch without state filter, then filter client-side for state=='open' and 'pull_request' not in item.
"""


def run_tests():
    """Run the built-in test suite."""
    tests_passed = 0
    tests_failed = 0

    print("=" * 60)
    print("KNOWLEDGE EXTRACTION PROMPT — TEST SUITE")
    print("=" * 60)

    # Test 1: Prompt file exists and is under 2k tokens (~8k chars)
    print("\n[Test 1] Prompt file size constraint")
    prompt_path = Path("templates/harvest-prompt.md")
    if not prompt_path.exists():
        print(" FAIL: harvest-prompt.md not found")
        tests_failed += 1
    else:
        size = prompt_path.stat().st_size
        # Rough token estimate: ~4 chars per token
        est_tokens = size / 4
        print(f" Prompt size: {size} bytes (~{est_tokens:.0f} tokens)")
        if est_tokens > 2000:
            print(" WARN: Prompt exceeds ~2000 tokens (target: ~1000)")
        else:
            print(" PASS: Within token budget")
        tests_passed += 1

    # Test 2: Validate a well-formed extraction
    print("\n[Test 2] Valid extraction passes validation")
    valid_extraction = {
        "knowledge": [
            {
                "fact": "Gitea auth token is at ~/.config/gitea/token",
                "category": "tool-quirk",
                "repo": "global",
                "confidence": 0.9,
                "evidence": "Found in ~/.config/gitea/token"
            },
            {
                "fact": "Clone fails with 401 when no token is provided",
                "category": "pitfall",
                "repo": "compounding-intelligence",
                "confidence": 0.9,
                "evidence": "Error: 401 Unauthorized"
            }
        ],
        "meta": {
            "session_outcome": "success",
            "tools_used": ["git"],
            "repos_touched": ["compounding-intelligence"],
            "error_count": 1,
            "knowledge_count": 2
        }
    }
    is_valid, errors, warnings = validate_extraction(valid_extraction)
    if is_valid:
        print(f" PASS: Valid extraction accepted ({len(warnings)} warnings)")
        tests_passed += 1
    else:
        print(f" FAIL: Valid extraction rejected: {errors}")
        tests_failed += 1

    # Test 3: Reject missing fields
    print("\n[Test 3] Missing fields are rejected")
    bad_extraction = {
        "knowledge": [
            {"fact": "Something learned", "category": "fact"}  # Missing repo, confidence, evidence
        ]
    }
    is_valid, errors, warnings = validate_extraction(bad_extraction)
    if not is_valid:
        print(f" PASS: Rejected with {len(errors)} errors")
        tests_passed += 1
    else:
        print(" FAIL: Should have rejected missing fields")
        tests_failed += 1

    # Test 4: Reject invalid category
    print("\n[Test 4] Invalid category is rejected")
    bad_cat = {
        "knowledge": [
            {"fact": "Test", "category": "discovery", "repo": "x", "confidence": 0.8, "evidence": "test"}
        ]
    }
    is_valid, errors, warnings = validate_extraction(bad_cat)
    if not is_valid and any("category" in e for e in errors):
        print(" PASS: Invalid category 'discovery' rejected")
        tests_passed += 1
    else:
        print(" FAIL: Should have rejected invalid category")
        tests_failed += 1

    # Test 5: Detect near-duplicates
    print("\n[Test 5] Duplicate detection")
    dup_extraction = {
        "knowledge": [
            {"fact": "Token is at ~/.config/gitea/token", "category": "fact", "repo": "x", "confidence": 0.9, "evidence": "a"},
            {"fact": "Token is at ~/.config/gitea/token", "category": "fact", "repo": "x", "confidence": 0.9, "evidence": "b"}
        ],
        "meta": {"session_outcome": "success", "tools_used": [], "repos_touched": [], "error_count": 0, "knowledge_count": 2}
    }
    is_valid, errors, warnings = validate_extraction(dup_extraction)
    if any("Duplicate" in w for w in warnings):
        print(" PASS: Duplicate detected")
        tests_passed += 1
    else:
        print(" FAIL: Should have detected duplicate")
        tests_failed += 1

    # Test 6: Hallucination check against transcript
    print("\n[Test 6] Hallucination detection")
    hallucinated = {
        "knowledge": [
            {
                "fact": "Database port is 5433",
                "category": "fact",
                "repo": "x",
                "confidence": 0.9,
                "evidence": "PostgreSQL listening on port 5433"
            }
        ],
        "meta": {"session_outcome": "success", "tools_used": [], "repos_touched": [], "error_count": 0, "knowledge_count": 1}
    }
    hallucinations = validate_transcript_coverage(hallucinated, SAMPLE_TRANSCRIPT_SUCCESS)
    if hallucinations:
        print(f" PASS: Hallucination detected ({len(hallucinations)} items)")
        tests_passed += 1
    else:
        print(" FAIL: Should have detected hallucinated evidence")
        tests_failed += 1

    # Test 7: Failed session should extract pitfalls
    print("\n[Test 7] Failed session extraction shape")
    failed_extraction = {
        "knowledge": [
            {
                "fact": "SSH server not installed on target VPS",
                "category": "pitfall",
                "repo": "global",
                "confidence": 0.9,
                "evidence": "ERROR: OpenSSH server not installed on this VPS"
            },
            {
                "fact": "VPS blocks deployment without SSH access",
                "category": "question",
                "repo": "global",
                "confidence": 0.7,
                "evidence": "Cannot proceed without SSH access. Deployment blocked."
            }
        ],
        "meta": {
            "session_outcome": "failed",
            "tools_used": ["ssh", "systemctl"],
            "repos_touched": [],
            "error_count": 3,
            "knowledge_count": 2
        }
    }
    is_valid, errors, warnings = validate_extraction(failed_extraction)
    if is_valid:
        categories = [item["category"] for item in failed_extraction["knowledge"]]
        if "pitfall" in categories:
            print(f" PASS: Failed session extracted {len(categories)} items including pitfalls")
            tests_passed += 1
        else:
            print(" FAIL: Failed session should extract pitfalls")
            tests_failed += 1
    else:
        print(f" FAIL: {errors}")
        tests_failed += 1

    # Test 8: Empty extraction is warned
    print("\n[Test 8] Empty extraction warning")
    empty = {"knowledge": [], "meta": {"session_outcome": "success", "tools_used": [], "repos_touched": [], "error_count": 0, "knowledge_count": 0}}
    is_valid, errors, warnings = validate_extraction(empty)
    if any("No knowledge" in w for w in warnings):
        print(" PASS: Empty extraction warned")
        tests_passed += 1
    else:
        print(" FAIL: Should warn on empty extraction")
        tests_failed += 1

    # Summary
    print(f"\n{'=' * 60}")
    print(f"Results: {tests_passed} passed, {tests_failed} failed")
    print(f"{'=' * 60}")
    return tests_failed == 0


def validate_file(filepath):
    """Validate an existing extraction JSON file."""
    path = Path(filepath)
    if not path.exists():
        print(f"ERROR: {filepath} not found")
        return False

    data = json.loads(path.read_text())
    is_valid, errors, warnings = validate_extraction(data)

    print(f"Validation of {filepath}:")
    print(f" Knowledge items: {len(data.get('knowledge', []))}")
    print(f" Errors: {len(errors)}")
    print(f" Warnings: {len(warnings)}")

    for e in errors:
        print(f" ERROR: {e}")
    for w in warnings:
        print(f" WARN: {w}")

    return is_valid


    # This would be replaced with actual prompt execution
    print("Sample transcript processed")
    print("Expected categories: fact, pitfall, pattern, tool-quirk, question")
    return True

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Test knowledge extraction prompt")
    parser.add_argument("--validate", help="Validate an existing extraction JSON file")
    parser.add_argument("--transcript", help="Test against a real transcript file (informational)")
    args = parser.parse_args()

    if args.validate:
        success = validate_file(args.validate)
        sys.exit(0 if success else 1)
    else:
        success = run_tests()
        sys.exit(0 if success else 1)
    print("Testing knowledge extraction prompt...")

    # Test 1: Validate prompt file exists
    prompt_path = Path("templates/harvest-prompt.md")
    if not prompt_path.exists():
        print("ERROR: harvest-prompt.md not found")
        sys.exit(1)

    print(f"OK: Prompt file exists: {prompt_path}")

    # Test 2: Check prompt size
    prompt_size = prompt_path.stat().st_size
    print(f"OK: Prompt size: {prompt_size} bytes")

    # Test 3: Test sample transcript processing
    if test_sample_transcript():
        print("OK: Sample transcript test passed")

    print("\nAll tests passed!")

@@ -2,107 +2,98 @@

## System Prompt

You are a knowledge extraction engine. You read session transcripts and output ONLY structured JSON. You never infer. You never assume. You extract only what the transcript explicitly states.
You are a knowledge extraction engine. Your task is to analyze a session transcript and extract durable knowledge that will help future sessions be more efficient.

## Prompt
## Instructions

Read the session transcript carefully. Extract ONLY information that is explicitly stated in the transcript. Do NOT infer, assume, or hallucinate information.

### Categories

Extract knowledge into these categories:

1. **fact**: Concrete, verifiable information learned (e.g., "Repository X has 5 files", "API returns JSON with field Y")
2. **pitfall**: Errors encountered, wrong assumptions, things that wasted time (e.g., "Assumed API token was in env var GITEA_TOKEN, but it's in ~/.config/gitea/token")
3. **pattern**: Successful sequences of actions (e.g., "To deploy: 1. Run tests 2. Build 3. Push to Gitea 4. Trigger webhook")
4. **tool-quirk**: Environment-specific behaviors (e.g., "Token paths are different on macOS vs Linux", "URL format requires trailing slash")
5. **question**: Things identified but not answered (e.g., "Need to determine optimal batch size for harvesting")

### Output Format

Return a JSON object with an array of extracted knowledge items. Each item must have:

```json
{
  "fact": "One sentence description of the knowledge",
  "category": "fact|pitfall|pattern|tool-quirk|question",
  "repo": "Repository name this applies to, or 'global' if general",
  "confidence": 0.0-1.0
}
```
TASK: Extract durable knowledge from this session transcript.

RULES:
1. Extract ONLY information explicitly stated in the transcript.
2. Do NOT infer, assume, or hallucinate.
3. Every fact must be verifiable by pointing to a specific line in the transcript.
4. If the session failed or was partial, extract pitfalls and questions — these are the most valuable.
5. Be specific. "Gitea API is slow" is worthless. "Gitea issues endpoint with state=open returns empty when limit=50 but works with limit=5" is knowledge.
### Confidence Scoring

CATEGORIES (assign exactly one per item):
- fact: Concrete, verifiable thing learned (paths, formats, counts, configs)
- pitfall: Error hit, wrong assumption, time wasted, thing that didn't work
- pattern: Successful sequence that should be reused (deploy steps, debug flow)
- tool-quirk: Environment-specific behavior (token paths, URL formats, API gotchas)
- question: Something identified but not answered — the NEXT agent should investigate
- 0.9-1.0: Explicitly stated with verification (e.g., "Error message shows X")
- 0.7-0.8: Clearly implied by multiple data points
- 0.5-0.6: Suggested but not fully verified
- 0.3-0.4: Inferred from limited data
- 0.1-0.2: Speculative or uncertain

CONFIDENCE:
- 0.9: Directly observed with error output or explicit verification
- 0.7: Multiple data points confirm, but not explicitly verified
- 0.5: Suggested by context, not tested
- 0.3: Inferred from limited evidence
### Constraints

OUTPUT FORMAT (valid JSON only, no markdown, no explanation):
1. **No hallucination**: Only extract what's explicitly in the transcript
2. **Specificity**: Each fact must be specific and actionable
3. **Relevance**: Only extract knowledge that would help future sessions
4. **Brevity**: One sentence per fact
5. **Partial sessions**: Even failed or incomplete sessions may contain valuable pitfalls

### Example Input/Output

**Input Transcript (excerpt):**
```
User: Clone the repo at forge.alexanderwhitestone.com/Timmy_Foundation/compounding-intelligence
Agent: Trying git clone... Error: 401 Unauthorized
Agent: Checking for token... Found in ~/.config/gitea/token
Agent: Token is gitea_token format, not OAuth
Agent: Clone successful with token authentication
```

**Output:**
```json
{
  "knowledge": [
    {
      "fact": "One specific sentence of knowledge",
      "category": "fact|pitfall|pattern|tool-quirk|question",
      "repo": "repo-name or global",
      "confidence": 0.0-1.0,
      "evidence": "Brief quote or reference from transcript that supports this"
      "fact": "Gitea repo at forge.alexanderwhitestone.com requires authentication for cloning",
      "category": "fact",
      "repo": "compounding-intelligence",
      "confidence": 0.9
    },
    {
      "fact": "Gitea authentication token is stored at ~/.config/gitea/token",
      "category": "tool-quirk",
      "repo": "global",
      "confidence": 0.9
    },
    {
      "fact": "Gitea uses gitea_token format, not OAuth for API access",
      "category": "tool-quirk",
      "repo": "global",
      "confidence": 0.8
    },
    {
      "fact": "Clone fails with 401 when no token is provided",
      "category": "pitfall",
      "repo": "compounding-intelligence",
      "confidence": 0.9
    }
  ],
  "meta": {
    "session_outcome": "success|partial|failed",
    "tools_used": ["tool1", "tool2"],
    "repos_touched": ["repo1"],
    "error_count": 0,
    "knowledge_count": 0
  }
]
}

TRANSCRIPT:
{{transcript}}
```

## Design Notes
## Final Notes

### Why this works with mimo-v2-pro

Mimo needs:
- Explicit format constraints ("valid JSON only, no markdown")
- Clear category definitions with concrete examples
- Hard rules before soft guidance
- The transcript at the END (so it reads all instructions first)

This prompt front-loads all rules, then gives the transcript last. Mimo follows the pattern.

### Handling partial/failed sessions

Failed sessions are the richest source of pitfalls. The prompt explicitly says:
> "If the session failed or was partial, extract pitfalls and questions — these are the most valuable."

This reframes failure as valuable output, not noise to discard.

### The `evidence` field

Added to the original spec. Every extracted item must cite where in the transcript it came from. This:
- Prevents hallucination (can't cite what isn't there)
- Enables verification (reviewer can check the source)
- Trains confidence calibration (the agent must find evidence, not just claim it)
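
For instance, an item with its evidence citation, drawn from the example transcript above, might look like this (illustrative):

```json
{
  "fact": "Gitea authentication token is stored at ~/.config/gitea/token",
  "category": "tool-quirk",
  "repo": "global",
  "confidence": 0.9,
  "evidence": "Agent: Checking for token... Found in ~/.config/gitea/token"
}
```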

### Token budget

Target: ~1,000 tokens for the prompt (excluding transcript).

```
System prompt:   ~50 tokens
Rules:          ~200 tokens
Categories:     ~150 tokens
Confidence:     ~100 tokens
Output format:  ~200 tokens
Design notes:   NOT included in prompt (documentation only)
─────────────────────────────
Total prompt:   ~700 tokens
```

Leaves ~300 tokens of headroom for variable content (transcript insertion, edge cases).

### What this replaces

The v1 prompt had:
- Verbose prose explanations (waste tokens for mimo)
- No `evidence` field (hallucination risk)
- No `meta` block (no session-level metadata)
- No explicit handling of failed sessions
- Example was too long (~150 tokens of example for a 1k prompt)

This v2 is tighter, more structured, and adds the evidence requirement that prevents the #1 failure mode of extraction prompts: generating plausible-sounding facts that aren't in the transcript.
- Process the entire transcript, not just the beginning
- Pay special attention to errors and corrections
- Note any environment-specific details
- Track tool-specific behaviors and quirks
- If the session failed, focus on pitfalls and questions