Compare commits
1 Commits
claw-code/
...
claude/iss
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1e9c5fc458 |
106
skills/devops/wizard-council-automation/SKILL.md
Normal file
106
skills/devops/wizard-council-automation/SKILL.md
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
---
|
||||||
|
name: wizard-council-automation
|
||||||
|
description: Run wizard environment validation, skills drift audit, and cross-wizard dependency checks — the Wizard Council shared tooling suite
|
||||||
|
version: 1.0.0
|
||||||
|
metadata:
|
||||||
|
hermes:
|
||||||
|
tags: [devops, wizards, environment, audit, bootstrap]
|
||||||
|
related_skills: []
|
||||||
|
---
|
||||||
|
|
||||||
|
# Wizard Council Automation
|
||||||
|
|
||||||
|
This skill gives you access to the shared forge tooling for environment
|
||||||
|
validation, skill drift detection, and cross-wizard dependency checking.
|
||||||
|
|
||||||
|
## Tools
|
||||||
|
|
||||||
|
All tools live in `wizard-bootstrap/` in the hermes-agent repo root.
|
||||||
|
|
||||||
|
### 1. Environment Bootstrap (`wizard_bootstrap.py`)
|
||||||
|
|
||||||
|
Validates the full wizard environment in one command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python wizard-bootstrap/wizard_bootstrap.py
|
||||||
|
python wizard-bootstrap/wizard_bootstrap.py --json
|
||||||
|
```
|
||||||
|
|
||||||
|
Checks:
|
||||||
|
- Python version (>=3.11)
|
||||||
|
- Core dependency imports
|
||||||
|
- hermes_constants smoke test
|
||||||
|
- HERMES_HOME existence and writability
|
||||||
|
- LLM provider API key
|
||||||
|
- Gitea authentication (GITEA_TOKEN / FORGE_TOKEN)
|
||||||
|
- Telegram bot connectivity (TELEGRAM_BOT_TOKEN)
|
||||||
|
|
||||||
|
Exits 0 if all checks pass, 1 if any fail.
|
||||||
|
|
||||||
|
### 2. Skills Drift Audit (`skills_audit.py`)
|
||||||
|
|
||||||
|
Compares repo-bundled skills against installed skills:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python wizard-bootstrap/skills_audit.py # detect drift
|
||||||
|
python wizard-bootstrap/skills_audit.py --fix # sync missing/outdated
|
||||||
|
python wizard-bootstrap/skills_audit.py --diff # show diffs for outdated
|
||||||
|
python wizard-bootstrap/skills_audit.py --json # machine-readable output
|
||||||
|
```
|
||||||
|
|
||||||
|
Reports: MISSING, EXTRA, OUTDATED, OK.
|
||||||
|
|
||||||
|
### 3. Dependency Checker (`dependency_checker.py`)
|
||||||
|
|
||||||
|
Validates binary and env-var dependencies declared in SKILL.md frontmatter:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python wizard-bootstrap/dependency_checker.py
|
||||||
|
python wizard-bootstrap/dependency_checker.py --skill devops/my-skill
|
||||||
|
```
|
||||||
|
|
||||||
|
Skills declare deps in their frontmatter:
|
||||||
|
```yaml
|
||||||
|
dependencies:
|
||||||
|
binaries: [ffmpeg, imagemagick]
|
||||||
|
env_vars: [MY_API_KEY]
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Monthly Audit (`monthly_audit.py`)
|
||||||
|
|
||||||
|
Runs all three checks and generates a Markdown report:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python wizard-bootstrap/monthly_audit.py
|
||||||
|
python wizard-bootstrap/monthly_audit.py --post-telegram
|
||||||
|
```
|
||||||
|
|
||||||
|
Report saved to `~/.hermes/wizard-council/audit-YYYY-MM.md`.
|
||||||
|
|
||||||
|
## Wizard Environment Contract
|
||||||
|
|
||||||
|
See `wizard-bootstrap/WIZARD_ENVIRONMENT_CONTRACT.md` for the full
|
||||||
|
specification of what every forge wizard must maintain.
|
||||||
|
|
||||||
|
## Workflow
|
||||||
|
|
||||||
|
### New Wizard Onboarding
|
||||||
|
|
||||||
|
1. Clone the hermes-agent repo
|
||||||
|
2. Install dependencies: `uv pip install -r requirements.txt`
|
||||||
|
3. Run: `python wizard-bootstrap/wizard_bootstrap.py`
|
||||||
|
4. Resolve all failures
|
||||||
|
5. Go online
|
||||||
|
|
||||||
|
### Ongoing Maintenance
|
||||||
|
|
||||||
|
1. Monthly audit fires automatically via cron
|
||||||
|
2. Report posted to wizard-council-automation channel
|
||||||
|
3. Wizards resolve any drift before next audit
|
||||||
|
|
||||||
|
### When Drift Is Detected
|
||||||
|
|
||||||
|
1. Run `python wizard-bootstrap/skills_audit.py` to identify drift
|
||||||
|
2. Run `python wizard-bootstrap/skills_audit.py --fix` to sync
|
||||||
|
3. Run `python wizard-bootstrap/dependency_checker.py` to check deps
|
||||||
|
4. Update SKILL.md frontmatter with any new binary/env_var requirements
|
||||||
242
tests/test_wizard_bootstrap.py
Normal file
242
tests/test_wizard_bootstrap.py
Normal file
@@ -0,0 +1,242 @@
|
|||||||
|
"""
|
||||||
|
Tests for wizard-bootstrap tooling (Epic-004).
|
||||||
|
|
||||||
|
These tests exercise the bootstrap, skills audit, and dependency checker
|
||||||
|
without requiring network access or API keys.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from unittest import mock
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
# Ensure repo root importable
|
||||||
|
REPO_ROOT = Path(__file__).parent.parent
|
||||||
|
sys.path.insert(0, str(REPO_ROOT))
|
||||||
|
sys.path.insert(0, str(REPO_ROOT / "wizard-bootstrap"))
|
||||||
|
|
||||||
|
import wizard_bootstrap as wb
|
||||||
|
import skills_audit as sa
|
||||||
|
import dependency_checker as dc
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# wizard_bootstrap tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class TestCheckPythonVersion:
    """Tests for the Python-version bootstrap check (>=3.11 required)."""

    def test_current_python_passes(self):
        """The interpreter running the test suite must satisfy the check."""
        result = wb.check_python_version()
        assert result.passed
        assert "Python" in result.message

    def test_old_python_fails(self):
        """A pre-3.11 interpreter version must fail the check."""
        # Patch version_info as a tuple (matches [:3] unpacking used in the check)
        old_info = sys.version_info
        try:
            sys.version_info = (3, 10, 0, "final", 0)  # type: ignore[assignment]
            result = wb.check_python_version()
        finally:
            # Always restore the real version_info, even if the check raises.
            sys.version_info = old_info  # type: ignore[assignment]
        assert not result.passed
|
||||||
|
|
||||||
|
|
||||||
|
class TestCheckCoreDeps:
    """Tests for the core-dependency import check."""

    def test_passes_when_all_present(self):
        """All required packages import cleanly in a healthy environment."""
        result = wb.check_core_deps()
        # In a healthy dev environment all packages should be importable
        assert result.passed

    def test_fails_when_package_missing(self):
        """A missing critical package (openai) must fail the check."""
        orig = __import__

        def fake_import(name, *args, **kwargs):
            # Simulate only `openai` being uninstalled; everything else
            # imports normally.
            if name == "openai":
                raise ModuleNotFoundError(name)
            return orig(name, *args, **kwargs)

        # Patch both entry points since the check may use either mechanism.
        with mock.patch("builtins.__import__", side_effect=fake_import):
            with mock.patch("importlib.import_module", side_effect=ModuleNotFoundError("openai")):
                result = wb.check_core_deps()
        # With mocked importlib the check should detect the missing module
        assert not result.passed
        assert "openai" in result.message
|
||||||
|
|
||||||
|
|
||||||
|
class TestCheckEnvVars:
    """Tests for the LLM provider API-key check."""

    # Every provider key the bootstrap check recognizes.
    ENV_KEYS = [
        "OPENROUTER_API_KEY", "ANTHROPIC_API_KEY", "ANTHROPIC_TOKEN",
        "OPENAI_API_KEY", "GLM_API_KEY", "KIMI_API_KEY", "MINIMAX_API_KEY",
    ]

    def test_fails_when_no_key_set(self):
        """The check fails when no provider key is present at all."""
        # A single clear=True patch with the keys filtered out fully
        # determines the environment — the previous extra outer patch that
        # blanked the keys first was redundant.
        env = {k: v for k, v in os.environ.items() if k not in self.ENV_KEYS}
        with mock.patch.dict(os.environ, env, clear=True):
            result = wb.check_env_vars()
        assert not result.passed

    def test_passes_when_key_set(self):
        """The check passes when at least one provider key is set."""
        with mock.patch.dict(os.environ, {"ANTHROPIC_API_KEY": "sk-test-key"}):
            result = wb.check_env_vars()
            assert result.passed
            assert "ANTHROPIC_API_KEY" in result.message
|
||||||
|
|
||||||
|
|
||||||
|
class TestCheckHermesHome:
    """Tests for the HERMES_HOME existence/writability check."""

    def test_passes_with_existing_writable_dir(self, tmp_path):
        """An existing writable directory satisfies the check."""
        with mock.patch.dict(os.environ, {"HERMES_HOME": str(tmp_path)}):
            result = wb.check_hermes_home()
        assert result.passed

    def test_fails_when_dir_missing(self, tmp_path):
        """A nonexistent HERMES_HOME path fails the check."""
        missing = tmp_path / "nonexistent"
        with mock.patch.dict(os.environ, {"HERMES_HOME": str(missing)}):
            result = wb.check_hermes_home()
        assert not result.passed
|
||||||
|
|
||||||
|
|
||||||
|
class TestBootstrapReport:
    """Tests for BootstrapReport pass/fail aggregation."""

    def test_passed_when_all_pass(self):
        """A report with only passing checks is passed and has no failures."""
        report = wb.BootstrapReport()
        report.add(wb.CheckResult("a", True, "ok"))
        report.add(wb.CheckResult("b", True, "ok"))
        assert report.passed
        assert report.failed == []

    def test_failed_when_any_fail(self):
        """A single failing check flips the whole report to failed."""
        report = wb.BootstrapReport()
        report.add(wb.CheckResult("a", True, "ok"))
        report.add(wb.CheckResult("b", False, "bad", fix_hint="fix it"))
        assert not report.passed
        assert len(report.failed) == 1
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# skills_audit tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class TestSkillsAudit:
    """Drift detection between repo-bundled and installed skills."""

    def _make_skill(self, skills_root: Path, rel_path: str, content: str = "# skill") -> Path:
        """Create a SKILL.md at skills_root/rel_path/SKILL.md."""
        skill_dir = skills_root / rel_path
        skill_dir.mkdir(parents=True, exist_ok=True)
        skill_md = skill_dir / "SKILL.md"
        skill_md.write_text(content)
        return skill_md

    def test_no_drift_when_identical(self, tmp_path):
        """Identical repo and installed copies report OK with no drift."""
        # run_audit expects repo_root/skills/ and installed_root/
        repo = tmp_path / "repo"
        installed = tmp_path / "installed"
        content = "# Same content"
        self._make_skill(repo / "skills", "cat/skill-a", content)
        self._make_skill(installed, "cat/skill-a", content)

        report = sa.run_audit(repo, installed)
        assert not report.has_drift
        assert len(report.by_status("OK")) == 1

    def test_detects_missing_skill(self, tmp_path):
        """A skill present in the repo but not installed is MISSING."""
        repo = tmp_path / "repo"
        installed = tmp_path / "installed"
        installed.mkdir()
        self._make_skill(repo / "skills", "cat/skill-a")

        report = sa.run_audit(repo, installed)
        assert report.has_drift
        assert len(report.by_status("MISSING")) == 1

    def test_detects_extra_skill(self, tmp_path):
        """A skill installed but absent from the repo is EXTRA."""
        repo = tmp_path / "repo"
        (repo / "skills").mkdir(parents=True)
        installed = tmp_path / "installed"
        self._make_skill(installed, "cat/skill-a")

        report = sa.run_audit(repo, installed)
        assert report.has_drift
        assert len(report.by_status("EXTRA")) == 1

    def test_detects_outdated_skill(self, tmp_path):
        """Differing content between copies is reported as OUTDATED."""
        repo = tmp_path / "repo"
        installed = tmp_path / "installed"
        self._make_skill(repo / "skills", "cat/skill-a", "# Repo version")
        self._make_skill(installed, "cat/skill-a", "# Installed version")

        report = sa.run_audit(repo, installed)
        assert report.has_drift
        assert len(report.by_status("OUTDATED")) == 1

    def test_fix_copies_missing_skills(self, tmp_path):
        """apply_fix syncs MISSING skills so a re-audit shows no drift."""
        repo = tmp_path / "repo"
        installed = tmp_path / "installed"
        installed.mkdir()
        self._make_skill(repo / "skills", "cat/skill-a", "# content")

        report = sa.run_audit(repo, installed)
        assert len(report.by_status("MISSING")) == 1

        sa.apply_fix(report)

        # After the fix, a fresh audit of the same trees must be clean.
        report2 = sa.run_audit(repo, installed)
        assert not report2.has_drift
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# dependency_checker tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
class TestDependencyChecker:
    """Binary / env-var dependency checks declared in SKILL.md frontmatter."""

    def _make_skill(self, root: Path, rel_path: str, content: str) -> None:
        """Write a SKILL.md with the given content under root/rel_path/."""
        skill_dir = root / rel_path
        skill_dir.mkdir(parents=True, exist_ok=True)
        (skill_dir / "SKILL.md").write_text(content)

    def test_no_deps_when_no_frontmatter(self, tmp_path):
        """A skill without frontmatter contributes no dependency entries."""
        self._make_skill(tmp_path, "cat/plain", "# No frontmatter")
        report = dc.run_dep_check(skills_dir=tmp_path)
        assert report.deps == []

    def test_detects_missing_binary(self, tmp_path):
        """A declared binary absent from PATH is reported unsatisfied."""
        content = "---\nname: test\ndependencies:\n binaries: [definitely_not_a_real_binary_xyz]\n---\n"
        self._make_skill(tmp_path, "cat/skill", content)
        report = dc.run_dep_check(skills_dir=tmp_path)
        assert len(report.deps) == 1
        assert not report.deps[0].satisfied
        assert report.deps[0].binary == "definitely_not_a_real_binary_xyz"

    def test_detects_present_binary(self, tmp_path):
        """A declared binary found on PATH (python3) is satisfied."""
        content = "---\nname: test\ndependencies:\n binaries: [python3]\n---\n"
        self._make_skill(tmp_path, "cat/skill", content)
        report = dc.run_dep_check(skills_dir=tmp_path)
        assert len(report.deps) == 1
        assert report.deps[0].satisfied

    def test_detects_missing_env_var(self, tmp_path):
        """A declared env var missing from the environment is unsatisfied."""
        content = "---\nname: test\ndependencies:\n env_vars: [DEFINITELY_NOT_SET_XYZ_123]\n---\n"
        self._make_skill(tmp_path, "cat/skill", content)
        # Rebuild the environment guaranteed not to contain the variable.
        env = {k: v for k, v in os.environ.items() if k != "DEFINITELY_NOT_SET_XYZ_123"}
        with mock.patch.dict(os.environ, env, clear=True):
            report = dc.run_dep_check(skills_dir=tmp_path)
        assert len(report.deps) == 1
        assert not report.deps[0].satisfied

    def test_detects_present_env_var(self, tmp_path):
        """A declared env var that is set is satisfied."""
        content = "---\nname: test\ndependencies:\n env_vars: [MY_TEST_VAR_WIZARD]\n---\n"
        self._make_skill(tmp_path, "cat/skill", content)
        with mock.patch.dict(os.environ, {"MY_TEST_VAR_WIZARD": "set"}):
            report = dc.run_dep_check(skills_dir=tmp_path)
        assert len(report.deps) == 1
        assert report.deps[0].satisfied

    def test_skill_filter(self, tmp_path):
        """skill_filter restricts the scan to matching skill paths only."""
        content = "---\nname: test\ndependencies:\n binaries: [python3]\n---\n"
        self._make_skill(tmp_path, "cat/skill-a", content)
        self._make_skill(tmp_path, "cat/skill-b", content)

        report = dc.run_dep_check(skills_dir=tmp_path, skill_filter="skill-a")
        assert len(report.deps) == 1
        assert "skill-a" in report.deps[0].skill_path
|
||||||
162
wizard-bootstrap/WIZARD_ENVIRONMENT_CONTRACT.md
Normal file
162
wizard-bootstrap/WIZARD_ENVIRONMENT_CONTRACT.md
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
# Wizard Environment Contract
|
||||||
|
|
||||||
|
> **Version:** 1.0.0
|
||||||
|
> **Owner:** Wizard Council (Bezalel Epic-004)
|
||||||
|
> **Last updated:** 2026-04-06
|
||||||
|
|
||||||
|
This document defines the minimum viable state every forge wizard must maintain.
|
||||||
|
A wizard that satisfies all requirements is considered **forge-ready**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1. Python Runtime
|
||||||
|
|
||||||
|
| Requirement | Minimum | Notes |
|
||||||
|
|-------------|---------|-------|
|
||||||
|
| Python version | 3.11 | 3.12+ recommended |
|
||||||
|
| Virtual environment | Activated | `source venv/bin/activate` before running |
|
||||||
|
|
||||||
|
Run `python --version` to verify.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Core Package Dependencies
|
||||||
|
|
||||||
|
All packages in `requirements.txt` must be installed and importable.
|
||||||
|
Critical packages: `openai`, `anthropic`, `pyyaml`, `rich`, `requests`, `pydantic`, `prompt_toolkit`.
|
||||||
|
|
||||||
|
**Verify:**
|
||||||
|
```bash
|
||||||
|
python wizard-bootstrap/wizard_bootstrap.py
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. LLM Provider Key
|
||||||
|
|
||||||
|
At least one LLM provider API key must be set in `~/.hermes/.env`:
|
||||||
|
|
||||||
|
| Variable | Provider |
|
||||||
|
|----------|----------|
|
||||||
|
| `OPENROUTER_API_KEY` | OpenRouter (200+ models) |
|
||||||
|
| `ANTHROPIC_API_KEY` | Anthropic Claude |
|
||||||
|
| `ANTHROPIC_TOKEN` | Anthropic Claude (alt) |
|
||||||
|
| `OPENAI_API_KEY` | OpenAI |
|
||||||
|
| `GLM_API_KEY` | z.ai/GLM |
|
||||||
|
| `KIMI_API_KEY` | Moonshot/Kimi |
|
||||||
|
| `MINIMAX_API_KEY` | MiniMax |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Gitea Authentication
|
||||||
|
|
||||||
|
| Requirement | Details |
|
||||||
|
|-------------|---------|
|
||||||
|
| Variable | `GITEA_TOKEN` or `FORGE_TOKEN` |
|
||||||
|
| Scope | Must have repo read/write access |
|
||||||
|
| Forge URL | `https://forge.alexanderwhitestone.com` (or `FORGE_URL` env var) |
|
||||||
|
|
||||||
|
The wizard must be able to create and merge PRs on the forge.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Telegram Connectivity (Gateway Wizards)
|
||||||
|
|
||||||
|
Wizards that operate via the messaging gateway must also satisfy:
|
||||||
|
|
||||||
|
| Requirement | Details |
|
||||||
|
|-------------|---------|
|
||||||
|
| Variable | `TELEGRAM_BOT_TOKEN` |
|
||||||
|
| Home channel | `TELEGRAM_HOME_CHANNEL` |
|
||||||
|
| API reachability | `api.telegram.org` must be reachable |
|
||||||
|
|
||||||
|
CLI-only wizards may skip Telegram checks.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6. HERMES_HOME
|
||||||
|
|
||||||
|
| Requirement | Details |
|
||||||
|
|-------------|---------|
|
||||||
|
| Default | `~/.hermes` |
|
||||||
|
| Override | `HERMES_HOME` env var |
|
||||||
|
| Permissions | Owner-writable (700 recommended) |
|
||||||
|
|
||||||
|
The directory must exist and be writable before any hermes command runs.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7. Skill Dependencies (Per-Skill)
|
||||||
|
|
||||||
|
Each skill may declare binary and environment-variable dependencies in its
|
||||||
|
`SKILL.md` frontmatter:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
name: my-skill
|
||||||
|
dependencies:
|
||||||
|
binaries: [ffmpeg, imagemagick]
|
||||||
|
env_vars: [MY_API_KEY]
|
||||||
|
---
|
||||||
|
```
|
||||||
|
|
||||||
|
A wizard must satisfy all dependencies for any skill it intends to run.
|
||||||
|
|
||||||
|
**Check all skill deps:**
|
||||||
|
```bash
|
||||||
|
python wizard-bootstrap/dependency_checker.py
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8. Enforcement
|
||||||
|
|
||||||
|
### New Wizard Onboarding
|
||||||
|
|
||||||
|
Run the bootstrap script before going online:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python wizard-bootstrap/wizard_bootstrap.py
|
||||||
|
```
|
||||||
|
|
||||||
|
Resolve all failures before beginning work.
|
||||||
|
|
||||||
|
### Ongoing Compliance
|
||||||
|
|
||||||
|
A monthly audit runs automatically (see `wizard-bootstrap/monthly_audit.py`).
|
||||||
|
The report is saved to `~/.hermes/wizard-council/audit-YYYY-MM.md` and posted
|
||||||
|
to the `wizard-council-automation` Telegram channel.
|
||||||
|
|
||||||
|
### Skill Drift
|
||||||
|
|
||||||
|
Run the skills audit to detect and fix drift:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python wizard-bootstrap/skills_audit.py # detect
|
||||||
|
python wizard-bootstrap/skills_audit.py --fix # sync
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9. Contract Versioning
|
||||||
|
|
||||||
|
Changes to this contract require a PR reviewed by at least one wizard council
|
||||||
|
member. Bump the version number and update the date above with each change.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quick Reference
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Full environment validation
|
||||||
|
python wizard-bootstrap/wizard_bootstrap.py
|
||||||
|
|
||||||
|
# Skills drift check
|
||||||
|
python wizard-bootstrap/skills_audit.py
|
||||||
|
|
||||||
|
# Dependency check
|
||||||
|
python wizard-bootstrap/dependency_checker.py
|
||||||
|
|
||||||
|
# Full monthly audit (all three checks, saves report)
|
||||||
|
python wizard-bootstrap/monthly_audit.py
|
||||||
|
```
|
||||||
1
wizard-bootstrap/__init__.py
Normal file
1
wizard-bootstrap/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# wizard-bootstrap package
|
||||||
300
wizard-bootstrap/dependency_checker.py
Normal file
300
wizard-bootstrap/dependency_checker.py
Normal file
@@ -0,0 +1,300 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
dependency_checker.py — Cross-Wizard Dependency Validator
|
||||||
|
|
||||||
|
Each skill may declare binary or environment-variable dependencies in its
|
||||||
|
SKILL.md frontmatter under a `dependencies` key:
|
||||||
|
|
||||||
|
---
|
||||||
|
name: my-skill
|
||||||
|
dependencies:
|
||||||
|
binaries: [ffmpeg, imagemagick]
|
||||||
|
env_vars: [MY_API_KEY, MY_SECRET]
|
||||||
|
---
|
||||||
|
|
||||||
|
This script scans all installed skills, extracts declared dependencies, and
|
||||||
|
checks whether each is satisfied in the current environment.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python wizard-bootstrap/dependency_checker.py
|
||||||
|
python wizard-bootstrap/dependency_checker.py --json
|
||||||
|
python wizard-bootstrap/dependency_checker.py --skill software-development/code-review
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import sys
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
try:
|
||||||
|
import yaml
|
||||||
|
HAS_YAML = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_YAML = False
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Data model
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
@dataclass
class SkillDep:
    """A single declared dependency (binary or env var) of one skill."""

    # Path of the skill directory relative to the skills root.
    skill_path: str
    # Skill name from frontmatter (falls back to the directory name).
    skill_name: str
    # Exactly one of binary / env_var is set per instance.
    binary: Optional[str] = None
    env_var: Optional[str] = None
    # Whether the dependency was found in the current environment.
    satisfied: bool = False
    # Human-readable result of the check (e.g. resolved path, "not set").
    detail: str = ""
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class DepReport:
    """Aggregated results of a dependency scan across all skills."""

    deps: list[SkillDep] = field(default_factory=list)

    @property
    def all_satisfied(self) -> bool:
        """True when every scanned dependency passed (vacuously True if empty)."""
        return all(d.satisfied for d in self.deps)

    @property
    def unsatisfied(self) -> list[SkillDep]:
        """All dependencies that failed their check."""
        return [d for d in self.deps if not d.satisfied]
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Frontmatter parser
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _parse_frontmatter(text: str) -> dict:
    """Extract YAML frontmatter from a SKILL.md file.

    Returns {} when there is no frontmatter delimiter pair, when pyyaml is
    unavailable, when the YAML is malformed, or when the frontmatter parses
    to a non-mapping value.
    """
    if not text.startswith("---"):
        return {}
    end = text.find("\n---", 3)
    if end == -1:
        return {}
    fm_text = text[3:end].strip()
    if not HAS_YAML:
        return {}
    try:
        data = yaml.safe_load(fm_text)
    except Exception:
        return {}
    # safe_load may yield a list/str/None for degenerate frontmatter;
    # callers invoke .get(...) on the result, so coerce non-dicts to {}.
    return data if isinstance(data, dict) else {}
|
||||||
|
|
||||||
|
|
||||||
|
def _load_skill_deps(skill_md: Path) -> tuple[str, list[str], list[str]]:
    """
    Returns (skill_name, binaries, env_vars) from a SKILL.md frontmatter.
    """
    # errors="replace" so a badly-encoded SKILL.md never aborts the scan.
    text = skill_md.read_text(encoding="utf-8", errors="replace")
    fm = _parse_frontmatter(text)
    # Fall back to the directory name when frontmatter omits `name`.
    skill_name = fm.get("name", skill_md.parent.name)
    deps = fm.get("dependencies", {})
    if not isinstance(deps, dict):
        # Malformed `dependencies` (e.g. a string) — treat as no deps.
        return skill_name, [], []
    binaries = deps.get("binaries") or []
    env_vars = deps.get("env_vars") or []
    # Tolerate scalar declarations (a single string instead of a list).
    if isinstance(binaries, str):
        binaries = [binaries]
    if isinstance(env_vars, str):
        env_vars = [env_vars]
    return skill_name, list(binaries), list(env_vars)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Checks
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _check_binary(binary: str) -> tuple[bool, str]:
|
||||||
|
path = shutil.which(binary)
|
||||||
|
if path:
|
||||||
|
return True, f"found at {path}"
|
||||||
|
return False, f"not found in PATH"
|
||||||
|
|
||||||
|
|
||||||
|
def _check_env_var(var: str) -> tuple[bool, str]:
|
||||||
|
val = os.environ.get(var)
|
||||||
|
if val:
|
||||||
|
return True, "set"
|
||||||
|
return False, "not set"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Scanner
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _find_skills_dir() -> Optional[Path]:
    """Resolve skills directory: prefer repo root, fall back to HERMES_HOME."""
    # When running from a checkout, <repo-root>/skills wins.
    repo_skills = Path(__file__).parent.parent / "skills"
    if repo_skills.exists():
        return repo_skills

    # Otherwise look under HERMES_HOME (default ~/.hermes) in known spots;
    # None means no skills directory could be located.
    home = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))
    candidates = (home / "skills", home / "hermes-agent" / "skills")
    return next((c for c in candidates if c.exists()), None)
|
||||||
|
|
||||||
|
|
||||||
|
def run_dep_check(skills_dir: Optional[Path] = None, skill_filter: Optional[str] = None) -> DepReport:
    """Scan skills for declared dependencies and check each one.

    Args:
        skills_dir: Directory to scan; auto-detected when None.
        skill_filter: Substring matched against each skill's relative path;
            non-matching skills are skipped.

    Returns:
        A DepReport with one SkillDep per declared binary/env var. The
        report is empty (and thus all_satisfied) when no skills directory
        can be resolved.
    """
    resolved = skills_dir or _find_skills_dir()
    report = DepReport()

    if resolved is None or not resolved.exists():
        return report

    # Load ~/.hermes/.env so env var checks work
    hermes_home = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))
    env_path = hermes_home / ".env"
    if env_path.exists():
        try:
            from dotenv import load_dotenv  # noqa: PLC0415
            load_dotenv(env_path, override=False)
        except Exception:
            # Best effort: without python-dotenv the vars must already be
            # exported in the shell environment.
            pass

    # sorted() keeps report order deterministic across filesystems.
    for skill_md in sorted(resolved.rglob("SKILL.md")):
        rel = str(skill_md.parent.relative_to(resolved))
        if skill_filter and skill_filter not in rel:
            continue

        skill_name, binaries, env_vars = _load_skill_deps(skill_md)

        # One SkillDep entry per declared binary...
        for binary in binaries:
            ok, detail = _check_binary(binary)
            report.deps.append(SkillDep(
                skill_path=rel,
                skill_name=skill_name,
                binary=binary,
                satisfied=ok,
                detail=detail,
            ))

        # ...and one per declared environment variable.
        for var in env_vars:
            ok, detail = _check_env_var(var)
            report.deps.append(SkillDep(
                skill_path=rel,
                skill_name=skill_name,
                env_var=var,
                satisfied=ok,
                detail=detail,
            ))

    return report
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Rendering
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
_GREEN = "\033[32m"
|
||||||
|
_RED = "\033[31m"
|
||||||
|
_YELLOW = "\033[33m"
|
||||||
|
_BOLD = "\033[1m"
|
||||||
|
_RESET = "\033[0m"
|
||||||
|
|
||||||
|
|
||||||
|
def _render_terminal(report: DepReport) -> None:
    """Pretty-print the dependency report to stdout with ANSI colors."""
    print(f"\n{_BOLD}=== Cross-Wizard Dependency Check ==={_RESET}\n")

    if not report.deps:
        # Nothing declared anywhere — nudge skill authors toward declaring.
        print("No skill dependencies declared. Skills use implicit deps only.\n")
        print(
            f"{_YELLOW}Tip:{_RESET} Declare binary/env_var deps in SKILL.md frontmatter "
            "under a 'dependencies' key to make them checkable.\n"
        )
        return

    for dep in report.deps:
        icon = f"{_GREEN}✓{_RESET}" if dep.satisfied else f"{_RED}✗{_RESET}"
        # Each SkillDep carries exactly one of binary / env_var.
        if dep.binary:
            dep_type = "binary"
            dep_name = dep.binary
        else:
            dep_type = "env_var"
            dep_name = dep.env_var

        print(f" {icon} [{dep.skill_path}] {dep_type}:{dep_name} — {dep.detail}")

    total = len(report.deps)
    satisfied = sum(1 for d in report.deps if d.satisfied)
    print()
    if report.all_satisfied:
        print(f"{_GREEN}{_BOLD}All {total} dependencies satisfied.{_RESET}\n")
    else:
        failed = total - satisfied
        print(
            f"{_RED}{_BOLD}{failed}/{total} dependencies unsatisfied.{_RESET} "
            "Install missing binaries and set missing env vars.\n"
        )
|
||||||
|
|
||||||
|
|
||||||
|
def _render_json(report: DepReport) -> None:
    """Print the dependency report as pretty-printed JSON to stdout."""
    dep_entries = []
    for d in report.deps:
        dep_entries.append({
            "skill_path": d.skill_path,
            "skill_name": d.skill_name,
            "type": "binary" if d.binary else "env_var",
            "name": d.binary or d.env_var,
            "satisfied": d.satisfied,
            "detail": d.detail,
        })

    satisfied_count = sum(1 for d in report.deps if d.satisfied)
    payload = {
        "all_satisfied": report.all_satisfied,
        "summary": {
            "total": len(report.deps),
            "satisfied": satisfied_count,
            "unsatisfied": len(report.unsatisfied),
        },
        "deps": dep_entries,
    }
    print(json.dumps(payload, indent=2))
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# CLI entry point
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def main() -> None:
    """CLI entry point: parse args, run the dependency check, exit 0/1."""
    if not HAS_YAML:
        # Without pyyaml no frontmatter can be parsed, so the scan will
        # silently find zero declared dependencies — warn loudly up front.
        print("WARNING: pyyaml not installed — cannot parse SKILL.md frontmatter. "
              "Dependency declarations will be skipped.", file=sys.stderr)

    parser = argparse.ArgumentParser(
        description="Check cross-wizard skill dependencies (binaries, env vars)."
    )
    parser.add_argument(
        "--skills-dir",
        default=None,
        help="Skills directory to scan (default: auto-detect)",
    )
    parser.add_argument(
        "--skill",
        default=None,
        help="Filter to a specific skill path substring",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Output results as JSON",
    )
    args = parser.parse_args()

    skills_dir = Path(args.skills_dir).resolve() if args.skills_dir else None
    report = run_dep_check(skills_dir=skills_dir, skill_filter=args.skill)

    if args.json:
        _render_json(report)
    else:
        _render_terminal(report)

    # Non-zero exit when any dependency is unsatisfied (CI-friendly).
    sys.exit(0 if report.all_satisfied else 1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
259
wizard-bootstrap/monthly_audit.py
Normal file
259
wizard-bootstrap/monthly_audit.py
Normal file
@@ -0,0 +1,259 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
monthly_audit.py — Wizard Council Monthly Environment Audit
|
||||||
|
|
||||||
|
Runs all three checks (bootstrap, skills audit, dependency check) and
|
||||||
|
produces a combined Markdown report. Designed to be invoked by cron or
|
||||||
|
manually.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python wizard-bootstrap/monthly_audit.py
|
||||||
|
python wizard-bootstrap/monthly_audit.py --output /path/to/report.md
|
||||||
|
python wizard-bootstrap/monthly_audit.py --post-telegram # post to configured channel
|
||||||
|
|
||||||
|
The report is also written to ~/.hermes/wizard-council/audit-YYYY-MM.md
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import io
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from contextlib import redirect_stdout
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Ensure repo root is importable
|
||||||
|
_REPO_ROOT = Path(__file__).parent.parent
|
||||||
|
sys.path.insert(0, str(_REPO_ROOT))
|
||||||
|
|
||||||
|
from wizard_bootstrap import run_all_checks
|
||||||
|
from skills_audit import run_audit
|
||||||
|
from dependency_checker import run_dep_check
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Report builder
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _emoji(ok: bool) -> str:
|
||||||
|
return "✅" if ok else "❌"
|
||||||
|
|
||||||
|
|
||||||
|
def build_report(repo_root: Path) -> str:
    """Build the combined monthly audit report as a Markdown string.

    Runs all three checks — environment bootstrap, skills drift audit, and
    cross-wizard dependency check — and assembles their results into one
    Markdown document with a final summary table.

    Args:
        repo_root: Root of the hermes-agent repo (forwarded to the skills audit).

    Returns:
        The full report as a Markdown string (no trailing newline).
    """
    now = datetime.now(timezone.utc)
    # Fixed: constant strings carried pointless f-prefixes (ruff F541).
    lines = [
        "# Wizard Council Environment Audit",
        "",
        f"**Date:** {now.strftime('%Y-%m-%d %H:%M UTC')}",
        "",
        "---",
        "",
    ]

    # 1. Bootstrap checks — one bullet per check, with a fix hint on failure.
    lines.append("## 1. Environment Bootstrap")
    lines.append("")
    bootstrap = run_all_checks()
    for check in bootstrap.checks:
        icon = _emoji(check.passed)
        label = check.name.replace("_", " ").title()
        lines.append(f"- {icon} **{label}**: {check.message}")
        if not check.passed and check.fix_hint:
            lines.append(f"  - _Fix_: {check.fix_hint}")
    lines.append("")
    if bootstrap.passed:
        lines.append("**Environment: READY** ✅")
    else:
        failed = len(bootstrap.failed)
        lines.append(f"**Environment: {failed} check(s) FAILED** ❌")
    lines.append("")
    lines.append("---")
    lines.append("")

    # 2. Skills drift audit — summary table plus per-status detail sections.
    lines.append("## 2. Skills Drift Audit")
    lines.append("")
    skills_report = run_audit(repo_root)
    missing = skills_report.by_status("MISSING")
    extra = skills_report.by_status("EXTRA")
    outdated = skills_report.by_status("OUTDATED")
    ok_count = len(skills_report.by_status("OK"))
    total = len(skills_report.drifts)

    lines.append("| Status | Count |")
    lines.append("|--------|-------|")
    lines.append(f"| ✅ OK | {ok_count} |")
    lines.append(f"| ❌ Missing | {len(missing)} |")
    lines.append(f"| ⚠️ Extra | {len(extra)} |")
    lines.append(f"| 🔄 Outdated | {len(outdated)} |")
    lines.append(f"| **Total** | **{total}** |")
    lines.append("")

    if missing:
        lines.append("### Missing Skills (in repo, not installed)")
        for d in missing:
            lines.append(f"- `{d.skill_path}`")
        lines.append("")

    if outdated:
        lines.append("### Outdated Skills")
        for d in outdated:
            lines.append(f"- `{d.skill_path}` (repo: `{d.repo_hash}`, installed: `{d.installed_hash}`)")
        lines.append("")

    if extra:
        lines.append("### Extra Skills (installed, not in repo)")
        for d in extra:
            lines.append(f"- `{d.skill_path}`")
        lines.append("")

    if not skills_report.has_drift:
        lines.append("**Skills: IN SYNC** ✅")
    else:
        lines.append("**Skills: DRIFT DETECTED** ❌ — run `python wizard-bootstrap/skills_audit.py --fix`")
    lines.append("")
    lines.append("---")
    lines.append("")

    # 3. Dependency check — only meaningful if SKILL.md files declare deps.
    lines.append("## 3. Cross-Wizard Dependency Check")
    lines.append("")
    dep_report = run_dep_check()

    if not dep_report.deps:
        lines.append("No explicit dependencies declared in SKILL.md frontmatter.")
        lines.append("")
        lines.append(
            "_Tip: Add a `dependencies` block to SKILL.md to make binary/env_var "
            "requirements checkable automatically._"
        )
    else:
        satisfied = sum(1 for d in dep_report.deps if d.satisfied)
        total_deps = len(dep_report.deps)
        lines.append(f"**{satisfied}/{total_deps} dependencies satisfied.**")
        lines.append("")
        if dep_report.unsatisfied:
            lines.append("### Unsatisfied Dependencies")
            for dep in dep_report.unsatisfied:
                dep_type = "binary" if dep.binary else "env_var"
                dep_name = dep.binary or dep.env_var
                lines.append(f"- `[{dep.skill_path}]` {dep_type}:`{dep_name}` — {dep.detail}")
            lines.append("")

    if dep_report.all_satisfied:
        lines.append("**Dependencies: ALL SATISFIED** ✅")
    else:
        lines.append("**Dependencies: ISSUES FOUND** ❌")
    lines.append("")
    lines.append("---")
    lines.append("")

    # Final summary: one row per section, plus the overall verdict.
    overall_ok = bootstrap.passed and not skills_report.has_drift and dep_report.all_satisfied
    lines.append("## Summary")
    lines.append("")
    lines.append("| Check | Status |")
    lines.append("|-------|--------|")
    lines.append(f"| Environment Bootstrap | {_emoji(bootstrap.passed)} |")
    lines.append(f"| Skills Drift | {_emoji(not skills_report.has_drift)} |")
    lines.append(f"| Dependency Check | {_emoji(dep_report.all_satisfied)} |")
    lines.append("")
    if overall_ok:
        lines.append("**Overall: FORGE READY** ✅")
    else:
        lines.append("**Overall: ACTION REQUIRED** ❌")
    lines.append("")

    return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Output / delivery
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _save_report(report: str, output_path: Path) -> None:
|
||||||
|
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
output_path.write_text(report, encoding="utf-8")
|
||||||
|
print(f"Report saved to: {output_path}")
|
||||||
|
|
||||||
|
|
||||||
|
def _post_telegram(report: str) -> None:
    """Post the report summary to Telegram via hermes gateway if configured."""
    # Both a bot token and a target channel are required; TELEGRAM_HOME_CHANNEL
    # takes precedence over the legacy TELEGRAM_CHANNEL_ID fallback.
    token = os.environ.get("TELEGRAM_BOT_TOKEN")
    channel = os.environ.get("TELEGRAM_HOME_CHANNEL") or os.environ.get("TELEGRAM_CHANNEL_ID")
    if not (token and channel):
        print("Telegram not configured (need TELEGRAM_BOT_TOKEN + TELEGRAM_HOME_CHANNEL).", file=sys.stderr)
        return

    try:
        import requests  # noqa: PLC0415

        # Extract just the summary section for Telegram (keep it brief)
        # Falls back to the last ~1000 chars if no "## Summary" heading exists.
        summary_start = report.find("## Summary")
        summary_text = report[summary_start:] if summary_start != -1 else report[-1000:]
        payload = {
            "chat_id": channel,
            "text": f"🧙 **Wizard Council Monthly Audit**\n\n{summary_text}",
            "parse_mode": "Markdown",
        }
        resp = requests.post(
            f"https://api.telegram.org/bot{token}/sendMessage",
            json=payload,
            timeout=15,
        )
        if resp.status_code == 200:
            print("Report summary posted to Telegram.")
        else:
            print(f"Telegram post failed: HTTP {resp.status_code}", file=sys.stderr)
    except Exception as exc:
        # Best-effort delivery: a Telegram failure must never break the audit run.
        print(f"Telegram post error: {exc}", file=sys.stderr)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# CLI entry point
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def main() -> None:
    """CLI entry point: build the audit report, print it, save it, optionally post it."""
    parser = argparse.ArgumentParser(
        description="Run the monthly Wizard Council environment audit."
    )
    parser.add_argument(
        "--output", default=None,
        help="Path to save the Markdown report (default: ~/.hermes/wizard-council/audit-YYYY-MM.md)",
    )
    parser.add_argument(
        "--repo-root", default=str(_REPO_ROOT),
        help="Root of the hermes-agent repo",
    )
    parser.add_argument(
        "--post-telegram", action="store_true",
        help="Post the report summary to Telegram",
    )
    args = parser.parse_args()

    report = build_report(Path(args.repo_root).resolve())

    # Always echo the full report to stdout.
    print(report)

    # Resolve the destination: explicit --output, otherwise the dated
    # default under HERMES_HOME.
    if args.output:
        destination = Path(args.output)
    else:
        stamp = datetime.now(timezone.utc).strftime("%Y-%m")
        home = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))
        destination = home / "wizard-council" / f"audit-{stamp}.md"

    _save_report(report, destination)

    if args.post_telegram:
        _post_telegram(report)


if __name__ == "__main__":
    main()
|
||||||
342
wizard-bootstrap/skills_audit.py
Normal file
342
wizard-bootstrap/skills_audit.py
Normal file
@@ -0,0 +1,342 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
skills_audit.py — Skills Drift Detector
|
||||||
|
|
||||||
|
Compares the skills bundled in the repo against those installed in
|
||||||
|
HERMES_HOME/skills/, then reports any drift:
|
||||||
|
|
||||||
|
- MISSING — skill in repo but not in installed location
|
||||||
|
- EXTRA — skill installed but not in repo (local-only)
|
||||||
|
- OUTDATED — repo skill.md differs from installed skill.md
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python wizard-bootstrap/skills_audit.py
|
||||||
|
python wizard-bootstrap/skills_audit.py --fix # copy missing skills
|
||||||
|
python wizard-bootstrap/skills_audit.py --json
|
||||||
|
python wizard-bootstrap/skills_audit.py --repo-root /path/to/hermes-agent
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import difflib
|
||||||
|
import hashlib
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import sys
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Data model
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
@dataclass
class SkillDrift:
    """Drift record for one skill, comparing the repo copy to the installed copy."""

    skill_path: str  # e.g. "software-development/code-review"
    status: str  # "MISSING" | "EXTRA" | "OUTDATED" | "OK"
    repo_hash: Optional[str] = None  # truncated sha256 of the repo SKILL.md, if present
    installed_hash: Optional[str] = None  # truncated sha256 of the installed SKILL.md, if present
    diff_lines: list[str] = field(default_factory=list)  # unified diff, populated for OUTDATED only
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AuditReport:
    """Aggregate result of one skills-audit run."""

    drifts: list[SkillDrift] = field(default_factory=list)
    repo_root: Path = Path(".")
    installed_root: Path = Path(".")

    @property
    def has_drift(self) -> bool:
        """True when at least one skill is not in the OK state."""
        return not all(d.status == "OK" for d in self.drifts)

    def by_status(self, status: str) -> list[SkillDrift]:
        """Return every drift record whose status equals *status*."""
        return list(filter(lambda d: d.status == status, self.drifts))
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _sha256_file(path: Path) -> str:
|
||||||
|
h = hashlib.sha256()
|
||||||
|
h.update(path.read_bytes())
|
||||||
|
return h.hexdigest()[:16]
|
||||||
|
|
||||||
|
|
||||||
|
def _find_skills(root: Path) -> dict[str, Path]:
|
||||||
|
"""Return {relative_skill_path: SKILL.md path} for every skill under root."""
|
||||||
|
skills: dict[str, Path] = {}
|
||||||
|
for skill_md in root.rglob("SKILL.md"):
|
||||||
|
# skill path is relative to root, e.g. "software-development/code-review"
|
||||||
|
rel = skill_md.parent.relative_to(root)
|
||||||
|
skills[str(rel)] = skill_md
|
||||||
|
return skills
|
||||||
|
|
||||||
|
|
||||||
|
def _diff_skills(repo_md: Path, installed_md: Path) -> list[str]:
|
||||||
|
repo_lines = repo_md.read_text(encoding="utf-8", errors="replace").splitlines()
|
||||||
|
inst_lines = installed_md.read_text(encoding="utf-8", errors="replace").splitlines()
|
||||||
|
diff = list(
|
||||||
|
difflib.unified_diff(
|
||||||
|
inst_lines,
|
||||||
|
repo_lines,
|
||||||
|
fromfile="installed",
|
||||||
|
tofile="repo",
|
||||||
|
lineterm="",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
return diff
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Core audit logic
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _resolve_installed_skills_root() -> Optional[Path]:
|
||||||
|
"""Return the installed skills directory, or None if not found."""
|
||||||
|
hermes_home = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))
|
||||||
|
candidates = [
|
||||||
|
hermes_home / "skills",
|
||||||
|
hermes_home / "hermes-agent" / "skills",
|
||||||
|
]
|
||||||
|
for candidate in candidates:
|
||||||
|
if candidate.exists():
|
||||||
|
return candidate
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def run_audit(repo_root: Path, installed_root: Optional[Path] = None) -> AuditReport:
    """Compare repo-bundled skills against installed skills and return an AuditReport.

    Args:
        repo_root: hermes-agent repo root; skills are expected under repo_root/skills.
        installed_root: Installed skills directory; auto-detected when None.

    Exits the process with status 1 if the repo skills directory is missing.
    """
    repo_skills_dir = repo_root / "skills"
    if not repo_skills_dir.exists():
        print(f"ERROR: Repo skills directory not found: {repo_skills_dir}", file=sys.stderr)
        sys.exit(1)

    resolved_installed = installed_root or _resolve_installed_skills_root()
    # "/not-found" is a sentinel path checked by apply_fix before copying.
    report = AuditReport(
        repo_root=repo_root,
        installed_root=resolved_installed or Path("/not-found"),
    )

    repo_map = _find_skills(repo_skills_dir)

    if resolved_installed is None or not resolved_installed.exists():
        # All repo skills are "MISSING" from the installation
        for skill_path in sorted(repo_map):
            report.drifts.append(
                SkillDrift(
                    skill_path=skill_path,
                    status="MISSING",
                    repo_hash=_sha256_file(repo_map[skill_path]),
                )
            )
        return report

    installed_map = _find_skills(resolved_installed)

    # Walk the union of both sides so MISSING, EXTRA and OUTDATED/OK are all seen.
    all_paths = sorted(set(repo_map) | set(installed_map))
    for skill_path in all_paths:
        in_repo = skill_path in repo_map
        in_installed = skill_path in installed_map

        if in_repo and not in_installed:
            report.drifts.append(
                SkillDrift(
                    skill_path=skill_path,
                    status="MISSING",
                    repo_hash=_sha256_file(repo_map[skill_path]),
                )
            )
        elif in_installed and not in_repo:
            report.drifts.append(
                SkillDrift(
                    skill_path=skill_path,
                    status="EXTRA",
                    installed_hash=_sha256_file(installed_map[skill_path]),
                )
            )
        else:
            # Present on both sides: compare content hashes; attach a diff
            # only when they differ (OUTDATED).
            rh = _sha256_file(repo_map[skill_path])
            ih = _sha256_file(installed_map[skill_path])
            if rh != ih:
                diff = _diff_skills(repo_map[skill_path], installed_map[skill_path])
                report.drifts.append(
                    SkillDrift(
                        skill_path=skill_path,
                        status="OUTDATED",
                        repo_hash=rh,
                        installed_hash=ih,
                        diff_lines=diff,
                    )
                )
            else:
                report.drifts.append(
                    SkillDrift(skill_path=skill_path, status="OK", repo_hash=rh, installed_hash=ih)
                )

    return report
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Fix: copy missing skills into installed location
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def apply_fix(report: AuditReport) -> None:
    """Copy MISSING and OUTDATED skills from the repo into the installed location.

    EXTRA (installed-only) skills are deliberately left untouched.
    """
    # Sentinel set by run_audit when no installed skills directory was found.
    if report.installed_root == Path("/not-found"):
        print("Cannot fix: installed skills directory not found.", file=sys.stderr)
        return

    repo_skills_dir = report.repo_root / "skills"
    for drift in report.by_status("MISSING"):
        src = repo_skills_dir / drift.skill_path / "SKILL.md"
        dst = report.installed_root / drift.skill_path / "SKILL.md"
        dst.parent.mkdir(parents=True, exist_ok=True)
        # copy2 preserves file metadata (mtime) along with content.
        shutil.copy2(src, dst)
        print(f" Installed: {drift.skill_path}")

    for drift in report.by_status("OUTDATED"):
        # Destination already exists for OUTDATED skills, so no mkdir needed.
        src = repo_skills_dir / drift.skill_path / "SKILL.md"
        dst = report.installed_root / drift.skill_path / "SKILL.md"
        shutil.copy2(src, dst)
        print(f" Updated: {drift.skill_path}")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Rendering
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# ANSI escape codes used for terminal rendering.
_GREEN = "\033[32m"
_RED = "\033[31m"
_YELLOW = "\033[33m"
_CYAN = "\033[36m"
_BOLD = "\033[1m"
_RESET = "\033[0m"

# Color applied to each drift status in the terminal renderer.
_STATUS_COLOR = {
    "OK": _GREEN,
    "MISSING": _RED,
    "EXTRA": _YELLOW,
    "OUTDATED": _CYAN,
}
|
||||||
|
|
||||||
|
|
||||||
|
def _render_terminal(report: AuditReport, show_diff: bool = False) -> None:
    """Pretty-print the audit report to the terminal with ANSI colors.

    Args:
        report: The audit result to render.
        show_diff: When True, print up to 20 diff lines per OUTDATED skill.
    """
    print(f"\n{_BOLD}=== Wizard Skills Audit ==={_RESET}")
    print(f" Repo skills: {report.repo_root / 'skills'}")
    print(f" Installed skills: {report.installed_root}\n")

    if not report.drifts:
        print(f"{_GREEN}No skills found to compare.{_RESET}\n")
        return

    total = len(report.drifts)
    ok = len(report.by_status("OK"))
    missing = len(report.by_status("MISSING"))
    extra = len(report.by_status("EXTRA"))
    outdated = len(report.by_status("OUTDATED"))

    # Sort so that problem entries come first (False < True), then by path.
    for drift in sorted(report.drifts, key=lambda d: (d.status == "OK", d.skill_path)):
        color = _STATUS_COLOR.get(drift.status, _RESET)
        print(f" {color}{drift.status:8}{_RESET} {drift.skill_path}")
        if show_diff and drift.diff_lines:
            # Cap diff output at 20 lines per skill to keep the listing readable.
            for line in drift.diff_lines[:20]:
                print(f" {line}")
            if len(drift.diff_lines) > 20:
                print(f" ... ({len(drift.diff_lines) - 20} more lines)")

    print()
    print(f" Total: {total} OK: {_GREEN}{ok}{_RESET} "
          f"Missing: {_RED}{missing}{_RESET} "
          f"Extra: {_YELLOW}{extra}{_RESET} "
          f"Outdated: {_CYAN}{outdated}{_RESET}")
    print()

    if not report.has_drift:
        print(f"{_GREEN}{_BOLD}No drift detected. Skills are in sync.{_RESET}\n")
    else:
        print(f"{_YELLOW}{_BOLD}Drift detected. Run with --fix to sync missing/outdated skills.{_RESET}\n")
|
||||||
|
|
||||||
|
|
||||||
|
def _render_json(report: AuditReport) -> None:
|
||||||
|
out = {
|
||||||
|
"has_drift": report.has_drift,
|
||||||
|
"repo_skills_dir": str(report.repo_root / "skills"),
|
||||||
|
"installed_skills_dir": str(report.installed_root),
|
||||||
|
"summary": {
|
||||||
|
"total": len(report.drifts),
|
||||||
|
"ok": len(report.by_status("OK")),
|
||||||
|
"missing": len(report.by_status("MISSING")),
|
||||||
|
"extra": len(report.by_status("EXTRA")),
|
||||||
|
"outdated": len(report.by_status("OUTDATED")),
|
||||||
|
},
|
||||||
|
"drifts": [
|
||||||
|
{
|
||||||
|
"skill_path": d.skill_path,
|
||||||
|
"status": d.status,
|
||||||
|
"repo_hash": d.repo_hash,
|
||||||
|
"installed_hash": d.installed_hash,
|
||||||
|
"diff_line_count": len(d.diff_lines),
|
||||||
|
}
|
||||||
|
for d in report.drifts
|
||||||
|
if d.status != "OK"
|
||||||
|
],
|
||||||
|
}
|
||||||
|
print(json.dumps(out, indent=2))
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# CLI entry point
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def main() -> None:
    """CLI entry point for the skills drift audit.

    Exit status is 0 when no drift remains, 1 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Audit wizard skills for drift between repo and installed location."
    )
    parser.add_argument(
        "--repo-root", default=str(Path(__file__).parent.parent),
        help="Root of the hermes-agent repo (default: parent of this script)",
    )
    parser.add_argument(
        "--installed-root", default=None,
        help="Installed skills directory (default: auto-detect from HERMES_HOME)",
    )
    parser.add_argument(
        "--fix", action="store_true",
        help="Copy missing/outdated skills from repo to installed location",
    )
    parser.add_argument(
        "--diff", action="store_true",
        help="Show diff for outdated skills",
    )
    parser.add_argument(
        "--json", action="store_true",
        help="Output results as JSON",
    )
    args = parser.parse_args()

    root = Path(args.repo_root).resolve()
    installed = Path(args.installed_root).resolve() if args.installed_root else None

    report = run_audit(root, installed)

    if args.fix:
        apply_fix(report)
        # Re-audit so the rendered output reflects the post-fix state.
        report = run_audit(root, installed)

    if args.json:
        _render_json(report)
    else:
        _render_terminal(report, show_diff=args.diff)

    sys.exit(0 if not report.has_drift else 1)


if __name__ == "__main__":
    main()
|
||||||
378
wizard-bootstrap/wizard_bootstrap.py
Normal file
378
wizard-bootstrap/wizard_bootstrap.py
Normal file
@@ -0,0 +1,378 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
wizard_bootstrap.py — Wizard Environment Validator
|
||||||
|
|
||||||
|
Validates that a new wizard's forge environment is ready:
|
||||||
|
1. Python version check (>=3.11)
|
||||||
|
2. Core dependencies installed
|
||||||
|
3. Gitea authentication
|
||||||
|
4. Telegram connectivity
|
||||||
|
5. Smoke test (hermes import)
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python wizard-bootstrap/wizard_bootstrap.py
|
||||||
|
python wizard-bootstrap/wizard_bootstrap.py --fix
|
||||||
|
python wizard-bootstrap/wizard_bootstrap.py --json
|
||||||
|
|
||||||
|
Exits 0 if all checks pass, 1 if any check fails.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import importlib
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Result model
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
@dataclass
class CheckResult:
    """Outcome of a single environment bootstrap check."""

    name: str  # machine-readable check id, e.g. "gitea_auth"
    passed: bool  # True when the check succeeded
    message: str  # human-readable one-line result
    fix_hint: Optional[str] = None  # remediation advice, shown on failure
    detail: Optional[str] = None  # optional extra diagnostic text
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class BootstrapReport:
    """Aggregated results of all bootstrap checks."""

    checks: list[CheckResult] = field(default_factory=list)

    @property
    def passed(self) -> bool:
        """True when every recorded check passed."""
        return all(c.passed for c in self.checks)

    @property
    def failed(self) -> list[CheckResult]:
        """The subset of checks that did not pass."""
        return [c for c in self.checks if not c.passed]

    def add(self, result: CheckResult) -> None:
        """Append *result* to the report."""
        self.checks.append(result)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Individual checks
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def check_python_version() -> CheckResult:
    """Require Python >= 3.11."""
    info = sys.version_info
    # Only major/minor matter for the gate; micro is reported for context.
    meets_minimum = (info.major, info.minor) >= (3, 11)
    return CheckResult(
        name="python_version",
        passed=meets_minimum,
        message=f"Python {info.major}.{info.minor}.{info.micro}",
        fix_hint="Install Python 3.11+ via uv, pyenv, or your OS package manager.",
    )
|
||||||
|
|
||||||
|
|
||||||
|
def check_core_deps() -> CheckResult:
    """Verify that hermes core Python packages are importable."""
    # NOTE: dotenv ships as 'python-dotenv' but imports as 'dotenv'.
    required = [
        "openai",
        "anthropic",
        "dotenv",
        "yaml",
        "rich",
        "requests",
        "pydantic",
        "prompt_toolkit",
    ]

    def _importable(pkg: str) -> bool:
        try:
            importlib.import_module(pkg)
        except ModuleNotFoundError:
            return False
        return True

    missing = [pkg for pkg in required if not _importable(pkg)]

    if missing:
        return CheckResult(
            name="core_deps",
            passed=False,
            message=f"Missing packages: {', '.join(missing)}",
            fix_hint="Run: uv pip install -r requirements.txt (or: pip install -r requirements.txt)",
        )
    return CheckResult(name="core_deps", passed=True, message="All core packages importable")
|
||||||
|
|
||||||
|
|
||||||
|
def check_hermes_importable() -> CheckResult:
    """Smoke-test: import hermes_constants (no side effects)."""
    # Add repo root to sys.path so we can import regardless of cwd
    repo_root = str(Path(__file__).parent.parent)
    if repo_root not in sys.path:
        sys.path.insert(0, repo_root)
    try:
        import hermes_constants  # noqa: F401

        return CheckResult(name="hermes_smoke", passed=True, message="hermes_constants imported OK")
    except Exception as exc:
        # Broad catch is deliberate: any import-time failure (syntax error,
        # missing transitive dependency) should surface as a failed check.
        return CheckResult(
            name="hermes_smoke",
            passed=False,
            message=f"Import error: {exc}",
            fix_hint="Ensure you are in the hermes-agent repo root and your venv is active.",
        )
|
||||||
|
|
||||||
|
|
||||||
|
def check_gitea_auth() -> CheckResult:
    """Verify Gitea token env var is set and the API responds."""
    # GITEA_TOKEN takes precedence; FORGE_TOKEN is an accepted alias.
    token = os.environ.get("GITEA_TOKEN") or os.environ.get("FORGE_TOKEN")
    if not token:
        return CheckResult(
            name="gitea_auth",
            passed=False,
            message="GITEA_TOKEN / FORGE_TOKEN not set",
            fix_hint="Export GITEA_TOKEN=<your-token> in your shell or ~/.hermes/.env",
        )

    # Attempt a lightweight API call — list repos endpoint returns quickly
    forge_url = os.environ.get("FORGE_URL", "https://forge.alexanderwhitestone.com")
    try:
        import requests  # noqa: PLC0415

        resp = requests.get(
            f"{forge_url}/api/v1/repos/search",
            headers={"Authorization": f"token {token}"},
            params={"limit": 1},
            timeout=10,
        )
        if resp.status_code == 200:
            return CheckResult(name="gitea_auth", passed=True, message="Gitea API reachable and token valid")
        # Any non-200 (401, 403, 5xx, ...) is reported as a token problem first.
        return CheckResult(
            name="gitea_auth",
            passed=False,
            message=f"Gitea API returned HTTP {resp.status_code}",
            fix_hint="Check that your GITEA_TOKEN is correct and not expired.",
        )
    except Exception as exc:
        # Connection errors, timeouts, DNS failures, or requests not installed.
        return CheckResult(
            name="gitea_auth",
            passed=False,
            message=f"Gitea API unreachable: {exc}",
            fix_hint="Check network connectivity and FORGE_URL env var.",
        )
|
||||||
|
|
||||||
|
|
||||||
|
def check_telegram_connectivity() -> CheckResult:
    """Verify Telegram bot token is set and the Bot API responds."""
    token = os.environ.get("TELEGRAM_BOT_TOKEN")
    if not token:
        return CheckResult(
            name="telegram",
            passed=False,
            message="TELEGRAM_BOT_TOKEN not set",
            fix_hint="Export TELEGRAM_BOT_TOKEN=<token> in your shell or ~/.hermes/.env",
        )

    try:
        import requests  # noqa: PLC0415

        # getMe is the cheapest authenticated Bot API call — validates the token.
        resp = requests.get(
            f"https://api.telegram.org/bot{token}/getMe",
            timeout=10,
        )
        if resp.status_code == 200:
            data = resp.json()
            username = data.get("result", {}).get("username", "?")
            return CheckResult(
                name="telegram",
                passed=True,
                message=f"Telegram bot @{username} reachable",
            )
        return CheckResult(
            name="telegram",
            passed=False,
            message=f"Telegram API returned HTTP {resp.status_code}",
            fix_hint="Check that TELEGRAM_BOT_TOKEN is valid.",
        )
    except Exception as exc:
        # Connection errors, timeouts, or requests not installed.
        return CheckResult(
            name="telegram",
            passed=False,
            message=f"Telegram unreachable: {exc}",
            fix_hint="Check network connectivity.",
        )
|
||||||
|
|
||||||
|
|
||||||
|
def check_env_vars() -> CheckResult:
    """Check that at least one LLM provider key is configured."""
    provider_keys = (
        "OPENROUTER_API_KEY",
        "ANTHROPIC_API_KEY",
        "ANTHROPIC_TOKEN",
        "OPENAI_API_KEY",
        "GLM_API_KEY",
        "KIMI_API_KEY",
        "MINIMAX_API_KEY",
    )
    found = [key for key in provider_keys if os.environ.get(key)]

    # Guard clause: fail fast when no provider key is present.
    if not found:
        return CheckResult(
            name="llm_provider",
            passed=False,
            message="No LLM provider API key found",
            fix_hint=(
                "Set at least one of: OPENROUTER_API_KEY, ANTHROPIC_API_KEY, OPENAI_API_KEY "
                "in ~/.hermes/.env or your shell."
            ),
        )
    return CheckResult(
        name="llm_provider",
        passed=True,
        message=f"LLM provider key(s) present: {', '.join(found)}",
    )
|
||||||
|
|
||||||
|
|
||||||
|
def check_hermes_home() -> CheckResult:
    """Check that the HERMES_HOME directory is present and writable."""
    # Fall back to ~/.hermes when HERMES_HOME is not set in the environment.
    home_dir = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))

    # Collect the first problem found as (message, fix_hint); None means healthy.
    problem = None
    if not home_dir.exists():
        problem = (
            f"HERMES_HOME does not exist: {home_dir}",
            "Run 'hermes setup' or create the directory manually.",
        )
    elif not os.access(home_dir, os.W_OK):
        problem = (
            f"HERMES_HOME not writable: {home_dir}",
            f"Fix permissions: chmod u+w {home_dir}",
        )

    if problem is not None:
        message, hint = problem
        return CheckResult(
            name="hermes_home",
            passed=False,
            message=message,
            fix_hint=hint,
        )
    return CheckResult(
        name="hermes_home",
        passed=True,
        message=f"HERMES_HOME OK: {home_dir}",
    )
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Runner
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _load_dotenv_if_available() -> None:
    """Load ~/.hermes/.env so token checks work without manual export."""
    env_file = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes")) / ".env"
    if not env_file.exists():
        return
    try:
        from dotenv import load_dotenv  # noqa: PLC0415

        # override=False: values already exported in the shell win over the file.
        load_dotenv(env_file, override=False)
    except Exception:
        # dotenv not installed yet — that's fine; checks then rely on the
        # caller's shell environment alone.
        pass
|
||||||
|
|
||||||
|
|
||||||
|
def run_all_checks() -> BootstrapReport:
    """Execute every bootstrap check in order and collect the results."""
    report = BootstrapReport()
    # Load ~/.hermes/.env first so token-based checks see configured secrets.
    _load_dotenv_if_available()

    for check in (
        check_python_version,
        check_core_deps,
        check_hermes_importable,
        check_hermes_home,
        check_env_vars,
        check_gitea_auth,
        check_telegram_connectivity,
    ):
        report.add(check())

    return report
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Rendering
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# ANSI escape sequences used by _render_terminal for coloured output.
_GREEN = "\033[32m"
_RED = "\033[31m"
_YELLOW = "\033[33m"
_BOLD = "\033[1m"
_RESET = "\033[0m"
|
||||||
|
|
||||||
|
|
||||||
|
def _render_terminal(report: BootstrapReport) -> None:
    """Pretty-print the report to stdout using ANSI colours."""
    print(f"\n{_BOLD}=== Wizard Bootstrap — Environment Check ==={_RESET}\n")
    for item in report.checks:
        status = f"{_GREEN}✓{_RESET}" if item.passed else f"{_RED}✗{_RESET}"
        title = item.name.replace("_", " ").title()
        print(f" {status} {_BOLD}{title}{_RESET}: {item.message}")
        # Only failed checks carry an actionable hint worth surfacing.
        if not item.passed and item.fix_hint:
            print(f" {_YELLOW}→ {item.fix_hint}{_RESET}")
        if item.detail:
            print(f" {item.detail}")

    count = len(report.checks)
    ok = sum(1 for c in report.checks if c.passed)
    print()
    if report.passed:
        print(f"{_GREEN}{_BOLD}All {count} checks passed. Forge is ready.{_RESET}\n")
    else:
        print(
            f"{_RED}{_BOLD}{count - ok}/{count} check(s) failed.{_RESET} "
            "Resolve the issues above before going online.\n"
        )
|
||||||
|
|
||||||
|
|
||||||
|
def _render_json(report: BootstrapReport) -> None:
|
||||||
|
out = {
|
||||||
|
"passed": report.passed,
|
||||||
|
"summary": {
|
||||||
|
"total": len(report.checks),
|
||||||
|
"passed": sum(1 for c in report.checks if c.passed),
|
||||||
|
"failed": sum(1 for c in report.checks if not c.passed),
|
||||||
|
},
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"name": c.name,
|
||||||
|
"passed": c.passed,
|
||||||
|
"message": c.message,
|
||||||
|
"fix_hint": c.fix_hint,
|
||||||
|
"detail": c.detail,
|
||||||
|
}
|
||||||
|
for c in report.checks
|
||||||
|
],
|
||||||
|
}
|
||||||
|
print(json.dumps(out, indent=2))
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# CLI entry point
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def main() -> None:
    """CLI entry point: run all checks, render the report, exit 0/1."""
    parser = argparse.ArgumentParser(
        description="Validate the forge wizard environment."
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Output results as JSON",
    )
    args = parser.parse_args()

    report = run_all_checks()

    # Pick the renderer once, then dispatch through it.
    render = _render_json if args.json else _render_terminal
    render(report)

    sys.exit(0 if report.passed else 1)
|
||||||
|
|
||||||
|
|
||||||
|
# Allow direct execution: python wizard-bootstrap/wizard_bootstrap.py [--json]
if __name__ == "__main__":
    main()
|
||||||
Reference in New Issue
Block a user