Compare commits

...

13 Commits

Author SHA1 Message Date
fdacbf5e78 feat: sidecar config validation on deploy (#690)
Some checks failed
Architecture Lint / Linter Tests (pull_request) Successful in 8s
PR Checklist / pr-checklist (pull_request) Failing after 1m14s
Smoke Test / smoke (pull_request) Failing after 5s
Validate Config / YAML Lint (pull_request) Failing after 4s
Validate Config / Shell Script Lint (pull_request) Failing after 11s
Validate Config / Cron Syntax Check (pull_request) Successful in 3s
Validate Config / Deploy Script Dry Run (pull_request) Successful in 3s
Validate Config / JSON Validate (pull_request) Successful in 4s
Validate Config / Python Syntax & Import Check (pull_request) Failing after 21s
Validate Config / Playbook Schema Validation (pull_request) Successful in 6s
Architecture Lint / Lint Repository (pull_request) Has been cancelled
Validate Config / Python Test Suite (pull_request) Has been cancelled
2026-04-17 05:10:11 +00:00
60c3838c2b feat: config validator with schema checks (#690)
Some checks failed
Architecture Lint / Linter Tests (pull_request) Successful in 44s
PR Checklist / pr-checklist (pull_request) Failing after 4m2s
Smoke Test / smoke (pull_request) Failing after 9s
Validate Config / YAML Lint (pull_request) Failing after 30s
Validate Config / JSON Validate (pull_request) Successful in 29s
Validate Config / Shell Script Lint (pull_request) Failing after 1m8s
Validate Config / Cron Syntax Check (pull_request) Successful in 14s
Validate Config / Python Syntax & Import Check (pull_request) Failing after 1m52s
Validate Config / Deploy Script Dry Run (pull_request) Successful in 10s
Validate Config / Playbook Schema Validation (pull_request) Successful in 17s
Architecture Lint / Lint Repository (pull_request) Has been cancelled
Validate Config / Python Test Suite (pull_request) Has been cancelled
2026-04-15 03:35:41 +00:00
9251ffc4b5 test 2026-04-15 03:34:20 +00:00
d120526244 fix: add python3 shebang to scripts/visual_pr_reviewer.py (#681) 2026-04-15 02:57:53 +00:00
8596ff761b fix: add python3 shebang to scripts/diagram_meaning_extractor.py (#681) 2026-04-15 02:57:40 +00:00
7553fd4f3e fix: add python3 shebang to scripts/captcha_bypass_handler.py (#681) 2026-04-15 02:57:25 +00:00
71082fe06f fix: add python3 shebang to bin/soul_eval_gate.py (#681) 2026-04-15 02:57:14 +00:00
6d678e938e fix: add python3 shebang to bin/nostr-agent-demo.py (#681) 2026-04-15 02:57:00 +00:00
ad751a6de6 docs: add pipeline scheduler README 2026-04-14 22:47:12 +00:00
130fa40f0c feat: add pipeline-scheduler cron job 2026-04-14 22:46:51 +00:00
82f9810081 feat: add nightly-pipeline-scheduler.sh 2026-04-14 22:46:38 +00:00
2548277137 cleanup test
Some checks failed
Architecture Lint / Linter Tests (push) Successful in 22s
Smoke Test / smoke (push) Failing after 21s
Validate Config / YAML Lint (push) Failing after 13s
Validate Config / JSON Validate (push) Successful in 14s
Validate Config / Python Syntax & Import Check (push) Failing after 1m9s
Validate Config / Shell Script Lint (push) Failing after 31s
Validate Config / Cron Syntax Check (push) Successful in 5s
Validate Config / Deploy Script Dry Run (push) Successful in 7s
Validate Config / Playbook Schema Validation (push) Successful in 16s
Architecture Lint / Linter Tests (pull_request) Successful in 14s
Smoke Test / smoke (pull_request) Failing after 13s
Validate Config / YAML Lint (pull_request) Failing after 12s
Validate Config / JSON Validate (pull_request) Successful in 13s
Validate Config / Python Syntax & Import Check (pull_request) Failing after 54s
Validate Config / Shell Script Lint (pull_request) Failing after 21s
Validate Config / Cron Syntax Check (pull_request) Successful in 5s
Validate Config / Deploy Script Dry Run (pull_request) Successful in 7s
Validate Config / Playbook Schema Validation (pull_request) Successful in 18s
PR Checklist / pr-checklist (pull_request) Failing after 3m27s
Architecture Lint / Lint Repository (push) Has been cancelled
Validate Config / Python Test Suite (pull_request) Has been cancelled
Architecture Lint / Lint Repository (pull_request) Has been cancelled
Validate Config / Python Test Suite (push) Has been cancelled
2026-04-14 22:39:03 +00:00
2b234fde79 test: verify API works
Some checks failed
Architecture Lint / Linter Tests (push) Has been cancelled
Architecture Lint / Lint Repository (push) Has been cancelled
Smoke Test / smoke (push) Failing after 12s
Validate Config / YAML Lint (push) Failing after 11s
Validate Config / JSON Validate (push) Successful in 11s
Validate Config / Python Syntax & Import Check (push) Failing after 47s
Validate Config / Shell Script Lint (push) Failing after 33s
Validate Config / Cron Syntax Check (push) Successful in 10s
Validate Config / Deploy Script Dry Run (push) Successful in 10s
Validate Config / Playbook Schema Validation (push) Successful in 14s
Validate Config / Python Test Suite (push) Has been cancelled
2026-04-14 22:39:02 +00:00
10 changed files with 737 additions and 0 deletions

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Full Nostr agent-to-agent communication demo - FINAL WORKING
"""

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Soul Eval Gate — The Conscience of the Training Pipeline

bin/validate_config.py (new file, 83 lines)
View File

@@ -0,0 +1,83 @@
#!/usr/bin/env python3
"""
Config Validator -- pre-deploy YAML validation for timmy-config sidecar.
Validates YAML syntax, required keys (model.default, model.provider,
toolsets), and provider names before deploy.sh writes to ~/.hermes/.
Usage:
    python3 bin/validate_config.py [path/to/config.yaml]
    python3 bin/validate_config.py --strict    (fail on warnings too)
"""
import json, os, sys, yaml
from pathlib import Path

REQUIRED = {
    "model": {"type": dict, "keys": {"default": str, "provider": str}},
    "toolsets": {"type": list},
}
ALLOWED_PROVIDERS = [
    "anthropic", "openai", "nous", "ollama", "openrouter", "openai-codex"
]

def validate(path):
    errors = []
    try:
        with open(path) as f:
            data = yaml.safe_load(f)
    except Exception as e:
        return [f"YAML parse error: {e}"]
    if not isinstance(data, dict):
        return [f"Expected mapping, got {type(data).__name__}"]
    for key, spec in REQUIRED.items():
        if key not in data:
            errors.append(f"Required key missing: {key}")
            continue
        if spec["type"] == dict and not isinstance(data[key], dict):
            errors.append(f"{key}: expected dict")
            continue
        if spec["type"] == list and not isinstance(data[key], list):
            errors.append(f"{key}: expected list")
            continue
        if "keys" in spec:
            for sub, sub_type in spec["keys"].items():
                if sub not in data[key]:
                    errors.append(f"{key}.{sub}: required")
                elif not isinstance(data[key][sub], sub_type):
                    errors.append(f"{key}.{sub}: expected {sub_type.__name__}")
    # Only read provider when model is actually a mapping (avoids a crash when it isn't)
    model = data.get("model")
    provider = model.get("provider") if isinstance(model, dict) else None
    if provider and provider not in ALLOWED_PROVIDERS:
        errors.append(f"model.provider: unknown provider '{provider}'")
    # Check JSON files
    for jf in ["channel_directory.json"]:
        jp = Path(path).parent / jf
        if jp.exists():
            try:
                json.loads(jp.read_text())
            except Exception as e:
                errors.append(f"{jf}: invalid JSON: {e}")
    return errors

def main():
    strict = "--strict" in sys.argv
    args = [a for a in sys.argv[1:] if not a.startswith("--")]
    path = args[0] if args else str(Path(__file__).parent.parent / "config.yaml")
    if not os.path.exists(path):
        print(f"ERROR: {path} not found")
        sys.exit(1)
    errs = validate(path)
    if errs:
        for e in errs:
            print(f"ERROR: {e}")
        print(f"Validation FAILED: {len(errs)} issue(s)")
        sys.exit(1)
    print(f"OK: {path} is valid")

if __name__ == "__main__":
    main()
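For reference, a minimal smoke test of this validator, assuming PyYAML is installed and the repo root is the working directory. The model name and toolset entry are placeholders; only the key names and the `anthropic` provider come from the validator's own schema:

```bash
cat > /tmp/sample-config.yaml <<'EOF'
model:
  default: example-model      # placeholder; any string passes the type check
  provider: anthropic         # must be one of ALLOWED_PROVIDERS
toolsets:
  - core                      # placeholder toolset name
EOF
python3 bin/validate_config.py /tmp/sample-config.yaml
# Expected: "OK: /tmp/sample-config.yaml is valid"
```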

View File

@@ -0,0 +1,9 @@
- name: Nightly Pipeline Scheduler
  schedule: '*/30 18-23,0-8 * * *'  # Every 30 min, off-peak hours only
  tasks:
    - name: Check and start pipelines
      shell: "bash scripts/nightly-pipeline-scheduler.sh"
      env:
        PIPELINE_TOKEN_LIMIT: "500000"
        PIPELINE_PEAK_START: "9"
        PIPELINE_PEAK_END: "18"

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -0,0 +1,50 @@
# Nightly Pipeline Scheduler

Auto-starts batch pipelines when inference is available.

## What It Does

1. Checks inference provider health (OpenRouter, Ollama, RunPod)
2. Checks whether it's off-peak hours (configurable; default peak window is 9AM-6PM)
3. Checks interactive session load (don't fight with live users)
4. Checks the daily token budget (configurable limit)
5. Starts the highest-priority incomplete pipeline

## Pipeline Priority Order

| Priority | Pipeline | Deps | Max Tokens |
|----------|----------|------|------------|
| 1 | playground-factory | none | 100,000 |
| 2 | training-factory | none | 150,000 |
| 3 | knowledge-mine | training-factory running | 80,000 |
| 4 | adversary | knowledge-mine running | 50,000 |
| 5 | codebase-genome | none | 120,000 |

## Usage

```bash
# Normal run (used by cron)
./scripts/nightly-pipeline-scheduler.sh

# Dry run (show what would start)
./scripts/nightly-pipeline-scheduler.sh --dry-run

# Status report
./scripts/nightly-pipeline-scheduler.sh --status

# Force start during peak hours
./scripts/nightly-pipeline-scheduler.sh --force
```

## Configuration

Set via environment variables:

- `PIPELINE_TOKEN_LIMIT`: Daily token budget (default: 500,000)
- `PIPELINE_PEAK_START`: Peak hours start (default: 9)
- `PIPELINE_PEAK_END`: Peak hours end (default: 18)
- `HERMES_HOME`: Hermes home directory (default: `~/.hermes`)

## Cron

Runs every 30 minutes, off-peak only (unless `--force`).
See `cron/pipeline-scheduler.yml`.
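As an illustration, the defaults above can be overridden per invocation; the values here are arbitrary:

```bash
# Dry run with a smaller daily budget and a shifted peak window
PIPELINE_TOKEN_LIMIT=250000 PIPELINE_PEAK_START=8 PIPELINE_PEAK_END=20 \
  ./scripts/nightly-pipeline-scheduler.sh --dry-run
```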

View File

@@ -0,0 +1,383 @@
#!/usr/bin/env bash
# nightly-pipeline-scheduler.sh — Auto-start batch pipelines when inference is available.
#
# Checks provider health, pipeline progress, token budget, and interactive load.
# Starts the highest-priority incomplete pipeline that can run.
#
# Usage:
# ./scripts/nightly-pipeline-scheduler.sh # Normal run
# ./scripts/nightly-pipeline-scheduler.sh --dry-run # Show what would start
# ./scripts/nightly-pipeline-scheduler.sh --status # Pipeline status report
set -euo pipefail
# --- Configuration ---
HERMES_HOME="${HERMES_HOME:-$HOME/.hermes}"
BUDGET_FILE="${HERMES_HOME}/pipeline_budget.json"
STATE_FILE="${HERMES_HOME}/pipeline_state.json"
LOG_FILE="${HERMES_HOME}/logs/pipeline-scheduler.log"
TOKEN_DAILY_LIMIT="${PIPELINE_TOKEN_LIMIT:-500000}"
PEAK_HOURS_START="${PIPELINE_PEAK_START:-9}"
PEAK_HOURS_END="${PIPELINE_PEAK_END:-18}"
# Pipeline definitions (priority order)
# Each pipeline: name, script, max_tokens, dependencies
PIPELINES=(
"playground-factory|scripts/pipeline_playground_factory.sh|100000|none"
"training-factory|scripts/pipeline_training_factory.sh|150000|none"
"knowledge-mine|scripts/pipeline_knowledge_mine.sh|80000|training-factory"
"adversary|scripts/pipeline_adversary.sh|50000|knowledge-mine"
"codebase-genome|scripts/pipeline_codebase_genome.sh|120000|none"
)
# --- Colors ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
CYAN='\033[0;36m'
NC='\033[0m'
# --- Helpers ---
now_hour() { date +%-H; }
is_peak_hours() {
    local h=$(now_hour)
    [[ $h -ge $PEAK_HOURS_START && $h -lt $PEAK_HOURS_END ]]
}

ensure_dirs() {
    mkdir -p "$(dirname "$LOG_FILE")" "$(dirname "$BUDGET_FILE")" "$(dirname "$STATE_FILE")"
}

log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "$LOG_FILE"; }

get_budget_used_today() {
    if [[ -f "$BUDGET_FILE" ]]; then
        local today=$(date +%Y-%m-%d)
        python3 -c "
import json, sys
with open('$BUDGET_FILE') as f:
    d = json.load(f)
print(d.get('daily', {}).get('$today', {}).get('tokens_used', 0))
" 2>/dev/null || echo 0
    else
        echo 0
    fi
}

get_budget_remaining() {
    local used=$(get_budget_used_today)
    echo $((TOKEN_DAILY_LIMIT - used))
}

update_budget() {
    local pipeline="$1"
    local tokens="$2"
    local today=$(date +%Y-%m-%d)
    python3 -c "
import json, os
path = '$BUDGET_FILE'
d = {}
if os.path.exists(path):
    with open(path) as f:
        d = json.load(f)
daily = d.setdefault('daily', {})
day = daily.setdefault('$today', {'tokens_used': 0, 'pipelines': {}})
day['tokens_used'] = day.get('tokens_used', 0) + $tokens
day['pipelines']['$pipeline'] = day['pipelines'].get('$pipeline', 0) + $tokens
with open(path, 'w') as f:
    json.dump(d, f, indent=2)
"
}

get_pipeline_state() {
    if [[ -f "$STATE_FILE" ]]; then
        cat "$STATE_FILE"
    else
        echo "{}"
    fi
}

set_pipeline_state() {
    local pipeline="$1"
    local state="$2"  # running, complete, failed, skipped
    python3 -c "
import json, os
path = '$STATE_FILE'
d = {}
if os.path.exists(path):
    with open(path) as f:
        d = json.load(f)
d['$pipeline'] = {'state': '$state', 'updated': '$(date -Iseconds)'}
with open(path, 'w') as f:
    json.dump(d, f, indent=2)
"
}

is_pipeline_complete() {
    local pipeline="$1"
    python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('false')
else:
    with open(path) as f:
        d = json.load(f)
    state = d.get('$pipeline', {}).get('state', 'not_started')
    print('true' if state == 'complete' else 'false')
" 2>/dev/null || echo false
}

is_pipeline_running() {
    local pipeline="$1"
    python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('false')
else:
    with open(path) as f:
        d = json.load(f)
    state = d.get('$pipeline', {}).get('state', 'not_started')
    print('true' if state == 'running' else 'false')
" 2>/dev/null || echo false
}

check_dependency() {
    local dep="$1"
    if [[ "$dep" == "none" ]]; then
        return 0
    fi
    # For knowledge-mine: training-factory must be running or complete
    if [[ "$dep" == "training-factory" ]]; then
        local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('not_started')
else:
    with open(path) as f:
        d = json.load(f)
    print(d.get('training-factory', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
        [[ "$state" == "running" || "$state" == "complete" ]]
        return $?
    fi
    # For adversary: knowledge-mine must be at least 50% done
    # Simplified: check if it's running (we'd need progress tracking for 50%)
    if [[ "$dep" == "knowledge-mine" ]]; then
        local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('not_started')
else:
    with open(path) as f:
        d = json.load(f)
    print(d.get('knowledge-mine', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
        [[ "$state" == "running" || "$state" == "complete" ]]
        return $?
    fi
    return 0
}

check_inference_available() {
    # Check if any inference provider is responding
    # 1. Check OpenRouter
    local or_ok=$(curl -s -o /dev/null -w "%{http_code}" \
        --connect-timeout 5 "https://openrouter.ai/api/v1/models" 2>/dev/null || echo "000")
    # 2. Check local Ollama
    local ollama_ok=$(curl -s -o /dev/null -w "%{http_code}" \
        --connect-timeout 5 "http://localhost:11434/api/tags" 2>/dev/null || echo "000")
    # 3. Check RunPod (if configured)
    local runpod_ok="000"
    if [[ -n "${RUNPOD_ENDPOINT:-}" ]]; then
        runpod_ok=$(curl -s -o /dev/null -w "%{http_code}" \
            --connect-timeout 5 "$RUNPOD_ENDPOINT/health" 2>/dev/null || echo "000")
    fi
    if [[ "$or_ok" == "200" || "$ollama_ok" == "200" || "$runpod_ok" == "200" ]]; then
        return 0
    fi
    return 1
}

check_interactive_load() {
    # Check if there are active interactive sessions (don't fight with live users)
    # Look for tmux panes with active hermes sessions
    local active
    active=$(tmux list-panes -a -F '#{pane_pid} #{pane_current_command}' 2>/dev/null \
        | grep -c "hermes\|python3" || true)
    active=${active:-0}
    # If more than 3 interactive sessions, skip pipeline start
    if [[ $active -gt 3 ]]; then
        return 1
    fi
    return 0
}

start_pipeline() {
    local name="$1"
    local script="$2"
    local max_tokens="$3"
    local budget_remaining="$4"
    local mode="${5:-run}"
    if [[ "$budget_remaining" -lt "$max_tokens" ]]; then
        log "SKIP $name: insufficient budget ($budget_remaining < $max_tokens tokens)"
        return 1
    fi
    if [[ ! -f "$script" ]]; then
        log "SKIP $name: script not found ($script)"
        return 1
    fi
    # Accept the documented --dry-run flag as passed through from main
    if [[ "$mode" == "--dry-run" || "$mode" == "dry-run" ]]; then
        log "DRY-RUN: Would start $name (budget: $budget_remaining, needs: $max_tokens)"
        return 0
    fi
    log "START $name (budget: $budget_remaining, max_tokens: $max_tokens)"
    set_pipeline_state "$name" "running"
    # Run in background, capture output
    local log_path="${HERMES_HOME}/logs/pipeline-${name}.log"
    bash "$script" --max-tokens "$max_tokens" >> "$log_path" 2>&1 &
    local pid=$!
    # Wait a moment to check if it started OK
    sleep 2
    if kill -0 $pid 2>/dev/null; then
        log "RUNNING $name (PID: $pid, log: $log_path)"
        # Record the PID
        python3 -c "
import json, os
path = '$STATE_FILE'
d = {}
if os.path.exists(path):
    with open(path) as f:
        d = json.load(f)
d['$name']['pid'] = $pid
with open(path, 'w') as f:
    json.dump(d, f, indent=2)
"
        return 0
    else
        log "FAIL $name: script exited immediately"
        set_pipeline_state "$name" "failed"
        return 1
    fi
}

# --- Main ---
main() {
    local mode="${1:-run}"
    ensure_dirs
    log "=== Pipeline Scheduler ($mode) ==="
    # Check 1: Is inference available?
    if ! check_inference_available; then
        log "No inference provider available. Skipping all pipelines."
        exit 0
    fi
    log "Inference: AVAILABLE"
    # Check 2: Is it peak hours?
    if is_peak_hours && [[ "$mode" != "--force" ]]; then
        local h=$(now_hour)
        log "Peak hours ($h:00). Skipping pipeline start. Use --force to override."
        exit 0
    fi
    log "Off-peak: OK"
    # Check 3: Interactive load
    if ! check_interactive_load && [[ "$mode" != "--force" ]]; then
        log "High interactive load. Skipping pipeline start."
        exit 0
    fi
    log "Interactive load: OK"
    # Check 4: Token budget
    local budget=$(get_budget_remaining)
    log "Token budget remaining: $budget / $TOKEN_DAILY_LIMIT"
    if [[ $budget -le 0 ]]; then
        log "Daily token budget exhausted. Stopping."
        exit 0
    fi
    # Check 5: Pipeline status
    if [[ "$mode" == "--status" ]]; then
        echo -e "${CYAN}Pipeline Status:${NC}"
        echo "────────────────────────────────────────────────────"
        for entry in "${PIPELINES[@]}"; do
            IFS='|' read -r name script max_tokens dep <<< "$entry"
            local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('not_started')
else:
    with open(path) as f:
        d = json.load(f)
    print(d.get('$name', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
            local color=$NC
            case "$state" in
                running) color=$YELLOW ;;
                complete) color=$GREEN ;;
                failed) color=$RED ;;
            esac
            printf " %-25s %b%s%b (max: %s tokens, dep: %s)\n" "$name" "$color" "$state" "$NC" "$max_tokens" "$dep"
        done
        echo "────────────────────────────────────────────────────"
        echo " Budget: $budget / $TOKEN_DAILY_LIMIT tokens remaining"
        echo " Peak hours: $PEAK_HOURS_START:00 - $PEAK_HOURS_END:00"
        exit 0
    fi
    # Find and start the highest-priority incomplete pipeline
    local started=0
    for entry in "${PIPELINES[@]}"; do
        IFS='|' read -r name script max_tokens dep <<< "$entry"
        # Skip if already running or complete
        if [[ "$(is_pipeline_running $name)" == "true" ]]; then
            log "SKIP $name: already running"
            continue
        fi
        if [[ "$(is_pipeline_complete $name)" == "true" ]]; then
            log "SKIP $name: already complete"
            continue
        fi
        # Check dependency
        if ! check_dependency "$dep"; then
            log "SKIP $name: dependency $dep not met"
            continue
        fi
        # Try to start
        if start_pipeline "$name" "$script" "$max_tokens" "$budget" "$mode"; then
            started=1
            # Only start one pipeline per run (let it claim tokens before next check)
            # Exception: playground-factory and training-factory can run in parallel
            if [[ "$name" != "playground-factory" && "$name" != "training-factory" ]]; then
                break
            fi
        fi
    done
    if [[ $started -eq 0 ]]; then
        log "No pipelines to start (all complete, running, or blocked)."
    fi
    log "=== Pipeline Scheduler done ==="
}

main "$@"
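For context, the state and budget files the scheduler maintains are plain JSON, keyed exactly as the script reads and writes them (`state`/`updated`/`pid` per pipeline; `daily.<date>.tokens_used` and `daily.<date>.pipelines` for the budget). A hedged sketch of seeding them in a throwaway `HERMES_HOME` to exercise `--status`; the pipeline states and token figures here are invented:

```bash
export HERMES_HOME=$(mktemp -d)
mkdir -p "$HERMES_HOME/logs"
cat > "$HERMES_HOME/pipeline_state.json" <<'EOF'
{
  "playground-factory": {"state": "complete", "updated": "2026-04-14T21:30:00+00:00"},
  "training-factory":   {"state": "running",  "updated": "2026-04-14T22:00:00+00:00"}
}
EOF
cat > "$HERMES_HOME/pipeline_budget.json" <<'EOF'
{"daily": {"2026-04-14": {"tokens_used": 120000, "pipelines": {"training-factory": 120000}}}}
EOF
# Note: the availability and peak-hour checks run before the status block,
# so the table only prints off-peak with a reachable inference provider.
./scripts/nightly-pipeline-scheduler.sh --status
```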

View File

@@ -0,0 +1,207 @@
#!/usr/bin/env python3
"""
validate-sidecar-config.py — Pre-deploy validation for timmy-config sidecar.
Validates YAML syntax, required keys, value types before deploy.
Rejects bad config with clear errors.
Usage:
    python3 scripts/validate-sidecar-config.py ~/.timmy/config.yaml
    python3 scripts/validate-sidecar-config.py --all       # Validate all config files
    python3 scripts/validate-sidecar-config.py --schema    # Print expected schema
"""
import argparse
import json
import os
import sys
from pathlib import Path

try:
    import yaml
    HAS_YAML = True
except ImportError:
    HAS_YAML = False

# Expected schema: field -> type
REQUIRED_SCHEMA = {
    "model": str,
    "provider": str,
}
OPTIONAL_SCHEMA = {
    "api_base": str,
    "max_tokens": int,
    "temperature": (int, float),
    "system_prompt": str,
    "tools": list,
    "memory_enabled": bool,
    "session_timeout": int,
    "log_level": str,
}
CONFIG_DIRS = [
    Path.home() / ".timmy",
    Path.home() / ".hermes",
]

def validate_yaml_syntax(filepath: Path) -> list[str]:
    """Validate YAML can be parsed."""
    errors = []
    try:
        content = filepath.read_text(errors="ignore")
        if HAS_YAML:
            yaml.safe_load(content)
        else:
            # Fallback: check for basic JSON-in-YAML
            json.loads(content)
    except (yaml.YAMLError if HAS_YAML else Exception, json.JSONDecodeError) as e:
        errors.append(f"YAML syntax error: {e}")
    except OSError as e:
        errors.append(f"Cannot read file: {e}")
    return errors

def validate_schema(filepath: Path) -> list[str]:
    """Validate config against expected schema."""
    errors = []
    try:
        content = filepath.read_text(errors="ignore")
        if HAS_YAML:
            config = yaml.safe_load(content) or {}
        else:
            config = json.loads(content)
    except Exception as e:
        return [f"Cannot parse: {e}"]
    if not isinstance(config, dict):
        return ["Config must be a YAML/JSON object (dict)"]
    # Check required keys
    for key, expected_type in REQUIRED_SCHEMA.items():
        if key not in config:
            errors.append(f"Missing required key: '{key}' (expected {expected_type.__name__})")
        elif not isinstance(config[key], expected_type):
            errors.append(f"Wrong type for '{key}': expected {expected_type.__name__}, got {type(config[key]).__name__}")
    # Check optional keys types
    for key, expected_type in OPTIONAL_SCHEMA.items():
        if key in config:
            if isinstance(expected_type, tuple):
                if not isinstance(config[key], expected_type):
                    type_names = " or ".join(t.__name__ for t in expected_type)
                    errors.append(f"Wrong type for '{key}': expected {type_names}, got {type(config[key]).__name__}")
            else:
                if not isinstance(config[key], expected_type):
                    errors.append(f"Wrong type for '{key}': expected {expected_type.__name__}, got {type(config[key]).__name__}")
    # Check for common mistakes
    if "model" in config and isinstance(config["model"], str):
        if config["model"].startswith("http"):
            errors.append("'model' looks like a URL — did you mean 'api_base'?")
    if "api_base" in config and isinstance(config["api_base"], str):
        if not config["api_base"].startswith("http"):
            errors.append("'api_base' should start with http:// or https://")
    return errors

def validate_file(filepath: Path) -> tuple[bool, list[str]]:
    """Full validation of a config file. Returns (valid, errors)."""
    errors = []
    errors.extend(validate_yaml_syntax(filepath))
    if not errors:
        errors.extend(validate_schema(filepath))
    return len(errors) == 0, errors

def find_config_files() -> list[Path]:
    """Find config files in standard locations."""
    configs = []
    for d in CONFIG_DIRS:
        if not d.exists():
            continue
        for f in d.rglob("*.yaml"):
            if f.name in ("config.yaml", "settings.yaml", "env.yaml", "config.yml"):
                configs.append(f)
        for f in d.rglob("*.yml"):
            if "config" in f.name.lower():
                configs.append(f)
        for f in d.rglob("*.json"):
            if f.name in ("config.json", "settings.json"):
                configs.append(f)
    return sorted(set(configs))

def cmd_validate(filepath: str) -> bool:
    path = Path(filepath)
    if not path.exists():
        print(f"ERROR: {path} not found")
        return False
    valid, errors = validate_file(path)
    if valid:
        print(f"OK: {path}")
    else:
        print(f"FAIL: {path}")
        for e in errors:
            print(f"  - {e}")
    return valid

def cmd_validate_all() -> bool:
    configs = find_config_files()
    if not configs:
        print("No config files found in standard locations.")
        return True
    all_valid = True
    for config in configs:
        valid, errors = validate_file(config)
        if valid:
            print(f"OK: {config}")
        else:
            all_valid = False
            print(f"FAIL: {config}")
            for e in errors:
                print(f"  - {e}")
    print(f"\n{'All configs valid.' if all_valid else 'Validation failures found.'}")
    return all_valid

def cmd_schema():
    print("Required keys:")
    for key, typ in REQUIRED_SCHEMA.items():
        print(f"  {key}: {typ.__name__}")
    print("\nOptional keys:")
    for key, typ in OPTIONAL_SCHEMA.items():
        if isinstance(typ, tuple):
            print(f"  {key}: {'|'.join(t.__name__ for t in typ)}")
        else:
            print(f"  {key}: {typ.__name__}")

def main():
    parser = argparse.ArgumentParser(description="Validate sidecar config files")
    parser.add_argument("file", nargs="?", help="Config file to validate")
    parser.add_argument("--all", action="store_true", help="Validate all config files")
    parser.add_argument("--schema", action="store_true", help="Print expected schema")
    args = parser.parse_args()
    if args.schema:
        cmd_schema()
    elif args.all:
        ok = cmd_validate_all()
        sys.exit(0 if ok else 1)
    elif args.file:
        ok = cmd_validate(args.file)
        sys.exit(0 if ok else 1)
    else:
        parser.print_help()

if __name__ == "__main__":
    main()
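And a quick negative test of the common-mistake check in validate_schema(), assuming PyYAML is available; the file path and values are made up, while the check being exercised is the one defined in the script above:

```bash
cat > /tmp/bad-sidecar.yaml <<'EOF'
model: https://api.example.com/v1   # a URL where a model name belongs
provider: openai
EOF
python3 scripts/validate-sidecar-config.py /tmp/bad-sidecar.yaml
# Expected: FAIL, flagging that 'model' looks like a URL (exit code 1)
```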

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision