Compare commits

...

10 Commits

Author SHA1 Message Date
6cb611f2b5 test: deploy config validator tests (#690)
Some checks failed
Architecture Lint / Linter Tests (pull_request) Successful in 36s
Smoke Test / smoke (pull_request) Failing after 20s
Validate Config / YAML Lint (pull_request) Failing after 16s
Validate Config / JSON Validate (pull_request) Successful in 10s
Validate Config / Python Syntax & Import Check (pull_request) Failing after 1m15s
PR Checklist / pr-checklist (pull_request) Failing after 8m16s
Validate Config / Shell Script Lint (pull_request) Failing after 1m9s
Validate Config / Cron Syntax Check (pull_request) Successful in 18s
Validate Config / Deploy Script Dry Run (pull_request) Successful in 17s
Validate Config / Playbook Schema Validation (pull_request) Successful in 32s
Architecture Lint / Lint Repository (pull_request) Has been cancelled
Validate Config / Python Test Suite (pull_request) Has been cancelled
2026-04-15 03:23:06 +00:00
477fc6ce6f feat: sidecar config validation on deploy (#690) 2026-04-15 03:21:37 +00:00
d120526244 fix: add python3 shebang to scripts/visual_pr_reviewer.py (#681) 2026-04-15 02:57:53 +00:00
8596ff761b fix: add python3 shebang to scripts/diagram_meaning_extractor.py (#681) 2026-04-15 02:57:40 +00:00
7553fd4f3e fix: add python3 shebang to scripts/captcha_bypass_handler.py (#681) 2026-04-15 02:57:25 +00:00
71082fe06f fix: add python3 shebang to bin/soul_eval_gate.py (#681) 2026-04-15 02:57:14 +00:00
6d678e938e fix: add python3 shebang to bin/nostr-agent-demo.py (#681) 2026-04-15 02:57:00 +00:00
ad751a6de6 docs: add pipeline scheduler README 2026-04-14 22:47:12 +00:00
130fa40f0c feat: add pipeline-scheduler cron job 2026-04-14 22:46:51 +00:00
82f9810081 feat: add nightly-pipeline-scheduler.sh 2026-04-14 22:46:38 +00:00
10 changed files with 925 additions and 0 deletions

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Full Nostr agent-to-agent communication demo - FINAL WORKING
"""

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Soul Eval Gate — The Conscience of the Training Pipeline

View File

@@ -0,0 +1,9 @@
- name: Nightly Pipeline Scheduler
schedule: '*/30 18-23,0-8 * * *' # Every 30 min, off-peak hours only
tasks:
- name: Check and start pipelines
shell: "bash scripts/nightly-pipeline-scheduler.sh"
env:
PIPELINE_TOKEN_LIMIT: "500000"
PIPELINE_PEAK_START: "9"
PIPELINE_PEAK_END: "18"

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -0,0 +1,336 @@
#!/usr/bin/env python3
"""
deploy_config_validator.py — Pre-deploy config validation for timmy-config sidecar.
Validates YAML config before writing during deploy. Checks:
1. YAML syntax (pyyaml safe_load)
2. Required keys exist for the config type
3. Value types match expected schema
4. No banned providers referenced
5. Provider chain is well-formed
Usage:
# Validate a config file before deploy
python3 scripts/deploy_config_validator.py config.yaml
# Validate stdin (piped from deploy script)
cat config.yaml | python3 scripts/deploy_config_validator.py -
# Validate with expected type
python3 scripts/deploy_config_validator.py --type hermes config.yaml
# JSON output for CI/CD
python3 scripts/deploy_config_validator.py --json config.yaml
Exit codes:
0 — config is valid
1 — validation failed (errors printed to stderr)
2 — usage error
"""
import argparse
import fnmatch
import json
import sys
from pathlib import Path
from typing import Any
try:
import yaml
except ImportError:
print("ERROR: PyYAML not installed. Run: pip install pyyaml", file=sys.stderr)
sys.exit(2)
# ── Schema Definitions ────────────────────────────────────────────────────────
# Required keys per config type
REQUIRED_KEYS = {
"hermes": {
"providers": {"type": list, "description": "List of provider configurations"},
},
"wizard": {
"providers": {"type": list, "description": "List of provider configurations"},
},
"ansible_inventory": {
"all": {"type": dict, "description": "Top-level inventory structure"},
},
"cron": {
"jobs": {"type": list, "description": "List of cron job definitions"},
},
"playbook": {
"name": {"type": str, "description": "Playbook name"},
},
"any": {}, # No required keys for generic validation
}
# Provider schema — each provider must have these keys
PROVIDER_REQUIRED = {"name", "model", "base_url"}
PROVIDER_ALLOWED_TYPES = {
"name": str,
"model": str,
"base_url": str,
"api_key_env": str,
"timeout": (int, float),
"reason": str,
}
# Banned provider patterns (from ansible inventory)
BANNED_PROVIDERS = {"anthropic", "claude"}
BANNED_MODEL_PATTERNS = ["claude-*", "anthropic/*", "*sonnet*", "*opus*", "*haiku*"]
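# Illustrative matches (patterns are checked case-insensitively with fnmatch below):
#   "claude-sonnet-4"  matches "claude-*" and "*sonnet*"  -> banned
#   "anthropic/claude" matches "anthropic/*"              -> banned
#   "kimi-k2.5"        matches no pattern                 -> allowed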
# ── Validators ────────────────────────────────────────────────────────────────
class ValidationError:
def __init__(self, path: str, message: str, severity: str = "error"):
self.path = path
self.message = message
self.severity = severity
def __str__(self):
prefix = {"error": "ERROR", "warning": "WARN", "info": "INFO"}.get(self.severity, "???")
return f"[{prefix}] {self.path}: {self.message}"
def validate_yaml_syntax(text: str) -> tuple[Any | None, list[ValidationError]]:
"""Validate YAML syntax. Returns (parsed_data, errors)."""
errors = []
# Check for tabs
for i, line in enumerate(text.splitlines(), 1):
if "\t" in line:
errors.append(ValidationError(f"line {i}", "contains tab character (use spaces for YAML)", "warning"))
# Parse
try:
data = yaml.safe_load(text)
except yaml.YAMLError as e:
mark = getattr(e, "problem_mark", None)
if mark:
errors.append(ValidationError(
f"line {mark.line + 1}, col {mark.column + 1}",
f"YAML syntax error: {e.problem}"
))
else:
errors.append(ValidationError("(file)", f"YAML syntax error: {e}"))
return None, errors
if data is None:
errors.append(ValidationError("(file)", "empty or null config", "warning"))
return None, errors
return data, errors
def validate_required_keys(data: dict, config_type: str) -> list[ValidationError]:
"""Check that required keys exist."""
errors = []
schema = REQUIRED_KEYS.get(config_type, REQUIRED_KEYS["any"])
for key, spec in schema.items():
if key not in data:
errors.append(ValidationError(
f".{key}",
f"required key missing: {key} ({spec['description']})"
))
elif not isinstance(data[key], spec["type"]):
errors.append(ValidationError(
f".{key}",
f"expected {spec['type'].__name__}, got {type(data[key]).__name__}"
))
return errors
def validate_provider_chain(data: dict) -> list[ValidationError]:
"""Validate provider configurations."""
errors = []
providers = data.get("providers", [])
if not isinstance(providers, list):
return errors # Caught by required_keys check
for i, provider in enumerate(providers):
path = f".providers[{i}]"
if not isinstance(provider, dict):
errors.append(ValidationError(path, "provider must be a dict"))
continue
# Check required provider keys
for key in PROVIDER_REQUIRED:
if key not in provider:
errors.append(ValidationError(f"{path}.{key}", f"provider missing required key: {key}"))
elif not isinstance(provider[key], str):
errors.append(ValidationError(
f"{path}.{key}",
f"expected string, got {type(provider[key]).__name__}"
))
# Check for banned providers
name = provider.get("name", "").lower()
model = provider.get("model", "").lower()
for banned in BANNED_PROVIDERS:
if banned in name:
errors.append(ValidationError(
f"{path}.name",
f"banned provider: '{provider.get('name')}' (contains '{banned}')"
))
for pattern in BANNED_MODEL_PATTERNS:
if fnmatch.fnmatch(model, pattern.lower()):
errors.append(ValidationError(
f"{path}.model",
f"banned model pattern: '{provider.get('model')}' matches '{pattern}'"
))
# Check value types
for key, val in provider.items():
expected = PROVIDER_ALLOWED_TYPES.get(key)
if expected and not isinstance(val, expected):
errors.append(ValidationError(
f"{path}.{key}",
f"expected {expected if isinstance(expected, type) else expected.__name__}, got {type(val).__name__}",
"warning"
))
# Check provider chain has at least one entry
if not providers:
errors.append(ValidationError(".providers", "provider chain is empty — no inference available"))
return errors
def validate_value_types(data: dict, path: str = "") -> list[ValidationError]:
"""Recursively check for obviously wrong value types."""
errors = []
if isinstance(data, dict):
for key, val in data.items():
full_path = f"{path}.{key}" if path else f".{key}"
# Ports should be integers
if key in ("port", "api_port", "hermes_port", "timeout") and val is not None:
if not isinstance(val, (int, float)):
errors.append(ValidationError(full_path, f"expected number, got {type(val).__name__}", "warning"))
# URLs should be strings starting with http
if key in ("base_url", "gitea_url", "url") and val is not None:
if isinstance(val, str) and not val.startswith(("http://", "https://")):
errors.append(ValidationError(full_path, f"URL should start with http:// or https://", "warning"))
# Recurse
errors.extend(validate_value_types(val, full_path))
elif isinstance(data, list):
for i, item in enumerate(data):
errors.extend(validate_value_types(item, f"{path}[{i}]"))
return errors
def validate_config(text: str, config_type: str = "any") -> list[ValidationError]:
"""Run all validations on a config text."""
# Step 1: YAML syntax
data, errors = validate_yaml_syntax(text)
if data is None:
return errors # Can't continue without parsed data
if not isinstance(data, dict):
if config_type != "any":
errors.append(ValidationError("(file)", f"expected dict for {config_type} config, got {type(data).__name__}"))
return errors
# Step 2: Required keys
errors.extend(validate_required_keys(data, config_type))
# Step 3: Provider chain validation (if providers exist)
if "providers" in data:
errors.extend(validate_provider_chain(data))
# Step 4: Value type checking
errors.extend(validate_value_types(data))
return errors
# ── Auto-detect config type ───────────────────────────────────────────────────
def detect_config_type(data: dict) -> str:
"""Guess config type from contents."""
if "providers" in data and "display" in data:
return "hermes"
if "providers" in data and "wizard_name" in data:
return "wizard"
if "all" in data and "children" in data.get("all", {}):
return "ansible_inventory"
if "jobs" in data:
return "cron"
if "name" in data and "hosts" in data:
return "playbook"
return "any"
# ── CLI ───────────────────────────────────────────────────────────────────────
def main():
parser = argparse.ArgumentParser(description="Pre-deploy config validation")
parser.add_argument("file", help="Config file to validate (use - for stdin)")
parser.add_argument("--type", choices=list(REQUIRED_KEYS.keys()),
help="Expected config type (auto-detected if omitted)")
parser.add_argument("--json", action="store_true", help="JSON output")
args = parser.parse_args()
# Read input
if args.file == "-":
text = sys.stdin.read()
filename = "<stdin>"
else:
path = Path(args.file)
if not path.exists():
print(f"ERROR: File not found: {path}", file=sys.stderr)
sys.exit(2)
text = path.read_text(encoding="utf-8", errors="replace")
filename = str(path)
# Detect type
config_type = args.type
if not config_type:
data, _ = validate_yaml_syntax(text)
if data and isinstance(data, dict):
config_type = detect_config_type(data)
else:
config_type = "any"
# Validate
errors = validate_config(text, config_type)
# Output
if args.json:
result = {
"file": filename,
"type": config_type,
"valid": not any(e.severity == "error" for e in errors),
"error_count": sum(1 for e in errors if e.severity == "error"),
"warning_count": sum(1 for e in errors if e.severity == "warning"),
"errors": [{"path": e.path, "message": e.message, "severity": e.severity} for e in errors],
}
print(json.dumps(result, indent=2))
else:
if errors:
print(f"Config validation FAILED: {filename} (type: {config_type})", file=sys.stderr)
for e in errors:
print(f" {e}", file=sys.stderr)
else:
print(f"Config validation PASSED: {filename} (type: {config_type})")
# Exit code
if any(e.severity == "error" for e in errors):
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
main()
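The same checks can also be driven from Python rather than the CLI; a minimal sketch using only the functions defined above (the inline config text is made up for illustration):

```python
from scripts.deploy_config_validator import validate_config

text = """\
providers:
  - name: kimi-coding
    model: kimi-k2.5
    base_url: https://api.kimi.com/coding/v1
"""
errors = validate_config(text, config_type="hermes")
for e in errors:
    print(e)  # ValidationError.__str__ renders "[ERROR] path: message"
print("valid" if not any(e.severity == "error" for e in errors) else "invalid")
```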

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -0,0 +1,50 @@
# Nightly Pipeline Scheduler
Auto-starts batch pipelines when inference is available.
## What It Does
1. Checks inference provider health (OpenRouter, Ollama, RunPod)
2. Checks if it's off-peak hours (configurable, default: after 6PM)
3. Checks interactive session load (don't fight with live users)
4. Checks daily token budget (configurable limit)
5. Starts the highest-priority incomplete pipeline
## Pipeline Priority Order
| Priority | Pipeline | Deps | Max Tokens |
|----------|----------|------|------------|
| 1 | playground-factory | none | 100,000 |
| 2 | training-factory | none | 150,000 |
| 3 | knowledge-mine | training-factory running | 80,000 |
| 4 | adversary | knowledge-mine running | 50,000 |
| 5 | codebase-genome | none | 120,000 |
## Usage
```bash
# Normal run (used by cron)
./scripts/nightly-pipeline-scheduler.sh
# Dry run (show what would start)
./scripts/nightly-pipeline-scheduler.sh --dry-run
# Status report
./scripts/nightly-pipeline-scheduler.sh --status
# Force start during peak hours
./scripts/nightly-pipeline-scheduler.sh --force
```
## Configuration
Set via environment variables:
- `PIPELINE_TOKEN_LIMIT`: Daily token budget (default: 500,000)
- `PIPELINE_PEAK_START`: Peak hours start (default: 9)
- `PIPELINE_PEAK_END`: Peak hours end (default: 18)
- `HERMES_HOME`: Hermes home directory (default: ~/.hermes)
## Cron
Runs every 30 minutes. Off-peak only (unless `--force`).
See `cron/pipeline-scheduler.yml`.
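Condensed, the decision rule the script applies is: walk the priority table top to bottom; skip anything already running or complete, anything whose dependency is not running or complete, and anything that does not fit the remaining token budget; start the first pipeline left standing. A Python sketch of that rule (illustrative only, not a repo file; the real logic lives in `scripts/nightly-pipeline-scheduler.sh`):

```python
PIPELINES = [  # (name, max_tokens, dependency)
    ("playground-factory", 100_000, None),
    ("training-factory",   150_000, None),
    ("knowledge-mine",      80_000, "training-factory"),
    ("adversary",           50_000, "knowledge-mine"),
    ("codebase-genome",    120_000, None),
]

def pick_pipeline(state: dict, budget_remaining: int) -> str | None:
    for name, max_tokens, dep in PIPELINES:
        if state.get(name, {}).get("state") in ("running", "complete"):
            continue  # already handled
        if dep and state.get(dep, {}).get("state") not in ("running", "complete"):
            continue  # dependency not met
        if budget_remaining < max_tokens:
            continue  # can't afford it today
        return name
    return None  # everything complete, running, or blocked
```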

View File

@@ -0,0 +1,383 @@
#!/usr/bin/env bash
# nightly-pipeline-scheduler.sh — Auto-start batch pipelines when inference is available.
#
# Checks provider health, pipeline progress, token budget, and interactive load.
# Starts the highest-priority incomplete pipeline that can run.
#
# Usage:
# ./scripts/nightly-pipeline-scheduler.sh # Normal run
# ./scripts/nightly-pipeline-scheduler.sh --dry-run # Show what would start
# ./scripts/nightly-pipeline-scheduler.sh --status # Pipeline status report
set -euo pipefail
# --- Configuration ---
HERMES_HOME="${HERMES_HOME:-$HOME/.hermes}"
BUDGET_FILE="${HERMES_HOME}/pipeline_budget.json"
STATE_FILE="${HERMES_HOME}/pipeline_state.json"
LOG_FILE="${HERMES_HOME}/logs/pipeline-scheduler.log"
TOKEN_DAILY_LIMIT="${PIPELINE_TOKEN_LIMIT:-500000}"
PEAK_HOURS_START="${PIPELINE_PEAK_START:-9}"
PEAK_HOURS_END="${PIPELINE_PEAK_END:-18}"
# Pipeline definitions (priority order)
# Each pipeline: name, script, max_tokens, dependencies
PIPELINES=(
"playground-factory|scripts/pipeline_playground_factory.sh|100000|none"
"training-factory|scripts/pipeline_training_factory.sh|150000|none"
"knowledge-mine|scripts/pipeline_knowledge_mine.sh|80000|training-factory"
"adversary|scripts/pipeline_adversary.sh|50000|knowledge-mine"
"codebase-genome|scripts/pipeline_codebase_genome.sh|120000|none"
)
# --- Colors ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
CYAN='\033[0;36m'
NC='\033[0m'
# --- Helpers ---
now_hour() { date +%-H; }
is_peak_hours() {
local h=$(now_hour)
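    # Half-open window: an hour h counts as peak when PEAK_START <= h < PEAK_END,
    # so the defaults 9 and 18 make 09:00-17:59 peak and everything else off-peak.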
[[ $h -ge $PEAK_HOURS_START && $h -lt $PEAK_HOURS_END ]]
}
ensure_dirs() {
mkdir -p "$(dirname "$LOG_FILE")" "$(dirname "$BUDGET_FILE")" "$(dirname "$STATE_FILE")"
}
log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "$LOG_FILE"; }
get_budget_used_today() {
if [[ -f "$BUDGET_FILE" ]]; then
local today=$(date +%Y-%m-%d)
python3 -c "
import json, sys
with open('$BUDGET_FILE') as f:
d = json.load(f)
print(d.get('daily', {}).get('$today', {}).get('tokens_used', 0))
" 2>/dev/null || echo 0
else
echo 0
fi
}
get_budget_remaining() {
local used=$(get_budget_used_today)
echo $((TOKEN_DAILY_LIMIT - used))
}
update_budget() {
local pipeline="$1"
local tokens="$2"
local today=$(date +%Y-%m-%d)
python3 -c "
import json, os
path = '$BUDGET_FILE'
d = {}
if os.path.exists(path):
with open(path) as f:
d = json.load(f)
daily = d.setdefault('daily', {})
day = daily.setdefault('$today', {'tokens_used': 0, 'pipelines': {}})
day['tokens_used'] = day.get('tokens_used', 0) + $tokens
day['pipelines']['$pipeline'] = day['pipelines'].get('$pipeline', 0) + $tokens
with open(path, 'w') as f:
json.dump(d, f, indent=2)
"
}
get_pipeline_state() {
if [[ -f "$STATE_FILE" ]]; then
cat "$STATE_FILE"
else
echo "{}"
fi
}
set_pipeline_state() {
local pipeline="$1"
local state="$2" # running, complete, failed, skipped
python3 -c "
import json, os
path = '$STATE_FILE'
d = {}
if os.path.exists(path):
with open(path) as f:
d = json.load(f)
d['$pipeline'] = {'state': '$state', 'updated': '$(date -Iseconds)'}
with open(path, 'w') as f:
json.dump(d, f, indent=2)
"
}
is_pipeline_complete() {
local pipeline="$1"
python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('false')
else:
with open(path) as f:
d = json.load(f)
state = d.get('$pipeline', {}).get('state', 'not_started')
print('true' if state == 'complete' else 'false')
" 2>/dev/null || echo false
}
is_pipeline_running() {
local pipeline="$1"
python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('false')
else:
with open(path) as f:
d = json.load(f)
state = d.get('$pipeline', {}).get('state', 'not_started')
print('true' if state == 'running' else 'false')
" 2>/dev/null || echo false
}
check_dependency() {
local dep="$1"
if [[ "$dep" == "none" ]]; then
return 0
fi
# For knowledge-mine: training-factory must be running or complete
if [[ "$dep" == "training-factory" ]]; then
local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('not_started')
else:
with open(path) as f:
d = json.load(f)
print(d.get('training-factory', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
[[ "$state" == "running" || "$state" == "complete" ]]
return $?
fi
# For adversary: knowledge-mine must be at least 50% done
# Simplified: check if it's running (we'd need progress tracking for 50%)
if [[ "$dep" == "knowledge-mine" ]]; then
local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('not_started')
else:
with open(path) as f:
d = json.load(f)
print(d.get('knowledge-mine', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
[[ "$state" == "running" || "$state" == "complete" ]]
return $?
fi
return 0
}
check_inference_available() {
# Check if any inference provider is responding
# 1. Check OpenRouter
local or_ok=$(curl -s -o /dev/null -w "%{http_code}" \
--connect-timeout 5 "https://openrouter.ai/api/v1/models" 2>/dev/null || echo "000")
# 2. Check local Ollama
local ollama_ok=$(curl -s -o /dev/null -w "%{http_code}" \
--connect-timeout 5 "http://localhost:11434/api/tags" 2>/dev/null || echo "000")
# 3. Check RunPod (if configured)
local runpod_ok="000"
if [[ -n "${RUNPOD_ENDPOINT:-}" ]]; then
runpod_ok=$(curl -s -o /dev/null -w "%{http_code}" \
--connect-timeout 5 "$RUNPOD_ENDPOINT/health" 2>/dev/null || echo "000")
fi
if [[ "$or_ok" == "200" || "$ollama_ok" == "200" || "$runpod_ok" == "200" ]]; then
return 0
fi
return 1
}
check_interactive_load() {
# Check if there are active interactive sessions (don't fight with live users)
# Look for tmux panes with active hermes sessions
    local active
    # grep -c prints "0" and exits 1 when nothing matches; '|| true' keeps set -e
    # happy without appending a second "0" the way '|| echo 0' would.
    active=$(tmux list-panes -a -F '#{pane_pid} #{pane_current_command}' 2>/dev/null \
        | grep -c "hermes\|python3" || true)
    active=${active:-0}
# If more than 3 interactive sessions, skip pipeline start
if [[ $active -gt 3 ]]; then
return 1
fi
return 0
}
start_pipeline() {
local name="$1"
local script="$2"
local max_tokens="$3"
local budget_remaining="$4"
local mode="${5:-run}"
if [[ "$budget_remaining" -lt "$max_tokens" ]]; then
log "SKIP $name: insufficient budget ($budget_remaining < $max_tokens tokens)"
return 1
fi
if [[ ! -f "$script" ]]; then
log "SKIP $name: script not found ($script)"
return 1
fi
if [[ "$mode" == "dry-run" ]]; then
log "DRY-RUN: Would start $name (budget: $budget_remaining, needs: $max_tokens)"
return 0
fi
log "START $name (budget: $budget_remaining, max_tokens: $max_tokens)"
set_pipeline_state "$name" "running"
# Run in background, capture output
local log_path="${HERMES_HOME}/logs/pipeline-${name}.log"
bash "$script" --max-tokens "$max_tokens" >> "$log_path" 2>&1 &
local pid=$!
# Wait a moment to check if it started OK
sleep 2
if kill -0 $pid 2>/dev/null; then
log "RUNNING $name (PID: $pid, log: $log_path)"
# Record the PID
python3 -c "
import json, os
path = '$STATE_FILE'
d = {}
if os.path.exists(path):
with open(path) as f:
d = json.load(f)
d.setdefault('$name', {})['pid'] = $pid
with open(path, 'w') as f:
json.dump(d, f, indent=2)
"
return 0
else
log "FAIL $name: script exited immediately"
set_pipeline_state "$name" "failed"
return 1
fi
}
# --- Main ---
main() {
local mode="${1:-run}"
ensure_dirs
log "=== Pipeline Scheduler ($mode) ==="
    # Check 1: Is inference available? (--status bypasses the run gates below so
    # the status report still prints during provider outages or peak hours)
    if [[ "$mode" != "--status" ]] && ! check_inference_available; then
log "No inference provider available. Skipping all pipelines."
exit 0
fi
log "Inference: AVAILABLE"
# Check 2: Is it peak hours?
if is_peak_hours && [[ "$mode" != "--force" ]]; then
local h=$(now_hour)
log "Peak hours ($h:00). Skipping pipeline start. Use --force to override."
exit 0
fi
log "Off-peak: OK"
# Check 3: Interactive load
    if ! check_interactive_load && [[ "$mode" != "--force" && "$mode" != "--status" ]]; then
log "High interactive load. Skipping pipeline start."
exit 0
fi
log "Interactive load: OK"
# Check 4: Token budget
local budget=$(get_budget_remaining)
log "Token budget remaining: $budget / $TOKEN_DAILY_LIMIT"
    if [[ $budget -le 0 && "$mode" != "--status" ]]; then
log "Daily token budget exhausted. Stopping."
exit 0
fi
# Check 5: Pipeline status
if [[ "$mode" == "--status" ]]; then
echo -e "${CYAN}Pipeline Status:${NC}"
echo "────────────────────────────────────────────────────"
for entry in "${PIPELINES[@]}"; do
IFS='|' read -r name script max_tokens dep <<< "$entry"
local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('not_started')
else:
with open(path) as f:
d = json.load(f)
print(d.get('$name', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
local color=$NC
case "$state" in
running) color=$YELLOW ;;
complete) color=$GREEN ;;
failed) color=$RED ;;
esac
printf " %-25s %b%s%b (max: %s tokens, dep: %s)\n" "$name" "$color" "$state" "$NC" "$max_tokens" "$dep"
done
echo "────────────────────────────────────────────────────"
echo " Budget: $budget / $TOKEN_DAILY_LIMIT tokens remaining"
echo " Peak hours: $PEAK_HOURS_START:00 - $PEAK_HOURS_END:00"
exit 0
fi
# Find and start the highest-priority incomplete pipeline
local started=0
for entry in "${PIPELINES[@]}"; do
IFS='|' read -r name script max_tokens dep <<< "$entry"
# Skip if already running or complete
if [[ "$(is_pipeline_running $name)" == "true" ]]; then
log "SKIP $name: already running"
continue
fi
if [[ "$(is_pipeline_complete $name)" == "true" ]]; then
log "SKIP $name: already complete"
continue
fi
# Check dependency
if ! check_dependency "$dep"; then
log "SKIP $name: dependency $dep not met"
continue
fi
# Try to start
if start_pipeline "$name" "$script" "$max_tokens" "$budget" "$mode"; then
started=1
# Only start one pipeline per run (let it claim tokens before next check)
# Exception: playground-factory and training-factory can run in parallel
if [[ "$name" != "playground-factory" && "$name" != "training-factory" ]]; then
break
fi
fi
done
if [[ $started -eq 0 ]]; then
log "No pipelines to start (all complete, running, or blocked)."
fi
log "=== Pipeline Scheduler done ==="
}
main "$@"

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -0,0 +1,142 @@
#!/usr/bin/env python3
"""Tests for deploy_config_validator.py"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from scripts.deploy_config_validator import (
validate_yaml_syntax,
validate_required_keys,
validate_provider_chain,
validate_value_types,
validate_config,
detect_config_type,
ValidationError,
)
class TestYAMLSyntax:
def test_valid_yaml(self):
data, errors = validate_yaml_syntax("key: value\nlist:\n - a\n - b\n")
assert data is not None
assert len(errors) == 0
def test_invalid_yaml(self):
data, errors = validate_yaml_syntax("key: [unclosed")
assert data is None
assert len(errors) > 0
def test_empty_yaml(self):
data, errors = validate_yaml_syntax("")
assert data is None
assert any("empty" in e.message for e in errors)
def test_tabs_warning(self):
data, errors = validate_yaml_syntax("key:\tvalue\n")
assert any("tab" in e.message for e in errors)
class TestRequiredKeys:
def test_missing_key(self):
errors = validate_required_keys({}, "hermes")
assert any("providers" in e.message for e in errors)
def test_wrong_type(self):
errors = validate_required_keys({"providers": "not-a-list"}, "hermes")
assert any("expected list" in e.message for e in errors)
def test_valid(self):
errors = validate_required_keys({"providers": []}, "hermes")
provider_errors = [e for e in errors if "providers" in e.message and "missing" in e.message]
assert len(provider_errors) == 0
class TestProviderChain:
def test_empty_providers(self):
errors = validate_provider_chain({"providers": []})
assert any("empty" in e.message for e in errors)
def test_missing_name(self):
errors = validate_provider_chain({"providers": [{"model": "test", "base_url": "http://x"}]})
assert any("name" in e.message and "missing" in e.message for e in errors)
def test_banned_provider(self):
errors = validate_provider_chain({"providers": [
{"name": "anthropic", "model": "claude-3", "base_url": "http://x"}
]})
assert any("banned provider" in e.message for e in errors)
def test_banned_model(self):
errors = validate_provider_chain({"providers": [
{"name": "test", "model": "claude-sonnet-4", "base_url": "http://x"}
]})
assert any("banned model" in e.message for e in errors)
def test_valid_providers(self):
errors = validate_provider_chain({"providers": [
{"name": "kimi-coding", "model": "kimi-k2.5", "base_url": "https://api.kimi.com/v1"}
]})
provider_errors = [e for e in errors if e.severity == "error"]
assert len(provider_errors) == 0
class TestValueTypes:
def test_string_port(self):
errors = validate_value_types({"port": "8080"})
assert any("port" in e.path and "number" in e.message for e in errors)
def test_valid_port(self):
errors = validate_value_types({"port": 8080})
port_errors = [e for e in errors if "port" in e.path]
assert len(port_errors) == 0
def test_bad_url(self):
errors = validate_value_types({"base_url": "not-a-url"})
assert any("URL" in e.message for e in errors)
class TestDetectConfigType:
def test_hermes(self):
t = detect_config_type({"providers": [], "display": {}})
assert t == "hermes"
def test_ansible(self):
t = detect_config_type({"all": {"children": {"wizards": {}}}})
assert t == "ansible_inventory"
def test_unknown(self):
t = detect_config_type({"random": "data"})
assert t == "any"
class TestFullValidation:
def test_valid_hermes_config(self):
text = """
providers:
- name: kimi-coding
model: kimi-k2.5
base_url: https://api.kimi.com/coding/v1
timeout: 120
display:
skin: default
"""
errors = validate_config(text, "hermes")
assert not any(e.severity == "error" for e in errors)
def test_banned_provider_catches(self):
text = """
providers:
- name: anthropic
model: claude-sonnet-4
base_url: https://api.anthropic.com
"""
errors = validate_config(text, "hermes")
assert any("banned" in e.message for e in errors)
def test_missing_providers(self):
text = "display:\n skin: default\n"
errors = validate_config(text, "hermes")
assert any("providers" in e.message and "missing" in e.message for e in errors)