Compare commits

...

9 Commits

Author SHA1 Message Date
d8921630a5 feat: add training data quality filter (#687)
Some checks failed
Architecture Lint / Linter Tests (pull_request) Successful in 22s
PR Checklist / pr-checklist (pull_request) Failing after 4m0s
Smoke Test / smoke (pull_request) Failing after 16s
Validate Config / YAML Lint (pull_request) Failing after 20s
Validate Config / JSON Validate (pull_request) Successful in 17s
Validate Config / Python Syntax & Import Check (pull_request) Failing after 1m32s
Validate Config / Shell Script Lint (pull_request) Failing after 53s
Validate Config / Cron Syntax Check (pull_request) Successful in 11s
Validate Config / Deploy Script Dry Run (pull_request) Successful in 10s
Validate Config / Playbook Schema Validation (pull_request) Successful in 24s
Architecture Lint / Lint Repository (pull_request) Has been cancelled
Validate Config / Python Test Suite (pull_request) Has been cancelled
2026-04-15 03:21:12 +00:00
d120526244 fix: add python3 shebang to scripts/visual_pr_reviewer.py (#681) 2026-04-15 02:57:53 +00:00
8596ff761b fix: add python3 shebang to scripts/diagram_meaning_extractor.py (#681) 2026-04-15 02:57:40 +00:00
7553fd4f3e fix: add python3 shebang to scripts/captcha_bypass_handler.py (#681) 2026-04-15 02:57:25 +00:00
71082fe06f fix: add python3 shebang to bin/soul_eval_gate.py (#681) 2026-04-15 02:57:14 +00:00
6d678e938e fix: add python3 shebang to bin/nostr-agent-demo.py (#681) 2026-04-15 02:57:00 +00:00
ad751a6de6 docs: add pipeline scheduler README 2026-04-14 22:47:12 +00:00
130fa40f0c feat: add pipeline-scheduler cron job 2026-04-14 22:46:51 +00:00
82f9810081 feat: add nightly-pipeline-scheduler.sh 2026-04-14 22:46:38 +00:00
9 changed files with 733 additions and 0 deletions

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Full Nostr agent-to-agent communication demo - FINAL WORKING
"""

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Soul Eval Gate — The Conscience of the Training Pipeline

View File

@@ -0,0 +1,9 @@
- name: Nightly Pipeline Scheduler
schedule: '*/30 18-23,0-8 * * *' # Every 30 min, off-peak hours only
tasks:
- name: Check and start pipelines
shell: "bash scripts/nightly-pipeline-scheduler.sh"
env:
PIPELINE_TOKEN_LIMIT: "500000"
PIPELINE_PEAK_START: "9"
PIPELINE_PEAK_END: "18"

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -0,0 +1,50 @@
# Nightly Pipeline Scheduler
Auto-starts batch pipelines when inference is available.
## What It Does
1. Checks inference provider health (OpenRouter, Ollama, RunPod)
2. Checks if it's off-peak hours (configurable, default: after 6PM)
3. Checks interactive session load (don't fight with live users)
4. Checks daily token budget (configurable limit)
5. Starts the highest-priority incomplete pipeline
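The budget and per-pipeline state live in two small JSON files under `HERMES_HOME`. A quick way to peek at them (a sketch, assuming the default `~/.hermes` layout and that the scheduler has run at least once):
```bash
# Peek at scheduler state and token spend (default ~/.hermes layout)
HERMES_HOME="${HERMES_HOME:-$HOME/.hermes}"
cat "$HERMES_HOME/pipeline_state.json"   # per pipeline: {"state": ..., "updated": ...}
cat "$HERMES_HOME/pipeline_budget.json"  # per day: {"tokens_used": ..., "pipelines": {...}}
```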
## Pipeline Priority Order
| Priority | Pipeline | Deps | Max Tokens |
|----------|----------|------|------------|
| 1 | playground-factory | none | 100,000 |
| 2 | training-factory | none | 150,000 |
| 3 | knowledge-mine | training-factory running | 80,000 |
| 4 | adversary | knowledge-mine running | 50,000 |
| 5 | codebase-genome | none | 120,000 |
## Usage
```bash
# Normal run (used by cron)
./scripts/nightly-pipeline-scheduler.sh
# Dry run (show what would start)
./scripts/nightly-pipeline-scheduler.sh --dry-run
# Status report
./scripts/nightly-pipeline-scheduler.sh --status
# Force start during peak hours
./scripts/nightly-pipeline-scheduler.sh --force
```
## Configuration
Set via environment variables:
- `PIPELINE_TOKEN_LIMIT`: Daily token budget (default: 500,000)
- `PIPELINE_PEAK_START`: Peak hours start (default: 9)
- `PIPELINE_PEAK_END`: Peak hours end (default: 18)
- `HERMES_HOME`: Hermes home directory (default: ~/.hermes)
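For example, to raise the daily budget and narrow the peak window (values are illustrative):
```bash
# Illustrative values; tune to your own usage pattern
export PIPELINE_TOKEN_LIMIT=1000000   # 1M tokens/day
export PIPELINE_PEAK_START=10         # peak starts at 10:00
export PIPELINE_PEAK_END=17           # peak ends at 17:00
./scripts/nightly-pipeline-scheduler.sh --dry-run
```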
## Cron
Runs every 30 minutes, off-peak only (unless `--force`).
See `cron/pipeline-scheduler.yml`.

View File

@@ -0,0 +1,383 @@
#!/usr/bin/env bash
# nightly-pipeline-scheduler.sh — Auto-start batch pipelines when inference is available.
#
# Checks provider health, pipeline progress, token budget, and interactive load.
# Starts the highest-priority incomplete pipeline that can run.
#
# Usage:
# ./scripts/nightly-pipeline-scheduler.sh # Normal run
# ./scripts/nightly-pipeline-scheduler.sh --dry-run # Show what would start
# ./scripts/nightly-pipeline-scheduler.sh --status # Pipeline status report
set -euo pipefail
# --- Configuration ---
HERMES_HOME="${HERMES_HOME:-$HOME/.hermes}"
BUDGET_FILE="${HERMES_HOME}/pipeline_budget.json"
STATE_FILE="${HERMES_HOME}/pipeline_state.json"
LOG_FILE="${HERMES_HOME}/logs/pipeline-scheduler.log"
TOKEN_DAILY_LIMIT="${PIPELINE_TOKEN_LIMIT:-500000}"
PEAK_HOURS_START="${PIPELINE_PEAK_START:-9}"
PEAK_HOURS_END="${PIPELINE_PEAK_END:-18}"
# Pipeline definitions (priority order)
# Each pipeline: name, script, max_tokens, dependencies
PIPELINES=(
"playground-factory|scripts/pipeline_playground_factory.sh|100000|none"
"training-factory|scripts/pipeline_training_factory.sh|150000|none"
"knowledge-mine|scripts/pipeline_knowledge_mine.sh|80000|training-factory"
"adversary|scripts/pipeline_adversary.sh|50000|knowledge-mine"
"codebase-genome|scripts/pipeline_codebase_genome.sh|120000|none"
)
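# To add a pipeline, append an entry in the same "name|script|max_tokens|dep"
# form, e.g. (hypothetical): "doc-indexer|scripts/pipeline_doc_indexer.sh|60000|none"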
# --- Colors ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
CYAN='\033[0;36m'
NC='\033[0m'
# --- Helpers ---
now_hour() { date +%-H; }
is_peak_hours() {
local h=$(now_hour)
[[ $h -ge $PEAK_HOURS_START && $h -lt $PEAK_HOURS_END ]]
}
ensure_dirs() {
mkdir -p "$(dirname "$LOG_FILE")" "$(dirname "$BUDGET_FILE")" "$(dirname "$STATE_FILE")"
}
log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "$LOG_FILE"; }
get_budget_used_today() {
if [[ -f "$BUDGET_FILE" ]]; then
local today=$(date +%Y-%m-%d)
python3 -c "
import json, sys
with open('$BUDGET_FILE') as f:
d = json.load(f)
print(d.get('daily', {}).get('$today', {}).get('tokens_used', 0))
" 2>/dev/null || echo 0
else
echo 0
fi
}
get_budget_remaining() {
local used=$(get_budget_used_today)
echo $((TOKEN_DAILY_LIMIT - used))
}
update_budget() {
local pipeline="$1"
local tokens="$2"
local today=$(date +%Y-%m-%d)
python3 -c "
import json, os
path = '$BUDGET_FILE'
d = {}
if os.path.exists(path):
with open(path) as f:
d = json.load(f)
daily = d.setdefault('daily', {})
day = daily.setdefault('$today', {'tokens_used': 0, 'pipelines': {}})
day['tokens_used'] = day.get('tokens_used', 0) + $tokens
day['pipelines']['$pipeline'] = day['pipelines'].get('$pipeline', 0) + $tokens
with open(path, 'w') as f:
json.dump(d, f, indent=2)
"
}
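# Budget file shape after an update (illustrative values):
#   {"daily": {"2026-04-15": {"tokens_used": 150000,
#                             "pipelines": {"training-factory": 150000}}}}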
get_pipeline_state() {
if [[ -f "$STATE_FILE" ]]; then
cat "$STATE_FILE"
else
echo "{}"
fi
}
set_pipeline_state() {
local pipeline="$1"
local state="$2" # running, complete, failed, skipped
python3 -c "
import json, os
path = '$STATE_FILE'
d = {}
if os.path.exists(path):
with open(path) as f:
d = json.load(f)
d['$pipeline'] = {'state': '$state', 'updated': '$(date -Iseconds)'}
with open(path, 'w') as f:
json.dump(d, f, indent=2)
"
}
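# State file shape (illustrative; "pid" is added later by start_pipeline):
#   {"training-factory": {"state": "running", "updated": "2026-04-15T03:21:12+00:00"}}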
is_pipeline_complete() {
local pipeline="$1"
python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('false')
else:
with open(path) as f:
d = json.load(f)
state = d.get('$pipeline', {}).get('state', 'not_started')
print('true' if state == 'complete' else 'false')
" 2>/dev/null || echo false
}
is_pipeline_running() {
local pipeline="$1"
python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('false')
else:
with open(path) as f:
d = json.load(f)
state = d.get('$pipeline', {}).get('state', 'not_started')
print('true' if state == 'running' else 'false')
" 2>/dev/null || echo false
}
check_dependency() {
local dep="$1"
if [[ "$dep" == "none" ]]; then
return 0
fi
# For knowledge-mine: training-factory must be running or complete
if [[ "$dep" == "training-factory" ]]; then
local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('not_started')
else:
with open(path) as f:
d = json.load(f)
print(d.get('training-factory', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
[[ "$state" == "running" || "$state" == "complete" ]]
return $?
fi
# For adversary: knowledge-mine must be at least 50% done
# Simplified: check if it's running (we'd need progress tracking for 50%)
if [[ "$dep" == "knowledge-mine" ]]; then
local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('not_started')
else:
with open(path) as f:
d = json.load(f)
print(d.get('knowledge-mine', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
[[ "$state" == "running" || "$state" == "complete" ]]
return $?
fi
return 0
}
check_inference_available() {
# Check if any inference provider is responding
# 1. Check OpenRouter
local or_ok=$(curl -s -o /dev/null -w "%{http_code}" \
--connect-timeout 5 "https://openrouter.ai/api/v1/models" 2>/dev/null || echo "000")
# 2. Check local Ollama
local ollama_ok=$(curl -s -o /dev/null -w "%{http_code}" \
--connect-timeout 5 "http://localhost:11434/api/tags" 2>/dev/null || echo "000")
# 3. Check RunPod (if configured)
local runpod_ok="000"
if [[ -n "${RUNPOD_ENDPOINT:-}" ]]; then
runpod_ok=$(curl -s -o /dev/null -w "%{http_code}" \
--connect-timeout 5 "$RUNPOD_ENDPOINT/health" 2>/dev/null || echo "000")
fi
if [[ "$or_ok" == "200" || "$ollama_ok" == "200" || "$runpod_ok" == "200" ]]; then
return 0
fi
return 1
}
check_interactive_load() {
# Check if there are active interactive sessions (don't fight with live users)
# Look for tmux panes with active hermes sessions
  # grep -c prints a count (including 0) even when it exits nonzero, so use
  # `|| true`; `|| echo 0` would append a second line and break the -gt test below.
  local active=$(tmux list-panes -a -F '#{pane_pid} #{pane_current_command}' 2>/dev/null \
    | grep -c "hermes\|python3" || true)
# If more than 3 interactive sessions, skip pipeline start
if [[ $active -gt 3 ]]; then
return 1
fi
return 0
}
start_pipeline() {
local name="$1"
local script="$2"
local max_tokens="$3"
local budget_remaining="$4"
local mode="${5:-run}"
if [[ "$budget_remaining" -lt "$max_tokens" ]]; then
log "SKIP $name: insufficient budget ($budget_remaining < $max_tokens tokens)"
return 1
fi
if [[ ! -f "$script" ]]; then
log "SKIP $name: script not found ($script)"
return 1
fi
if [[ "$mode" == "dry-run" ]]; then
log "DRY-RUN: Would start $name (budget: $budget_remaining, needs: $max_tokens)"
return 0
fi
log "START $name (budget: $budget_remaining, max_tokens: $max_tokens)"
set_pipeline_state "$name" "running"
# Run in background, capture output
local log_path="${HERMES_HOME}/logs/pipeline-${name}.log"
bash "$script" --max-tokens "$max_tokens" >> "$log_path" 2>&1 &
local pid=$!
# Wait a moment to check if it started OK
sleep 2
if kill -0 $pid 2>/dev/null; then
log "RUNNING $name (PID: $pid, log: $log_path)"
# Record the PID
python3 -c "
import json, os
path = '$STATE_FILE'
d = {}
if os.path.exists(path):
with open(path) as f:
d = json.load(f)
d.setdefault('$name', {})['pid'] = $pid
with open(path, 'w') as f:
json.dump(d, f, indent=2)
"
return 0
else
log "FAIL $name: script exited immediately"
set_pipeline_state "$name" "failed"
return 1
fi
}
# --- Main ---
main() {
local mode="${1:-run}"
ensure_dirs
log "=== Pipeline Scheduler ($mode) ==="
# Check 1: Is inference available?
if ! check_inference_available; then
log "No inference provider available. Skipping all pipelines."
exit 0
fi
log "Inference: AVAILABLE"
# Check 2: Is it peak hours?
if is_peak_hours && [[ "$mode" != "--force" ]]; then
local h=$(now_hour)
log "Peak hours ($h:00). Skipping pipeline start. Use --force to override."
exit 0
fi
log "Off-peak: OK"
# Check 3: Interactive load
if ! check_interactive_load && [[ "$mode" != "--force" ]]; then
log "High interactive load. Skipping pipeline start."
exit 0
fi
log "Interactive load: OK"
# Check 4: Token budget
local budget=$(get_budget_remaining)
log "Token budget remaining: $budget / $TOKEN_DAILY_LIMIT"
if [[ $budget -le 0 ]]; then
log "Daily token budget exhausted. Stopping."
exit 0
fi
  # Status report mode (--status): print pipeline states and exit
if [[ "$mode" == "--status" ]]; then
echo -e "${CYAN}Pipeline Status:${NC}"
echo "────────────────────────────────────────────────────"
for entry in "${PIPELINES[@]}"; do
IFS='|' read -r name script max_tokens dep <<< "$entry"
local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
print('not_started')
else:
with open(path) as f:
d = json.load(f)
print(d.get('$name', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
local color=$NC
case "$state" in
running) color=$YELLOW ;;
complete) color=$GREEN ;;
failed) color=$RED ;;
esac
printf " %-25s %b%s%b (max: %s tokens, dep: %s)\n" "$name" "$color" "$state" "$NC" "$max_tokens" "$dep"
done
echo "────────────────────────────────────────────────────"
echo " Budget: $budget / $TOKEN_DAILY_LIMIT tokens remaining"
echo " Peak hours: $PEAK_HOURS_START:00 - $PEAK_HOURS_END:00"
exit 0
fi
# Find and start the highest-priority incomplete pipeline
local started=0
for entry in "${PIPELINES[@]}"; do
IFS='|' read -r name script max_tokens dep <<< "$entry"
# Skip if already running or complete
if [[ "$(is_pipeline_running $name)" == "true" ]]; then
log "SKIP $name: already running"
continue
fi
if [[ "$(is_pipeline_complete $name)" == "true" ]]; then
log "SKIP $name: already complete"
continue
fi
# Check dependency
if ! check_dependency "$dep"; then
log "SKIP $name: dependency $dep not met"
continue
fi
# Try to start
if start_pipeline "$name" "$script" "$max_tokens" "$budget" "$mode"; then
started=1
# Only start one pipeline per run (let it claim tokens before next check)
# Exception: playground-factory and training-factory can run in parallel
if [[ "$name" != "playground-factory" && "$name" != "training-factory" ]]; then
break
fi
fi
done
if [[ $started -eq 0 ]]; then
log "No pipelines to start (all complete, running, or blocked)."
fi
log "=== Pipeline Scheduler done ==="
}
main "$@"

View File

@@ -0,0 +1,286 @@
#!/usr/bin/env python3
"""
Training Data Quality Filter
Scores and removes low-quality training pairs from JSONL datasets.
Supports two formats:
- ShareGPT session format: {"conversations": [...], ...}
- Scene/pair format: {"terse": "...", "rich": "..."} or {"lyric_line": "...", "scene": {...}}
Scoring dimensions:
- Specificity: penalizes vague/generic content
- Length ratio: penalizes extreme input/output imbalances
- Code correctness: validates code blocks have matching fences
Usage:
python3 scripts/training_data_quality_filter.py input.jsonl [--threshold 0.4] [--output filtered.jsonl]
python3 scripts/training_data_quality_filter.py --dir training-data/ [--threshold 0.4]
"""
import argparse
import json
import re
import sys
from pathlib import Path
def score_specificity(text: str) -> float:
"""Score 0-1 based on how specific vs generic the text is."""
if not text or len(text.strip()) < 10:
return 0.0
score = 0.5 # baseline
# Penalize very generic starters
generic_starters = [
"sure,", "of course", "i can help", "here is", "here are",
"certainly", "absolutely", "let me help", "great question",
"that\'s a great", "interesting question",
]
lower = text.lower().strip()
for starter in generic_starters:
if lower.startswith(starter):
score -= 0.15
break
# Reward specific content indicators
if re.search(r"`[^`]+`", text): # inline code
score += 0.1
if re.search(r"```[\s\S]*?```", text): # code blocks
score += 0.15
if re.search(r"\d+\.\s", text): # numbered lists
score += 0.05
if len(text.split()) > 50: # substantial length
score += 0.1
if re.search(r"https?://", text): # URLs/references
score += 0.05
# Penalize extremely short outputs
if len(text.split()) < 5:
score -= 0.2
# Penalize repetition (same sentence repeated)
sentences = re.split(r"[.!?]+", text)
sentences = [s.strip().lower() for s in sentences if s.strip()]
if sentences:
unique_ratio = len(set(sentences)) / len(sentences)
if unique_ratio < 0.7:
score -= 0.15
return max(0.0, min(1.0, score))
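# Illustrative scores: a generic four-word reply like "Sure, here you go."
# lands near 0.15 (baseline 0.5, generic starter -0.15, under five words -0.2),
# while a specific sentence with inline `code` and a reference URL lands near 0.65.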
def score_length_ratio(input_text: str, output_text: str) -> float:
"""Score 0-1 based on input/output length balance."""
in_len = len(input_text.split())
out_len = len(output_text.split())
if in_len == 0 or out_len == 0:
return 0.0
ratio = out_len / in_len
# Ideal ratio: 0.5-5x (output can be shorter or longer, but not extreme)
if 0.5 <= ratio <= 5.0:
return 1.0
elif 0.2 <= ratio <= 10.0:
return 0.6
elif 0.1 <= ratio <= 20.0:
return 0.3
else:
return 0.1
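# Example: a 10-word input with a 30-word output has ratio 3.0 (score 1.0);
# the same input with a 300-word output has ratio 30.0 (score 0.1).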
def score_code_correctness(text: str) -> float:
"""Score 0-1 based on code block correctness."""
code_blocks = re.findall(r"```[\s\S]*?```", text)
if not code_blocks:
return 1.0 # no code = no code errors
for block in code_blocks:
# Check balanced fences
fence_count = block.count("```")
if fence_count % 2 != 0:
return 0.2
# Check for common errors
content = block.split("\n", 1)[-1] if "\n" in block else ""
if "SyntaxError" in content or "Traceback" in content:
return 0.3
if content.strip().endswith("...") and len(content.strip()) < 30:
return 0.4 # truncated code
return 1.0
def score_pair(input_text: str, output_text: str) -> dict:
"""Score a training pair on all dimensions."""
spec = score_specificity(output_text)
length = score_length_ratio(input_text, output_text)
code = score_code_correctness(output_text)
# Weighted composite
composite = (spec * 0.4) + (length * 0.3) + (code * 0.3)
return {
"specificity": round(spec, 3),
"length_ratio": round(length, 3),
"code_correctness": round(code, 3),
"composite": round(composite, 3),
}
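# Example: specificity 0.5, length_ratio 1.0, code_correctness 1.0
#   -> composite = 0.5*0.4 + 1.0*0.3 + 1.0*0.3 = 0.80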
def extract_pairs(obj: dict) -> list:
"""Extract (input, output) pairs from a JSONL object."""
pairs = []
# ShareGPT session format
if "conversations" in obj:
convs = obj["conversations"]
for i, msg in enumerate(convs):
if msg.get("from") in ("gpt", "assistant"):
# Find preceding human message
input_text = ""
for j in range(i - 1, -1, -1):
if convs[j].get("from") == "human":
input_text = convs[j].get("value", "")
break
output_text = msg.get("value", "")
if input_text and output_text:
pairs.append((input_text, output_text))
# Scene/pair format (terse/rich)
elif "terse" in obj and "rich" in obj:
pairs.append((obj["terse"], obj["rich"]))
# Scene description format
elif "lyric_line" in obj and "scene" in obj:
scene_text = json.dumps(obj["scene"]) if isinstance(obj["scene"], dict) else str(obj["scene"])
pairs.append((obj["lyric_line"], scene_text))
# Generic prompt/response
elif "prompt" in obj and "response" in obj:
pairs.append((obj["prompt"], obj["response"]))
# Generic input/output
elif "input" in obj and "output" in obj:
pairs.append((obj["input"], obj["output"]))
return pairs
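# Example: {"conversations": [{"from": "human", "value": "Q"},
#                             {"from": "gpt", "value": "A"}]}
# yields [("Q", "A")]; a {"terse": ..., "rich": ...} record yields one
# (terse, rich) pair.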
def filter_jsonl(input_path: str, threshold: float = 0.4, output_path: str = None) -> dict:
"""Filter a JSONL file, removing low-quality pairs."""
path = Path(input_path)
if not path.exists():
return {"error": f"File not found: {input_path}"}
lines = path.read_text().strip().split("\n")
total = 0
kept = 0
removed = 0
scores_list = []
kept_lines = []
    for line in lines:
        line = line.strip()
        if not line:
            continue
        # Count every non-empty line, including undecodable ones, so the
        # removal_rate denominator matches what was actually examined.
        total += 1
        try:
            obj = json.loads(line)
        except json.JSONDecodeError:
            removed += 1
            continue
        pairs = extract_pairs(obj)
if not pairs:
# No extractable pairs — keep as-is (might be metadata)
kept += 1
kept_lines.append(line)
continue
# Score all pairs in this object
pair_scores = [score_pair(inp, out) for inp, out in pairs]
avg_composite = sum(s["composite"] for s in pair_scores) / len(pair_scores)
scores_list.append(avg_composite)
if avg_composite >= threshold:
kept += 1
kept_lines.append(line)
else:
removed += 1
# Write output
if output_path:
Path(output_path).write_text("\n".join(kept_lines) + "\n")
return {
"file": input_path,
"total": total,
"kept": kept,
"removed": removed,
"removal_rate": f"{removed}/{total}" if total > 0 else "0/0",
"avg_score": round(sum(scores_list) / len(scores_list), 3) if scores_list else None,
"min_score": round(min(scores_list), 3) if scores_list else None,
"max_score": round(max(scores_list), 3) if scores_list else None,
}
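# Example return value (illustrative numbers):
#   {"file": "data.jsonl", "total": 120, "kept": 103, "removed": 17,
#    "removal_rate": "17/120", "avg_score": 0.583, "min_score": 0.2, "max_score": 0.9}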
def main():
parser = argparse.ArgumentParser(description="Filter low-quality training data pairs")
parser.add_argument("input", nargs="?", help="Input JSONL file")
parser.add_argument("--threshold", type=float, default=0.4, help="Minimum quality score (0-1)")
parser.add_argument("--output", "-o", help="Output file (default: input_filtered.jsonl)")
parser.add_argument("--dir", help="Process all .jsonl files in directory")
parser.add_argument("--dry-run", action="store_true", help="Score only, don\'t write output")
args = parser.parse_args()
if args.dir:
dirpath = Path(args.dir)
jsonl_files = sorted(dirpath.rglob("*.jsonl"))
if not jsonl_files:
print(f"No .jsonl files found in {args.dir}")
sys.exit(1)
print(f"Processing {len(jsonl_files)} files (threshold={args.threshold})\n")
print(f"{'File':<50} {'Total':>6} {'Kept':>6} {'Removed':>8} {'Avg':>6}")
print("-" * 82)
grand_total = grand_kept = grand_removed = 0
for fpath in jsonl_files:
out = str(fpath).replace(".jsonl", "_filtered.jsonl") if not args.dry_run else None
result = filter_jsonl(str(fpath), args.threshold, out)
if "error" in result:
print(f"{str(fpath):<50} ERROR: {result['error']}")
continue
print(f"{fpath.name:<50} {result['total']:>6} {result['kept']:>6} {result['removed']:>8} {result['avg_score']:>6.3f}")
grand_total += result["total"]
grand_kept += result["kept"]
grand_removed += result["removed"]
print("-" * 82)
print(f"{'TOTAL':<50} {grand_total:>6} {grand_kept:>6} {grand_removed:>8}")
elif args.input:
out = args.output or args.input.replace(".jsonl", "_filtered.jsonl")
if args.dry_run:
out = None
result = filter_jsonl(args.input, args.threshold, out)
if "error" in result:
print(f"Error: {result['error']}")
sys.exit(1)
print(json.dumps(result, indent=2))
if out:
print(f"\nFiltered output written to: {out}")
else:
parser.print_help()
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision