Compare commits
9 Commits
fix/619-au...fix/687-tr
| Author | SHA1 | Date |
|---|---|---|
| | 223e1e5543 | |
| | d120526244 | |
| | 8596ff761b | |
| | 7553fd4f3e | |
| | 71082fe06f | |
| | 6d678e938e | |
| | ad751a6de6 | |
| | 130fa40f0c | |
| | 82f9810081 | |
@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Full Nostr agent-to-agent communication demo - FINAL WORKING
"""
@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Soul Eval Gate — The Conscience of the Training Pipeline
9 cron/pipeline-scheduler.yml Normal file
@@ -0,0 +1,9 @@
- name: Nightly Pipeline Scheduler
  schedule: '*/30 18-23,0-8 * * *' # Every 30 min, off-peak hours only
  tasks:
    - name: Check and start pipelines
      shell: "bash scripts/nightly-pipeline-scheduler.sh"
      env:
        PIPELINE_TOKEN_LIMIT: "500000"
        PIPELINE_PEAK_START: "9"
        PIPELINE_PEAK_END: "18"
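For orientation, the same off-peak window expressed as a plain crontab entry (a sketch only; how this yml is consumed by the cron runner is not shown in the diff, and the checkout path is a placeholder):

```bash
# Hypothetical plain-cron equivalent of the schedule above:
# every 30 minutes during 18:00-23:59 and 00:00-08:59 (off-peak).
*/30 18-23,0-8 * * * cd /path/to/hermes && PIPELINE_TOKEN_LIMIT=500000 bash scripts/nightly-pipeline-scheduler.sh
```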
@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision
@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision
50 scripts/nightly-pipeline-scheduler.md Normal file
@@ -0,0 +1,50 @@
# Nightly Pipeline Scheduler

Auto-starts batch pipelines when inference is available.

## What It Does

1. Checks inference provider health (OpenRouter, Ollama, RunPod)
2. Checks if it's off-peak hours (configurable, default: after 6 PM)
3. Checks interactive session load (don't fight with live users)
4. Checks daily token budget (configurable limit)
5. Starts the highest-priority incomplete pipeline

## Pipeline Priority Order

| Priority | Pipeline | Deps | Max Tokens |
|----------|----------|------|------------|
| 1 | playground-factory | none | 100,000 |
| 2 | training-factory | none | 150,000 |
| 3 | knowledge-mine | training-factory running | 80,000 |
| 4 | adversary | knowledge-mine running | 50,000 |
| 5 | codebase-genome | none | 120,000 |

## Usage

```bash
# Normal run (used by cron)
./scripts/nightly-pipeline-scheduler.sh

# Dry run (show what would start)
./scripts/nightly-pipeline-scheduler.sh --dry-run

# Status report
./scripts/nightly-pipeline-scheduler.sh --status

# Force start during peak hours
./scripts/nightly-pipeline-scheduler.sh --force
```

## Configuration

Set via environment variables:

- `PIPELINE_TOKEN_LIMIT`: Daily token budget (default: 500,000)
- `PIPELINE_PEAK_START`: Peak hours start (default: 9)
- `PIPELINE_PEAK_END`: Peak hours end (default: 18)
- `HERMES_HOME`: Hermes home directory (default: `~/.hermes`)

## Cron

Runs every 30 minutes. Off-peak only (unless `--force`).
See `cron/pipeline-scheduler.yml`.
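As a quick illustration of the configuration section above, a one-off dry run with a tighter budget and a narrower peak window (a sketch; only the documented variables are used, and the values are arbitrary):

```bash
# Dry-run with overridden budget and peak window (illustrative values)
PIPELINE_TOKEN_LIMIT=250000 \
PIPELINE_PEAK_START=10 \
PIPELINE_PEAK_END=16 \
./scripts/nightly-pipeline-scheduler.sh --dry-run
```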
383 scripts/nightly-pipeline-scheduler.sh Normal file
@@ -0,0 +1,383 @@
#!/usr/bin/env bash
# nightly-pipeline-scheduler.sh — Auto-start batch pipelines when inference is available.
#
# Checks provider health, pipeline progress, token budget, and interactive load.
# Starts the highest-priority incomplete pipeline that can run.
#
# Usage:
#   ./scripts/nightly-pipeline-scheduler.sh            # Normal run
#   ./scripts/nightly-pipeline-scheduler.sh --dry-run  # Show what would start
#   ./scripts/nightly-pipeline-scheduler.sh --status   # Pipeline status report

set -euo pipefail

# --- Configuration ---
HERMES_HOME="${HERMES_HOME:-$HOME/.hermes}"
BUDGET_FILE="${HERMES_HOME}/pipeline_budget.json"
STATE_FILE="${HERMES_HOME}/pipeline_state.json"
LOG_FILE="${HERMES_HOME}/logs/pipeline-scheduler.log"
TOKEN_DAILY_LIMIT="${PIPELINE_TOKEN_LIMIT:-500000}"
PEAK_HOURS_START="${PIPELINE_PEAK_START:-9}"
PEAK_HOURS_END="${PIPELINE_PEAK_END:-18}"

# Pipeline definitions (priority order)
# Each pipeline: name|script|max_tokens|dependency
PIPELINES=(
  "playground-factory|scripts/pipeline_playground_factory.sh|100000|none"
  "training-factory|scripts/pipeline_training_factory.sh|150000|none"
  "knowledge-mine|scripts/pipeline_knowledge_mine.sh|80000|training-factory"
  "adversary|scripts/pipeline_adversary.sh|50000|knowledge-mine"
  "codebase-genome|scripts/pipeline_codebase_genome.sh|120000|none"
)
# --- Colors ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
CYAN='\033[0;36m'
NC='\033[0m'

# --- Helpers ---
now_hour() { date +%-H; }
is_peak_hours() {
  local h=$(now_hour)
  [[ $h -ge $PEAK_HOURS_START && $h -lt $PEAK_HOURS_END ]]
}

ensure_dirs() {
  mkdir -p "$(dirname "$LOG_FILE")" "$(dirname "$BUDGET_FILE")" "$(dirname "$STATE_FILE")"
}

log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "$LOG_FILE"; }
get_budget_used_today() {
  if [[ -f "$BUDGET_FILE" ]]; then
    local today=$(date +%Y-%m-%d)
    python3 -c "
import json, sys
with open('$BUDGET_FILE') as f:
    d = json.load(f)
print(d.get('daily', {}).get('$today', {}).get('tokens_used', 0))
" 2>/dev/null || echo 0
  else
    echo 0
  fi
}

get_budget_remaining() {
  local used=$(get_budget_used_today)
  echo $((TOKEN_DAILY_LIMIT - used))
}

update_budget() {
  local pipeline="$1"
  local tokens="$2"
  local today=$(date +%Y-%m-%d)
  python3 -c "
import json, os
path = '$BUDGET_FILE'
d = {}
if os.path.exists(path):
    with open(path) as f:
        d = json.load(f)
daily = d.setdefault('daily', {})
day = daily.setdefault('$today', {'tokens_used': 0, 'pipelines': {}})
day['tokens_used'] = day.get('tokens_used', 0) + $tokens
day['pipelines']['$pipeline'] = day['pipelines'].get('$pipeline', 0) + $tokens
with open(path, 'w') as f:
    json.dump(d, f, indent=2)
"
}

get_pipeline_state() {
  if [[ -f "$STATE_FILE" ]]; then
    cat "$STATE_FILE"
  else
    echo "{}"
  fi
}

set_pipeline_state() {
  local pipeline="$1"
  local state="$2"  # running, complete, failed, skipped
  python3 -c "
import json, os
path = '$STATE_FILE'
d = {}
if os.path.exists(path):
    with open(path) as f:
        d = json.load(f)
d['$pipeline'] = {'state': '$state', 'updated': '$(date -Iseconds)'}
with open(path, 'w') as f:
    json.dump(d, f, indent=2)
"
}
is_pipeline_complete() {
  local pipeline="$1"
  python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('false')
else:
    with open(path) as f:
        d = json.load(f)
    state = d.get('$pipeline', {}).get('state', 'not_started')
    print('true' if state == 'complete' else 'false')
" 2>/dev/null || echo false
}

is_pipeline_running() {
  local pipeline="$1"
  python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('false')
else:
    with open(path) as f:
        d = json.load(f)
    state = d.get('$pipeline', {}).get('state', 'not_started')
    print('true' if state == 'running' else 'false')
" 2>/dev/null || echo false
}

check_dependency() {
  local dep="$1"
  if [[ "$dep" == "none" ]]; then
    return 0
  fi
  # For knowledge-mine: training-factory must be running or complete
  if [[ "$dep" == "training-factory" ]]; then
    local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('not_started')
else:
    with open(path) as f:
        d = json.load(f)
    print(d.get('training-factory', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
    [[ "$state" == "running" || "$state" == "complete" ]]
    return $?
  fi
  # For adversary: knowledge-mine must be at least 50% done
  # Simplified: check if it's running (we'd need progress tracking for 50%)
  if [[ "$dep" == "knowledge-mine" ]]; then
    local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('not_started')
else:
    with open(path) as f:
        d = json.load(f)
    print(d.get('knowledge-mine', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
    [[ "$state" == "running" || "$state" == "complete" ]]
    return $?
  fi
  return 0
}
check_inference_available() {
  # Check if any inference provider is responding
  # 1. Check OpenRouter
  local or_ok=$(curl -s -o /dev/null -w "%{http_code}" \
    --connect-timeout 5 "https://openrouter.ai/api/v1/models" 2>/dev/null || echo "000")

  # 2. Check local Ollama
  local ollama_ok=$(curl -s -o /dev/null -w "%{http_code}" \
    --connect-timeout 5 "http://localhost:11434/api/tags" 2>/dev/null || echo "000")

  # 3. Check RunPod (if configured)
  local runpod_ok="000"
  if [[ -n "${RUNPOD_ENDPOINT:-}" ]]; then
    runpod_ok=$(curl -s -o /dev/null -w "%{http_code}" \
      --connect-timeout 5 "$RUNPOD_ENDPOINT/health" 2>/dev/null || echo "000")
  fi

  if [[ "$or_ok" == "200" || "$ollama_ok" == "200" || "$runpod_ok" == "200" ]]; then
    return 0
  fi
  return 1
}

check_interactive_load() {
  # Check if there are active interactive sessions (don't fight with live users)
  # Look for tmux panes with active hermes sessions.
  # Note: grep -c prints "0" even when it matches nothing (exiting 1), so fall
  # back with || true; appending "|| echo 0" would emit a second line and break
  # the numeric comparison below.
  local active=$(tmux list-panes -a -F '#{pane_pid} #{pane_current_command}' 2>/dev/null \
    | grep -c "hermes\|python3" || true)
  active="${active:-0}"

  # If more than 3 interactive sessions, skip pipeline start
  if [[ $active -gt 3 ]]; then
    return 1
  fi
  return 0
}
start_pipeline() {
  local name="$1"
  local script="$2"
  local max_tokens="$3"
  local budget_remaining="$4"
  local mode="${5:-run}"

  if [[ "$budget_remaining" -lt "$max_tokens" ]]; then
    log "SKIP $name: insufficient budget ($budget_remaining < $max_tokens tokens)"
    return 1
  fi

  if [[ ! -f "$script" ]]; then
    log "SKIP $name: script not found ($script)"
    return 1
  fi

  # mode arrives as the raw CLI flag from main (e.g. --dry-run)
  if [[ "$mode" == "--dry-run" ]]; then
    log "DRY-RUN: Would start $name (budget: $budget_remaining, needs: $max_tokens)"
    return 0
  fi

  log "START $name (budget: $budget_remaining, max_tokens: $max_tokens)"
  set_pipeline_state "$name" "running"

  # Run in background, capture output
  local log_path="${HERMES_HOME}/logs/pipeline-${name}.log"
  bash "$script" --max-tokens "$max_tokens" >> "$log_path" 2>&1 &
  local pid=$!

  # Wait a moment to check if it started OK
  sleep 2
  if kill -0 "$pid" 2>/dev/null; then
    log "RUNNING $name (PID: $pid, log: $log_path)"
    # Record the PID (setdefault guards against a missing entry)
    python3 -c "
import json, os
path = '$STATE_FILE'
d = {}
if os.path.exists(path):
    with open(path) as f:
        d = json.load(f)
d.setdefault('$name', {})['pid'] = $pid
with open(path, 'w') as f:
    json.dump(d, f, indent=2)
"
    return 0
  else
    log "FAIL $name: script exited immediately"
    set_pipeline_state "$name" "failed"
    return 1
  fi
}
# --- Main ---
main() {
  local mode="${1:-run}"
  ensure_dirs

  log "=== Pipeline Scheduler ($mode) ==="

  # Check 1: Is inference available?
  if ! check_inference_available; then
    log "No inference provider available. Skipping all pipelines."
    exit 0
  fi
  log "Inference: AVAILABLE"

  # Check 2: Is it peak hours?
  if is_peak_hours && [[ "$mode" != "--force" ]]; then
    local h=$(now_hour)
    log "Peak hours ($h:00). Skipping pipeline start. Use --force to override."
    exit 0
  fi
  log "Off-peak: OK"

  # Check 3: Interactive load
  if ! check_interactive_load && [[ "$mode" != "--force" ]]; then
    log "High interactive load. Skipping pipeline start."
    exit 0
  fi
  log "Interactive load: OK"

  # Check 4: Token budget
  local budget=$(get_budget_remaining)
  log "Token budget remaining: $budget / $TOKEN_DAILY_LIMIT"

  if [[ $budget -le 0 ]]; then
    log "Daily token budget exhausted. Stopping."
    exit 0
  fi

  # Check 5: Pipeline status
  if [[ "$mode" == "--status" ]]; then
    echo -e "${CYAN}Pipeline Status:${NC}"
    echo "────────────────────────────────────────────────────"
    for entry in "${PIPELINES[@]}"; do
      IFS='|' read -r name script max_tokens dep <<< "$entry"
      local state=$(python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
    print('not_started')
else:
    with open(path) as f:
        d = json.load(f)
    print(d.get('$name', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")

      local color=$NC
      case "$state" in
        running) color=$YELLOW ;;
        complete) color=$GREEN ;;
        failed) color=$RED ;;
      esac
      printf "  %-25s %b%s%b (max: %s tokens, dep: %s)\n" "$name" "$color" "$state" "$NC" "$max_tokens" "$dep"
    done
    echo "────────────────────────────────────────────────────"
    echo "  Budget: $budget / $TOKEN_DAILY_LIMIT tokens remaining"
    echo "  Peak hours: $PEAK_HOURS_START:00 - $PEAK_HOURS_END:00"
    exit 0
  fi

  # Find and start the highest-priority incomplete pipeline
  local started=0
  for entry in "${PIPELINES[@]}"; do
    IFS='|' read -r name script max_tokens dep <<< "$entry"

    # Skip if already running or complete
    if [[ "$(is_pipeline_running "$name")" == "true" ]]; then
      log "SKIP $name: already running"
      continue
    fi
    if [[ "$(is_pipeline_complete "$name")" == "true" ]]; then
      log "SKIP $name: already complete"
      continue
    fi

    # Check dependency
    if ! check_dependency "$dep"; then
      log "SKIP $name: dependency $dep not met"
      continue
    fi

    # Try to start
    if start_pipeline "$name" "$script" "$max_tokens" "$budget" "$mode"; then
      started=1
      # Only start one pipeline per run (let it claim tokens before next check)
      # Exception: playground-factory and training-factory can run in parallel
      if [[ "$name" != "playground-factory" && "$name" != "training-factory" ]]; then
        break
      fi
    fi
  done

  if [[ $started -eq 0 ]]; then
    log "No pipelines to start (all complete, running, or blocked)."
  fi

  log "=== Pipeline Scheduler done ==="
}

main "$@"
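Since the scheduler's bookkeeping lives in plain JSON files, its view of the world can be checked by hand; a minimal sketch, assuming the default `HERMES_HOME` of `~/.hermes`:

```bash
# Pretty-print the scheduler's state and budget files (default paths assumed)
python3 -m json.tool ~/.hermes/pipeline_state.json
python3 -m json.tool ~/.hermes/pipeline_budget.json

# Expected shapes, per the inline python in the script above (values illustrative):
#   pipeline_state.json:  {"training-factory": {"state": "running", "updated": "...", "pid": 12345}}
#   pipeline_budget.json: {"daily": {"2025-01-01": {"tokens_used": 150000,
#                          "pipelines": {"training-factory": 150000}}}}
```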
266 scripts/training_quality_filter.py Normal file
@@ -0,0 +1,266 @@
#!/usr/bin/env python3
"""
[QUALITY] Training Data Quality Filter
Part of the Timmy Foundation tooling.

Scores and filters JSONL training pairs on specificity, length ratio,
and code correctness. Removes low-quality pairs and reports results.

Usage:
    python3 scripts/training_quality_filter.py input.jsonl -o filtered.jsonl
    python3 scripts/training_quality_filter.py input.jsonl --threshold 0.4
    cat input.jsonl | python3 scripts/training_quality_filter.py -
"""

import sys
import json
import argparse
import re
from typing import Dict, Any, Tuple

DEFAULT_THRESHOLD = 0.35
MIN_TERSE_LEN = 3
MIN_RICH_LEN = 10
def score_specificity(terse: str, rich: str) -> float:
    """Score how specific the rich response is vs the terse prompt.

    Higher score = more specific, actionable detail in the rich version.
    """
    if not terse or not rich:
        return 0.0

    # Ratio of unique words (higher = more varied/specific language)
    rich_words = rich.lower().split()
    terse_words = terse.lower().split()

    if len(rich_words) < 3:
        return 0.1

    unique_ratio = len(set(rich_words)) / len(rich_words)

    # Check for concrete details: numbers, file paths, commands, code refs
    concrete_patterns = [
        r"\b\d+\b",    # numbers
        r"[/\\]\w+",   # file paths
        r"`[^`]+`",    # inline code
        r"\b(fix|add|remove|update|create|delete|check|run|use)\b",  # action verbs
    ]
    concrete_count = sum(
        len(re.findall(p, rich, re.IGNORECASE)) for p in concrete_patterns
    )
    concrete_score = min(concrete_count / 5.0, 1.0)

    # Length expansion ratio (rich should be meaningfully longer than terse)
    expansion = len(rich_words) / max(len(terse_words), 1)
    expansion_score = min(expansion / 5.0, 1.0)

    return round(0.3 * unique_ratio + 0.4 * concrete_score + 0.3 * expansion_score, 3)


def score_length_ratio(terse: str, rich: str) -> float:
    """Score the length ratio between terse and rich.

    Too short rich = low quality. Too long = possibly padded.
    Sweet spot: 3-15x expansion.
    """
    if not terse or not rich:
        return 0.0

    t_len = len(terse.split())
    r_len = len(rich.split())

    if t_len < MIN_TERSE_LEN or r_len < MIN_RICH_LEN:
        return 0.1

    ratio = r_len / max(t_len, 1)

    if ratio < 1.5:
        return 0.2   # barely expanded
    elif ratio < 3.0:
        return 0.5   # some expansion
    elif ratio <= 15.0:
        return 1.0   # good expansion
    elif ratio <= 30.0:
        return 0.7   # possibly padded
    else:
        return 0.4   # very padded


def score_code_correctness(terse: str, rich: str) -> float:
    """Score code blocks in the rich response for basic correctness.

    Checks for matching brackets, valid-looking syntax patterns.
    """
    if not rich:
        return 0.5  # no code = neutral

    code_blocks = re.findall(r"```(?:\w*)\n(.*?)```", rich, re.DOTALL)
    if not code_blocks:
        return 0.5  # no code blocks = neutral

    scores = []
    for block in code_blocks:
        block_score = 1.0

        # Check bracket balance
        for open_c, close_c in [("(", ")"), ("[", "]"), ("{", "}")]:
            if block.count(open_c) != block.count(close_c):
                block_score -= 0.3

        # Check for common syntax errors
        if re.search(r"def \w+[^:]*\n(?!\s)", block):
            block_score -= 0.2  # missing colon or body

        # Minimum viable code length
        if len(block.strip()) < 10:
            block_score -= 0.3

        scores.append(max(block_score, 0.0))

    return round(sum(scores) / len(scores), 3) if scores else 0.5


def score_pair(pair: Dict[str, Any]) -> Tuple[float, Dict[str, float]]:
    """Score a single training pair. Returns (total_score, breakdown)."""
    terse = pair.get("terse", "") or pair.get("prompt", "") or ""
    rich = pair.get("rich", "") or pair.get("response", "") or ""

    spec = score_specificity(terse, rich)
    length = score_length_ratio(terse, rich)
    code = score_code_correctness(terse, rich)

    # Weighted total
    total = round(0.4 * spec + 0.3 * length + 0.3 * code, 3)

    return total, {"specificity": spec, "length_ratio": length, "code_correctness": code}
def filter_pairs(input_path: str, output_path: str, threshold: float,
                 report: bool = False) -> Dict[str, Any]:
    """Filter JSONL training pairs by quality score."""
    kept = []
    removed = []
    errors = 0

    source = sys.stdin if input_path == "-" else open(input_path, "r")

    try:
        for line_num, line in enumerate(source, 1):
            line = line.strip()
            if not line:
                continue
            try:
                pair = json.loads(line)
            except json.JSONDecodeError:
                errors += 1
                continue

            score, breakdown = score_pair(pair)
            entry = {**pair, "_quality_score": score, "_quality_breakdown": breakdown}

            if score >= threshold:
                kept.append(entry)
            else:
                removed.append(entry)
    finally:
        if source is not sys.stdin:
            source.close()

    # Write filtered output
    if output_path:
        out = sys.stdout if output_path == "-" else open(output_path, "w")
        try:
            for pair in kept:
                # Strip internal scoring fields before output
                clean = {k: v for k, v in pair.items() if not k.startswith("_quality")}
                out.write(json.dumps(clean, ensure_ascii=False) + "\n")
        finally:
            if out is not sys.stdout:  # compare against stdout, not stdin, so we never close stdout
                out.close()

    result = {
        "total": len(kept) + len(removed),
        "kept": len(kept),
        "filtered_out": len(removed),
        "errors": errors,
        "threshold": threshold,
        "filter_rate": round(len(removed) / max(len(kept) + len(removed), 1) * 100, 1),
    }

    if report and removed:
        # Show worst offenders
        removed_sorted = sorted(removed, key=lambda x: x["_quality_score"])
        result["worst_5"] = [
            {
                "score": e["_quality_score"],
                "terse": (e.get("terse", "") or e.get("prompt", ""))[:80],
                "breakdown": e["_quality_breakdown"],
            }
            for e in removed_sorted[:5]
        ]

    return result
def main():
    parser = argparse.ArgumentParser(description="Filter training data pairs by quality")
    parser.add_argument("input", help="Input JSONL file (use - for stdin)")
    parser.add_argument("-o", "--output", default="-", help="Output JSONL file (default: stdout)")
    parser.add_argument("-t", "--threshold", type=float, default=DEFAULT_THRESHOLD,
                        help=f"Quality threshold (0.0-1.0, default: {DEFAULT_THRESHOLD})")
    parser.add_argument("--report", action="store_true", help="Show quality report")
    parser.add_argument("--dry-run", action="store_true", help="Score only, don't filter")

    args = parser.parse_args()

    if args.dry_run:
        # Just score and report, no filtering
        source = sys.stdin if args.input == "-" else open(args.input, "r")
        scores = []
        try:
            for line in source:
                line = line.strip()
                if not line:
                    continue
                try:
                    pair = json.loads(line)
                except json.JSONDecodeError:
                    continue
                score, _breakdown = score_pair(pair)
                scores.append(score)
        finally:
            if source is not sys.stdin:
                source.close()

        if scores:
            avg = sum(scores) / len(scores)
            below = sum(1 for s in scores if s < args.threshold)
            print(f"Total pairs: {len(scores)}")
            print(f"Average score: {avg:.3f}")
            print(f"Below threshold ({args.threshold}): {below} ({below/len(scores)*100:.1f}%)")
            print(f"Min: {min(scores):.3f}  Max: {max(scores):.3f}  Median: {sorted(scores)[len(scores)//2]:.3f}")
        return

    result = filter_pairs(args.input, args.output, args.threshold, report=args.report)

    print("Training Data Quality Filter", file=sys.stderr)
    print(f"{'='*40}", file=sys.stderr)
    print(f"Total pairs: {result['total']}", file=sys.stderr)
    print(f"Kept: {result['kept']}", file=sys.stderr)
    print(f"Filtered out: {result['filtered_out']} ({result['filter_rate']}%)", file=sys.stderr)
    print(f"Errors: {result['errors']}", file=sys.stderr)
    print(f"Threshold: {result['threshold']}", file=sys.stderr)

    if args.report and "worst_5" in result:
        print("\nWorst 5 pairs:", file=sys.stderr)
        for w in result["worst_5"]:
            terse_preview = w["terse"][:60]
            print(f"  [{w['score']:.3f}] {terse_preview}...", file=sys.stderr)
            bd = w["breakdown"]
            print(f"    spec={bd['specificity']} length={bd['length_ratio']} code={bd['code_correctness']}", file=sys.stderr)


if __name__ == "__main__":
    main()
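A typical invocation against a generated dataset, using only the flags defined in `main()` above (`pairs.jsonl` is a placeholder filename):

```bash
# Score a dataset without filtering, then filter with a report of the worst pairs
python3 scripts/training_quality_filter.py pairs.jsonl --dry-run
python3 scripts/training_quality_filter.py pairs.jsonl -o filtered.jsonl --threshold 0.4 --report
```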
@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision