Compare commits
6 Commits
fix/issue-
...
fix/687-tr
| Author | SHA1 | Date |
|---|---|---|
| | d8921630a5 | |
| | d120526244 | |
| | 8596ff761b | |
| | 7553fd4f3e | |
| | 71082fe06f | |
| | 6d678e938e | |
@@ -1,3 +1,4 @@
```python
#!/usr/bin/env python3
"""
Full Nostr agent-to-agent communication demo - FINAL WORKING
"""
```
@@ -1,3 +1,4 @@
```python
#!/usr/bin/env python3
"""
Soul Eval Gate — The Conscience of the Training Pipeline
```
@@ -1,53 +1,6 @@
```python
"""Sovereign orchestration — Huey replaces 3,843 lines of homebrew."""

import json
import os
from datetime import datetime, timezone
from pathlib import Path

from huey import SqliteHuey, crontab, signals

huey = SqliteHuey(filename=str(Path.home() / ".hermes" / "orchestration.db"))

# === Token Tracking ===
TOKEN_LOG = Path.home() / ".hermes" / "token_usage.jsonl"


def log_token_usage(task_name, result):
    """Log token usage from a completed pipeline task.

    Reads input_tokens/output_tokens from the agent result dict.
    Auto-detects pipeline name from task context.
    Appends to JSONL for downstream analysis.
    """
    if not isinstance(result, dict):
        return

    input_tokens = result.get("input_tokens", 0)
    output_tokens = result.get("output_tokens", 0)

    if input_tokens == 0 and output_tokens == 0:
        return

    # Auto-detect pipeline name from task function name
    pipeline = task_name.replace("_task", "").replace("_", "-")

    entry = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "pipeline": pipeline,
        "input_tokens": input_tokens,
        "output_tokens": output_tokens,
        "total_tokens": input_tokens + output_tokens,
        "task": task_name,
    }

    TOKEN_LOG.parent.mkdir(parents=True, exist_ok=True)
    with open(TOKEN_LOG, "a") as f:
        f.write(json.dumps(entry) + "\n")


@huey.signal(signals.SIGNAL_COMPLETE)
def on_task_complete(signal, task, task_value=None, **kwargs):
    """Huey hook: log token usage after each pipeline task completes."""
    task_name = getattr(task, "name", "unknown")
    log_token_usage(task_name, task_value)
```
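For orientation, a minimal sketch of the producer side: any Huey task that returns a dict carrying `input_tokens`/`output_tokens` feeds the hook above, assuming Huey hands the task's return value to the handler via the `task_value` parameter the signature anticipates. The schedule, task name, and token counts below are hypothetical, not part of this diff.

```python
# Hypothetical pipeline task (illustrative only; not in this diff).
# "soul_eval_task" would be logged under pipeline name "soul-eval".
@huey.periodic_task(crontab(hour="3", minute="0"))
def soul_eval_task():
    # ... run the agent pipeline here, then report its token counts ...
    return {"input_tokens": 1200, "output_tokens": 450}
```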
@@ -1,3 +1,4 @@
```python
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision
```
@@ -1,3 +1,4 @@
```python
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision
```
scripts/training_data_quality_filter.py (new file, 286 lines)
@@ -0,0 +1,286 @@
```python
#!/usr/bin/env python3
"""
Training Data Quality Filter

Scores and removes low-quality training pairs from JSONL datasets.
Supports two formats:
- ShareGPT session format: {"conversations": [...], ...}
- Scene/pair format: {"terse": "...", "rich": "..."} or {"lyric_line": "...", "scene": {...}}

Scoring dimensions:
- Specificity: penalizes vague/generic content
- Length ratio: penalizes extreme input/output imbalances
- Code correctness: validates code blocks have matching fences

Usage:
    python3 scripts/training_data_quality_filter.py input.jsonl [--threshold 0.4] [--output filtered.jsonl]
    python3 scripts/training_data_quality_filter.py --dir training-data/ [--threshold 0.4]
"""

import argparse
import json
import re
import sys
from pathlib import Path


def score_specificity(text: str) -> float:
    """Score 0-1 based on how specific vs generic the text is."""
    if not text or len(text.strip()) < 10:
        return 0.0

    score = 0.5  # baseline

    # Penalize very generic starters
    generic_starters = [
        "sure,", "of course", "i can help", "here is", "here are",
        "certainly", "absolutely", "let me help", "great question",
        "that's a great", "interesting question",
    ]
    lower = text.lower().strip()
    for starter in generic_starters:
        if lower.startswith(starter):
            score -= 0.15
            break

    # Reward specific content indicators
    if re.search(r"`[^`]+`", text):  # inline code
        score += 0.1
    if re.search(r"```[\s\S]*?```", text):  # code blocks
        score += 0.15
    if re.search(r"\d+\.\s", text):  # numbered lists
        score += 0.05
    if len(text.split()) > 50:  # substantial length
        score += 0.1
    if re.search(r"https?://", text):  # URLs/references
        score += 0.05

    # Penalize extremely short outputs
    if len(text.split()) < 5:
        score -= 0.2

    # Penalize repetition (same sentence repeated)
    sentences = re.split(r"[.!?]+", text)
    sentences = [s.strip().lower() for s in sentences if s.strip()]
    if sentences:
        unique_ratio = len(set(sentences)) / len(sentences)
        if unique_ratio < 0.7:
            score -= 0.15

    return max(0.0, min(1.0, score))


def score_length_ratio(input_text: str, output_text: str) -> float:
    """Score 0-1 based on input/output length balance."""
    in_len = len(input_text.split())
    out_len = len(output_text.split())

    if in_len == 0 or out_len == 0:
        return 0.0

    ratio = out_len / in_len

    # Ideal ratio: 0.5-5x (output can be shorter or longer, but not extreme)
    if 0.5 <= ratio <= 5.0:
        return 1.0
    elif 0.2 <= ratio <= 10.0:
        return 0.6
    elif 0.1 <= ratio <= 20.0:
        return 0.3
    else:
        return 0.1


def score_code_correctness(text: str) -> float:
    """Score 0-1 based on code block correctness."""
    code_blocks = re.findall(r"```[\s\S]*?```", text)

    if not code_blocks:
        return 1.0  # no code = no code errors

    for block in code_blocks:
        # Check balanced fences
        fence_count = block.count("```")
        if fence_count % 2 != 0:
            return 0.2

        # Check for common errors
        content = block.split("\n", 1)[-1] if "\n" in block else ""
        if "SyntaxError" in content or "Traceback" in content:
            return 0.3
        if content.strip().endswith("...") and len(content.strip()) < 30:
            return 0.4  # truncated code

    return 1.0


def score_pair(input_text: str, output_text: str) -> dict:
    """Score a training pair on all dimensions."""
    spec = score_specificity(output_text)
    length = score_length_ratio(input_text, output_text)
    code = score_code_correctness(output_text)

    # Weighted composite
    composite = (spec * 0.4) + (length * 0.3) + (code * 0.3)

    return {
        "specificity": round(spec, 3),
        "length_ratio": round(length, 3),
        "code_correctness": round(code, 3),
        "composite": round(composite, 3),
    }


def extract_pairs(obj: dict) -> list:
    """Extract (input, output) pairs from a JSONL object."""
    pairs = []

    # ShareGPT session format
    if "conversations" in obj:
        convs = obj["conversations"]
        for i, msg in enumerate(convs):
            if msg.get("from") in ("gpt", "assistant"):
                # Find preceding human message
                input_text = ""
                for j in range(i - 1, -1, -1):
                    if convs[j].get("from") == "human":
                        input_text = convs[j].get("value", "")
                        break
                output_text = msg.get("value", "")
                if input_text and output_text:
                    pairs.append((input_text, output_text))

    # Scene/pair format (terse/rich)
    elif "terse" in obj and "rich" in obj:
        pairs.append((obj["terse"], obj["rich"]))

    # Scene description format
    elif "lyric_line" in obj and "scene" in obj:
        scene_text = json.dumps(obj["scene"]) if isinstance(obj["scene"], dict) else str(obj["scene"])
        pairs.append((obj["lyric_line"], scene_text))

    # Generic prompt/response
    elif "prompt" in obj and "response" in obj:
        pairs.append((obj["prompt"], obj["response"]))

    # Generic input/output
    elif "input" in obj and "output" in obj:
        pairs.append((obj["input"], obj["output"]))

    return pairs


def filter_jsonl(input_path: str, threshold: float = 0.4, output_path: str = None) -> dict:
    """Filter a JSONL file, removing low-quality pairs."""
    path = Path(input_path)
    if not path.exists():
        return {"error": f"File not found: {input_path}"}

    lines = path.read_text().strip().split("\n")
    total = 0
    kept = 0
    removed = 0
    scores_list = []
    kept_lines = []

    for line in lines:
        line = line.strip()
        if not line:
            continue

        try:
            obj = json.loads(line)
        except json.JSONDecodeError:
            removed += 1
            continue

        pairs = extract_pairs(obj)
        total += 1

        if not pairs:
            # No extractable pairs — keep as-is (might be metadata)
            kept += 1
            kept_lines.append(line)
            continue

        # Score all pairs in this object
        pair_scores = [score_pair(inp, out) for inp, out in pairs]
        avg_composite = sum(s["composite"] for s in pair_scores) / len(pair_scores)

        scores_list.append(avg_composite)

        if avg_composite >= threshold:
            kept += 1
            kept_lines.append(line)
        else:
            removed += 1

    # Write output
    if output_path:
        Path(output_path).write_text("\n".join(kept_lines) + "\n")

    return {
        "file": input_path,
        "total": total,
        "kept": kept,
        "removed": removed,
        "removal_rate": f"{removed}/{total}" if total > 0 else "0/0",
        "avg_score": round(sum(scores_list) / len(scores_list), 3) if scores_list else None,
        "min_score": round(min(scores_list), 3) if scores_list else None,
        "max_score": round(max(scores_list), 3) if scores_list else None,
    }


def main():
    parser = argparse.ArgumentParser(description="Filter low-quality training data pairs")
    parser.add_argument("input", nargs="?", help="Input JSONL file")
    parser.add_argument("--threshold", type=float, default=0.4, help="Minimum quality score (0-1)")
    parser.add_argument("--output", "-o", help="Output file (default: input_filtered.jsonl)")
    parser.add_argument("--dir", help="Process all .jsonl files in directory")
    parser.add_argument("--dry-run", action="store_true", help="Score only, don't write output")
    args = parser.parse_args()

    if args.dir:
        dirpath = Path(args.dir)
        jsonl_files = sorted(dirpath.rglob("*.jsonl"))
        if not jsonl_files:
            print(f"No .jsonl files found in {args.dir}")
            sys.exit(1)

        print(f"Processing {len(jsonl_files)} files (threshold={args.threshold})\n")
        print(f"{'File':<50} {'Total':>6} {'Kept':>6} {'Removed':>8} {'Avg':>6}")
        print("-" * 82)

        grand_total = grand_kept = grand_removed = 0
        for fpath in jsonl_files:
            out = str(fpath).replace(".jsonl", "_filtered.jsonl") if not args.dry_run else None
            result = filter_jsonl(str(fpath), args.threshold, out)
            if "error" in result:
                print(f"{str(fpath):<50} ERROR: {result['error']}")
                continue
            print(f"{fpath.name:<50} {result['total']:>6} {result['kept']:>6} {result['removed']:>8} {result['avg_score']:>6.3f}")
            grand_total += result["total"]
            grand_kept += result["kept"]
            grand_removed += result["removed"]

        print("-" * 82)
        print(f"{'TOTAL':<50} {grand_total:>6} {grand_kept:>6} {grand_removed:>8}")

    elif args.input:
        out = args.output or args.input.replace(".jsonl", "_filtered.jsonl")
        if args.dry_run:
            out = None
        result = filter_jsonl(args.input, args.threshold, out)
        if "error" in result:
            print(f"Error: {result['error']}")
            sys.exit(1)
        print(json.dumps(result, indent=2))
        if out:
            print(f"\nFiltered output written to: {out}")
    else:
        parser.print_help()
        sys.exit(1)


if __name__ == "__main__":
    main()
```
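The composite weighting is easy to sanity-check by hand: a response with a fenced code block (+0.15) and more than 50 words (+0.1) over the 0.5 baseline scores 0.75 on specificity, and with a balanced length ratio and clean fences the composite sits well above the default threshold. A minimal trace, with the three dimension scores assumed rather than computed from a real pair:

```python
# Hand-traced composite using score_pair's weights (0.4 / 0.3 / 0.3).
# The dimension scores are assumed example values, not real data.
spec, length, code = 0.75, 1.0, 1.0
composite = spec * 0.4 + length * 0.3 + code * 0.3
print(round(composite, 3))  # 0.9 -> kept at the default --threshold 0.4
```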
@@ -1,3 +1,4 @@
```python
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision
```