Compare commits
3 Commits
fix/627
...
burn/750-1
| Author | SHA1 | Date | |
|---|---|---|---|
| 74f8088ab1 | |||
| 29fa9d50aa | |||
| 78e2e81704 |
138
scripts/normalize-code-blocks.py
Normal file
138
scripts/normalize-code-blocks.py
Normal file
@@ -0,0 +1,138 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
normalize-code-blocks.py — Fix inconsistent indentation in training data code blocks.
|
||||
|
||||
When code blocks are embedded in JSONL as triple-quoted strings, indentation
|
||||
accumulates from the surrounding context. This script normalizes code block
|
||||
content using textwrap.dedent and consistent 4-space indentation.
|
||||
|
||||
Usage:
|
||||
python3 scripts/normalize-code-blocks.py training/data/preference_pairs.jsonl
|
||||
python3 scripts/normalize-code-blocks.py --dry-run training/data/*.jsonl
|
||||
python3 scripts/normalize-code-blocks.py --check training/data/*.jsonl # CI mode
|
||||
"""
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import textwrap
|
||||
from pathlib import Path
|
||||
|
||||
# Matches fenced code blocks inside string values: ```python ... ``` or a
# bare ``` ... ```. DOTALL lets <code> span newlines; VERBOSE only affects
# pattern layout, not what is matched.
CODE_BLOCK_RE = re.compile(
    r"""(?P<open>```(?:python|py|bash|sh|javascript|js|typescript|ts|go|rust|ruby)?\s*\n)
        (?P<code>.*?)
        (?P<close>```)""",
    re.DOTALL | re.VERBOSE,
)
|
||||
|
||||
|
||||
def normalize_code_block(match: re.Match) -> str:
    """Rebuild one fenced code block with dedented, blank-trimmed content.

    The opening fence (including any language tag) and the closing fence
    are kept untouched; only the code between them is rewritten. A block
    whose body is pure whitespace is returned exactly as matched.
    """
    body = match.group("code")
    if not body.strip():
        # Nothing to normalize — preserve the block byte-for-byte.
        return match.group(0)

    # Remove the common leading indentation, then locate the slice of rows
    # that excludes blank padding at the top and bottom of the block.
    rows = textwrap.dedent(body).split("\n")
    start = 0
    while start < len(rows) and not rows[start].strip():
        start += 1
    end = len(rows)
    while end > start and not rows[end - 1].strip():
        end -= 1

    cleaned = "\n".join(rows[start:end])
    return f"{match.group('open')}{cleaned}\n{match.group('close')}"
|
||||
|
||||
|
||||
def process_line(line: str) -> tuple:
    """Normalize code blocks in every string value of one JSONL record.

    Walks the decoded object recursively (dicts, lists, strings) and runs
    CODE_BLOCK_RE normalization over each string leaf.

    Returns:
        (new_line, num_fixes). Lines that are not valid JSON — or that
        need no fixes — are returned unchanged, so untouched records keep
        their exact original serialization (minimal diffs on disk).
    """
    try:
        obj = json.loads(line)
    except json.JSONDecodeError:
        # Not JSON — leave the line alone rather than guessing.
        return line, 0

    fixes = 0

    def fix_strings(node):
        """Recursively rewrite code blocks in all string leaves of *node*."""
        nonlocal fixes
        if isinstance(node, str):
            fixed = CODE_BLOCK_RE.sub(normalize_code_block, node)
            if fixed != node:
                fixes += 1
            return fixed
        if isinstance(node, dict):
            return {k: fix_strings(v) for k, v in node.items()}
        if isinstance(node, list):
            return [fix_strings(item) for item in node]
        return node

    fixed_obj = fix_strings(obj)
    if fixes == 0:
        # Nothing changed: return the original bytes instead of
        # re-serializing, which could alter escaping/whitespace and
        # create noisy diffs for records that needed no fixes.
        return line, 0
    return json.dumps(fixed_obj, ensure_ascii=False) + "\n", fixes
|
||||
|
||||
|
||||
def process_file(filepath: str, dry_run: bool = False) -> dict:
    """Normalize every record of one JSONL file, optionally in place.

    Blank lines pass through untouched. The file is rewritten only when at
    least one code block changed and dry_run is False.

    Returns a stats dict with keys: file, lines, fixes, changed — or an
    ``error`` entry when the path does not exist.
    """
    path = Path(filepath)
    if not path.exists():
        return {"file": str(filepath), "error": "not found", "fixes": 0, "lines": 0}

    source_lines = path.read_text(encoding="utf-8").splitlines()
    output_lines = []
    total_fixes = 0

    for raw in source_lines:
        if not raw.strip():
            output_lines.append(raw)
            continue
        updated, count = process_line(raw)
        total_fixes += count
        output_lines.append(updated.rstrip("\n"))

    # Only touch the file when something actually changed and we are
    # allowed to write.
    if total_fixes > 0 and not dry_run:
        path.write_text("\n".join(output_lines) + "\n", encoding="utf-8")

    return {
        "file": str(filepath),
        "lines": len(source_lines),
        "fixes": total_fixes,
        "changed": total_fixes > 0,
    }
|
||||
|
||||
|
||||
def main():
    """CLI entry point: normalize code blocks across the given JSONL files."""
    parser = argparse.ArgumentParser(
        description="Normalize code block indentation in JSONL training data"
    )
    parser.add_argument("files", nargs="+", help="JSONL files to process")
    parser.add_argument("--dry-run", action="store_true", help="Show changes without writing")
    parser.add_argument("--check", action="store_true", help="CI mode: exit 1 if fixes needed")
    args = parser.parse_args()

    # --check implies a read-only pass, exactly like --dry-run.
    read_only = args.dry_run or args.check

    results = []
    total_fixes = 0
    for target in args.files:
        stats = process_file(target, dry_run=read_only)
        results.append(stats)
        total_fixes += stats["fixes"]

        if stats["fixes"] > 0:
            status = "WOULD FIX" if read_only else "FIXED"
            print(f" {status}: {stats['file']} — {stats['fixes']} code blocks normalized")
        else:
            print(f" OK: {stats['file']}")

    print(f"\nTotal: {total_fixes} code blocks normalized across {len(results)} files")

    if args.check and total_fixes > 0:
        print("FAIL: Code block indentation issues found. Run without --check to fix.")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
@@ -1,158 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Quality Gate Integration — Pipeline Orchestrator Hook
|
||||
|
||||
Integrates the standalone quality gate with the pipeline orchestrator.
|
||||
Validates outputs before saving. Handles rejection and re-queue.
|
||||
|
||||
Usage:
|
||||
from quality_gate_integration import validate_before_save
|
||||
result = validate_before_save(output, pipeline_name="training-data")
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
# Quality thresholds, keyed by pipeline name. Each entry defines:
#   min_length / max_length — accepted output size in characters
#   require_json            — whether the output must parse as JSON
#   require_fields          — top-level keys that must be present (JSON only)
#   reject_patterns         — case-insensitive substrings that fail validation
QUALITY_THRESHOLDS = {
    "training-data": {
        "min_length": 50,
        "max_length": 50000,
        "require_json": True,
        "require_fields": ["description"],
        "reject_patterns": ["TODO", "FIXME", "PLACEHOLDER", "lorem ipsum"],
    },
    "scene-descriptions": {
        "min_length": 30,
        "max_length": 2000,
        "require_json": True,
        "require_fields": ["mood", "colors", "description"],
        "reject_patterns": ["TODO", "FIXME"],
    },
    # Fallback for pipelines without a dedicated entry above.
    "default": {
        "min_length": 10,
        "max_length": 100000,
        "require_json": False,
        "require_fields": [],
        "reject_patterns": ["TODO", "FIXME"],
    },
}

# Stats tracking: pass/reject counters persisted between runs.
STATS_FILE = Path.home() / ".hermes" / "quality-gate-stats.json"
|
||||
|
||||
|
||||
def load_stats() -> dict:
    """Load persisted quality-gate counters, or a zeroed structure.

    A missing, unreadable, or corrupt stats file falls back to empty
    counters rather than raising — stats are best-effort bookkeeping.
    """
    try:
        return json.loads(STATS_FILE.read_text())
    except (OSError, ValueError):
        # OSError: file missing/unreadable. ValueError covers
        # json.JSONDecodeError for a corrupt file. (Previously a blanket
        # `except Exception`, which could also hide programming errors.)
        return {"total": 0, "passed": 0, "rejected": 0, "by_pipeline": {}}
|
||||
|
||||
|
||||
def save_stats(stats: dict):
    """Persist quality-gate counters to STATS_FILE, creating parent dirs."""
    STATS_FILE.parent.mkdir(parents=True, exist_ok=True)
    # Trailing newline keeps the JSON file POSIX-friendly. (The "\n" literal
    # was garbled across two physical lines in the pasted source.)
    STATS_FILE.write_text(json.dumps(stats, indent=2) + "\n")
|
||||
|
||||
|
||||
def validate_output(output: str, pipeline: str = "default") -> dict:
    """Validate output against quality gate thresholds.

    Checks, in order: length bounds, JSON well-formedness plus required
    top-level fields (when the pipeline demands JSON), and rejected
    substrings (case-insensitive).

    Returns a dict with: valid, errors, pipeline, output_length, checked_at.
    """
    thresholds = QUALITY_THRESHOLDS.get(pipeline, QUALITY_THRESHOLDS["default"])
    errors = []

    # Length check
    if len(output) < thresholds["min_length"]:
        errors.append(f"Too short: {len(output)} < {thresholds['min_length']} chars")
    if len(output) > thresholds["max_length"]:
        errors.append(f"Too long: {len(output)} > {thresholds['max_length']} chars")

    # JSON check
    if thresholds["require_json"]:
        try:
            data = json.loads(output)
        except json.JSONDecodeError:
            errors.append("Not valid JSON")
        else:
            if isinstance(data, dict):
                for field in thresholds["require_fields"]:
                    if field not in data:
                        errors.append(f"Missing required field: {field}")
            else:
                # Valid JSON whose root is not an object: every required
                # field is absent. (The original membership test could
                # raise TypeError on e.g. an int root, or do a substring
                # check on a string root.)
                for field in thresholds["require_fields"]:
                    errors.append(f"Missing required field: {field}")

    # Pattern rejection (case-insensitive substring match)
    output_lower = output.lower()
    for pattern in thresholds["reject_patterns"]:
        if pattern.lower() in output_lower:
            errors.append(f"Contains rejected pattern: {pattern}")

    return {
        "valid": len(errors) == 0,
        "errors": errors,
        "pipeline": pipeline,
        "output_length": len(output),
        "checked_at": datetime.now(timezone.utc).isoformat(),
    }
|
||||
|
||||
|
||||
def validate_before_save(output: str, pipeline: str = "default",
                         re_queue_on_fail: bool = True) -> dict:
    """Run the quality gate on *output* and record the outcome.

    Updates the persisted pass/reject counters (global and per-pipeline)
    and returns a decision dict: action ("save", "re_queue" or "reject"),
    the full validation result, and aggregate stats.
    """
    result = validate_output(output, pipeline)
    outcome = "passed" if result["valid"] else "rejected"

    # Update the persisted counters, both globally and per pipeline.
    stats = load_stats()
    stats["total"] = stats.get("total", 0) + 1
    stats[outcome] = stats.get(outcome, 0) + 1

    per_pipeline = stats.setdefault("by_pipeline", {}).setdefault(
        pipeline, {"total": 0, "passed": 0, "rejected": 0}
    )
    per_pipeline["total"] += 1
    per_pipeline[outcome] += 1
    save_stats(stats)

    if result["valid"]:
        action = "save"
    elif re_queue_on_fail:
        action = "re_queue"
    else:
        action = "reject"

    return {
        "action": action,
        "validation": result,
        "stats": {
            "total": stats["total"],
            "pass_rate": stats["passed"] / max(stats["total"], 1),
        },
    }
|
||||
|
||||
|
||||
def get_quality_report() -> str:
    """Generate a human-readable Markdown quality gate report."""
    stats = load_stats()
    lines = [
        "# Quality Gate Report",
        "",
        f"Total validations: {stats.get('total', 0)}",
        f"Passed: {stats.get('passed', 0)}",
        f"Rejected: {stats.get('rejected', 0)}",
        f"Pass rate: {stats.get('passed', 0) / max(stats.get('total', 1), 1):.0%}",
        "",
    ]
    for pipeline, pstats in stats.get("by_pipeline", {}).items():
        rate = pstats.get("passed", 0) / max(pstats.get("total", 1), 1)
        lines.append(f"- {pipeline}: {pstats.get('total', 0)} total, {rate:.0%} pass rate")
    # Join with newlines. (The "\n" literal was garbled across two physical
    # lines in the pasted source.)
    return "\n".join(lines)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Tiny CLI: `report` prints aggregate stats; otherwise validate a single
    # output string against a named pipeline.
    argv = sys.argv
    if len(argv) > 1 and argv[1] == "report":
        print(get_quality_report())
    elif len(argv) > 2:
        decision = validate_before_save(argv[2], argv[1])
        print(json.dumps(decision, indent=2))
    else:
        print("Usage: quality_gate_integration.py [report|PIPELINE_NAME OUTPUT]")
|
||||
151
tests/test_normalize_code_blocks.py
Normal file
151
tests/test_normalize_code_blocks.py
Normal file
@@ -0,0 +1,151 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for normalize-code-blocks.py — issue #750"""
|
||||
import json
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
# Import from scripts/. The script file is named with hyphens
# ("normalize-code-blocks.py"), which is not a valid Python module name, so
# `from normalize_code_blocks import ...` via sys.path can never succeed —
# load the module from its file path with importlib instead.
import importlib.util

_SCRIPT = Path(__file__).resolve().parent.parent / "scripts" / "normalize-code-blocks.py"
_spec = importlib.util.spec_from_file_location("normalize_code_blocks", _SCRIPT)
_module = importlib.util.module_from_spec(_spec)
_spec.loader.exec_module(_module)

normalize_code_block = _module.normalize_code_block
process_line = _module.process_line
process_file = _module.process_file
CODE_BLOCK_RE = _module.CODE_BLOCK_RE
|
||||
|
||||
|
||||
class TestCodeBlockRegex:
    """Behavioral checks for the fenced-code-block pattern."""

    def test_matches_python_block(self):
        assert CODE_BLOCK_RE.search("```python\nprint('hi')\n```")

    def test_matches_plain_block(self):
        assert CODE_BLOCK_RE.search("```\nsome code\n```")

    def test_matches_bash_block(self):
        assert CODE_BLOCK_RE.search("```bash\necho hello\n```")

    def test_ignores_inline_backticks(self):
        # Single backticks around inline code must not look like a fence.
        assert CODE_BLOCK_RE.search("Use `code` inline") is None

    def test_handles_multiline_code(self):
        sample = "```python\ndef foo():\n return 1\n\ndef bar():\n return 2\n```"
        found = CODE_BLOCK_RE.search(sample)
        assert found
        assert "def foo" in found.group("code")
|
||||
|
||||
|
||||
class TestNormalizeCodeBlock:
    """normalize_code_block should dedent and trim without touching fences."""

    def test_strips_leading_indent(self):
        m = CODE_BLOCK_RE.search("```python\n print('hi')\n```")
        out = normalize_code_block(m)
        assert " print" not in out
        assert "print('hi')" in out

    def test_dedents_mixed_indent(self):
        fenced = "```python\n def foo():\n return 1\n def bar():\n return 2\n```"
        out = normalize_code_block(CODE_BLOCK_RE.search(fenced))
        # After dedent, the first code line (non-fence, non-blank) sits at
        # column zero.
        body_rows = [row for row in out.split("\n") if row.strip() and not row.startswith("```")]
        assert body_rows[0].startswith("def foo")

    def test_strips_trailing_blank_lines(self):
        m = CODE_BLOCK_RE.search("```python\nprint('hi')\n\n\n```")
        assert normalize_code_block(m).endswith("print('hi')\n```")

    def test_preserves_language_tag(self):
        m = CODE_BLOCK_RE.search("```python\n x = 1\n```")
        assert normalize_code_block(m).startswith("```python")

    def test_empty_block_unchanged(self):
        # Whitespace-only bodies are returned exactly as matched.
        m = CODE_BLOCK_RE.search("```python\n \n```")
        assert normalize_code_block(m) == m.group(0)

    def test_diff_markers_preserved(self):
        fenced = "```\n+def new_func():\n+ return 1\n-def old_func():\n- return 0\n```"
        out = normalize_code_block(CODE_BLOCK_RE.search(fenced))
        assert "+def new_func" in out
        assert "-def old_func" in out
|
||||
|
||||
|
||||
class TestProcessLine:
    """process_line should fix code blocks inside JSONL records only."""

    def test_valid_json_no_code_blocks(self):
        _, fixes = process_line(json.dumps({"prompt": "hello world"}))
        assert fixes == 0

    def test_valid_json_with_code_block(self):
        record = json.dumps({"prompt": "Here is code:\n```python\n x = 1\n```"})
        updated, fixes = process_line(record)
        assert fixes == 1
        assert " x = 1" not in json.loads(updated)["prompt"]

    def test_nested_dict_code_blocks(self):
        record = json.dumps({
            "prompt": "code: ```python\n a = 1\n```",
            "chosen": "```python\n b = 2\n```",
        })
        _, fixes = process_line(record)
        assert fixes == 2

    def test_invalid_json_returned_unchanged(self):
        bad = "{broken json"
        updated, fixes = process_line(bad)
        assert updated == bad
        assert fixes == 0

    def test_list_field_code_blocks(self):
        record = json.dumps({"items": ["```python\n x = 1\n```", "no code here"]})
        _, fixes = process_line(record)
        assert fixes == 1
|
||||
|
||||
|
||||
class TestProcessFile:
    """End-to-end checks on whole JSONL files via pytest's tmp_path."""

    def test_fixes_file_in_place(self, tmp_path):
        target = tmp_path / "test.jsonl"
        records = [
            json.dumps({"prompt": "```python\n x = 1\n```"}),
            json.dumps({"prompt": "no code"}),
        ]
        target.write_text("\n".join(records) + "\n")

        stats = process_file(str(target))
        assert stats["fixes"] == 1
        assert stats["lines"] == 2
        # The file on disk must actually have been rewritten.
        assert " x = 1" not in target.read_text()

    def test_dry_run_no_write(self, tmp_path):
        target = tmp_path / "test.jsonl"
        record = json.dumps({"prompt": "```python\n x = 1\n```"})
        target.write_text(record + "\n")

        stats = process_file(str(target), dry_run=True)
        assert stats["fixes"] == 1
        # Dry run: the file keeps its original content.
        assert target.read_text().strip() == record

    def test_missing_file(self, tmp_path):
        stats = process_file(str(tmp_path / "nope.jsonl"))
        assert "error" in stats

    def test_clean_file_no_fixes(self, tmp_path):
        target = tmp_path / "clean.jsonl"
        target.write_text(json.dumps({"prompt": "no code blocks here"}) + "\n")
        assert process_file(str(target))["fixes"] == 0
|
||||
Reference in New Issue
Block a user