Compare commits
2 Commits
step35/423
...
burn/750-1
| Author | SHA1 | Date | |
|---|---|---|---|
| aa1fb845d1 | |||
| 0452457f1f |
139
scripts/normalize-code-blocks.py
Normal file
139
scripts/normalize-code-blocks.py
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
normalize-code-blocks.py — Fix inconsistent indentation in training data code blocks.
|
||||||
|
|
||||||
|
When code blocks are embedded in JSONL as triple-quoted strings, indentation
|
||||||
|
accumulates from the surrounding context. This script normalizes code block
|
||||||
|
content using textwrap.dedent and consistent 4-space indentation.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python3 scripts/normalize-code-blocks.py training/data/preference_pairs.jsonl
|
||||||
|
python3 scripts/normalize-code-blocks.py --dry-run training/data/*.jsonl
|
||||||
|
python3 scripts/normalize-code-blocks.py --check training/data/*.jsonl # CI mode
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import textwrap
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Matches ```python ... ``` or ``` ... ``` blocks inside string values
|
||||||
|
# Matches ```python ... ``` or ``` ... ``` blocks inside string values.
# The open group ends with [ \t]*\n — NOT \s*\n — so that blank lines
# immediately after the fence remain part of the <code> body, where
# normalize_code_block strips them.  (\s also matches newlines, which
# silently swallowed leading blank lines into the open tag and made the
# leading-blank-line stripping dead code.)
CODE_BLOCK_RE = re.compile(
    r'(?P<open>```(?:python|py|bash|sh|javascript|js|typescript|ts|go|rust|ruby)?[ \t]*\n)'
    r'(?P<code>.*?)'
    r'(?P<close>```)',
    re.DOTALL,
)
|
||||||
|
|
||||||
|
|
||||||
|
def normalize_code_block(match: re.Match) -> str:
    """Return the matched fenced block with its code body dedented.

    The opening fence (including any language tag) and the closing fence are
    preserved verbatim; the body is run through ``textwrap.dedent`` and then
    trimmed of leading and trailing blank lines.  Whitespace-only bodies are
    returned completely untouched.
    """
    open_tag, body, close_tag = match.group("open", "code", "close")

    # Nothing visible to normalize — keep the block byte-for-byte.
    if not body.strip():
        return match.group(0)

    rows = textwrap.dedent(body).split("\n")

    # Trim blank rows from both ends by moving index bounds instead of
    # repeatedly popping from the list.
    start, stop = 0, len(rows)
    while start < stop and not rows[start].strip():
        start += 1
    while stop > start and not rows[stop - 1].strip():
        stop -= 1

    cleaned = "\n".join(rows[start:stop])
    return f"{open_tag}{cleaned}\n{close_tag}"
|
||||||
|
|
||||||
|
|
||||||
|
def process_line(line: str) -> tuple[str, int]:
    """Process a single JSONL line.  Returns (new_line, num_fixes).

    Walks every string value in the decoded object (recursing through dicts
    and lists) and normalizes any fenced code blocks found.  Lines that are
    not valid JSON — or that need no fixes — are returned byte-for-byte
    unchanged, so untouched data is never re-serialized with different
    whitespace or a newly appended trailing newline (which previously caused
    spurious "changed" lines in --check diffs).
    """
    try:
        obj = json.loads(line)
    except json.JSONDecodeError:
        # Not JSON (corrupt or non-data line): pass it through untouched.
        return line, 0

    fixes = 0

    def fix_strings(value):
        # Recursively rewrite code blocks inside every string value.
        nonlocal fixes
        if isinstance(value, str):
            fixed = CODE_BLOCK_RE.sub(normalize_code_block, value)
            if fixed != value:
                fixes += 1
            return fixed
        if isinstance(value, dict):
            return {k: fix_strings(v) for k, v in value.items()}
        if isinstance(value, list):
            return [fix_strings(item) for item in value]
        return value

    fixed_obj = fix_strings(obj)
    if fixes == 0:
        # Nothing changed: preserve the original bytes exactly.
        return line, 0
    return json.dumps(fixed_obj, ensure_ascii=False) + "\n", fixes
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: normalize code-block indentation across JSONL files.

    Modes:
      default   — rewrite files in place when fixes are found.
      --dry-run — report fixes but never write.
      --check   — CI mode: report per-line changes, never write, exit 1 if
                  any fixes would be needed.
    """
    parser = argparse.ArgumentParser(description="Normalize code block indentation in JSONL training data")
    parser.add_argument("files", nargs="+", help="JSONL files to process")
    parser.add_argument("--dry-run", action="store_true", help="Show changes without writing")
    parser.add_argument("--check", action="store_true", help="CI mode: exit 1 if fixes needed")
    args = parser.parse_args()

    total_fixes = 0
    total_lines = 0
    files_changed = 0

    for filepath in args.files:
        path = Path(filepath)
        if not path.exists():
            print(f"SKIP: {path} not found", file=sys.stderr)
            continue

        # Explicit UTF-8: training data is UTF-8 JSONL; relying on the
        # platform default encoding (e.g. on Windows) could corrupt it.
        lines = path.read_text(encoding="utf-8").splitlines(keepends=True)
        fixed_lines = []
        file_fixes = 0

        for line in lines:
            # Blank lines carry no JSON; keep them verbatim and uncounted.
            if not line.strip():
                fixed_lines.append(line)
                continue
            fixed_line, n = process_line(line)
            fixed_lines.append(fixed_line)
            file_fixes += n
            total_lines += 1

        if file_fixes > 0:
            files_changed += 1
            total_fixes += file_fixes
            print(f"{'CHECK' if args.check else 'FIX'}: {path} — {file_fixes} code blocks normalized")

            if args.check:
                # CI mode only reports which lines differ; it never writes.
                for i, (old, new) in enumerate(zip(lines, fixed_lines)):
                    if old != new:
                        print(f" Line {i+1}: indentation changed")
            elif not args.dry_run:
                path.write_text("".join(fixed_lines), encoding="utf-8")
                print(f" Written: {path}")
        else:
            print(f"OK: {path} — no indentation issues")

    print(f"\nSummary: {total_fixes} code blocks fixed across {files_changed} files ({total_lines} lines processed)")

    if args.check and total_fixes > 0:
        print("FAIL: Code block indentation issues found. Run without --check to fix.")
        sys.exit(1)

    sys.exit(0)
|
||||||
|
|
||||||
|
|
||||||
|
# Script entry point: `python3 scripts/normalize-code-blocks.py <files...>`.
if __name__ == "__main__":
    main()
|
||||||
139
tests/test_normalize_code_blocks.py
Normal file
139
tests/test_normalize_code_blocks.py
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""Tests for normalize-code-blocks.py — training data code block indentation fix (#750)."""
|
||||||
|
|
||||||
|
import importlib.util
import json
import os
import sys
import tempfile
import textwrap
import unittest
from pathlib import Path
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "scripts"))

# The script's filename contains hyphens (normalize-code-blocks.py), so a
# plain `import normalize_code_blocks` raises ModuleNotFoundError — the
# module must be loaded explicitly from its file path instead.
import importlib.util

_SCRIPT_PATH = os.path.join(os.path.dirname(__file__), "..", "scripts", "normalize-code-blocks.py")
_spec = importlib.util.spec_from_file_location("normalize_code_blocks", _SCRIPT_PATH)
_module = importlib.util.module_from_spec(_spec)
_spec.loader.exec_module(_module)
sys.modules["normalize_code_blocks"] = _module

normalize_code_block = _module.normalize_code_block
process_line = _module.process_line
CODE_BLOCK_RE = _module.CODE_BLOCK_RE
|
||||||
|
|
||||||
|
|
||||||
|
class TestNormalizeCodeBlock(unittest.TestCase):
    """Unit tests for normalize_code_block applied via CODE_BLOCK_RE.sub.

    Subclasses unittest.TestCase so the tests are discovered both by pytest
    and by the file's own ``unittest.main()`` entry point — as plain classes
    they were invisible to unittest and running the file executed no tests.
    """

    def test_basic_dedent(self):
        block = "```python\n    from fastapi import FastAPI\n    app = FastAPI()\n```"
        result = CODE_BLOCK_RE.sub(normalize_code_block, block)
        assert "    from fastapi" not in result
        assert "from fastapi" in result

    def test_preserves_language_tag(self):
        block = "```python\n    x = 1\n```"
        result = CODE_BLOCK_RE.sub(normalize_code_block, block)
        assert result.startswith("```python")

    def test_empty_block_unchanged(self):
        # A whitespace-only body must be returned byte-for-byte.
        block = "```python\n    \n    \n```"
        result = CODE_BLOCK_RE.sub(normalize_code_block, block)
        assert result == block

    def test_multiple_blocks(self):
        text = "First: ```python\n    x = 1\n``` and second: ```python\n    y = 2\n```"
        result = CODE_BLOCK_RE.sub(normalize_code_block, text)
        assert "    x = 1" not in result
        assert "    y = 2" not in result
        assert "x = 1" in result
        assert "y = 2" in result

    def test_bash_block(self):
        block = "```bash\n    echo hello\n    ls -la\n```"
        result = CODE_BLOCK_RE.sub(normalize_code_block, block)
        assert "    echo" not in result
        assert "echo hello" in result

    def test_unlabeled_block(self):
        block = "```\n    some code\n```"
        result = CODE_BLOCK_RE.sub(normalize_code_block, block)
        assert "    some code" not in result

    def test_mixed_indentation(self):
        block = "```python\n    def foo():\n        return 42\n```"
        result = CODE_BLOCK_RE.sub(normalize_code_block, block)
        lines = result.split("\n")
        # First code line should not have leading spaces from embedding
        code_lines = [l for l in lines if l.strip() and not l.startswith("```")]
        assert code_lines[0].startswith("def")

    def test_strips_leading_trailing_blanks(self):
        block = "```python\n\n    x = 1\n\n```"
        result = CODE_BLOCK_RE.sub(normalize_code_block, block)
        assert "\n\n" not in result.split("```python")[1].split("```")[0]
|
||||||
|
|
||||||
|
|
||||||
|
class TestProcessLine(unittest.TestCase):
    """Tests for process_line's JSONL handling.

    Subclasses unittest.TestCase so ``unittest.main()`` discovers these
    tests; as a plain class they were silently skipped by unittest.
    """

    def test_valid_jsonl_with_code(self):
        obj = {"prompt": "write code", "response": "```python\n    x = 1\n```"}
        line = json.dumps(obj)
        fixed, n = process_line(line)
        parsed = json.loads(fixed)
        assert n == 1
        assert "    x = 1" not in parsed["response"]

    def test_no_code_blocks(self):
        obj = {"text": "hello world"}
        line = json.dumps(obj)
        fixed, n = process_line(line)
        assert n == 0
        assert json.loads(fixed)["text"] == "hello world"

    def test_invalid_jsonl(self):
        # Non-JSON lines must pass through completely unchanged.
        line = "not valid json {{{"
        fixed, n = process_line(line)
        assert n == 0
        assert fixed == line

    def test_nested_code_blocks(self):
        # Code blocks inside nested message structures must be found too.
        obj = {
            "messages": [
                {"role": "user", "content": "write code"},
                {"role": "assistant", "content": "```python\n    def f():\n        pass\n```"},
            ]
        }
        line = json.dumps(obj)
        fixed, n = process_line(line)
        assert n == 1
        parsed = json.loads(fixed)
        assert "    def f" not in parsed["messages"][1]["content"]

    def test_multiple_fields_with_code(self):
        obj = {
            "terse": "```python\n    x = 1\n```",
            "rich": "```python\n    y = 2\n```",
        }
        line = json.dumps(obj)
        fixed, n = process_line(line)
        parsed = json.loads(fixed)
        assert n == 2
        assert "    x = 1" not in parsed["terse"]
        assert "    y = 2" not in parsed["rich"]
|
||||||
|
|
||||||
|
|
||||||
|
class TestEndToEnd(unittest.TestCase):
    """End-to-end run of the per-line processing over a temp JSONL file.

    Subclasses unittest.TestCase for discovery by ``unittest.main()``.
    """

    def test_file_processing(self):
        # delete=False so the file survives the `with` block on all
        # platforms; it is removed explicitly below.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".jsonl", delete=False) as f:
            f.write(json.dumps({"r": "```python\n    x = 1\n```"}) + "\n")
            f.write(json.dumps({"r": "no code here"}) + "\n")
            f.write(json.dumps({"r": "```python\n    def g():\n        return 99\n```"}) + "\n")
            f.flush()

        # Process using the script logic
        lines = Path(f.name).read_text().splitlines(keepends=True)
        fixed = []
        total = 0
        for line in lines:
            fl, n = process_line(line)
            fixed.append(fl)
            total += n

        os.unlink(f.name)
        # Lines 1 and 3 contain indented code blocks; line 2 has none.
        assert total == 2
        # Verify first line is fixed
        first = json.loads(fixed[0])
        assert "    x = 1" not in first["r"]
|
||||||
|
|
||||||
|
|
||||||
|
# Allow `python3 tests/test_normalize_code_blocks.py` in addition to pytest.
if __name__ == "__main__":
    import unittest
    unittest.main()
|
||||||
Reference in New Issue
Block a user