Compare commits


3 Commits

3 changed files with 289 additions and 160 deletions

scripts/normalize-code-blocks.py

@@ -0,0 +1,138 @@
#!/usr/bin/env python3
"""
normalize-code-blocks.py — Fix inconsistent indentation in training data code blocks.

When code blocks are embedded in JSONL as triple-quoted strings, indentation
accumulates from the surrounding context. This script normalizes code block
content using textwrap.dedent and consistent 4-space indentation.

Usage:
    python3 scripts/normalize-code-blocks.py training/data/preference_pairs.jsonl
    python3 scripts/normalize-code-blocks.py --dry-run training/data/*.jsonl
    python3 scripts/normalize-code-blocks.py --check training/data/*.jsonl  # CI mode
"""
import argparse
import json
import re
import sys
import textwrap
from pathlib import Path
# Matches ```python ... ``` or ``` ... ``` blocks inside string values
CODE_BLOCK_RE = re.compile(
    r"(?P<open>```(?:python|py|bash|sh|javascript|js|typescript|ts|go|rust|ruby)?\s*\n)"
    r"(?P<code>.*?)"
    r"(?P<close>```)",
    re.DOTALL,
)
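
# Example: for "```python\n    x = 1\n```" the groups are open="```python\n",
# code="    x = 1\n", close="```"; substitution rewrites only the code group.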


def normalize_code_block(match: re.Match) -> str:
    """Normalize indentation in a single code block."""
    open_tag = match.group("open")
    code = match.group("code")
    close_tag = match.group("close")
    if not code.strip():
        return match.group(0)
    dedented = textwrap.dedent(code)
    lines = dedented.split("\n")
    # Trim leading and trailing blank lines around the code
    while lines and not lines[0].strip():
        lines.pop(0)
    while lines and not lines[-1].strip():
        lines.pop()
    normalized = "\n".join(lines)
    return f"{open_tag}{normalized}\n{close_tag}"
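
# Worked example: a block "```python\n    x = 1\n\n```" comes back as
# "```python\nx = 1\n```": the common 4-space indent is dedented and the
# trailing blank line is dropped.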


def process_line(line: str) -> tuple[str, int]:
    """Process a single JSONL line. Returns (new_line, num_fixes)."""
    try:
        obj = json.loads(line)
    except json.JSONDecodeError:
        return line, 0
    fixes = 0

    def fix_strings(obj):
        # Recursively normalize every string value in the JSON object tree.
        nonlocal fixes
        if isinstance(obj, str):
            original = obj
            fixed = CODE_BLOCK_RE.sub(normalize_code_block, obj)
            if fixed != original:
                fixes += 1
            return fixed
        elif isinstance(obj, dict):
            return {k: fix_strings(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [fix_strings(item) for item in obj]
        return obj

    fixed_obj = fix_strings(obj)
    return json.dumps(fixed_obj, ensure_ascii=False) + "\n", fixes
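
# Example: process_line('{"prompt": "```python\\n    x = 1\\n```"}') returns
# the re-serialized JSON line with the block dedented and a fix count of 1.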


def process_file(filepath: str, dry_run: bool = False) -> dict:
    """Process a single JSONL file. Returns stats dict."""
    path = Path(filepath)
    if not path.exists():
        return {"file": str(filepath), "error": "not found", "fixes": 0, "lines": 0}
    lines = path.read_text(encoding="utf-8").splitlines()
    fixed_lines = []
    total_fixes = 0
    for line in lines:
        if not line.strip():
            fixed_lines.append(line)
            continue
        new_line, fixes = process_line(line)
        fixed_lines.append(new_line.rstrip("\n"))
        total_fixes += fixes
    if total_fixes > 0 and not dry_run:
        path.write_text("\n".join(fixed_lines) + "\n", encoding="utf-8")
    return {
        "file": str(filepath),
        "lines": len(lines),
        "fixes": total_fixes,
        "changed": total_fixes > 0,
    }
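
# Example: process_file("data.jsonl") returns a stats dict such as
# {"file": "data.jsonl", "lines": 120, "fixes": 3, "changed": True}.
# ("data.jsonl" and the numbers here are illustrative.)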


def main():
    parser = argparse.ArgumentParser(
        description="Normalize code block indentation in JSONL training data"
    )
    parser.add_argument("files", nargs="+", help="JSONL files to process")
    parser.add_argument("--dry-run", action="store_true", help="Show changes without writing")
    parser.add_argument("--check", action="store_true", help="CI mode: exit 1 if fixes needed")
    args = parser.parse_args()

    total_fixes = 0
    results = []
    for filepath in args.files:
        result = process_file(filepath, dry_run=args.dry_run or args.check)
        results.append(result)
        total_fixes += result["fixes"]
        if "error" in result:
            print(f"  ERROR: {result['file']}: {result['error']}")
        elif result["fixes"] > 0:
            status = "FIXED" if not args.dry_run and not args.check else "WOULD FIX"
            print(f"  {status}: {result['file']} ({result['fixes']} code blocks normalized)")
        else:
            print(f"  OK: {result['file']}")
    print(f"\nTotal: {total_fixes} code blocks normalized across {len(results)} files")
    if args.check and total_fixes > 0:
        print("FAIL: Code block indentation issues found. Run without --check to fix.")
        sys.exit(1)


if __name__ == "__main__":
    main()

scripts/pr-backlog-triage.py

@@ -1,160 +0,0 @@
#!/usr/bin/env python3
"""
pr-backlog-triage.py — Analyze and triage open PR backlog.

Identifies duplicate PRs (same issue number), stale PRs (old with no activity),
and generates a triage report.

Usage:
    python3 scripts/pr-backlog-triage.py --report       # Print report
    python3 scripts/pr-backlog-triage.py --close-dupes  # Close duplicate PRs (keep newest)
    python3 scripts/pr-backlog-triage.py --dry-run      # Show what would be closed
"""
import argparse
import json
import re
import sys
import urllib.request
from collections import defaultdict
from datetime import datetime, timezone
GITEA_URL = "https://forge.alexanderwhitestone.com"
TOKEN_PATH = "/Users/apayne/.config/gitea/token"
REPO = "Timmy_Foundation/timmy-config"


def load_token():
    with open(TOKEN_PATH) as f:
        return f.read().strip()


def api_get(path, token):
    req = urllib.request.Request(
        f"{GITEA_URL}/api/v1/repos/{REPO}{path}",
        headers={"Authorization": f"token {token}"}
    )
    return json.loads(urllib.request.urlopen(req, timeout=30).read())


def api_patch(path, token, data):
    req = urllib.request.Request(
        f"{GITEA_URL}/api/v1/repos/{REPO}{path}",
        data=json.dumps(data).encode(),
        headers={"Authorization": f"token {token}", "Content-Type": "application/json"},
        method="PATCH"
    )
    return json.loads(urllib.request.urlopen(req, timeout=15).read())


def api_post(path, token, data):
    req = urllib.request.Request(
        f"{GITEA_URL}/api/v1/repos/{REPO}{path}",
        data=json.dumps(data).encode(),
        headers={"Authorization": f"token {token}", "Content-Type": "application/json"},
        method="POST"
    )
    return json.loads(urllib.request.urlopen(req, timeout=15).read())
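
# Example: api_get("/pulls?state=open&limit=100", token) issues
# GET {GITEA_URL}/api/v1/repos/{REPO}/pulls?state=open&limit=100 and returns
# the decoded JSON list of open pull requests.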


def extract_issue_refs(title, body):
    """Extract issue numbers referenced in title or body."""
    text = f"{title} {body or ''}"
    # Match #123 or (fixes #123) or (closes #123)
    refs = set(int(m) for m in re.findall(r'#(\d{2,5})', text))
    return refs
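
# Example: extract_issue_refs("Fix crash (closes #123)", "See also #45")
# returns {123, 45}; single-digit refs like #7 are excluded by the {2,5} bound.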


def main():
    parser = argparse.ArgumentParser(description="Triage open PR backlog")
    parser.add_argument("--report", action="store_true", help="Print triage report")
    parser.add_argument("--close-dupes", action="store_true", help="Close duplicate PRs (keep newest)")
    parser.add_argument("--dry-run", action="store_true", help="Show what would be closed")
    args = parser.parse_args()
    if not args.report and not args.close_dupes:
        args.report = True

    token = load_token()
    prs = api_get("/pulls?state=open&limit=100", token)
    print(f"Found {len(prs)} open PRs\n")

    # Build issue → PR mapping
    issue_to_prs = defaultdict(list)
    for pr in prs:
        refs = extract_issue_refs(pr["title"], pr.get("body", ""))
        for ref in refs:
            issue_to_prs[ref].append(pr)

    # Find duplicates (same issue referenced by multiple PRs)
    duplicates = {}
    for issue_num, pr_list in issue_to_prs.items():
        if len(pr_list) > 1:
            # Sort by number (newest first)
            sorted_prs = sorted(pr_list, key=lambda p: -p["number"])
            duplicates[issue_num] = sorted_prs
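            # Example (hypothetical numbers): PRs #415, #402, #388 that all
            # reference issue #123 sort to [#415, #402, #388]; #415 is kept
            # and the rest become close candidates.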

    if args.report:
        print(f"{'='*60}")
        print(f"DUPLICATE PRs ({len(duplicates)} issues with multiple PRs)")
        print(f"{'='*60}")
        for issue_num, pr_list in sorted(duplicates.items()):
            print(f"\nIssue #{issue_num}: {len(pr_list)} PRs")
            for i, pr in enumerate(pr_list):
                marker = "KEEP" if i == 0 else "CLOSE"
                print(f"  [{marker}] PR #{pr['number']}: {pr['title'][:70]}")
                print(f"        branch={pr['head']['ref']} created={pr['created_at'][:10]}")
        total_dupes = sum(len(v) - 1 for v in duplicates.values())
        print(f"\nTotal duplicate PRs that could be closed: {total_dupes}")

        # Check for PRs referencing closed issues
        print(f"\n{'='*60}")
        print("PRs referencing CLOSED issues:")
        print(f"{'='*60}")
        closed_issue_prs = []
        for issue_num in issue_to_prs:
            try:
                # GET /repos/{owner}/{repo}/issues/{index}
                issue = api_get(f"/issues/{issue_num}", token)
                if issue.get("state") == "closed":
                    for pr in issue_to_prs[issue_num]:
                        closed_issue_prs.append((issue_num, pr))
            except Exception:
                pass
        for issue_num, pr in sorted(closed_issue_prs, key=lambda x: -x[1]["number"]):
            print(f"  PR #{pr['number']}: {pr['title'][:70]} (issue #{issue_num} is CLOSED)")

    if args.close_dupes:
        closed = 0
        for issue_num, pr_list in duplicates.items():
            # Keep the newest (first in list), close the rest
            keep = pr_list[0]
            close_list = pr_list[1:]
            for pr in close_list:
                if args.dry_run:
                    print(f"DRY RUN: Would close PR #{pr['number']} (duplicate of #{keep['number']} for issue #{issue_num})")
                else:
                    # Add comment
                    try:
                        api_post(f"/issues/{pr['number']}/comments", token, {
                            "body": f"Closing as duplicate. PR #{keep['number']} is newer and addresses the same issue (#{issue_num})."
                        })
                    except Exception:
                        pass
                    # Close the PR
                    try:
                        api_patch(f"/pulls/{pr['number']}", token, {"state": "closed"})
                        print(f"Closed PR #{pr['number']} (duplicate of #{keep['number']})")
                        closed += 1
                    except Exception as e:
                        print(f"Error closing PR #{pr['number']}: {e}")
        print(f"\nClosed {closed} duplicate PRs")


if __name__ == "__main__":
    main()

tests/test_normalize_code_blocks.py

@@ -0,0 +1,151 @@
#!/usr/bin/env python3
"""Tests for normalize-code-blocks.py — issue #750"""
import importlib.util
import json
from pathlib import Path

# The script filename contains hyphens, so it can't be imported with a plain
# `import` statement; load it from scripts/ via importlib instead.
_SCRIPT = Path(__file__).resolve().parent.parent / "scripts" / "normalize-code-blocks.py"
_spec = importlib.util.spec_from_file_location("normalize_code_blocks", _SCRIPT)
_mod = importlib.util.module_from_spec(_spec)
_spec.loader.exec_module(_mod)
normalize_code_block = _mod.normalize_code_block
process_line = _mod.process_line
process_file = _mod.process_file
CODE_BLOCK_RE = _mod.CODE_BLOCK_RE


class TestCodeBlockRegex:
    def test_matches_python_block(self):
        text = "```python\nprint('hi')\n```"
        assert CODE_BLOCK_RE.search(text)

    def test_matches_plain_block(self):
        text = "```\nsome code\n```"
        assert CODE_BLOCK_RE.search(text)

    def test_matches_bash_block(self):
        text = "```bash\necho hello\n```"
        assert CODE_BLOCK_RE.search(text)

    def test_ignores_inline_backticks(self):
        text = "Use `code` inline"
        assert not CODE_BLOCK_RE.search(text)

    def test_handles_multiline_code(self):
        text = "```python\ndef foo():\n    return 1\n\ndef bar():\n    return 2\n```"
        match = CODE_BLOCK_RE.search(text)
        assert match
        assert "def foo" in match.group("code")


class TestNormalizeCodeBlock:
    def test_strips_leading_indent(self):
        match = CODE_BLOCK_RE.search("```python\n    print('hi')\n```")
        result = normalize_code_block(match)
        assert "    print" not in result
        assert "print('hi')" in result

    def test_dedents_mixed_indent(self):
        code = "```python\n    def foo():\n        return 1\n    def bar():\n        return 2\n```"
        match = CODE_BLOCK_RE.search(code)
        result = normalize_code_block(match)
        lines = result.split("\n")
        # First non-tag line should have 0 indent
        code_lines = [l for l in lines if l.strip() and not l.startswith("```")]
        assert code_lines[0].startswith("def foo")

    def test_strips_trailing_blank_lines(self):
        match = CODE_BLOCK_RE.search("```python\nprint('hi')\n\n\n```")
        result = normalize_code_block(match)
        assert result.endswith("print('hi')\n```")

    def test_preserves_language_tag(self):
        match = CODE_BLOCK_RE.search("```python\n    x = 1\n```")
        result = normalize_code_block(match)
        assert result.startswith("```python")

    def test_empty_block_unchanged(self):
        match = CODE_BLOCK_RE.search("```python\n \n```")
        original = match.group(0)
        result = normalize_code_block(match)
        assert result == original

    def test_diff_markers_preserved(self):
        code = "```\n+def new_func():\n+    return 1\n-def old_func():\n-    return 0\n```"
        match = CODE_BLOCK_RE.search(code)
        result = normalize_code_block(match)
        assert "+def new_func" in result
        assert "-def old_func" in result


class TestProcessLine:
    def test_valid_json_no_code_blocks(self):
        line = json.dumps({"prompt": "hello world"})
        new_line, fixes = process_line(line)
        assert fixes == 0

    def test_valid_json_with_code_block(self):
        obj = {"prompt": "Here is code:\n```python\n    x = 1\n```"}
        line = json.dumps(obj)
        new_line, fixes = process_line(line)
        assert fixes == 1
        parsed = json.loads(new_line)
        assert "    x = 1" not in parsed["prompt"]

    def test_nested_dict_code_blocks(self):
        obj = {
            "prompt": "code: ```python\n    a = 1\n```",
            "chosen": "```python\n    b = 2\n```",
        }
        line = json.dumps(obj)
        new_line, fixes = process_line(line)
        assert fixes == 2

    def test_invalid_json_returned_unchanged(self):
        line = "{broken json"
        new_line, fixes = process_line(line)
        assert new_line == line
        assert fixes == 0

    def test_list_field_code_blocks(self):
        obj = {"items": ["```python\n    x = 1\n```", "no code here"]}
        line = json.dumps(obj)
        new_line, fixes = process_line(line)
        assert fixes == 1


class TestProcessFile:
    def test_fixes_file_in_place(self, tmp_path):
        f = tmp_path / "test.jsonl"
        lines = [
            json.dumps({"prompt": "```python\n    x = 1\n```"}),
            json.dumps({"prompt": "no code"}),
        ]
        f.write_text("\n".join(lines) + "\n")
        result = process_file(str(f))
        assert result["fixes"] == 1
        assert result["lines"] == 2
        # Verify file was actually modified
        content = f.read_text()
        assert "    x = 1" not in content

    def test_dry_run_no_write(self, tmp_path):
        f = tmp_path / "test.jsonl"
        original = json.dumps({"prompt": "```python\n    x = 1\n```"})
        f.write_text(original + "\n")
        result = process_file(str(f), dry_run=True)
        assert result["fixes"] == 1
        # File unchanged
        assert f.read_text().strip() == original

    def test_missing_file(self, tmp_path):
        result = process_file(str(tmp_path / "nope.jsonl"))
        assert "error" in result

    def test_clean_file_no_fixes(self, tmp_path):
        f = tmp_path / "clean.jsonl"
        f.write_text(json.dumps({"prompt": "no code blocks here"}) + "\n")
        result = process_file(str(f))
        assert result["fixes"] == 0
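
# To run these tests (assuming the file lives in tests/ alongside scripts/):
#   python3 -m pytest tests/test_normalize_code_blocks.py -v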