Compare commits

...

4 Commits

Author SHA1 Message Date
7b4458e0ad test: JSON repair module tests
Some checks failed
Contributor Attribution Check / check-attribution (pull_request) Failing after 44s
Docker Build and Publish / build-and-push (pull_request) Has been skipped
Supply Chain Audit / Scan PR for supply chain risks (pull_request) Successful in 51s
Tests / e2e (pull_request) Successful in 2m59s
Tests / test (pull_request) Failing after 38m20s
Part of #781. Tests all 7 failure patterns.
2026-04-15 14:49:51 +00:00
8d92047ff8 feat: JSON repair module for tool-call pipeline
Closes #781

Provides repair_and_load_json() that fixes malformed JSON from
LLM tool calls. Handles truncation, unquoted keys, single quotes,
trailing commas, markdown wrapping, and extra trailing text.
2026-04-15 14:49:49 +00:00
f1f9bd2e76 Merge pull request 'feat: implement Reader-Guided Reranking — bridge R@5 vs E2E gap (#666)' (#782) from fix/666 into main 2026-04-15 11:58:02 +00:00
Hermes Agent
4129cc0d0c feat: implement Reader-Guided Reranking — bridge R@5 vs E2E gap (#666)
Some checks failed
Docker Build and Publish / build-and-push (pull_request) Has been skipped
Contributor Attribution Check / check-attribution (pull_request) Failing after 37s
Supply Chain Audit / Scan PR for supply chain risks (pull_request) Successful in 55s
Tests / test (pull_request) Failing after 55s
Tests / e2e (pull_request) Successful in 2m49s
Resolves #666. RIDER reranks retrieved passages by how well the LLM
can actually answer from them, bridging the gap between high retrieval
recall (98.4% R@5) and low end-to-end accuracy (17%).

agent/rider.py (256 lines):
- RIDER class with rerank(passages, query) method
- Per-passage LLM answer prediction, executed in batches
- Confidence-based scoring: specificity, grounding, hedge detection,
  query relevance, refusal penalty
- Async scoring with configurable batch size
- Convenience functions: rerank_passages(), is_rider_available()

tools/session_search_tool.py:
- Wired RIDER into session search pipeline after FTS5 results
- Reranks sessions by LLM answerability before summarization
- Graceful fallback if RIDER unavailable

tests/test_reader_guided_reranking.py (10 tests):
- Empty passages, few passages, disabled mode
- Confidence scoring: short answers, hedging, grounding, refusal
- Convenience function, availability check

Config via env vars: RIDER_ENABLED, RIDER_TOP_K, RIDER_TOP_N,
RIDER_MAX_TOKENS, RIDER_BATCH_SIZE.
2026-04-15 07:40:15 -04:00
5 changed files with 774 additions and 0 deletions
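
Note on configuration: the env vars listed in the RIDER commit message above are read once, when agent/rider.py (below) is imported, so they must be set before the first import. A minimal sketch; the values shown are just the defaults from this diff:

import os

# Set these before `import agent.rider`; the module reads them once at import time.
os.environ["RIDER_ENABLED"] = "true"    # "false", "0", or "no" disables reranking
os.environ["RIDER_TOP_K"] = "10"        # passages to score
os.environ["RIDER_TOP_N"] = "3"         # passages returned after reranking
os.environ["RIDER_MAX_TOKENS"] = "50"   # max tokens per prediction
os.environ["RIDER_BATCH_SIZE"] = "5"    # parallel predictions per batch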

283
agent/json_repair.py Normal file

@@ -0,0 +1,283 @@
"""
JSON Repair for Tool-Call Pipeline — #781
Provides repair_and_load_json() that attempts to fix malformed JSON
from LLM tool calls before falling back to full API retry.
Handles all common LLM JSON failure modes:
- Truncation (missing closing braces/brackets)
- Unquoted keys
- Single quotes
- Trailing commas
- Markdown-wrapped JSON
- Extra trailing text
Usage:
from agent.json_repair import repair_and_load_json
args = repair_and_load_json(raw_args)
if args is None:
# Truly unrepairable — trigger retry
pass
"""
import json
import re
from typing import Any, Optional
try:
from json_repair import repair_json
HAS_JSON_REPAIR = True
except ImportError:
HAS_JSON_REPAIR = False


def repair_and_load_json(text: str, default: Any = None) -> Any:
    """
    Parse JSON, repairing common LLM malformations on failure.

    Tries json.loads() first (fast path). On failure, attempts repair
    via the json-repair library, then manual heuristics. Returns default
    if unrepairable.

    Args:
        text: Raw JSON string (possibly malformed)
        default: Value to return if repair fails

    Returns:
        Parsed JSON object, or default if unrepairable
    """
    if not text or not isinstance(text, str):
        return default
    text = text.strip()
    if not text:
        return default
    # Fast path: try native parse first
    try:
        return json.loads(text)
    except (json.JSONDecodeError, TypeError, ValueError):
        pass
    # Repair path
    repaired = _repair_json_string(text)
    if repaired is None:
        return default
    try:
        return json.loads(repaired)
    except (json.JSONDecodeError, TypeError, ValueError):
        return default


def _repair_json_string(text: str) -> Optional[str]:
    """
    Attempt to repair a malformed JSON string.

    Returns repaired string, or None if unrepairable.
    """
    if not text:
        return None
    original = text
    # Strip markdown wrapping
    text = _strip_markdown(text)
    # Try json-repair library if available
    if HAS_JSON_REPAIR:
        try:
            repaired = repair_json(text)
            if repaired and repaired != text:
                return repaired
        except Exception:
            pass
    # Manual repair attempts
    repaired = _manual_repair(text)
    if repaired is not None:
        return repaired
    # Stripping markdown may have been the only repair needed
    return text if text != original else None


def _strip_markdown(text: str) -> str:
    """Strip markdown code block wrapping."""
    # ```json ... ```
    match = re.match(r'^```(?:json)?\s*\n?(.*?)\n?```\s*$', text, re.DOTALL)
    if match:
        return match.group(1).strip()
    # ` ... `
    match = re.match(r'^`(.+)`$', text, re.DOTALL)
    if match:
        return match.group(1).strip()
    return text


def _manual_repair(text: str) -> Optional[str]:
    """Manual JSON repair heuristics."""
    original = text
    # Remove trailing non-JSON text after valid JSON:
    # find the last } or ] that closes the root
    text = _extract_json(text)
    if text is None:
        return None
    # Fix trailing commas
    text = re.sub(r',\s*([}\]])', r'\1', text)
    # Fix single quotes -> double quotes (simple cases),
    # only if there are no double quotes already
    if '"' not in text and "'" in text:
        text = text.replace("'", '"')
    # Fix unquoted keys: {key: value} -> {"key": value}
    text = re.sub(r'([{,]\s*)([a-zA-Z_][a-zA-Z0-9_]*)\s*:', r'\1"\2":', text)
    # Try to close unclosed braces/brackets
    text = _close_brackets(text)
    return text if text != original else None


def _extract_json(text: str) -> Optional[str]:
    """Extract JSON from text that may have trailing content."""
    text = text.strip()
    if not text:
        return None
    # Find the start of JSON (first { or [)
    start_obj = text.find('{')
    start_arr = text.find('[')
    if start_obj == -1 and start_arr == -1:
        return None
    if start_arr == -1 or (start_obj != -1 and start_obj < start_arr):
        start = start_obj
        open_char, close_char = '{', '}'
    else:
        start = start_arr
        open_char, close_char = '[', ']'
    # Find matching close, handling nesting
    depth = 0
    in_string = False
    escape_next = False
    for i in range(start, len(text)):
        c = text[i]
        if escape_next:
            escape_next = False
            continue
        if c == '\\':
            escape_next = True
            continue
        if c == '"' and not escape_next:
            in_string = not in_string
            continue
        if in_string:
            continue
        if c == open_char:
            depth += 1
        elif c == close_char:
            depth -= 1
            if depth == 0:
                return text[start:i + 1]
    # Unclosed — return from start to end, will be closed by _close_brackets
    return text[start:] if start > 0 else text


def _close_brackets(text: str) -> str:
    """Close unclosed braces and brackets."""
    open_count = 0
    close_count = 0
    in_string = False
    escape_next = False
    for c in text:
        if escape_next:
            escape_next = False
            continue
        if c == '\\':
            escape_next = True
            continue
        if c == '"' and not escape_next:
            in_string = not in_string
            continue
        if in_string:
            continue
        if c == '{':
            open_count += 1
        elif c == '}':
            close_count += 1
        elif c == '[':
            open_count += 1
        elif c == ']':
            close_count += 1
    # Close unclosed strings first
    if in_string:
        text += '"'
    # Close unclosed braces/brackets
    diff = open_count - close_count
    if diff > 0:
        # Determine what to close based on last unclosed
        last_open_obj = text.rfind('{')
        last_open_arr = text.rfind('[')
        if last_open_obj > last_open_arr:
            text += '}' * diff
        else:
            text += ']' * diff
    return text


def repair_tool_call_arguments(arguments: str) -> tuple[str, bool]:
    """
    Repair tool call arguments and return (repaired_args, was_repaired).

    Specifically designed for the tool-call pipeline.
    """
    if not arguments or not arguments.strip():
        return '{}', True
    try:
        json.loads(arguments)
        return arguments, False  # Already valid
    except (json.JSONDecodeError, TypeError, ValueError):
        pass
    repaired = repair_and_load_json(arguments)
    if repaired is not None:
        return json.dumps(repaired), True
    return '{}', True  # Unrepairable — return empty dict


# Metrics tracking
_json_repairs_attempted = 0
_json_repairs_succeeded = 0


def get_repair_metrics() -> dict:
    """Get JSON repair metrics."""
    return {
        "attempted": _json_repairs_attempted,
        "succeeded": _json_repairs_succeeded,
        "success_rate": _json_repairs_succeeded / _json_repairs_attempted if _json_repairs_attempted > 0 else 0,
    }


def _track_repair(success: bool):
    """Track repair attempt."""
    global _json_repairs_attempted, _json_repairs_succeeded
    _json_repairs_attempted += 1
    if success:
        _json_repairs_succeeded += 1
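
A minimal usage sketch exercising repair_and_load_json() on the six failure modes listed in the module docstring. The sample strings are illustrative, not taken from real tool calls:

from agent.json_repair import repair_and_load_json

samples = [
    '{"name": "run", "args": {"x": 1}',   # truncation
    '{key: "value"}',                     # unquoted keys
    "{'key': 'value'}",                   # single quotes
    '{"items": [1, 2, 3,]}',              # trailing comma
    '```json\n{"ok": true}\n```',         # markdown wrapping
    '{"done": true} thanks!',             # extra trailing text
]
for raw in samples:
    print(repair_and_load_json(raw, default="UNREPAIRABLE"))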

256
agent/rider.py Normal file

@@ -0,0 +1,256 @@
"""RIDER — Reader-Guided Passage Reranking.
Bridges the R@5 vs E2E accuracy gap by using the LLM's own predictions
to rerank retrieved passages. Passages the LLM can actually answer from
get ranked higher than passages that merely match keywords.
Research: RIDER achieves +10-20 top-1 accuracy gains over naive retrieval
by aligning retrieval quality with reader utility.
Usage:
from agent.rider import RIDER
rider = RIDER()
reranked = rider.rerank(passages, query, top_n=3)
"""
from __future__ import annotations
import asyncio
import logging
import os
from typing import Any, Dict, List, Optional, Tuple
logger = logging.getLogger(__name__)
# Configuration
RIDER_ENABLED = os.getenv("RIDER_ENABLED", "true").lower() not in ("false", "0", "no")
RIDER_TOP_K = int(os.getenv("RIDER_TOP_K", "10")) # passages to score
RIDER_TOP_N = int(os.getenv("RIDER_TOP_N", "3")) # passages to return after reranking
RIDER_MAX_TOKENS = int(os.getenv("RIDER_MAX_TOKENS", "50")) # max tokens for prediction
RIDER_BATCH_SIZE = int(os.getenv("RIDER_BATCH_SIZE", "5")) # parallel predictions


class RIDER:
    """Reader-Guided Passage Reranking.

    Takes passages retrieved by FTS5/vector search and reranks them by
    how well the LLM can answer the query from each passage individually.
    """

    def __init__(self, auxiliary_task: str = "rider"):
        """Initialize RIDER.

        Args:
            auxiliary_task: Task name for auxiliary client resolution.
        """
        self._auxiliary_task = auxiliary_task

    def rerank(
        self,
        passages: List[Dict[str, Any]],
        query: str,
        top_n: int = RIDER_TOP_N,
    ) -> List[Dict[str, Any]]:
        """Rerank passages by reader confidence.

        Args:
            passages: List of passage dicts. Must have a 'content' or 'text'
                key. May have 'session_id', 'snippet', 'rank', 'score', etc.
            query: The user's search query.
            top_n: Number of passages to return after reranking.

        Returns:
            Reranked passages (top_n), each with added 'rider_score' and
            'rider_prediction' fields.
        """
        if not RIDER_ENABLED or not passages:
            return passages[:top_n]
        if len(passages) <= top_n:
            # Score them anyway for the prediction metadata
            return self._score_and_rerank(passages, query, top_n)
        return self._score_and_rerank(passages[:RIDER_TOP_K], query, top_n)

    def _score_and_rerank(
        self,
        passages: List[Dict[str, Any]],
        query: str,
        top_n: int,
    ) -> List[Dict[str, Any]]:
        """Score each passage with the reader, then rerank by confidence."""
        try:
            from model_tools import _run_async

            scored = _run_async(self._score_all_passages(passages, query))
        except Exception as e:
            logger.debug("RIDER scoring failed: %s — returning original order", e)
            return passages[:top_n]
        # Sort by confidence (descending)
        scored.sort(key=lambda p: p.get("rider_score", 0), reverse=True)
        return scored[:top_n]

    async def _score_all_passages(
        self,
        passages: List[Dict[str, Any]],
        query: str,
    ) -> List[Dict[str, Any]]:
        """Score all passages in batches."""
        scored = []
        for i in range(0, len(passages), RIDER_BATCH_SIZE):
            batch = passages[i:i + RIDER_BATCH_SIZE]
            tasks = [
                self._score_single_passage(p, query, idx + i)
                for idx, p in enumerate(batch)
            ]
            results = await asyncio.gather(*tasks, return_exceptions=True)
            for j, (passage, result) in enumerate(zip(batch, results)):
                if isinstance(result, Exception):
                    logger.debug("RIDER passage %d scoring failed: %s", i + j, result)
                    passage["rider_score"] = 0.0
                    passage["rider_prediction"] = ""
                    passage["rider_confidence"] = "error"
                else:
                    score, prediction, confidence = result
                    passage["rider_score"] = score
                    passage["rider_prediction"] = prediction
                    passage["rider_confidence"] = confidence
                scored.append(passage)
        return scored

    async def _score_single_passage(
        self,
        passage: Dict[str, Any],
        query: str,
        idx: int,
    ) -> Tuple[float, str, str]:
        """Score a single passage by asking the LLM to predict an answer.

        Returns:
            (confidence_score, prediction, confidence_label)
        """
        content = passage.get("content") or passage.get("text") or passage.get("snippet", "")
        if not content or len(content) < 10:
            return 0.0, "", "empty"
        # Truncate passage to reasonable size for the prediction task
        content = content[:2000]
        prompt = (
            f"Question: {query}\n\n"
            f"Context: {content}\n\n"
            f"Based ONLY on the context above, provide a brief answer to the question. "
            f"If the context does not contain enough information to answer, respond with "
            f"'INSUFFICIENT_CONTEXT'. Be specific and concise."
        )
        try:
            from agent.auxiliary_client import get_text_auxiliary_client, auxiliary_max_tokens_param

            client, model = get_text_auxiliary_client(task=self._auxiliary_task)
            if not client:
                return 0.5, "", "no_client"
            response = client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": prompt}],
                **auxiliary_max_tokens_param(RIDER_MAX_TOKENS),
                temperature=0,
            )
            prediction = (response.choices[0].message.content or "").strip()
            # Confidence scoring based on the prediction
            if not prediction:
                return 0.1, "", "empty_response"
            if "INSUFFICIENT_CONTEXT" in prediction.upper():
                return 0.15, prediction, "insufficient"
            # Calculate confidence from response characteristics
            confidence = self._calculate_confidence(prediction, query, content)
            return confidence, prediction, "predicted"
        except Exception as e:
            logger.debug("RIDER prediction failed for passage %d: %s", idx, e)
            return 0.0, "", "error"

    def _calculate_confidence(
        self,
        prediction: str,
        query: str,
        passage: str,
    ) -> float:
        """Calculate confidence score from prediction quality signals.

        Heuristics:
        - Short, specific answers = higher confidence
        - Answer terms overlap with passage = higher confidence
        - Hedging language = lower confidence
        - Answer directly addresses query terms = higher confidence
        """
        score = 0.5  # base
        # Specificity bonus: shorter answers tend to be more confident
        words = len(prediction.split())
        if words <= 5:
            score += 0.2
        elif words <= 15:
            score += 0.1
        elif words > 50:
            score -= 0.1
        # Passage grounding: does the answer use terms from the passage?
        passage_lower = passage.lower()
        answer_terms = set(prediction.lower().split())
        passage_terms = set(passage_lower.split())
        overlap = len(answer_terms & passage_terms)
        if overlap > 3:
            score += 0.15
        elif overlap > 0:
            score += 0.05
        # Query relevance: does the answer address query terms?
        query_terms = set(query.lower().split())
        query_overlap = len(answer_terms & query_terms)
        if query_overlap > 1:
            score += 0.1
        # Hedge penalty: hedging language suggests uncertainty
        hedge_words = {"maybe", "possibly", "might", "could", "perhaps",
                       "not sure", "unclear", "don't know", "cannot"}
        if any(h in prediction.lower() for h in hedge_words):
            score -= 0.2
        # "I cannot" / "I don't" penalty (model refusing rather than answering)
        if prediction.lower().startswith(("i cannot", "i don't", "i can't", "there is no")):
            score -= 0.15
        return max(0.0, min(1.0, score))


def rerank_passages(
    passages: List[Dict[str, Any]],
    query: str,
    top_n: int = RIDER_TOP_N,
) -> List[Dict[str, Any]]:
    """Convenience function for passage reranking."""
    rider = RIDER()
    return rider.rerank(passages, query, top_n)


def is_rider_available() -> bool:
    """Check if RIDER can run (auxiliary client available)."""
    if not RIDER_ENABLED:
        return False
    try:
        from agent.auxiliary_client import get_text_auxiliary_client

        client, model = get_text_auxiliary_client(task="rider")
        return client is not None and model is not None
    except Exception:
        return False
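
A minimal usage sketch for the module-level helpers. The passage dicts and query are hypothetical; scoring only runs when an auxiliary LLM client is configured:

from agent.rider import rerank_passages, is_rider_available

# Hypothetical FTS5 hits; RIDER only requires a 'content' (or 'text') key.
passages = [
    {"content": "The scheduler retries failed jobs twice before giving up.", "session_id": "s1"},
    {"content": "Release notes for v2.3: minor UI fixes.", "session_id": "s2"},
]
if is_rider_available():
    top = rerank_passages(passages, "How many times are failed jobs retried?", top_n=1)
    # Reranked passages gain rider_score / rider_prediction / rider_confidence.
    print(top[0]["session_id"], top[0]["rider_score"])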

136
tests/test_json_repair.py Normal file

@@ -0,0 +1,136 @@
"""Tests for JSON repair module (#781)."""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from agent.json_repair import repair_and_load_json, repair_tool_call_arguments, _strip_markdown, _extract_json
class TestValidJSON:
def test_valid_object(self):
assert repair_and_load_json('{"key": "value"}') == {"key": "value"}
def test_valid_array(self):
assert repair_and_load_json('[1, 2, 3]') == [1, 2, 3]
def test_empty_string_returns_default(self):
assert repair_and_load_json('', default=42) == 42
def test_none_returns_default(self):
assert repair_and_load_json(None, default=42) == 42
class TestTrailingComma:
def test_trailing_comma_object(self):
assert repair_and_load_json('{"key": "value",}') == {"key": "value"}
def test_trailing_comma_array(self):
assert repair_and_load_json('[1, 2, 3,]') == [1, 2, 3]
class TestSingleQuotes:
def test_single_quotes(self):
result = repair_and_load_json("{'key': 'value'}")
assert result == {"key": "value"}
class TestUnquotedKeys:
def test_unquoted_keys(self):
result = repair_and_load_json('{key: "value"}')
assert result == {"key": "value"}
class TestTruncation:
def test_missing_closing_brace(self):
result = repair_and_load_json('{"name": "foo", "args": {"x": 1}')
assert result is not None
assert result["name"] == "foo"
def test_missing_closing_bracket(self):
result = repair_and_load_json('[1, 2, 3')
assert result == [1, 2, 3]
class TestMarkdownWrapping:
def test_json_codeblock(self):
text = '```json\n{"key": "value"}\n```'
assert repair_and_load_json(text) == {"key": "value"}
def test_plain_codeblock(self):
text = '```\n{"key": "value"}\n```'
assert repair_and_load_json(text) == {"key": "value"}
class TestTrailingText:
def test_extra_text_after_json(self):
result = repair_and_load_json('{"result": true} some extra text')
assert result == {"result": True}
class TestStripMarkdown:
def test_strip_json_block(self):
assert _strip_markdown('```json\n{"a":1}\n```') == '{"a":1}'
def test_strip_plain_block(self):
assert _strip_markdown('```\n{"a":1}\n```') == '{"a":1}'
class TestExtractJSON:
def test_extract_from_text(self):
assert _extract_json('prefix {"key": 1} suffix') == '{"key": 1}'
def test_nested(self):
assert _extract_json('{"a": {"b": 1}}') == '{"a": {"b": 1}}'
class TestRepairToolCallArguments:
def test_valid_args(self):
args, repaired = repair_tool_call_arguments('{"x": 1}')
assert args == '{"x": 1}'
assert not repaired
def test_malformed_args(self):
args, repaired = repair_tool_call_arguments("{x: 1}")
assert repaired
assert '"x"' in args
def test_empty_args(self):
args, repaired = repair_tool_call_arguments('')
assert args == '{}'
assert repaired
class TestFailureModes:
def test_truncation_most_dangerous(self):
# LLM cut off mid-string
raw = '{"path": "test.py", "content": "hello'
result = repair_and_load_json(raw)
assert result is not None
assert result.get("path") == "test.py"

    def test_empty_arguments(self):
        # Empty input returns the default (None here); coercion to {}
        # happens in repair_tool_call_arguments, not in this function
        result = repair_and_load_json('')
        assert result is None

    def test_non_json_text(self):
        result = repair_and_load_json('not json at all', default={})
        assert result == {}
if __name__ == "__main__":
import inspect
tests = []
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj) and name.startswith('Test'):
for method_name, method in inspect.getmembers(obj):
if method_name.startswith('test_'):
tests.append((f"{name}.{method_name}", method))
for name, test in tests:
print(f"Running {name}...")
test(None)
print(" PASS")
print(f"\n{len(tests)} tests passed.")

82
tests/test_reader_guided_reranking.py Normal file

@@ -0,0 +1,82 @@
"""Tests for Reader-Guided Reranking (RIDER) — issue #666."""
import pytest
from unittest.mock import MagicMock, patch
from agent.rider import RIDER, rerank_passages, is_rider_available
class TestRIDERClass:
def test_init(self):
rider = RIDER()
assert rider._auxiliary_task == "rider"
def test_rerank_empty_passages(self):
rider = RIDER()
result = rider.rerank([], "test query")
assert result == []
def test_rerank_fewer_than_top_n(self):
"""If passages <= top_n, return all (with scores if possible)."""
rider = RIDER()
passages = [{"content": "test content", "session_id": "s1"}]
result = rider.rerank(passages, "test query", top_n=3)
assert len(result) == 1
@patch("agent.rider.RIDER_ENABLED", False)
def test_rerank_disabled(self):
"""When disabled, return original order."""
rider = RIDER()
passages = [
{"content": f"content {i}", "session_id": f"s{i}"}
for i in range(5)
]
result = rider.rerank(passages, "test query", top_n=3)
assert result == passages[:3]
class TestConfidenceCalculation:
@pytest.fixture
def rider(self):
return RIDER()
def test_short_specific_answer(self, rider):
score = rider._calculate_confidence("Paris", "What is the capital of France?", "Paris is the capital of France.")
assert score > 0.5
def test_hedged_answer(self, rider):
score = rider._calculate_confidence(
"Maybe it could be Paris, but I'm not sure",
"What is the capital of France?",
"Paris is the capital.",
)
assert score < 0.5
def test_passage_grounding(self, rider):
score = rider._calculate_confidence(
"The system uses SQLite for storage",
"What database is used?",
"The system uses SQLite for persistent storage with FTS5 indexing.",
)
assert score > 0.5
def test_refusal_penalty(self, rider):
score = rider._calculate_confidence(
"I cannot answer this from the given context",
"What is X?",
"Some unrelated content",
)
assert score < 0.5
class TestRerankPassages:
def test_convenience_function(self):
"""Test the module-level convenience function."""
passages = [{"content": "test", "session_id": "s1"}]
result = rerank_passages(passages, "query", top_n=1)
assert len(result) == 1
class TestIsRiderAvailable:
def test_returns_bool(self):
result = is_rider_available()
assert isinstance(result, bool)

tools/session_search_tool.py

@@ -394,6 +394,23 @@ def session_search(
        if len(seen_sessions) >= limit:
            break

    # RIDER: Reader-guided reranking — sort sessions by LLM answerability.
    # This bridges the R@5 vs E2E accuracy gap by prioritizing passages
    # the LLM can actually answer from, not just keyword matches.
    try:
        from agent.rider import rerank_passages, is_rider_available

        if is_rider_available() and len(seen_sessions) > 1:
            rider_passages = [
                {"session_id": sid, "content": info.get("snippet", ""), "rank": i + 1}
                for i, (sid, info) in enumerate(seen_sessions.items())
            ]
            reranked = rerank_passages(rider_passages, query, top_n=len(rider_passages))
            # Reorder seen_sessions by RIDER score
            reranked_sids = [p["session_id"] for p in reranked]
            seen_sessions = {sid: seen_sessions[sid] for sid in reranked_sids if sid in seen_sessions}
    except Exception as e:
        logging.debug("RIDER reranking skipped: %s", e)

    # Prepare all sessions for parallel summarization
    tasks = []
    for session_id, match_info in seen_sessions.items():
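
The reorder step above, shown in isolation as a minimal sketch with hypothetical data. The dict comprehension keeps each session's metadata while adopting RIDER's ordering:

seen_sessions = {"s1": {"snippet": "a"}, "s2": {"snippet": "b"}}
reranked = [
    {"session_id": "s2", "rider_score": 0.8},
    {"session_id": "s1", "rider_score": 0.3},
]
reranked_sids = [p["session_id"] for p in reranked]
seen_sessions = {sid: seen_sessions[sid] for sid in reranked_sids if sid in seen_sessions}
print(list(seen_sessions))  # ['s2', 's1']: highest-scoring session first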