Compare commits

...

1 Commit

Author SHA1 Message Date
Alexander Whitestone
2844bd15f9 feat: context-faithful prompting - make LLMs use retrieved context (#667)
Some checks failed
Contributor Attribution Check / check-attribution (pull_request) Failing after 32s
Docker Build and Publish / build-and-push (pull_request) Has been skipped
Nix / nix (ubuntu-latest) (pull_request) Failing after 4s
Supply Chain Audit / Scan PR for supply chain risks (pull_request) Successful in 34s
Tests / e2e (pull_request) Successful in 2m0s
Tests / test (pull_request) Failing after 33m21s
Nix / nix (macos-latest) (pull_request) Has been cancelled
LLMs ignore retrieved context and rely on parametric knowledge.
Adding context can even destroy previously correct answers.

New agent/context_faithful.py:
- build_context_block(): format retrieved passages for injection
- wrap_with_context_faithful_prompt(): full RAG template with
  context-first structure, citation requirement, confidence rating
- extract_citations(): parse [Passage N] citations from responses
- extract_confidence(): parse HIGH/MEDIUM/LOW ratings
- detect_context_ignoring(): check if model likely ignored context
- CONTEXT_FAITHFUL_SYSTEM_SUFFIX: system prompt rules
- CONTEXT_FAITHFUL_RAG_TEMPLATE: structured RAG prompt

Integration:
- CONTEXT_FAITHFUL_GUIDANCE in agent/prompt_builder.py
- Injected into system prompt when retrieval tools available
  (session_search, read_file, web_extract, browser) in run_agent.py

Tests: tests/test_context_faithful_prompting.py (133 lines)
Docs: docs/context-faithful-prompting.md

Closes #667
2026-04-14 18:48:51 -04:00
5 changed files with 419 additions and 0 deletions

agent/context_faithful.py Normal file

@@ -0,0 +1,214 @@
"""Context-Faithful Prompting — Make LLMs use retrieved context.
Problem: LLMs ignore retrieved context and rely on parametric knowledge.
Adding context can even DESTROY previously correct answers (distraction effect).
Solution: Structured prompts that force the model to:
1. Read context BEFORE answering
2. Cite which passage was used
3. Admit when context doesn't contain the answer
4. Rate confidence in context usage
Usage:
from agent.context_faithful import (
wrap_with_context_faithful_prompt,
build_context_block,
CONTEXT_FAITHFUL_SYSTEM_SUFFIX,
)
"""
from __future__ import annotations
import re
from typing import Optional
# ---------------------------------------------------------------------------
# Prompt templates
# ---------------------------------------------------------------------------
CONTEXT_FAITHFUL_SYSTEM_SUFFIX = (
    "\n\n"
    "CONTEXT-FAITHFUL ANSWERING:\n"
    "When answering questions, you MUST use the provided context. Follow these rules strictly:\n"
    "1. Read ALL provided context passages before answering.\n"
    "2. Base your answer ONLY on information found in the context.\n"
    "3. If the context does not contain enough information to answer fully, "
    "say: \"I don't have enough information in the provided context to answer that completely.\"\n"
    "4. Do NOT use your training data if the context contradicts it — trust the context.\n"
    "5. Cite which passage you used: [Context Passage N] or [Retrieved from: source].\n"
    "6. Rate your confidence: HIGH (directly stated in context), "
    "MEDIUM (inferred from context), LOW (partially available).\n"
)

CONTEXT_FAITHFUL_USER_PREFIX = (
    "Answer the following question using ONLY the provided context. "
    "Cite which passage supports your answer. "
    "If the context doesn't contain the answer, say so explicitly.\n\n"
)

CONTEXT_FAITHFUL_RAG_TEMPLATE = """{context_block}
---
Based ONLY on the context above, answer the following question:
{question}
Instructions:
- Use information from the context passages above
- Cite which passage (e.g., [Passage 1]) supports your answer
- If the context doesn't contain the answer, say "Not found in provided context"
- Rate your confidence: HIGH / MEDIUM / LOW
"""

def build_context_block(
    passages: list[dict],
    max_passages: int = 10,
    source_label: str = "Retrieved Context",
) -> str:
    """Build a formatted context block from retrieved passages.

    Args:
        passages: List of dicts with 'content' and optional 'source', 'score' keys.
        max_passages: Maximum number of passages to include.
        source_label: Label for the context block header.

    Returns:
        Formatted context string ready for prompt injection.
    """
    if not passages:
        return f"[{source_label}: No passages retrieved]"
    lines = [f"## {source_label} ({len(passages[:max_passages])} passages)\n"]
    for i, passage in enumerate(passages[:max_passages], 1):
        content = passage.get("content", "").strip()
        source = passage.get("source", "")
        score = passage.get("score")
        header = f"### Passage {i}"
        if source:
            header += f" [Source: {source}]"
        # Only format numeric scores; a non-numeric value would crash ':.2f'.
        if isinstance(score, (int, float)):
            header += f" (relevance: {score:.2f})"
        lines.append(header)
        lines.append(content)
        lines.append("")
    return "\n".join(lines)

def wrap_with_context_faithful_prompt(
    user_message: str,
    passages: list[dict],
    question: Optional[str] = None,
    use_rag_template: bool = True,
) -> tuple[str, str]:
    """Wrap a user message with context-faithful prompting.

    Args:
        user_message: The original user message/question.
        passages: Retrieved context passages.
        question: Optional explicit question (defaults to user_message).
        use_rag_template: If True, use structured RAG template. If False,
            prepend context block with faithfulness prefix.

    Returns:
        Tuple of (system_suffix, wrapped_user_message).
        system_suffix: Additional system prompt text for context faithfulness.
        wrapped_user_message: User message with context injected.
    """
    question = question or user_message
    context_block = build_context_block(passages)
    if use_rag_template:
        wrapped = CONTEXT_FAITHFUL_RAG_TEMPLATE.format(
            context_block=context_block,
            question=question,
        )
    else:
        wrapped = (
            f"{CONTEXT_FAITHFUL_USER_PREFIX}\n"
            f"{context_block}\n\n"
            f"Question: {question}"
        )
    return CONTEXT_FAITHFUL_SYSTEM_SUFFIX, wrapped

def extract_citations(response: str) -> list[dict]:
    """Extract citations from a model response.

    Looks for patterns like [Passage N], [Context Passage N], [Source: ...].
    """
    citations = []
    # [Passage N] or [Context Passage N]
    for m in re.finditer(r'\[(?:Context )?Passage (\d+)\]', response, re.IGNORECASE):
        citations.append({"type": "passage", "number": int(m.group(1)), "span": m.group(0)})
    # [Retrieved from: source] or [Source: name]
    for m in re.finditer(r'\[(?:Retrieved from|Source):\s*([^\]]+)\]', response, re.IGNORECASE):
        citations.append({"type": "source", "source": m.group(1).strip(), "span": m.group(0)})
    # [Context: ...]
    for m in re.finditer(r'\[Context:\s*([^\]]+)\]', response, re.IGNORECASE):
        citations.append({"type": "context", "reference": m.group(1).strip(), "span": m.group(0)})
    return citations

def extract_confidence(response: str) -> Optional[str]:
    """Extract confidence rating from a model response.

    Looks for HIGH, MEDIUM, LOW at the end of responses or in explicit ratings.
    """
    # Look for explicit confidence rating
    m = re.search(r'(?:confidence|Confidence):\s*(HIGH|MEDIUM|LOW)', response, re.IGNORECASE)
    if m:
        return m.group(1).upper()
    # Look for standalone rating at end of response
    m = re.search(r'\b(HIGH|MEDIUM|LOW)\s*(?:confidence)?\.?\s*$', response, re.IGNORECASE)
    if m:
        return m.group(1).upper()
    return None

def detect_context_ignoring(response: str, context_block: str) -> dict:
    """Detect if the model may have ignored the provided context.

    Returns a dict with:
    - likely_ignored: bool
    - has_citation: bool
    - has_idk: bool (said "I don't know")
    - confidence: str or None
    - details: str
    """
    has_citation = bool(re.search(r'\[(?:Context )?Passage \d+\]|\[Source:', response, re.IGNORECASE))
    has_idk = bool(re.search(r"(?:don't|do not|does not|doesn't) have enough|not found in|(?:doesn't|does not) contain|no (?:available )?information|not (?:available|found) in (?:the )?provided", response, re.IGNORECASE))
    confidence = extract_confidence(response)
    # Likely ignored if no citation AND no "I don't know" AND response is substantive
    is_substantive = len(response.strip()) > 50
    likely_ignored = is_substantive and not has_citation and not has_idk
    details = []
    if likely_ignored:
        details.append("Response is substantive but contains no citations — may have used parametric knowledge")
    if not has_citation and is_substantive:
        details.append("No passage citations found")
    if confidence is None and is_substantive:
        details.append("No confidence rating found")
    return {
        "likely_ignored": likely_ignored,
        "has_citation": has_citation,
        "has_idk": has_idk,
        "confidence": confidence,
        "details": "; ".join(details) if details else "Looks good",
    }

agent/prompt_builder.py

@@ -161,6 +161,17 @@ SESSION_SEARCH_GUIDANCE = (
"asking them to repeat themselves."
)
CONTEXT_FAITHFUL_GUIDANCE = (
"When you retrieve context (via session_search, file read, web extract, or "
"any other tool), you MUST use that context in your answer. Do NOT rely on "
"your training data when retrieved context is available. Rules:\n"
"- Read ALL retrieved passages before answering.\n"
"- Base your answer ONLY on the retrieved context.\n"
"- If the context doesn't contain the answer, say so explicitly.\n"
"- Cite which passage you used: [Context Passage N].\n"
"- Trust retrieved context over your parametric knowledge.\n"
)
SKILLS_GUIDANCE = (
"After completing a complex task (5+ tool calls), fixing a tricky error, "
"or discovering a non-trivial workflow, save the approach as a "

docs/context-faithful-prompting.md Normal file

@@ -0,0 +1,56 @@
# Context-Faithful Prompting

Make LLMs actually use retrieved context instead of relying on parametric knowledge.

## The Problem

LLMs trained on large corpora develop strong parametric knowledge. When you retrieve context and inject it into the prompt, the model may:

1. **Ignore it** -- answer from training data instead
2. **Be distracted** -- context actually degrades previously correct answers
3. **Blend it incorrectly** -- mix retrieved facts with parametric hallucination

Research reports gaps of 5-15% between retrieval recall (R@5) and end-to-end accuracy: the right answer is present in the retrieved context, but the model doesn't use it.

## The Solution

Context-faithful prompting forces the model to:

1. **Read context before answering** -- context-first structure
2. **Cite which passage** -- [Passage N] references
3. **Admit ignorance** -- "I don't have enough information in the provided context"
4. **Rate confidence** -- HIGH / MEDIUM / LOW

## Module: agent/context_faithful.py
```python
from agent.context_faithful import (
build_context_block,
wrap_with_context_faithful_prompt,
extract_citations,
extract_confidence,
detect_context_ignoring,
)
```
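
For a quick feel of the input shape: passages are plain dicts with a `content` key and optional `source` and `score` keys. A minimal sketch (the passage text here is illustrative):

```python
from agent.context_faithful import build_context_block

# 'content' is required; 'source' and 'score' are rendered into the header.
passages = [
    {"content": "Timmy runs on xiaomi/mimo-v2-pro.", "source": "01-hardware.md", "score": 0.92},
    {"content": "A second, lower-ranked passage."},
]
print(build_context_block(passages))
# ## Retrieved Context (2 passages)
#
# ### Passage 1 [Source: 01-hardware.md] (relevance: 0.92)
# Timmy runs on xiaomi/mimo-v2-pro.
# ...
```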
## System Prompt Integration

CONTEXT_FAITHFUL_GUIDANCE is injected into the system prompt when any retrieval tool is available (session_search, read_file, web_extract, browser). See run_agent.py.
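
For reference, the gating check in run_agent.py (excerpted from this PR's diff) boils down to a set intersection against the agent's registered tool names:

```python
# From run_agent.py: append the guidance only when a retrieval-capable
# tool is actually registered for this agent.
_retrieval_tools = {"session_search", "read_file", "web_extract", "browser"}
if _retrieval_tools & set(self.valid_tool_names):
    tool_guidance.append(CONTEXT_FAITHFUL_GUIDANCE)
```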
## Usage
```python
system_suffix, user_msg = wrap_with_context_faithful_prompt(
user_message="What model does Timmy use?",
passages=[{"content": "Timmy runs on xiaomi/mimo-v2-pro.", "source": "01-hardware.md"}],
)
```
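
`system_suffix` is meant to be appended to the existing system prompt, and `user_msg` replaces the raw question. The extractor helpers can then check the reply; for example:

```python
from agent.context_faithful import extract_citations, extract_confidence

# Suppose the model answered faithfully:
response = "Timmy runs on xiaomi/mimo-v2-pro [Passage 1]. Confidence: HIGH"
extract_citations(response)   # [{'type': 'passage', 'number': 1, 'span': '[Passage 1]'}]
extract_confidence(response)  # 'HIGH'
```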
## Response Analysis
```python
result = detect_context_ignoring(model_response, context_block)
# result["likely_ignored"] -- True if substantive response without citations
# result["has_citation"] -- True if [Passage N] found
# result["has_idk"] -- True if model admitted ignorance
```
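
One way to act on the analysis, shown as a sketch rather than module code (`ask_model` is a hypothetical `(system, user) -> str` completion callable): retry once with a stricter reminder when the first reply looks context-ignoring.

```python
from agent.context_faithful import (
    build_context_block,
    detect_context_ignoring,
    wrap_with_context_faithful_prompt,
)

def answer_with_retry(ask_model, question: str, passages: list[dict]) -> str:
    """Sketch: re-ask once if the reply has no citation and no 'not in
    context' admission. `ask_model(system, user) -> str` is hypothetical."""
    system_suffix, user_msg = wrap_with_context_faithful_prompt(question, passages)
    response = ask_model(system_suffix, user_msg)
    check = detect_context_ignoring(response, build_context_block(passages))
    if check["likely_ignored"]:
        # Retry once, restating the citation requirement from the template.
        reminder = "\n\nReminder: cite [Passage N] or say \"Not found in provided context\"."
        response = ask_model(system_suffix, user_msg + reminder)
    return response
```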

run_agent.py

@@ -81,6 +81,7 @@ from agent.error_classifier import classify_api_error, FailoverReason
from agent.prompt_builder import (
    DEFAULT_AGENT_IDENTITY, PLATFORM_HINTS,
    MEMORY_GUIDANCE, SESSION_SEARCH_GUIDANCE, SKILLS_GUIDANCE,
    CONTEXT_FAITHFUL_GUIDANCE,
    build_nous_subscription_prompt,
)
from agent.model_metadata import (
@@ -3155,6 +3156,10 @@ class AIAgent:
            tool_guidance.append(SESSION_SEARCH_GUIDANCE)
        if "skill_manage" in self.valid_tool_names:
            tool_guidance.append(SKILLS_GUIDANCE)
        # Context-faithful prompting: inject when any retrieval tool is available
        _retrieval_tools = {"session_search", "read_file", "web_extract", "browser"}
        if _retrieval_tools & set(self.valid_tool_names):
            tool_guidance.append(CONTEXT_FAITHFUL_GUIDANCE)
        if tool_guidance:
            prompt_parts.append(" ".join(tool_guidance))

tests/test_context_faithful_prompting.py Normal file

@@ -0,0 +1,133 @@
"""Tests for context-faithful prompting module."""
import pytest
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
from agent.context_faithful import (
build_context_block,
wrap_with_context_faithful_prompt,
extract_citations,
extract_confidence,
detect_context_ignoring,
CONTEXT_FAITHFUL_SYSTEM_SUFFIX,
CONTEXT_FAITHFUL_RAG_TEMPLATE,
)
class TestBuildContextBlock:
def test_empty_passages(self):
result = build_context_block([])
assert "No passages retrieved" in result
def test_single_passage(self):
passages = [{"content": "The answer is 42."}]
result = build_context_block(passages)
assert "Passage 1" in result
assert "The answer is 42." in result
def test_passage_with_source(self):
passages = [{"content": "Data.", "source": "config.yaml"}]
result = build_context_block(passages)
assert "Source: config.yaml" in result
def test_passage_with_score(self):
passages = [{"content": "Data.", "score": 0.95}]
result = build_context_block(passages)
assert "0.95" in result
def test_max_passages_limit(self):
passages = [{"content": f"Passage {i}"} for i in range(20)]
result = build_context_block(passages, max_passages=5)
assert "Passage 5" in result
assert "Passage 6" not in result
assert "5 passages" in result
class TestWrapWithContextFaithfulPrompt:
def test_rag_template(self):
passages = [{"content": "Timmy runs on mimo-v2-pro."}]
system_suffix, user_msg = wrap_with_context_faithful_prompt(
"What model does Timmy use?", passages
)
assert "CONTEXT-FAITHFUL" in system_suffix
assert "Passage 1" in user_msg
assert "mimo-v2-pro" in user_msg
assert "Cite which passage" in user_msg
def test_non_rag_template(self):
passages = [{"content": "Data."}]
system_suffix, user_msg = wrap_with_context_faithful_prompt(
"Question?", passages, use_rag_template=False
)
assert "Question: Question?" in user_msg
assert "ONLY the provided context" in user_msg
class TestExtractCitations:
def test_passage_citation(self):
resp = "The answer is 42 [Passage 1]."
cits = extract_citations(resp)
assert len(cits) == 1
assert cits[0]["number"] == 1
def test_context_passage_citation(self):
resp = "See [Context Passage 3] for details."
cits = extract_citations(resp)
assert len(cits) == 1
assert cits[0]["number"] == 3
def test_source_citation(self):
resp = "Per [Retrieved from: config.yaml]..."
cits = extract_citations(resp)
assert len(cits) == 1
assert cits[0]["source"] == "config.yaml"
def test_no_citations(self):
resp = "The answer is 42."
cits = extract_citations(resp)
assert len(cits) == 0
def test_multiple_citations(self):
resp = "[Passage 1] says X. [Passage 3] says Y."
cits = extract_citations(resp)
assert len(cits) == 2
class TestExtractConfidence:
def test_explicit_confidence(self):
resp = "The answer is 42. Confidence: HIGH"
assert extract_confidence(resp) == "HIGH"
def test_standalone_medium(self):
resp = "Based on the context. MEDIUM."
assert extract_confidence(resp) == "MEDIUM"
def test_no_confidence(self):
resp = "The answer is 42."
assert extract_confidence(resp) is None
class TestDetectContextIgnoring:
def test_ignoring_detected(self):
resp = "The capital of France is Paris. This is because France is a country in Europe, and Paris has been its capital for centuries."
context = "Passage 1: Timmy runs on mimo-v2-pro."
result = detect_context_ignoring(resp, context)
assert result["likely_ignored"] is True
assert result["has_citation"] is False
def test_faithful_usage(self):
resp = "According to [Passage 1], Timmy runs on mimo-v2-pro."
context = "Passage 1: Timmy runs on mimo-v2-pro."
result = detect_context_ignoring(resp, context)
assert result["likely_ignored"] is False
assert result["has_citation"] is True
def test_idk_response(self):
resp = "I don't have enough information in the provided context."
context = "Passage 1: Unrelated data."
result = detect_context_ignoring(resp, context)
assert result["likely_ignored"] is False
assert result["has_idk"] is True