Compare commits
2 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 29925de52d | |||
| 80b18940c3 |
@@ -1,221 +0,0 @@
|
|||||||
"""
|
|
||||||
Session Compaction with Fact Extraction — #748
|
|
||||||
|
|
||||||
Before compressing a long conversation, extracts durable facts
|
|
||||||
(user preferences, corrections, project details) and saves them
|
|
||||||
to the fact store. Then compresses the conversation.
|
|
||||||
|
|
||||||
This ensures key information survives context limits.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
from agent.session_compaction import compact_session
|
|
||||||
|
|
||||||
# In the conversation loop, when context is near limit:
|
|
||||||
compact_session(messages, fact_store)
|
|
||||||
"""
|
|
||||||
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
from typing import Any, Dict, List, Optional, Tuple
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Fact Extraction Patterns
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
# Patterns that indicate durable facts worth preserving.
# Each entry is (regex, category). Every pattern has exactly one capture
# group; the captured text becomes the fact content. Callers apply these
# with re.IGNORECASE (see extract_facts_from_messages).
_FACT_PATTERNS = [
    # User preferences
    (r"(?:i prefer|i like|i always|my preference is|remember that i)\s+(.+?)(?:\.|$)", "user_pref"),
    # NOTE(review): [A-Z][a-z]+ is effectively case-insensitive here because
    # the caller passes re.IGNORECASE, so any single word matches — confirm
    # whether strict capitalization was intended.
    (r"(?:call me|my name is|i\'m)\s+([A-Z][a-z]+)", "user_name"),
    (r"(?:don\'t|do not|never)\s+(?:use|do|show|tell)\s+(.+?)(?:\.|$)", "user_constraint"),

    # Corrections
    (r"(?:actually|no,?|correction:?)\s+(.+?)(?:\.|$)", "correction"),
    (r"(?:that\'s wrong|not correct|i meant)\s+(.+?)(?:\.|$)", "correction"),

    # Project facts
    (r"(?:the project|this repo|the codebase)\s+(?:is|has|uses|runs)\s+(.+?)(?:\.|$)", "project_fact"),
    (r"(?:we use|our stack is|deployed on)\s+(.+?)(?:\.|$)", "project_fact"),

    # Technical facts
    (r"(?:the server|the service|the endpoint)\s+(?:is|runs on|listens on)\s+(.+?)(?:\.|$)", "technical"),
    (r"(?:port|url|address|host)\s*(?::|is|=)\s*(.+?)(?:\.|$)", "technical"),
]
|
|
||||||
|
|
||||||
|
|
||||||
def extract_facts_from_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Scan conversation messages for durable facts.

    Walks user messages in order, applies every ``_FACT_PATTERNS`` regex
    case-insensitively, and keeps each captured snippet of 5–200 chars.
    A given (category, text) pair is emitted at most once.

    Returns list of fact dicts suitable for fact_store.
    """
    results: List[Dict[str, Any]] = []
    emitted = set()  # "<category>:<lowercased text>" keys already produced

    for message in messages:
        # Facts are only mined from what the user said.
        if message.get("role") != "user":
            continue

        text = message.get("content", "")
        if not isinstance(text, str) or len(text) < 10:
            continue  # non-string or too short to hold a real fact

        for pattern, category in _FACT_PATTERNS:
            for hit in re.findall(pattern, text, re.IGNORECASE):
                # findall yields tuples when a pattern has several groups;
                # keep only the first group in that case.
                if isinstance(hit, tuple):
                    hit = hit[0] if hit else ""
                snippet = hit.strip()

                if not (5 <= len(snippet) <= 200):
                    continue

                dedup_key = f"{category}:{snippet.lower()}"
                if dedup_key in emitted:
                    continue  # already captured this exact fact
                emitted.add(dedup_key)

                results.append({
                    "content": snippet,
                    "category": category,
                    "source": "session_compaction",
                    "trust": 0.7,  # Medium trust — extracted, not explicitly stated
                })

    return results
|
|
||||||
|
|
||||||
|
|
||||||
def extract_preferences(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Extract user preferences specifically.

    Scans user messages for preference phrasing ("I prefer ...",
    "my default is ...", "set it to ...") and returns fact dicts in the
    same shape as extract_facts_from_messages, at trust 0.8.

    Duplicate preference texts (case-insensitive) are returned once,
    matching the deduplication done by extract_facts_from_messages.
    """
    prefs: List[Dict[str, Any]] = []
    seen = set()  # lowercased preference texts already emitted

    pref_patterns = [
        r"(?:i prefer|i like|i want|use|always)\s+(.+?)(?:\.|$)",
        r"(?:my (?:preferred|favorite|default))\s+(?:is|are)\s+(.+?)(?:\.|$)",
        r"(?:set|configure|make)\s+(?:it to|the default to)\s+(.+?)(?:\.|$)",
    ]

    for msg in messages:
        if msg.get("role") != "user":
            continue
        content = msg.get("content", "")
        if not isinstance(content, str):
            continue

        for pattern in pref_patterns:
            for match in re.findall(pattern, content, re.IGNORECASE):
                # Keep the original 5 < len < 200 size filter.
                if not (isinstance(match, str) and 5 < len(match) < 200):
                    continue
                text = match.strip()
                dedup_key = text.lower()
                if dedup_key in seen:
                    continue  # same preference stated more than once
                seen.add(dedup_key)
                prefs.append({
                    "content": text,
                    "category": "user_pref",
                    "source": "session_compaction",
                    "trust": 0.8,
                })

    return prefs
|
|
||||||
|
|
||||||
|
|
||||||
def compact_session(
    messages: List[Dict[str, Any]],
    fact_store: Any = None,
    keep_recent: int = 10,
) -> Tuple[List[Dict[str, Any]], int]:
    """
    Compact a session by extracting facts and compressing old messages.

    Args:
        messages: Full conversation history
        fact_store: Optional fact_store instance for saving facts
        keep_recent: Number of recent messages to keep uncompressed

    Returns:
        Tuple of (compacted_messages, facts_saved_count)
    """
    # Too short to be worth compacting — return unchanged.
    if len(messages) <= keep_recent * 2:
        return messages, 0

    # Split into old (to compress) and recent (to keep)
    split_point = len(messages) - keep_recent
    old_messages = messages[:split_point]
    recent_messages = messages[split_point:]

    # Extract facts from old messages. The two extractors overlap (both
    # match "I prefer ..." phrasing), so deduplicate the merged list by
    # (category, text) to avoid saving/listing the same fact twice.
    facts = extract_facts_from_messages(old_messages)
    prefs = extract_preferences(old_messages)
    all_facts: List[Dict[str, Any]] = []
    seen = set()
    for fact in facts + prefs:
        key = (fact["category"], fact["content"].lower())
        if key not in seen:
            seen.add(key)
            all_facts.append(fact)

    # Save facts to store if available; duck-typed to support either a
    # .store(content=, category=, tags=) or a simpler .add(content) API.
    saved_count = 0
    if fact_store and all_facts:
        for fact in all_facts:
            try:
                if hasattr(fact_store, 'store'):
                    fact_store.store(
                        content=fact["content"],
                        category=fact["category"],
                        tags=["session_compaction"],
                    )
                    saved_count += 1
                elif hasattr(fact_store, 'add'):
                    fact_store.add(fact["content"])
                    saved_count += 1
            except Exception:
                pass  # Don't let fact saving block compaction

    # Create summary of old messages
    summary_parts = []
    if saved_count > 0:
        summary_parts.append(f"[Session compacted: {saved_count} facts extracted and saved]")

    # Count message types so the summary reflects what was dropped.
    user_msgs = sum(1 for m in old_messages if m.get("role") == "user")
    asst_msgs = sum(1 for m in old_messages if m.get("role") == "assistant")
    summary_parts.append(f"[Previous conversation: {user_msgs} user messages, {asst_msgs} assistant responses]")

    summary = " ".join(summary_parts)

    # Build compacted messages
    compacted = []

    # Add summary as system message
    if summary:
        compacted.append({
            "role": "system",
            "content": summary,
            "_compacted": True,
        })

    # Add extracted facts as system context so they survive the compaction
    # even when no fact_store is configured.
    if all_facts:
        facts_text = "Known facts from previous conversation:\n"
        for fact in all_facts[:20]:  # Limit to 20 facts
            facts_text += f"- [{fact['category']}] {fact['content']}\n"

        compacted.append({
            "role": "system",
            "content": facts_text,
            "_extracted_facts": True,
        })

    # Add recent messages verbatim.
    compacted.extend(recent_messages)

    return compacted, saved_count
|
|
||||||
|
|
||||||
|
|
||||||
def should_compact(messages: List[Dict[str, Any]], max_tokens: int = 80000) -> bool:
    """
    Determine if compaction is needed based on message count/length.

    Simple heuristic: compact only when the conversation is long (50+
    messages) AND its estimated token count crosses 80% of the budget.
    """
    # Short sessions are never compacted, regardless of content size.
    if len(messages) < 50:
        return False

    # Rough token estimate: ~4 characters per token.
    char_count = 0
    for msg in messages:
        char_count += len(str(msg.get("content", "")))

    # Trigger once we cross 80% of the token budget.
    return char_count // 4 > max_tokens * 0.8
|
|
||||||
@@ -72,6 +72,12 @@ def cron_list(show_all: bool = False):
|
|||||||
deliver = [deliver]
|
deliver = [deliver]
|
||||||
deliver_str = ", ".join(deliver)
|
deliver_str = ", ".join(deliver)
|
||||||
|
|
||||||
|
model = job.get("model")
|
||||||
|
provider = job.get("provider")
|
||||||
|
model_str = ""
|
||||||
|
if model:
|
||||||
|
model_str = f" @ {provider}/{model}" if provider else f" @ {model}"
|
||||||
|
|
||||||
skills = job.get("skills") or ([job["skill"]] if job.get("skill") else [])
|
skills = job.get("skills") or ([job["skill"]] if job.get("skill") else [])
|
||||||
if state == "paused":
|
if state == "paused":
|
||||||
status = color("[paused]", Colors.YELLOW)
|
status = color("[paused]", Colors.YELLOW)
|
||||||
@@ -168,6 +174,8 @@ def cron_create(args):
|
|||||||
skill=getattr(args, "skill", None),
|
skill=getattr(args, "skill", None),
|
||||||
skills=_normalize_skills(getattr(args, "skill", None), getattr(args, "skills", None)),
|
skills=_normalize_skills(getattr(args, "skill", None), getattr(args, "skills", None)),
|
||||||
script=getattr(args, "script", None),
|
script=getattr(args, "script", None),
|
||||||
|
model=getattr(args, "model", None),
|
||||||
|
provider=getattr(args, "provider", None),
|
||||||
)
|
)
|
||||||
if not result.get("success"):
|
if not result.get("success"):
|
||||||
print(color(f"Failed to create job: {result.get('error', 'unknown error')}", Colors.RED))
|
print(color(f"Failed to create job: {result.get('error', 'unknown error')}", Colors.RED))
|
||||||
@@ -180,6 +188,10 @@ def cron_create(args):
|
|||||||
job_data = result.get("job", {})
|
job_data = result.get("job", {})
|
||||||
if job_data.get("script"):
|
if job_data.get("script"):
|
||||||
print(f" Script: {job_data['script']}")
|
print(f" Script: {job_data['script']}")
|
||||||
|
if job_data.get("model"):
|
||||||
|
provider = job_data.get("provider", "")
|
||||||
|
model_str = f"{provider}/{job_data['model']}" if provider else job_data["model"]
|
||||||
|
print(f" Model: {model_str}")
|
||||||
print(f" Next run: {result['next_run_at']}")
|
print(f" Next run: {result['next_run_at']}")
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
@@ -217,6 +229,8 @@ def cron_edit(args):
|
|||||||
deliver=getattr(args, "deliver", None),
|
deliver=getattr(args, "deliver", None),
|
||||||
repeat=getattr(args, "repeat", None),
|
repeat=getattr(args, "repeat", None),
|
||||||
skills=final_skills,
|
skills=final_skills,
|
||||||
|
model=getattr(args, "model", None),
|
||||||
|
provider=getattr(args, "provider", None),
|
||||||
script=getattr(args, "script", None),
|
script=getattr(args, "script", None),
|
||||||
)
|
)
|
||||||
if not result.get("success"):
|
if not result.get("success"):
|
||||||
|
|||||||
73
tests/test_cron_model_preservation.py
Normal file
73
tests/test_cron_model_preservation.py
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
"""Tests for cron model/provider config preservation (#222)."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import pytest
|
||||||
|
from unittest.mock import patch, MagicMock
|
||||||
|
|
||||||
|
|
||||||
|
def test_create_job_preserves_model_and_provider():
    """create_job should store model and provider in the job dict."""
    import os
    import tempfile
    from cron.jobs import create_job, load_jobs, save_jobs

    # Seed an empty jobs file on disk.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as handle:
        json.dump([], handle)
        jobs_path = handle.name

    try:
        with patch("cron.jobs._JOBS_FILE", jobs_path):
            created = create_job(
                schedule="0 * * * *",
                prompt="test prompt",
                model="xiaomi/mimo-v2-pro",
                provider="nous",
            )
            assert created["model"] == "xiaomi/mimo-v2-pro"
            assert created["provider"] == "nous"

            # Round-trip through the on-disk store as well.
            persisted = load_jobs()
            assert persisted[0]["model"] == "xiaomi/mimo-v2-pro"
            assert persisted[0]["provider"] == "nous"
    finally:
        os.unlink(jobs_path)
|
||||||
|
|
||||||
|
|
||||||
|
def test_update_job_preserves_model():
    """update_job should preserve model/provider when updating other fields."""
    import os
    import tempfile
    from cron.jobs import create_job, update_job

    # Use a unique temp path instead of a fixed /tmp name: portable, and
    # immune to collisions between parallel test runs. Clean up in finally
    # so a failing assertion cannot leak state into later runs.
    fd, jobs_path = tempfile.mkstemp(suffix=".json")
    os.close(fd)
    os.unlink(jobs_path)  # let create_job create the file itself

    try:
        with patch("cron.jobs._JOBS_FILE", jobs_path):
            job = create_job(
                schedule="0 * * * *",
                prompt="test",
                model="xiaomi/mimo-v2-pro",
                provider="nous",
            )
            # Update prompt — model should be preserved
            updated = update_job(job["id"], {"prompt": "new prompt"})
            assert updated["model"] == "xiaomi/mimo-v2-pro"
            assert updated["provider"] == "nous"
            assert updated["prompt"] == "new prompt"
    finally:
        if os.path.exists(jobs_path):
            os.unlink(jobs_path)
|
||||||
|
|
||||||
|
|
||||||
|
def test_create_job_without_model_is_none():
    """create_job without model/provider should store None."""
    import os
    import tempfile
    from cron.jobs import create_job

    # Unique temp path instead of a fixed /tmp name; cleanup happens in
    # finally so a failed assertion cannot leave the file behind.
    fd, jobs_path = tempfile.mkstemp(suffix=".json")
    os.close(fd)
    os.unlink(jobs_path)  # let create_job create the file itself

    try:
        with patch("cron.jobs._JOBS_FILE", jobs_path):
            job = create_job(schedule="0 * * * *", prompt="test")
            assert job["model"] is None
            assert job["provider"] is None
    finally:
        if os.path.exists(jobs_path):
            os.unlink(jobs_path)
|
||||||
@@ -1,84 +0,0 @@
|
|||||||
"""Tests for session compaction with fact extraction (#748)."""
|
|
||||||
|
|
||||||
import sys
|
|
||||||
from pathlib import Path
|
|
||||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
||||||
|
|
||||||
from agent.session_compaction import (
|
|
||||||
extract_facts_from_messages,
|
|
||||||
extract_preferences,
|
|
||||||
compact_session,
|
|
||||||
should_compact,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def test_extract_preferences():
    """Preference phrasing in user messages yields at least one pref."""
    conversation = [
        {"role": "user", "content": "I prefer using Python for this"},
        {"role": "assistant", "content": "OK"},
        {"role": "user", "content": "Always use tabs, not spaces"},
    ]
    assert len(extract_preferences(conversation)) >= 1
|
|
||||||
|
|
||||||
|
|
||||||
def test_extract_facts():
    """Technical statements are extracted; too-short messages are skipped."""
    conversation = [
        {"role": "user", "content": "The server runs on port 8080"},
        {"role": "user", "content": "Actually, the port is 8081"},
        {"role": "user", "content": "Hello"},  # under the 10-char minimum
    ]
    extracted = extract_facts_from_messages(conversation)
    assert len(extracted) >= 1
    assert any("technical" in fact["category"] for fact in extracted)
|
|
||||||
|
|
||||||
|
|
||||||
def test_extract_deduplicates():
    """Repeating the same statement produces exactly one fact."""
    repeated = [{"role": "user", "content": "I prefer Python"}] * 2
    assert len(extract_facts_from_messages(repeated)) == 1
|
|
||||||
|
|
||||||
|
|
||||||
def test_compact_session():
    """Compaction shrinks a long conversation; with no fact_store, zero saves."""
    messages = []
    for i in range(30):
        messages.append({"role": "user", "content": f"Message {i}: I prefer Python for server {i}"})
        messages.append({"role": "assistant", "content": f"Response {i}"})

    compacted, count = compact_session(messages, keep_recent=10)
    assert len(compacted) < len(messages)
    # No fact_store was passed, so nothing can have been saved.
    # (The old `count >= 0` assertion was vacuously true.)
    assert count == 0
|
|
||||||
|
|
||||||
|
|
||||||
def test_compact_keeps_recent():
    """The last keep_recent messages survive compaction unchanged, in order."""
    messages = []
    for i in range(30):
        messages.append({"role": "user", "content": f"Message {i}"})
        messages.append({"role": "assistant", "content": f"Response {i}"})

    compacted, _ = compact_session(messages, keep_recent=10)
    # Should have summary (+ optional facts) + 10 recent
    assert len(compacted) >= 10
    # Stronger check than length alone: the recent tail is carried over verbatim.
    assert compacted[-10:] == messages[-10:]
|
|
||||||
|
|
||||||
|
|
||||||
def test_should_compact_short():
    """Fewer than 50 messages never triggers compaction."""
    tiny = [{"role": "user", "content": "hi"} for _ in range(10)]
    assert not should_compact(tiny)
|
|
||||||
|
|
||||||
|
|
||||||
def test_should_compact_long():
    """A conversation well past 80% of the token budget triggers compaction."""
    # BUGFIX: the old fixture (100 x 1000 chars) estimates only ~25k tokens
    # (chars // 4), below the 64k trigger (80% of the 80k default), so the
    # assertion failed. 100 x 4000 chars ~= 100k estimated tokens, which is
    # comfortably above the threshold.
    messages = [{"role": "user", "content": "x" * 4000} for _ in range(100)]
    assert should_compact(messages)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
    # Minimal runner so the file can be executed without pytest.
    for test_fn in (
        test_extract_preferences,
        test_extract_facts,
        test_extract_deduplicates,
        test_compact_session,
        test_compact_keeps_recent,
        test_should_compact_short,
        test_should_compact_long,
    ):
        print(f"Running {test_fn.__name__}...")
        test_fn()
        print(" PASS")
    print("\nAll tests passed.")
|
|
||||||
Reference in New Issue
Block a user