diff --git a/agent/evolution/memory_compressor.py b/agent/evolution/memory_compressor.py
new file mode 100644
index 000000000..f7faeb37b
--- /dev/null
+++ b/agent/evolution/memory_compressor.py
@@ -0,0 +1,71 @@
+"""Phase 7: Long-Context Memory Compression.
+
+Compresses years of session transcripts into a hierarchical, searchable "Life Log".
+"""
+
+import json
+import logging
+from typing import Any, Dict
+
+from agent.gemini_adapter import GeminiAdapter
+from agent.symbolic_memory import SymbolicMemory
+
+logger = logging.getLogger(__name__)
+
+
+class MemoryCompressor:
+    """Compresses long session transcripts into a hierarchical "Life Log".
+
+    Delegates recursive summarization to the Gemini adapter and ingests the
+    extracted symbolic triples into the knowledge-graph memory.
+    """
+
+    def __init__(self) -> None:
+        self.adapter = GeminiAdapter()
+        self.symbolic = SymbolicMemory()
+
+    def compress_transcripts(self, transcripts: str) -> Dict[str, Any]:
+        """Compress massive transcripts into a hierarchical memory map.
+
+        Args:
+            transcripts: Raw session transcript text (may be very large).
+
+        Returns:
+            Parsed memory map with "summary", "hierarchy" and "triples" keys;
+            a missing "triples" key is tolerated and treated as empty.
+
+        Raises:
+            json.JSONDecodeError: If the model response is not valid JSON.
+        """
+        logger.info("Compressing transcripts into hierarchical memory map.")
+
+        prompt = f"""
+The following are session transcripts spanning a long period:
+{transcripts}
+
+Please perform a deep, recursive summarization of these transcripts.
+Identify key themes, major decisions, evolving preferences, and significant events.
+Create a hierarchical 'Life Log' map and extract high-fidelity symbolic triples for the Knowledge Graph.
+
+Format the output as JSON:
+{{
+    "summary": "...",
+    "hierarchy": {{...}},
+    "triples": [{{"s": "subject", "p": "predicate", "o": "object"}}]
+}}
+"""
+        # JSON response mode plus a strict system instruction keeps the
+        # model output machine-parseable.
+        result = self.adapter.generate(
+            model="gemini-3.1-pro-preview",
+            prompt=prompt,
+            system_instruction="You are Timmy's Memory Compressor. Your goal is to turn massive context into structured, searchable wisdom.",
+            thinking=True,
+            response_mime_type="application/json",
+        )
+
+        memory_data = json.loads(result["text"])
+        # Tolerate a response without "triples" instead of raising KeyError.
+        triples = memory_data.get("triples", [])
+        self.symbolic.ingest_text(json.dumps(triples))
+        logger.info("Ingested %d new memory triples.", len(triples))
+        return memory_data