diff --git a/agent/evolution/code_refactorer.py b/agent/evolution/code_refactorer.py
new file mode 100644
index 000000000..f36c09afe
--- /dev/null
+++ b/agent/evolution/code_refactorer.py
@@ -0,0 +1,60 @@
+"""Phase 9: Codebase-Wide Refactoring & Optimization.
+
+Performs a "Deep Audit" of the codebase to identify bottlenecks and vulnerabilities.
+"""
+
+import logging
+import json
+from typing import List, Dict, Any
+from agent.gemini_adapter import GeminiAdapter
+
+logger = logging.getLogger(__name__)
+
+class CodeRefactorer:
+    def __init__(self):
+        self.adapter = GeminiAdapter()
+
+    def audit_codebase(self, file_contents: Dict[str, str]) -> Dict[str, Any]:
+        """Performs a deep audit of the provided codebase files."""
+        logger.info(f"Auditing {len(file_contents)} files for refactoring and optimization.")
+
+        # Combine file contents for context
+        context = "\n".join([f"--- {path} ---\n{content}" for path, content in file_contents.items()])
+
+        prompt = f"""
+Codebase Context:
+{context}
+
+Please perform a 'Deep Audit' of this codebase.
+Identify:
+1. Performance bottlenecks (e.g., inefficient loops, redundant API calls).
+2. Security vulnerabilities (e.g., hardcoded keys, PII leaks, insecure defaults).
+3. Architectural debt (e.g., tight coupling, lack of modularity).
+
+Generate a set of 'Refactoring Patches' to address these issues.
+
+Format the output as JSON:
+{{
+    "audit_report": "...",
+    "vulnerabilities": [...],
+    "performance_issues": [...],
+    "patches": [
+        {{
+            "file": "...",
+            "description": "...",
+            "original_code": "...",
+            "replacement_code": "..."
+        }}
+    ]
+}}
+"""
+        result = self.adapter.generate(
+            model="gemini-3.1-pro-preview",
+            prompt=prompt,
+            system_instruction="You are Timmy's Code Refactorer. Your goal is to make the codebase as efficient, secure, and sovereign as possible.",
+            thinking=True,
+            response_mime_type="application/json"
+        )
+
+        audit_data = json.loads(result["text"])
+        return audit_data
diff --git a/agent/evolution/memory_compressor.py b/agent/evolution/memory_compressor.py
new file mode 100644
index 000000000..f7faeb37b
--- /dev/null
+++ b/agent/evolution/memory_compressor.py
@@ -0,0 +1,49 @@
+"""Phase 7: Long-Context Memory Compression.
+
+Compresses years of session transcripts into a hierarchical, searchable "Life Log".
+"""
+
+import logging
+import json
+from typing import List, Dict, Any
+from agent.gemini_adapter import GeminiAdapter
+from agent.symbolic_memory import SymbolicMemory
+
+logger = logging.getLogger(__name__)
+
+class MemoryCompressor:
+    def __init__(self):
+        self.adapter = GeminiAdapter()
+        self.symbolic = SymbolicMemory()
+
+    def compress_transcripts(self, transcripts: str) -> Dict[str, Any]:
+        """Compresses massive transcripts into a hierarchical memory map."""
+        logger.info("Compressing transcripts into hierarchical memory map.")
+
+        prompt = f"""
+The following are session transcripts spanning a long period:
+{transcripts}
+
+Please perform a deep, recursive summarization of these transcripts.
+Identify key themes, major decisions, evolving preferences, and significant events.
+Create a hierarchical 'Life Log' map and extract high-fidelity symbolic triples for the Knowledge Graph.
+
+Format the output as JSON:
+{{
+    "summary": "...",
+    "hierarchy": {{...}},
+    "triples": [{{"s": "subject", "p": "predicate", "o": "object"}}]
+}}
+"""
+        result = self.adapter.generate(
+            model="gemini-3.1-pro-preview",
+            prompt=prompt,
+            system_instruction="You are Timmy's Memory Compressor. Your goal is to turn massive context into structured, searchable wisdom.",
+            thinking=True,
+            response_mime_type="application/json"
+        )
+
+        memory_data = json.loads(result["text"])
+        self.symbolic.ingest_text(json.dumps(memory_data["triples"]))
+        logger.info(f"Ingested {len(memory_data['triples'])} new memory triples.")
+        return memory_data
diff --git a/agent/evolution/multilingual_expander.py b/agent/evolution/multilingual_expander.py
new file mode 100644
index 000000000..d7bb9c40e
--- /dev/null
+++ b/agent/evolution/multilingual_expander.py
@@ -0,0 +1,46 @@
+"""Phase 8: Multilingual Sovereign Expansion.
+
+Fine-tunes for high-fidelity reasoning in 50+ languages to ensure sovereignty is global.
+"""
+
+import logging
+import json
+from typing import List, Dict, Any
+from agent.gemini_adapter import GeminiAdapter
+
+logger = logging.getLogger(__name__)
+
+class MultilingualExpander:
+    def __init__(self):
+        self.adapter = GeminiAdapter()
+
+    def generate_multilingual_traces(self, language: str, concept: str) -> Dict[str, Any]:
+        """Generates synthetic reasoning traces in a specific language."""
+        logger.info(f"Generating multilingual traces for {language} on concept: {concept}")
+
+        prompt = f"""
+Concept: {concept}
+Language: {language}
+
+Please generate a high-fidelity reasoning trace in {language} that explores the concept of {concept} within Timmy's sovereign framework.
+Focus on translating the core principles of SOUL.md (sovereignty, service, honesty) accurately into the cultural and linguistic context of {language}.
+
+Format the output as JSON:
+{{
+    "language": "{language}",
+    "concept": "{concept}",
+    "reasoning_trace": "...",
+    "cultural_nuances": "...",
+    "translation_verification": "..."
+}}
+"""
+        result = self.adapter.generate(
+            model="gemini-3.1-pro-preview",
+            prompt=prompt,
+            system_instruction=f"You are Timmy's Multilingual Expander. Ensure the message of sovereignty is accurately translated into {language}.",
+            response_mime_type="application/json",
+            thinking=True
+        )
+
+        trace_data = json.loads(result["text"])
+        return trace_data