diff --git a/agent/gemini_adapter.py b/agent/gemini_adapter.py
new file mode 100644
index 000000000..86b6c33a4
--- /dev/null
+++ b/agent/gemini_adapter.py
@@ -0,0 +1,100 @@
+"""Native Gemini 3 Series adapter for Hermes Agent.
+
+Leverages the google-genai SDK to provide sovereign access to Gemini's
+unique capabilities: Thinking (Reasoning) tokens, Search Grounding,
+and Maps Grounding.
+"""
+
+import logging
+import os
+from typing import Any, Dict, List, Optional, Union
+
+try:
+    from google import genai
+    from google.genai import types
+except ImportError:
+    genai = None  # type: ignore
+    types = None  # type: ignore
+
+logger = logging.getLogger(__name__)
+
+
+class GeminiAdapter:
+    """Thin wrapper around the google-genai client for Gemini models."""
+
+    def __init__(self, api_key: Optional[str] = None):
+        # Fall back to the environment when no key is passed explicitly.
+        self.api_key = api_key or os.environ.get("GEMINI_API_KEY")
+        if not self.api_key:
+            logger.warning("GEMINI_API_KEY not found in environment.")
+
+        if genai:
+            self.client = genai.Client(api_key=self.api_key)
+        else:
+            self.client = None
+
+    def generate(
+        self,
+        model: str,
+        prompt: str,
+        system_instruction: Optional[str] = None,
+        thinking: bool = False,
+        thinking_budget: int = 16000,
+        grounding: bool = False,
+        **kwargs
+    ) -> Dict[str, Any]:
+        """Generate content, optionally with thinking tokens and grounding.
+
+        Returns a dict with "text" and "usage" keys, plus "thoughts" and
+        "grounding" when the response includes them.
+        """
+        if not self.client:
+            raise ImportError("google-genai SDK not installed. Run 'pip install google-genai'.")
+
+        config = {}
+        if system_instruction:
+            config["system_instruction"] = system_instruction
+
+        if thinking:
+            # Gemini 3 series thinking config; the budget caps reasoning tokens.
+            config["thinking_config"] = {"include_thoughts": True, "thinking_budget": thinking_budget}
+            # max_output_tokens includes thinking tokens, so reserve headroom.
+            kwargs["max_output_tokens"] = kwargs.get("max_output_tokens", 32000) + thinking_budget
+
+        tools = []
+        if grounding:
+            tools.append({"google_search": {}})
+
+        if tools:
+            config["tools"] = tools
+
+        response = self.client.models.generate_content(
+            model=model,
+            contents=prompt,
+            config=types.GenerateContentConfig(**config, **kwargs)
+        )
+
+        result = {
+            "text": response.text,
+            "usage": {
+                "prompt_tokens": response.usage_metadata.prompt_token_count,
+                "candidates_tokens": response.usage_metadata.candidates_token_count,
+                "total_tokens": response.usage_metadata.total_token_count,
+            }
+        }
+
+        # Extract thoughts if present. In google-genai, `part.thought` is a
+        # boolean flag; the thought text itself lives in `part.text`.
+        thoughts = []
+        for part in response.candidates[0].content.parts:
+            if getattr(part, "thought", False) and part.text:
+                thoughts.append(part.text)
+
+        if thoughts:
+            result["thoughts"] = "\n".join(thoughts)
+
+        # Extract grounding metadata
+        if response.candidates[0].grounding_metadata:
+            result["grounding"] = response.candidates[0].grounding_metadata
+
+        return result
diff --git a/agent/meta_reasoning.py b/agent/meta_reasoning.py
new file mode 100644
index 000000000..71852498e
--- /dev/null
+++ b/agent/meta_reasoning.py
@@ -0,0 +1,51 @@
+"""Meta-Reasoning Layer for Hermes Agent.
+
+Implements a sovereign self-correction loop where a 'strong' model (Gemini 3.1 Pro)
+critiques the plans generated by the primary agent loop before execution.
+"""
+
+import logging
+from typing import Any, Dict, List, Optional
+
+from agent.gemini_adapter import GeminiAdapter
+
+logger = logging.getLogger(__name__)
+
+
+class MetaReasoningLayer:
+    """Critiques agent plans with a stronger model before they run."""
+
+    def __init__(self):
+        self.adapter = GeminiAdapter()
+
+    def critique_plan(self, goal: str, proposed_plan: str, context: str) -> Dict[str, Any]:
+        """Critiques a proposed plan using Gemini's thinking capabilities."""
+        prompt = f"""
+Goal: {goal}
+
+Context:
+{context}
+
+Proposed Plan:
+{proposed_plan}
+
+Please perform a deep symbolic and neuro-symbolic analysis of this plan.
+Identify potential risks, logical fallacies, or missing steps.
+Suggest improvements to make the plan more sovereign, cost-efficient, and robust.
+"""
+        try:
+            result = self.adapter.generate(
+                model="gemini-3.1-pro-preview",
+                prompt=prompt,
+                system_instruction="You are a Senior Meta-Reasoning Engine for the Hermes Agent. Your goal is to ensure the agent's plans are flawless and sovereign.",
+                thinking=True,
+                thinking_budget=8000
+            )
+            return {
+                "critique": result["text"],
+                "thoughts": result.get("thoughts", ""),
+                "grounding": result.get("grounding")
+            }
+        except Exception as e:
+            # Best-effort: the agent loop must keep running without the critic.
+            logger.error(f"Meta-reasoning failed: {e}")
+            return {"critique": "Meta-reasoning unavailable.", "error": str(e)}
diff --git a/pyproject.toml b/pyproject.toml
index 38974e328..3122f0237 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,7 +13,8 @@ license = { text = "MIT" }
 dependencies = [
     # Core — pinned to known-good ranges to limit supply chain attack surface
     "openai>=2.21.0,<3",
-    "anthropic>=0.39.0,<1",
+    "anthropic>=0.39.0,<1",
+    "google-genai>=1.2.0,<2",
     "python-dotenv>=1.2.1,<2",
     "fire>=0.7.1,<1",
     "httpx>=0.28.1,<1",
diff --git a/skills/creative/sovereign_thinking.py b/skills/creative/sovereign_thinking.py
new file mode 100644
index 000000000..b10259905
--- /dev/null
+++ b/skills/creative/sovereign_thinking.py
@@ -0,0 +1,51 @@
+"""
+---
+title: Sovereign Thinking
+description: Pauses the agent to perform deep reasoning on complex problems using Gemini 3.1 Pro.
+conditions:
+  - Complex logic required
+  - High-stakes decision making
+  - Architecture or design tasks
+---
+"""
+
+from agent.gemini_adapter import GeminiAdapter
+
+def think(problem: str, effort: str = "medium") -> str:
+    """
+    Performs deep reasoning on a complex problem.
+
+    Args:
+        problem: The complex problem or question to analyze.
+        effort: The reasoning effort ('low', 'medium', 'high', 'xhigh').
+
+    Returns:
+        A markdown string with the model's internal monologue (if any)
+        followed by its conclusion.
+    """
+    adapter = GeminiAdapter()
+
+    budget_map = {
+        "low": 4000,
+        "medium": 16000,
+        "high": 32000,
+        "xhigh": 64000
+    }
+
+    # Unknown effort levels fall back to the 'medium' budget.
+    budget = budget_map.get(effort, 16000)
+
+    result = adapter.generate(
+        model="gemini-3.1-pro-preview",
+        prompt=problem,
+        system_instruction="You are the internal reasoning engine of the Hermes Agent. Think deeply and provide a structured analysis.",
+        thinking=True,
+        thinking_budget=budget
+    )
+
+    output = []
+    if result.get("thoughts"):
+        output.append("### Internal Monologue\n" + result["thoughts"])
+
+    output.append("### Conclusion\n" + result["text"])
+
+    return "\n\n".join(output)