"""Native Gemini 3 Series adapter for Hermes Agent.

Leverages the google-genai SDK to provide sovereign access to Gemini's
unique capabilities: Thinking (Reasoning) tokens, Search Grounding,
and Maps Grounding.
"""
import logging
import os
from typing import Any, Dict, List, Optional, Union

# The google-genai SDK is an optional dependency: the adapter degrades to a
# stub (client=None) when it is absent, and generate() raises ImportError.
try:
    from google import genai
    from google.genai import types
except ImportError:
    genai = None  # type: ignore
    types = None  # type: ignore

logger = logging.getLogger(__name__)
class GeminiAdapter:
    """Thin wrapper around the google-genai client for Gemini models.

    Exposes optional "thinking" (reasoning-token) output and Google Search
    grounding through a single ``generate`` call. Requires an API key either
    passed explicitly or via the ``GEMINI_API_KEY`` environment variable.
    """

    def __init__(self, api_key: Optional[str] = None):
        """Initialize the adapter.

        Args:
            api_key: Gemini API key. Falls back to the ``GEMINI_API_KEY``
                environment variable when not provided.
        """
        self.api_key = api_key or os.environ.get("GEMINI_API_KEY")
        if not self.api_key:
            logger.warning("GEMINI_API_KEY not found in environment.")

        # client stays None when the SDK is unavailable; generate() raises then.
        self.client = genai.Client(api_key=self.api_key) if genai else None

    def generate(
        self,
        model: str,
        prompt: str,
        system_instruction: Optional[str] = None,
        thinking: bool = False,
        thinking_budget: int = 16000,
        grounding: bool = False,
        **kwargs,
    ) -> Dict[str, Any]:
        """Generate content from a Gemini model.

        Args:
            model: Model identifier (e.g. a Gemini 3 series model name).
            prompt: User prompt passed as ``contents``.
            system_instruction: Optional system prompt.
            thinking: When True, request reasoning ("thought") parts.
            thinking_budget: Extra output tokens reserved for thinking;
                added on top of ``max_output_tokens`` because that limit
                includes thinking tokens.
            grounding: When True, enable the Google Search grounding tool.
            **kwargs: Forwarded into ``types.GenerateContentConfig``.

        Returns:
            Dict with ``"text"`` and ``"usage"``; ``"thoughts"`` and
            ``"grounding"`` are included when present in the response.

        Raises:
            ImportError: If the google-genai SDK is not installed.
        """
        if not self.client:
            raise ImportError("google-genai SDK not installed. Run 'pip install google-genai'.")

        config: Dict[str, Any] = {}
        if system_instruction:
            config["system_instruction"] = system_instruction

        if thinking:
            # Gemini 3 series thinking config.
            config["thinking_config"] = {"include_thoughts": True}
            # max_output_tokens includes thinking tokens, so reserve the
            # thinking budget on top of the caller's (or default) limit.
            kwargs["max_output_tokens"] = kwargs.get("max_output_tokens", 32000) + thinking_budget

        if grounding:
            config["tools"] = [{"google_search": {}}]

        response = self.client.models.generate_content(
            model=model,
            contents=prompt,
            config=types.GenerateContentConfig(**config, **kwargs),
        )

        usage = response.usage_metadata
        result: Dict[str, Any] = {
            "text": response.text,
            "usage": {
                "prompt_tokens": usage.prompt_token_count,
                "candidates_tokens": usage.candidates_token_count,
                "total_tokens": usage.total_token_count,
            },
        }

        # Guard: candidates/parts can be None or empty (e.g. blocked output).
        candidates = response.candidates or []
        if candidates:
            parts = candidates[0].content.parts or []
            # BUG FIX: in the google-genai SDK, `part.thought` is a boolean
            # flag; the thought TEXT lives in `part.text`. The original code
            # appended the flag itself, so "\n".join() failed on bools.
            thoughts = [
                part.text
                for part in parts
                if getattr(part, "thought", False) and part.text
            ]
            if thoughts:
                result["thoughts"] = "\n".join(thoughts)

            grounding_meta = getattr(candidates[0], "grounding_metadata", None)
            if grounding_meta:
                result["grounding"] = grounding_meta

        return result