diff --git a/tools/skills_guard.py b/tools/skills_guard.py
index 34a4294e8..0b6d7fee7 100644
--- a/tools/skills_guard.py
+++ b/tools/skills_guard.py
@@ -946,6 +946,10 @@ def llm_audit_skill(skill_path: Path, static_result: ScanResult,
     client = OpenAI(
         base_url=OPENROUTER_BASE_URL,
         api_key=api_key,
+        # OpenRouter app attribution: only HTTP-Referer and X-Title are
+        # recognized by OpenRouter; sent on every request via default_headers.
+        default_headers={
+            "HTTP-Referer": "https://github.com/NousResearch/hermes-agent",
+            "X-Title": "Hermes Agent",
+        },
     )
     response = client.chat.completions.create(
         model=model,
diff --git a/trajectory_compressor.py b/trajectory_compressor.py
index dedae1ade..3f49c617b 100644
--- a/trajectory_compressor.py
+++ b/trajectory_compressor.py
@@ -351,16 +351,28 @@ class TrajectoryCompressor:
         from openai import OpenAI, AsyncOpenAI
 
+        # OpenRouter app attribution headers (only for OpenRouter endpoints).
+        # HTTP-Referer and X-Title are the two headers OpenRouter recognizes
+        # for ranking/attribution; other endpoints get no extra headers.
+        extra = {}
+        if "openrouter" in self.config.base_url.lower():
+            extra["default_headers"] = {
+                "HTTP-Referer": "https://github.com/NousResearch/hermes-agent",
+                "X-Title": "Hermes Agent",
+            }
+
         # Sync client (for backwards compatibility)
         self.client = OpenAI(
             api_key=api_key,
-            base_url=self.config.base_url
+            base_url=self.config.base_url,
+            **extra,
         )
 
         # Async client for parallel processing
         self.async_client = AsyncOpenAI(
             api_key=api_key,
-            base_url=self.config.base_url
+            base_url=self.config.base_url,
+            **extra,
         )
 
         print(f"✅ Initialized OpenRouter client: {self.config.summarization_model}")