Compare commits

..

1 Commits

Author SHA1 Message Date
4eec7491a7 fix(cron): include token usage in cron job output (#490)
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m7s
Cron job run_job() ran the agent but never extracted token
counts from the result dict. Usage data was accumulated by
the agent during the conversation, then discarded on return.

Fix: Add _format_token_usage() helper that extracts
input_tokens, output_tokens, total_tokens, api_calls, and
estimated_cost_usd from the agent result. Include a Token
Usage section in all three output templates (success,
script failure, sync fallback).

Closes #490
2026-04-14 11:39:37 +00:00
3 changed files with 30 additions and 163 deletions

View File

@@ -643,6 +643,27 @@ def _build_job_prompt(job: dict) -> str:
return "\n".join(parts)
def _format_token_usage(result: dict) -> str:
"""Extract token usage from agent result dict for cron output display."""
inp = result.get("input_tokens", 0) or 0
out = result.get("output_tokens", 0) or 0
total = result.get("total_tokens", 0) or 0
cost = result.get("estimated_cost_usd")
calls = result.get("api_calls", 0) or 0
if inp == 0 and out == 0 and total == 0:
return "" # No usage data available
lines = [f"- Input tokens: {inp:,}"]
lines.append(f"- Output tokens: {out:,}")
lines.append(f"- Total tokens: {total:,}")
lines.append(f"- API calls: {calls}")
if cost is not None:
lines.append(f"- Estimated cost: ${cost:.4f}")
return "\n".join(lines)
def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
"""
Execute a single cron job.
@@ -877,6 +898,8 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
result = agent.run_conversation(prompt)
final_response = result.get("final_response", "") or ""
logged_response = final_response if final_response else "(No response generated)"
_token_usage = _format_token_usage(result)
_token_section = f"\n## Token Usage\n\n{_token_usage}\n" if _token_usage else ""
output = f"""# Cron Job: {job_name}
**Job ID:** {job_id}
@@ -890,7 +913,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
## Response
{logged_response}
"""
{_token_section}"""
logger.info("Job '%s' completed (sync fallback)", job_name)
return True, output, final_response, None
@@ -969,6 +992,8 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
"Job '%s': agent reported script failure — %s",
job_name, _script_failed_reason,
)
_token_usage = _format_token_usage(result)
_token_section = f"\n## Token Usage\n\n{_token_usage}\n" if _token_usage else ""
output = f"""# Cron Job: {job_name} (SCRIPT FAILED)
**Job ID:** {job_id}
@@ -982,9 +1007,11 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
## Response
{logged_response}
"""
{_token_section}"""
return False, output, final_response, _script_failed_reason
_token_usage = _format_token_usage(result)
_token_section = f"\n## Token Usage\n\n{_token_usage}\n" if _token_usage else ""
output = f"""# Cron Job: {job_name}
**Job ID:** {job_id}
@@ -998,7 +1025,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
## Response
{logged_response}
"""
{_token_section}"""
logger.info("Job '%s' completed successfully", job_name)
return True, output, final_response, None

View File

@@ -517,71 +517,3 @@ def resolve_provider_full(
pass
return None
# -- Runtime classification ---------------------------------------------------

# Every provider spelling we treat as definitively cloud-hosted: the overlay
# keys plus the alias strings that normalize to those cloud providers.
# _classify_runtime() consults this to separate "cloud" from "unknown".
_CLOUD_PREFIXES: frozenset[str] = frozenset(HERMES_OVERLAYS) | frozenset({
    "openai",
    "gemini", "google", "google-gemini", "google-ai-studio",
    "claude", "claude-code",
    "copilot", "github", "github-copilot",
    "glm", "z-ai", "z.ai", "zhipu", "zai",
    "kimi", "kimi-coding", "moonshot",
    "minimax", "minimax-china", "minimax_cn",
    "deep-seek",
    "dashscope", "aliyun", "qwen", "alibaba-cloud", "alibaba",
    "hf", "hugging-face", "huggingface-hub", "huggingface",
    "ai-gateway", "aigateway", "vercel-ai-gateway",
    "opencode-zen", "zen",
    "opencode-go-sub",
    "kilocode", "kilo-code", "kilo-gateway", "kilo",
})

# Providers that serve inference from the user's own machine — self-hosted,
# no external API involved.
_LOCAL_PROVIDERS: frozenset[str] = frozenset({
    "ollama",
    "local",
    "vllm",
    "llamacpp", "llama.cpp", "llama-cpp",
    "lmstudio", "lm-studio",
})
def _classify_runtime(provider: Optional[str], model: str) -> str:
    """Decide whether a provider/model pair runs in the cloud, locally, or neither.

    Returns one of ``"cloud"``, ``"local"``, or ``"unknown"``.

    Resolution order:
      1. Provider in ``_LOCAL_PROVIDERS`` -> ``"local"``.
      2. Provider in ``_CLOUD_PREFIXES`` -> ``"cloud"``.
      3. Any other non-empty provider -> ``"unknown"``. (Regression guard:
         an unrecognised provider previously fell through to ``"local"`` —
         that was the bug.)
      4. No provider: a ``"<prefix>/<name>"`` model whose prefix is a known
         cloud provider -> ``"cloud"`` (e.g. ``"openai/gpt-4o"``).
      5. Anything else -> ``"unknown"``.
    """
    normalized = (provider or "").strip().lower()
    if normalized:
        if normalized in _LOCAL_PROVIDERS:
            return "local"
        # Unrecognised explicit providers must NOT default to "local".
        return "cloud" if normalized in _CLOUD_PREFIXES else "unknown"

    # No provider supplied: try to infer the runtime from the model string.
    candidate = (model or "").strip().lower()
    prefix, slash, _rest = candidate.partition("/")
    if slash and prefix in _CLOUD_PREFIXES:
        return "cloud"

    # Not enough context to classify.
    return "unknown"

View File

@@ -1,92 +0,0 @@
"""Tests for _classify_runtime() edge cases.
Covers the bug reported in #556: unknown provider with a model string
incorrectly returned "local" instead of "unknown".
"""
import pytest
from hermes_cli.providers import _classify_runtime
class TestClassifyRuntimeLocalProviders:
    """Known self-hosted providers classify as 'local' with or without a model."""

    def test_ollama_no_model(self):
        """Ollama is local even when no model string is given."""
        assert _classify_runtime(provider="ollama", model="") == "local"

    def test_ollama_with_model(self):
        """A model string does not change Ollama's local classification."""
        assert _classify_runtime(provider="ollama", model="llama3:8b") == "local"

    def test_local_provider_no_model(self):
        """The literal 'local' provider is local."""
        assert _classify_runtime(provider="local", model="") == "local"

    def test_local_provider_with_model(self):
        """'local' stays local regardless of the model name."""
        assert _classify_runtime(provider="local", model="my-model") == "local"

    def test_vllm_provider(self):
        """vLLM is a self-hosted inference server -> local."""
        assert _classify_runtime(provider="vllm", model="meta/llama-3") == "local"

    def test_llamacpp_provider(self):
        """llama.cpp is a self-hosted runtime -> local."""
        assert _classify_runtime(provider="llamacpp", model="mistral") == "local"
class TestClassifyRuntimeCloudProviders:
    """Recognised hosted providers classify as 'cloud'."""

    def test_anthropic_provider(self):
        """Anthropic is a hosted API -> cloud."""
        assert _classify_runtime(provider="anthropic", model="claude-opus-4-6") == "cloud"

    def test_openrouter_provider(self):
        """OpenRouter routes to hosted models -> cloud."""
        assert _classify_runtime(provider="openrouter", model="anthropic/claude-opus-4-6") == "cloud"

    def test_nous_provider(self):
        """Nous is a hosted provider -> cloud."""
        assert _classify_runtime(provider="nous", model="hermes-3") == "cloud"

    def test_gemini_provider(self):
        """Gemini (Google) is a hosted provider -> cloud."""
        assert _classify_runtime(provider="gemini", model="gemini-pro") == "cloud"

    def test_deepseek_provider(self):
        """DeepSeek is a hosted provider -> cloud."""
        assert _classify_runtime(provider="deepseek", model="deepseek-chat") == "cloud"
class TestClassifyRuntimeUnknownProviders:
    """Regression suite for #556: unrecognised providers must yield 'unknown',
    never the old erroneous 'local' fallback."""

    def test_unknown_provider_with_model(self):
        """The core #556 case: an unrecognised provider plus a model string."""
        assert _classify_runtime(provider="custom", model="my-model") == "unknown"

    def test_unknown_provider_no_model(self):
        """Unrecognised provider and no model still yields 'unknown'."""
        assert _classify_runtime(provider="custom", model="") == "unknown"

    def test_arbitrary_provider_with_model(self):
        """Any arbitrary provider name with a model yields 'unknown'."""
        assert _classify_runtime(provider="my-private-llm", model="some-model") == "unknown"

    def test_arbitrary_provider_no_model(self):
        """Arbitrary provider name without a model yields 'unknown'."""
        assert _classify_runtime(provider="my-private-llm", model="") == "unknown"

    def test_whitespace_only_provider_treated_as_empty(self):
        """A whitespace-only provider is stripped to empty; with no model
        either, the result is 'unknown'."""
        assert _classify_runtime(provider="   ", model="") == "unknown"
class TestClassifyRuntimeEmptyProvider:
    """With no provider, classification falls back to the model-string prefix."""

    def test_empty_provider_cloud_prefixed_model(self):
        """A model prefixed with a known cloud provider classifies as cloud."""
        assert _classify_runtime(provider="", model="openrouter/gpt-4o") == "cloud"

    def test_none_provider_cloud_prefixed_model(self):
        """None behaves like an empty provider for prefix inference."""
        assert _classify_runtime(provider=None, model="anthropic/claude-opus-4-6") == "cloud"

    def test_empty_provider_no_model(self):
        """Nothing to classify at all -> unknown."""
        assert _classify_runtime(provider="", model="") == "unknown"

    def test_none_provider_no_model(self):
        """None provider and empty model -> unknown."""
        assert _classify_runtime(provider=None, model="") == "unknown"

    def test_empty_provider_non_cloud_prefixed_model(self):
        """A bare model name with no recognised prefix -> unknown."""
        assert _classify_runtime(provider="", model="my-model") == "unknown"

    def test_empty_provider_model_with_unknown_prefix(self):
        """A slash-prefixed model whose prefix is not a known cloud provider -> unknown."""
        assert _classify_runtime(provider="", model="myprivate/llm-7b") == "unknown"