[gemini] feat: add coverage and duration strictness to pytest (#934) (#1140)

Co-authored-by: Google Gemini <gemini@hermes.local>
Co-committed-by: Google Gemini <gemini@hermes.local>
This commit is contained in:
Date: 2026-03-23 18:36:01 +00:00
Committed by: rockachopa
Parent commit: ed63877f75
Commit: 05e1196ea4
7 changed files with 60 additions and 32 deletions

View File

@@ -384,11 +384,12 @@ def _startup_background_tasks() -> list[asyncio.Task]:
]
try:
from timmy.paperclip import start_paperclip_poller
bg_tasks.append(asyncio.create_task(start_paperclip_poller()))
logger.info("Paperclip poller started")
except ImportError:
logger.debug("Paperclip module not found, skipping poller")
return bg_tasks

View File

@@ -114,7 +114,7 @@ class Provider:
type: str # ollama, openai, anthropic
enabled: bool
priority: int
tier: str | None = None # e.g., "local", "standard_cloud", "frontier"
tier: str | None = None # e.g., "local", "standard_cloud", "frontier"
url: str | None = None
api_key: str | None = None
base_url: str | None = None
@@ -573,7 +573,6 @@ class CascadeRouter:
if not providers:
raise RuntimeError(f"No providers found for tier: {cascade_tier}")
for provider in providers:
if not self._is_provider_available(provider):
continue

View File

@@ -13,8 +13,8 @@ from dataclasses import dataclass
import httpx
from config import settings
from timmy.research_tools import get_llm_client, google_web_search
from timmy.research_triage import triage_research_report
from timmy.research_tools import google_web_search, get_llm_client
logger = logging.getLogger(__name__)
@@ -52,10 +52,7 @@ class PaperclipClient:
)
resp.raise_for_status()
tasks = resp.json()
return [
PaperclipTask(id=t["id"], kind=t["kind"], context=t["context"])
for t in tasks
]
return [PaperclipTask(id=t["id"], kind=t["kind"], context=t["context"]) for t in tasks]
async def update_task_status(
self, task_id: str, status: str, result: str | None = None
@@ -98,7 +95,7 @@ class ResearchOrchestrator:
async def run_research_pipeline(self, issue_title: str) -> str:
"""Run the research pipeline."""
search_results = await google_web_search(issue_title)
llm_client = get_llm_client()
response = await llm_client.completion(
f"Summarize the following search results and generate a research report:\\n\\n{search_results}",
@@ -123,7 +120,9 @@ class ResearchOrchestrator:
comment += "Created the following issues:\\n"
for result in triage_results:
if result["gitea_issue"]:
comment += f"- #{result['gitea_issue']['number']}: {result['action_item'].title}\\n"
comment += (
f"- #{result['gitea_issue']['number']}: {result['action_item'].title}\\n"
)
else:
comment += "No new issues were created.\\n"
@@ -172,4 +171,3 @@ async def start_paperclip_poller() -> None:
if settings.paperclip_enabled:
poller = PaperclipPoller()
asyncio.create_task(poller.poll())

View File

@@ -6,7 +6,6 @@ import logging
import os
from typing import Any
from config import settings
from serpapi import GoogleSearch
logger = logging.getLogger(__name__)
@@ -28,6 +27,7 @@ async def google_web_search(query: str) -> str:
def get_llm_client() -> Any:
"""Get an LLM client."""
# This is a placeholder. In a real application, this would return
# a client for an LLM service like OpenAI, Anthropic, or a local
# model.