forked from Rockachopa/Timmy-time-dashboard

Compare commits: kimi/issue...kimi/issue (5 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 3de7db770f | |
| | 9f2728f529 | |
| | cd3dc5d989 | |
| | e4de539bf3 | |
| | b2057f72e1 | |
```diff
@@ -10,6 +10,11 @@ from pydantic_settings import BaseSettings, SettingsConfigDict
 APP_START_TIME: _datetime = _datetime.now(UTC)
 
 
+def normalize_ollama_url(url: str) -> str:
+    """Replace localhost with 127.0.0.1 to avoid IPv6 resolution delays."""
+    return url.replace("localhost", "127.0.0.1")
+
+
 class Settings(BaseSettings):
     """Central configuration — all env-var access goes through this class."""
 
```
```diff
@@ -19,6 +24,11 @@ class Settings(BaseSettings):
     # Ollama host — override with OLLAMA_URL env var or .env file
     ollama_url: str = "http://localhost:11434"
 
+    @property
+    def normalized_ollama_url(self) -> str:
+        """Return ollama_url with localhost replaced by 127.0.0.1."""
+        return normalize_ollama_url(self.ollama_url)
+
     # LLM model passed to Agno/Ollama — override with OLLAMA_MODEL
     # qwen3:30b is the primary model — better reasoning and tool calling
     # than llama3.1:8b-instruct while still running locally on modest hardware.
```
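These two hunks introduce the helper and a `Settings` property so the `localhost` to `127.0.0.1` substitution lives in one place instead of being repeated at every call site. A minimal usage sketch; the assertions are illustrative, not tests from the repo:

```python
from config import normalize_ollama_url, settings

# On dual-stack hosts "localhost" can resolve to ::1 first; when Ollama only
# listens on 127.0.0.1, clients wait out an IPv6 connect attempt before
# falling back to IPv4. Substituting the IPv4 literal sidesteps that delay.
assert normalize_ollama_url("http://localhost:11434") == "http://127.0.0.1:11434"

# Anything else passes through unchanged.
assert normalize_ollama_url("http://gpu-box:11434") == "http://gpu-box:11434"

# Call sites now read the property instead of repeating .replace() inline.
url = settings.normalized_ollama_url
```

One caveat worth noting: `str.replace` rewrites every occurrence, so a hostname such as `mylocalhost` would also be mangled. For a single trusted setting that is acceptable, but the helper is not a URL parser.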
```diff
@@ -392,7 +402,7 @@ def check_ollama_model_available(model_name: str) -> bool:
     import json
     import urllib.request
 
-    url = settings.ollama_url.replace("localhost", "127.0.0.1")
+    url = settings.normalized_ollama_url
     req = urllib.request.Request(
         f"{url}/api/tags",
         method="GET",
```
```diff
@@ -329,33 +329,35 @@ async def _discord_token_watcher() -> None:
         logger.warning("Discord auto-start failed: %s", exc)
 
 
-@asynccontextmanager
-async def lifespan(app: FastAPI):
-    """Application lifespan manager with non-blocking startup."""
-    # Validate security config (no-op in test mode)
+def _startup_init() -> None:
+    """Validate config and enable event persistence."""
     from config import validate_startup
 
     validate_startup()
 
-    # Enable event persistence (unified EventBus + swarm event_log)
     from infrastructure.events.bus import init_event_bus_persistence
 
     init_event_bus_persistence()
 
-    # Create all background tasks without waiting for them
-    briefing_task = asyncio.create_task(_briefing_scheduler())
-    thinking_task = asyncio.create_task(_thinking_scheduler())
-    loop_qa_task = asyncio.create_task(_loop_qa_scheduler())
-    presence_task = asyncio.create_task(_presence_watcher())
-
-    # Initialize Spark Intelligence engine
     from spark.engine import get_spark_engine
 
     if get_spark_engine().enabled:
         logger.info("Spark Intelligence active — event capture enabled")
 
-    # Auto-prune old vector store memories on startup
+
+def _startup_background_tasks() -> list[asyncio.Task]:
+    """Spawn all recurring background tasks (non-blocking)."""
+    return [
+        asyncio.create_task(_briefing_scheduler()),
+        asyncio.create_task(_thinking_scheduler()),
+        asyncio.create_task(_loop_qa_scheduler()),
+        asyncio.create_task(_presence_watcher()),
+        asyncio.create_task(_start_chat_integrations_background()),
+    ]
+
+
+def _startup_pruning() -> None:
+    """Auto-prune old memories, thoughts, and events on startup."""
     if settings.memory_prune_days > 0:
         try:
             from timmy.memory_system import prune_memories
```
```diff
@@ -373,7 +375,6 @@ async def lifespan(app: FastAPI):
         except Exception as exc:
             logger.debug("Memory auto-prune skipped: %s", exc)
 
-    # Auto-prune old thoughts on startup
     if settings.thoughts_prune_days > 0:
         try:
             from timmy.thinking import thinking_engine
```
```diff
@@ -391,7 +392,6 @@ async def lifespan(app: FastAPI):
         except Exception as exc:
             logger.debug("Thought auto-prune skipped: %s", exc)
 
-    # Auto-prune old system events on startup
     if settings.events_prune_days > 0:
         try:
             from swarm.event_log import prune_old_events
```
```diff
@@ -409,7 +409,6 @@ async def lifespan(app: FastAPI):
         except Exception as exc:
             logger.debug("Event auto-prune skipped: %s", exc)
 
-    # Warn if memory vault exceeds size limit
    if settings.memory_vault_max_mb > 0:
        try:
            vault_path = Path(settings.repo_root) / "memory" / "notes"
```
```diff
@@ -425,6 +424,42 @@ async def lifespan(app: FastAPI):
         except Exception as exc:
             logger.debug("Vault size check skipped: %s", exc)
 
+
+async def _shutdown_cleanup(
+    bg_tasks: list[asyncio.Task],
+    workshop_heartbeat,
+) -> None:
+    """Stop chat bots, MCP sessions, heartbeat, and cancel background tasks."""
+    from integrations.chat_bridge.vendors.discord import discord_bot
+    from integrations.telegram_bot.bot import telegram_bot
+
+    await discord_bot.stop()
+    await telegram_bot.stop()
+
+    try:
+        from timmy.mcp_tools import close_mcp_sessions
+
+        await close_mcp_sessions()
+    except Exception as exc:
+        logger.debug("MCP shutdown: %s", exc)
+
+    await workshop_heartbeat.stop()
+
+    for task in bg_tasks:
+        task.cancel()
+        try:
+            await task
+        except asyncio.CancelledError:
+            pass
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    """Application lifespan manager with non-blocking startup."""
+    _startup_init()
+    bg_tasks = _startup_background_tasks()
+    _startup_pruning()
+
     # Start Workshop presence heartbeat with WS relay
     from dashboard.routes.world import broadcast_world_state
     from timmy.workshop_state import WorkshopHeartbeat
```
```diff
@@ -432,10 +467,7 @@
     workshop_heartbeat = WorkshopHeartbeat(on_change=broadcast_world_state)
     await workshop_heartbeat.start()
 
-    # Start chat integrations in background
-    chat_task = asyncio.create_task(_start_chat_integrations_background())
-
-    # Register session logger with error capture (breaks infrastructure → timmy circular dep)
+    # Register session logger with error capture
     try:
         from infrastructure.error_capture import register_error_recorder
         from timmy.session_logger import get_session_logger
```
```diff
@@ -448,30 +480,7 @@
 
     yield
 
-    # Cleanup on shutdown
-    from integrations.chat_bridge.vendors.discord import discord_bot
-    from integrations.telegram_bot.bot import telegram_bot
-
-    await discord_bot.stop()
-    await telegram_bot.stop()
-
-    # Close MCP tool server sessions
-    try:
-        from timmy.mcp_tools import close_mcp_sessions
-
-        await close_mcp_sessions()
-    except Exception as exc:
-        logger.debug("MCP shutdown: %s", exc)
-
-    await workshop_heartbeat.stop()
-
-    for task in [briefing_task, thinking_task, chat_task, loop_qa_task, presence_task]:
-        if task:
-            task.cancel()
-            try:
-                await task
-            except asyncio.CancelledError:
-                pass
+    await _shutdown_cleanup(bg_tasks, workshop_heartbeat)
 
 
 app = FastAPI(
```
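Net effect of these hunks: `lifespan` shrinks to three startup calls, a `yield`, and one cleanup call, and the background-task list is built and cancelled in one place. The old code tracked five separate task variables by hand, and `chat_task` now lives inside `_startup_background_tasks()`, so it is cancelled on shutdown like the rest. A self-contained sketch of the FastAPI contract the refactor preserves; `_heartbeat` is a stand-in worker, not a function from this repo:

```python
import asyncio
from contextlib import asynccontextmanager

from fastapi import FastAPI


async def _heartbeat() -> None:
    # Stand-in for the schedulers spawned by _startup_background_tasks().
    while True:
        await asyncio.sleep(60)


@asynccontextmanager
async def lifespan(app: FastAPI):
    bg_tasks = [asyncio.create_task(_heartbeat())]  # runs once at startup
    yield                                           # application serves requests
    for task in bg_tasks:                           # runs once at shutdown
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass


app = FastAPI(lifespan=lifespan)
```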
```diff
@@ -65,7 +65,7 @@ def _check_ollama_sync() -> DependencyStatus:
     try:
         import urllib.request
 
-        url = settings.ollama_url.replace("localhost", "127.0.0.1")
+        url = settings.normalized_ollama_url
         req = urllib.request.Request(
             f"{url}/api/tags",
             method="GET",
```
```diff
@@ -13,7 +13,7 @@ import logging
 from dataclasses import dataclass, field
 from enum import Enum, auto
 
-from config import settings
+from config import normalize_ollama_url, settings
 
 logger = logging.getLogger(__name__)
 
```
```diff
@@ -307,7 +307,7 @@ class MultiModalManager:
         import json
         import urllib.request
 
-        url = self.ollama_url.replace("localhost", "127.0.0.1")
+        url = normalize_ollama_url(self.ollama_url)
         req = urllib.request.Request(
             f"{url}/api/tags",
             method="GET",
```
```diff
@@ -462,7 +462,7 @@ class MultiModalManager:
 
         logger.info("Pulling model: %s", model_name)
 
-        url = self.ollama_url.replace("localhost", "127.0.0.1")
+        url = normalize_ollama_url(self.ollama_url)
         req = urllib.request.Request(
             f"{url}/api/pull",
             method="POST",
```
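Both `MultiModalManager` call sites, like the `settings`-based ones above, talk to the same two Ollama endpoints: `GET /api/tags` lists installed models and `POST /api/pull` downloads one. A condensed, standalone sketch of the tags probe the diff keeps repeating; the 5-second timeout is illustrative, not the repo's value:

```python
import json
import urllib.request


def list_installed_models(ollama_url: str) -> list[str]:
    # Same normalization the diff centralizes: force the IPv4 loopback literal.
    url = ollama_url.replace("localhost", "127.0.0.1")
    req = urllib.request.Request(f"{url}/api/tags", method="GET")
    with urllib.request.urlopen(req, timeout=5) as resp:
        data = json.load(resp)
    # Ollama responds with {"models": [{"name": "qwen3:30b", ...}, ...]}.
    return [m["name"] for m in data.get("models", [])]
```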
```diff
@@ -388,6 +388,101 @@ class CascadeRouter:
 
         return None
 
+    def _select_model(
+        self, provider: Provider, model: str | None, content_type: ContentType
+    ) -> tuple[str | None, bool]:
+        """Select the best model for the request, with vision fallback.
+
+        Returns:
+            Tuple of (selected_model, is_fallback_model).
+        """
+        selected_model = model or provider.get_default_model()
+        is_fallback = False
+
+        if content_type != ContentType.TEXT and selected_model:
+            if provider.type == "ollama" and self._mm_manager:
+                from infrastructure.models.multimodal import ModelCapability
+
+                if content_type == ContentType.VISION:
+                    supports = self._mm_manager.model_supports(
+                        selected_model, ModelCapability.VISION
+                    )
+                    if not supports:
+                        fallback = self._get_fallback_model(provider, selected_model, content_type)
+                        if fallback:
+                            logger.info(
+                                "Model %s doesn't support vision, falling back to %s",
+                                selected_model,
+                                fallback,
+                            )
+                            selected_model = fallback
+                            is_fallback = True
+                        else:
+                            logger.warning(
+                                "No vision-capable model found on %s, trying anyway",
+                                provider.name,
+                            )
+
+        return selected_model, is_fallback
+
+    async def _attempt_with_retry(
+        self,
+        provider: Provider,
+        messages: list[dict],
+        model: str | None,
+        temperature: float,
+        max_tokens: int | None,
+        content_type: ContentType,
+    ) -> dict:
+        """Try a provider with retries, returning the result dict.
+
+        Raises:
+            RuntimeError: If all retry attempts fail.
+                Returns error strings collected during retries via the exception message.
+        """
+        errors: list[str] = []
+        for attempt in range(self.config.max_retries_per_provider):
+            try:
+                return await self._try_provider(
+                    provider=provider,
+                    messages=messages,
+                    model=model,
+                    temperature=temperature,
+                    max_tokens=max_tokens,
+                    content_type=content_type,
+                )
+            except Exception as exc:
+                error_msg = str(exc)
+                logger.warning(
+                    "Provider %s attempt %d failed: %s",
+                    provider.name,
+                    attempt + 1,
+                    error_msg,
+                )
+                errors.append(f"{provider.name}: {error_msg}")
+
+                if attempt < self.config.max_retries_per_provider - 1:
+                    await asyncio.sleep(self.config.retry_delay_seconds)
+
+        raise RuntimeError("; ".join(errors))
+
+    def _is_provider_available(self, provider: Provider) -> bool:
+        """Check if a provider should be tried (enabled + circuit breaker)."""
+        if not provider.enabled:
+            logger.debug("Skipping %s (disabled)", provider.name)
+            return False
+
+        if provider.status == ProviderStatus.UNHEALTHY:
+            if self._can_close_circuit(provider):
+                provider.circuit_state = CircuitState.HALF_OPEN
+                provider.half_open_calls = 0
+                logger.info("Circuit breaker half-open for %s", provider.name)
+            else:
+                logger.debug("Skipping %s (circuit open)", provider.name)
+                return False
+
+        return True
+
     async def complete(
         self,
         messages: list[dict],
```
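`_is_provider_available` packages the circuit-breaker dance that was previously inlined in `complete()`: an `UNHEALTHY` provider is skipped while its circuit is open, and once `_can_close_circuit` approves, it moves to `HALF_OPEN` to admit a limited probe. A generic sketch of that state machine; the cooldown handling is illustrative, not the repo's `_can_close_circuit` logic:

```python
import enum
import time


class CircuitState(enum.Enum):
    CLOSED = enum.auto()     # healthy: requests flow normally
    OPEN = enum.auto()       # tripped: skip this provider entirely
    HALF_OPEN = enum.auto()  # probing: allow a trial call, then decide


class Breaker:
    def __init__(self, cooldown_s: float = 30.0) -> None:
        self.state = CircuitState.CLOSED
        self.opened_at = 0.0
        self.cooldown_s = cooldown_s

    def record_failure(self) -> None:
        self.state = CircuitState.OPEN
        self.opened_at = time.monotonic()

    def record_success(self) -> None:
        self.state = CircuitState.CLOSED

    def allow_request(self) -> bool:
        if self.state is CircuitState.OPEN:
            if time.monotonic() - self.opened_at < self.cooldown_s:
                return False                         # still cooling down: skip
            self.state = CircuitState.HALF_OPEN      # cooldown elapsed: probe
        return True
```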
```diff
@@ -414,7 +509,6 @@
         Raises:
             RuntimeError: If all providers fail
         """
-        # Detect content type for multi-modal routing
         content_type = self._detect_content_type(messages)
         if content_type != ContentType.TEXT:
             logger.debug("Detected %s content, selecting appropriate model", content_type.value)
```
```diff
@@ -422,93 +516,34 @@
         errors = []
 
         for provider in self.providers:
-            # Skip disabled providers
-            if not provider.enabled:
-                logger.debug("Skipping %s (disabled)", provider.name)
+            if not self._is_provider_available(provider):
                 continue
 
-            # Skip unhealthy providers (circuit breaker)
-            if provider.status == ProviderStatus.UNHEALTHY:
-                # Check if circuit breaker can close
-                if self._can_close_circuit(provider):
-                    provider.circuit_state = CircuitState.HALF_OPEN
-                    provider.half_open_calls = 0
-                    logger.info("Circuit breaker half-open for %s", provider.name)
-                else:
-                    logger.debug("Skipping %s (circuit open)", provider.name)
-                    continue
+            selected_model, is_fallback_model = self._select_model(provider, model, content_type)
 
-            # Determine which model to use
-            selected_model = model or provider.get_default_model()
-            is_fallback_model = False
-
-            # For non-text content, check if model supports it
-            if content_type != ContentType.TEXT and selected_model:
-                if provider.type == "ollama" and self._mm_manager:
-                    from infrastructure.models.multimodal import ModelCapability
-
-                    # Check if selected model supports the required capability
-                    if content_type == ContentType.VISION:
-                        supports = self._mm_manager.model_supports(
-                            selected_model, ModelCapability.VISION
-                        )
-                        if not supports:
-                            # Find fallback model
-                            fallback = self._get_fallback_model(
-                                provider, selected_model, content_type
-                            )
-                            if fallback:
-                                logger.info(
-                                    "Model %s doesn't support vision, falling back to %s",
-                                    selected_model,
-                                    fallback,
-                                )
-                                selected_model = fallback
-                                is_fallback_model = True
-                            else:
-                                logger.warning(
-                                    "No vision-capable model found on %s, trying anyway",
-                                    provider.name,
-                                )
-
-            # Try this provider
-            for attempt in range(self.config.max_retries_per_provider):
-                try:
-                    result = await self._try_provider(
-                        provider=provider,
-                        messages=messages,
-                        model=selected_model,
-                        temperature=temperature,
-                        max_tokens=max_tokens,
-                        content_type=content_type,
-                    )
-
-                    # Success! Update metrics and return
-                    self._record_success(provider, result.get("latency_ms", 0))
-                    return {
-                        "content": result["content"],
-                        "provider": provider.name,
-                        "model": result.get(
-                            "model", selected_model or provider.get_default_model()
-                        ),
-                        "latency_ms": result.get("latency_ms", 0),
-                        "is_fallback_model": is_fallback_model,
-                    }
-
-                except Exception as exc:
-                    error_msg = str(exc)
-                    logger.warning(
-                        "Provider %s attempt %d failed: %s", provider.name, attempt + 1, error_msg
-                    )
-                    errors.append(f"{provider.name}: {error_msg}")
-
-                    if attempt < self.config.max_retries_per_provider - 1:
-                        await asyncio.sleep(self.config.retry_delay_seconds)
-
-            # All retries failed for this provider
-            self._record_failure(provider)
-
-        # All providers failed
+            try:
+                result = await self._attempt_with_retry(
+                    provider,
+                    messages,
+                    selected_model,
+                    temperature,
+                    max_tokens,
+                    content_type,
+                )
+            except RuntimeError as exc:
+                errors.append(str(exc))
+                self._record_failure(provider)
+                continue
+
+            self._record_success(provider, result.get("latency_ms", 0))
+            return {
+                "content": result["content"],
+                "provider": provider.name,
+                "model": result.get("model", selected_model or provider.get_default_model()),
+                "latency_ms": result.get("latency_ms", 0),
+                "is_fallback_model": is_fallback_model,
+            }
 
         raise RuntimeError(f"All providers failed: {'; '.join(errors)}")
 
     async def _try_provider(
```
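With the helpers in place, `complete()` is a flat loop: availability gate, model selection, one retried attempt, record success or move on. `_attempt_with_retry` raises `RuntimeError` only after its own retries, which `complete()` converts into a single `_record_failure` plus a move to the next provider, so the circuit breaker still sees exactly one failure per exhausted provider, as before. Caller-facing behaviour is unchanged; a hedged usage sketch, where `router` stands for a configured `CascadeRouter` and the elided keyword arguments are assumed to have defaults:

```python
async def ask(router, prompt: str) -> str:
    try:
        result = await router.complete(
            messages=[{"role": "user", "content": prompt}],
        )
    except RuntimeError as exc:
        # Raised only after every enabled provider exhausts its retries;
        # the message concatenates the per-attempt errors.
        raise SystemExit(f"No provider could answer: {exc}")

    # The refactor keeps the result shape: content, provider, model,
    # latency_ms, and is_fallback_model (True when a vision fallback was used).
    return result["content"]
```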
```diff
@@ -63,7 +63,7 @@ def _pull_model(model_name: str) -> bool:
 
     logger.info("Pulling model: %s", model_name)
 
-    url = settings.ollama_url.replace("localhost", "127.0.0.1")
+    url = settings.normalized_ollama_url
     req = urllib.request.Request(
         f"{url}/api/pull",
         method="POST",
```
```diff
@@ -95,6 +95,126 @@ def _parse_steps(plan_text: str) -> list[str]:
     return [line.strip() for line in plan_text.strip().splitlines() if line.strip()]
 
 
+# ---------------------------------------------------------------------------
+# Extracted helpers
+# ---------------------------------------------------------------------------
+
+
+def _extract_content(run_result) -> str:
+    """Extract text content from an agent run result."""
+    return run_result.content if hasattr(run_result, "content") else str(run_result)
+
+
+def _clean(text: str) -> str:
+    """Clean a model response using session's response cleaner."""
+    from timmy.session import _clean_response
+
+    return _clean_response(text)
+
+
+async def _plan_task(
+    agent, task: str, session_id: str, max_steps: int
+) -> tuple[list[str], bool] | str:
+    """Run the planning phase — returns (steps, was_truncated) or error string."""
+    plan_prompt = (
+        f"Break this task into numbered steps (max {max_steps}). "
+        f"Return ONLY a numbered list, nothing else.\n\n"
+        f"Task: {task}"
+    )
+    try:
+        plan_run = await asyncio.to_thread(
+            agent.run, plan_prompt, stream=False, session_id=f"{session_id}_plan"
+        )
+        plan_text = _extract_content(plan_run)
+    except Exception as exc:  # broad catch intentional: agent.run can raise any error
+        logger.error("Agentic loop: planning failed: %s", exc)
+        return f"Planning failed: {exc}"
+
+    steps = _parse_steps(plan_text)
+    if not steps:
+        return "Planning produced no steps."
+
+    planned_count = len(steps)
+    steps = steps[:max_steps]
+    return steps, planned_count > len(steps)
+
+
+async def _execute_step(
+    agent,
+    task: str,
+    step_desc: str,
+    step_num: int,
+    total_steps: int,
+    recent_results: list[str],
+    session_id: str,
+) -> AgenticStep:
+    """Execute a single step, returning an AgenticStep."""
+    step_start = time.monotonic()
+    context = (
+        f"Task: {task}\n"
+        f"Step {step_num}/{total_steps}: {step_desc}\n"
+        f"Recent progress: {recent_results[-2:] if recent_results else []}\n\n"
+        f"Execute this step and report what you did."
+    )
+    step_run = await asyncio.to_thread(
+        agent.run, context, stream=False, session_id=f"{session_id}_step{step_num}"
+    )
+    step_result = _clean(_extract_content(step_run))
+    return AgenticStep(
+        step_num=step_num,
+        description=step_desc,
+        result=step_result,
+        status="completed",
+        duration_ms=int((time.monotonic() - step_start) * 1000),
+    )
+
+
+async def _adapt_step(
+    agent,
+    step_desc: str,
+    step_num: int,
+    error: Exception,
+    step_start: float,
+    session_id: str,
+) -> AgenticStep:
+    """Attempt adaptation after a step failure."""
+    adapt_prompt = (
+        f"Step {step_num} failed with error: {error}\n"
+        f"Original step was: {step_desc}\n"
+        f"Adapt the plan and try an alternative approach for this step."
+    )
+    adapt_run = await asyncio.to_thread(
+        agent.run, adapt_prompt, stream=False, session_id=f"{session_id}_adapt{step_num}"
+    )
+    adapt_result = _clean(_extract_content(adapt_run))
+    return AgenticStep(
+        step_num=step_num,
+        description=f"[Adapted] {step_desc}",
+        result=adapt_result,
+        status="adapted",
+        duration_ms=int((time.monotonic() - step_start) * 1000),
+    )
+
+
+def _summarize(result: AgenticResult, total_steps: int, was_truncated: bool) -> None:
+    """Fill in summary and final status on the result object (mutates in place)."""
+    completed = sum(1 for s in result.steps if s.status == "completed")
+    adapted = sum(1 for s in result.steps if s.status == "adapted")
+    failed = sum(1 for s in result.steps if s.status == "failed")
+
+    parts = [f"Completed {completed}/{total_steps} steps"]
+    if adapted:
+        parts.append(f"{adapted} adapted")
+    if failed:
+        parts.append(f"{failed} failed")
+    result.summary = f"{result.task}: {', '.join(parts)}."
+
+    if was_truncated or len(result.steps) < total_steps or failed:
+        result.status = "partial"
+    else:
+        result.status = "completed"
+
+
 # ---------------------------------------------------------------------------
 # Core loop
 # ---------------------------------------------------------------------------
```
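Read together, the helpers give `run_agentic_loop` a plan/execute/adapt/summarize skeleton. The sketch below condenses that flow, assuming it runs in the same module as the diff; progress broadcasting and the adapt-also-failed branch are trimmed for brevity:

```python
import time


async def run_condensed(agent, task: str, session_id: str, max_steps: int, result):
    # _plan_task signals failure with a str; success is (steps, was_truncated).
    plan = await _plan_task(agent, task, session_id, max_steps)
    if isinstance(plan, str):
        result.status = "failed"
        result.summary = plan
        return result

    steps, was_truncated = plan
    recent: list[str] = []
    for i, step_desc in enumerate(steps, 1):
        step_start = time.monotonic()
        try:
            step = await _execute_step(agent, task, step_desc, i, len(steps), recent, session_id)
        except Exception as exc:  # agent.run can raise anything; adapt, don't abort
            step = await _adapt_step(agent, step_desc, i, exc, step_start, session_id)
        result.steps.append(step)
        recent.append(f"Step {i}: {step.result[:200]}")

    _summarize(result, len(steps), was_truncated)
    return result
```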
```diff
@@ -125,88 +245,41 @@ async def run_agentic_loop(
     task_id = str(uuid.uuid4())[:8]
     start_time = time.monotonic()
 
     agent = _get_loop_agent()
     result = AgenticResult(task_id=task_id, task=task, summary="")
 
-    # ── Phase 1: Planning ──────────────────────────────────────────────────
-    plan_prompt = (
-        f"Break this task into numbered steps (max {max_steps}). "
-        f"Return ONLY a numbered list, nothing else.\n\n"
-        f"Task: {task}"
-    )
-    try:
-        plan_run = await asyncio.to_thread(
-            agent.run, plan_prompt, stream=False, session_id=f"{session_id}_plan"
-        )
-        plan_text = plan_run.content if hasattr(plan_run, "content") else str(plan_run)
-    except Exception as exc:  # broad catch intentional: agent.run can raise any error
-        logger.error("Agentic loop: planning failed: %s", exc)
+    # Phase 1: Planning
+    plan = await _plan_task(agent, task, session_id, max_steps)
+    if isinstance(plan, str):
         result.status = "failed"
-        result.summary = f"Planning failed: {exc}"
+        result.summary = plan
         result.total_duration_ms = int((time.monotonic() - start_time) * 1000)
         return result
 
-    steps = _parse_steps(plan_text)
-    if not steps:
-        result.status = "failed"
-        result.summary = "Planning produced no steps."
-        result.total_duration_ms = int((time.monotonic() - start_time) * 1000)
-        return result
-
-    # Enforce max_steps — track if we truncated
-    planned_steps = len(steps)
-    steps = steps[:max_steps]
+    steps, was_truncated = plan
     total_steps = len(steps)
-    was_truncated = planned_steps > total_steps
 
-    # Broadcast plan
     await _broadcast_progress(
         "agentic.plan_ready",
-        {
-            "task_id": task_id,
-            "task": task,
-            "steps": steps,
-            "total": total_steps,
-        },
+        {"task_id": task_id, "task": task, "steps": steps, "total": total_steps},
     )
 
-    # ── Phase 2: Execution ─────────────────────────────────────────────────
+    # Phase 2: Execution
     completed_results: list[str] = []
 
     for i, step_desc in enumerate(steps, 1):
         step_start = time.monotonic()
 
-        recent = completed_results[-2:] if completed_results else []
-        context = (
-            f"Task: {task}\n"
-            f"Step {i}/{total_steps}: {step_desc}\n"
-            f"Recent progress: {recent}\n\n"
-            f"Execute this step and report what you did."
-        )
-
         try:
-            step_run = await asyncio.to_thread(
-                agent.run, context, stream=False, session_id=f"{session_id}_step{i}"
-            )
-            step_result = step_run.content if hasattr(step_run, "content") else str(step_run)
-
-            # Clean the response
-            from timmy.session import _clean_response
-
-            step_result = _clean_response(step_result)
-
-            step = AgenticStep(
-                step_num=i,
-                description=step_desc,
-                result=step_result,
-                status="completed",
-                duration_ms=int((time.monotonic() - step_start) * 1000),
-            )
+            step = await _execute_step(
+                agent,
+                task,
+                step_desc,
+                i,
+                total_steps,
+                completed_results,
+                session_id,
+            )
             result.steps.append(step)
-            completed_results.append(f"Step {i}: {step_result[:200]}")
+            completed_results.append(f"Step {i}: {step.result[:200]}")
 
-            # Broadcast progress
             await _broadcast_progress(
                 "agentic.step_complete",
                 {
```
```diff
@@ -214,46 +287,18 @@ async def run_agentic_loop(
                     "step": i,
                     "total": total_steps,
                     "description": step_desc,
-                    "result": step_result[:200],
+                    "result": step.result[:200],
                 },
             )
 
             if on_progress:
                 await on_progress(step_desc, i, total_steps)
 
         except Exception as exc:  # broad catch intentional: agent.run can raise any error
             logger.warning("Agentic loop step %d failed: %s", i, exc)
 
-            # ── Adaptation: ask model to adapt ─────────────────────────────
-            adapt_prompt = (
-                f"Step {i} failed with error: {exc}\n"
-                f"Original step was: {step_desc}\n"
-                f"Adapt the plan and try an alternative approach for this step."
-            )
             try:
-                adapt_run = await asyncio.to_thread(
-                    agent.run,
-                    adapt_prompt,
-                    stream=False,
-                    session_id=f"{session_id}_adapt{i}",
-                )
-                adapt_result = (
-                    adapt_run.content if hasattr(adapt_run, "content") else str(adapt_run)
-                )
-                from timmy.session import _clean_response
-
-                adapt_result = _clean_response(adapt_result)
-
-                step = AgenticStep(
-                    step_num=i,
-                    description=f"[Adapted] {step_desc}",
-                    result=adapt_result,
-                    status="adapted",
-                    duration_ms=int((time.monotonic() - step_start) * 1000),
-                )
+                step = await _adapt_step(agent, step_desc, i, exc, step_start, session_id)
                 result.steps.append(step)
-                completed_results.append(f"Step {i} (adapted): {adapt_result[:200]}")
+                completed_results.append(f"Step {i} (adapted): {step.result[:200]}")
 
                 await _broadcast_progress(
                     "agentic.step_adapted",
                     {
```
```diff
@@ -262,46 +307,26 @@
                         "total": total_steps,
                         "description": step_desc,
                         "error": str(exc),
-                        "adaptation": adapt_result[:200],
+                        "adaptation": step.result[:200],
                     },
                 )
 
                 if on_progress:
                     await on_progress(f"[Adapted] {step_desc}", i, total_steps)
 
-            except Exception as adapt_exc:  # broad catch intentional: agent.run can raise any error
+            except Exception as adapt_exc:  # broad catch intentional
                 logger.error("Agentic loop adaptation also failed: %s", adapt_exc)
-                step = AgenticStep(
-                    step_num=i,
-                    description=step_desc,
-                    result=f"Failed: {exc}; Adaptation also failed: {adapt_exc}",
-                    status="failed",
-                    duration_ms=int((time.monotonic() - step_start) * 1000),
-                )
-                result.steps.append(step)
+                result.steps.append(
+                    AgenticStep(
+                        step_num=i,
+                        description=step_desc,
+                        result=f"Failed: {exc}; Adaptation also failed: {adapt_exc}",
+                        status="failed",
+                        duration_ms=int((time.monotonic() - step_start) * 1000),
+                    )
+                )
                 completed_results.append(f"Step {i}: FAILED")
 
-    # ── Phase 3: Summary ───────────────────────────────────────────────────
-    completed_count = sum(1 for s in result.steps if s.status == "completed")
-    adapted_count = sum(1 for s in result.steps if s.status == "adapted")
-    failed_count = sum(1 for s in result.steps if s.status == "failed")
-    parts = [f"Completed {completed_count}/{total_steps} steps"]
-    if adapted_count:
-        parts.append(f"{adapted_count} adapted")
-    if failed_count:
-        parts.append(f"{failed_count} failed")
-    result.summary = f"{task}: {', '.join(parts)}."
-
-    # Determine final status
-    if was_truncated:
-        result.status = "partial"
-    elif len(result.steps) < total_steps:
-        result.status = "partial"
-    elif any(s.status == "failed" for s in result.steps):
-        result.status = "partial"
-    else:
-        result.status = "completed"
+    # Phase 3: Summary
+    _summarize(result, total_steps, was_truncated)
 
     result.total_duration_ms = int((time.monotonic() - start_time) * 1000)
 
     await _broadcast_progress(
```
```diff
@@ -257,6 +257,28 @@ class ThinkingEngine:
             )
             return None
 
+        content, seed_type = await self._generate_thought(prompt)
+        if not content:
+            return None
+
+        thought = self._store_thought(content, seed_type)
+        self._last_thought_id = thought.id
+
+        await self._finalize_thought(thought)
+        return thought
+
+    async def _generate_thought(self, prompt: str | None = None) -> tuple[str | None, str]:
+        """Generate novel thought content via the dedup retry loop.
+
+        Gathers context, builds the LLM prompt, calls the agent, and
+        retries with a fresh seed if the result is too similar to recent
+        thoughts.
+
+        Returns:
+            A (content, seed_type) tuple. *content* is ``None`` when the
+            cycle should be skipped (agent failure, empty response, or
+            all retries exhausted).
+        """
         memory_context = self._load_memory_context()
         system_context = self._gather_system_snapshot()
         recent_thoughts = self.get_recent_thoughts(limit=5)
```
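After this hunk, `think_once` reduces to a three-phase pipeline: generate, store, finalize. Every early exit now flows through the `(None, seed_type)` tuple, which is why the bare `return None` statements in the next hunk grow a second element. The condensed shape, restated from the lines added above:

```python
async def think_once_condensed(engine, prompt=None):
    content, seed_type = await engine._generate_thought(prompt)
    if not content:  # agent failure, empty reply, or dedup retries exhausted
        return None

    thought = engine._store_thought(content, seed_type)
    engine._last_thought_id = thought.id

    await engine._finalize_thought(thought)  # hooks, journal, log, broadcast
    return thought
```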
```diff
@@ -284,11 +306,11 @@
             raw = await self._call_agent(full_prompt)
         except Exception as exc:
             logger.warning("Thinking cycle failed (Ollama likely down): %s", exc)
-            return None
+            return None, seed_type
 
         if not raw or not raw.strip():
             logger.debug("Thinking cycle produced empty response, skipping")
-            return None
+            return None, seed_type
 
         content = raw.strip()
 
```
```diff
@@ -308,48 +330,28 @@
                 "Thought still repetitive after %d retries, discarding",
                 self._MAX_DEDUP_RETRIES + 1,
             )
-            return None
+            return None, seed_type
 
-        if not content:
-            return None
+        return content, seed_type
 
-        thought = self._store_thought(content, seed_type)
-        self._last_thought_id = thought.id
-
-        # Post-hook: check memory status periodically
+    async def _finalize_thought(self, thought: Thought) -> None:
+        """Run post-hooks, log, journal, and broadcast a stored thought."""
         self._maybe_check_memory()
-
-        # Post-hook: distill facts from recent thoughts periodically
         await self._maybe_distill()
-
-        # Post-hook: file Gitea issues for actionable observations
         await self._maybe_file_issues()
-
-        # Post-hook: check workspace for new messages from Hermes
         await self._check_workspace()
-
-        # Post-hook: proactive memory status audit
         self._maybe_check_memory_status()
-
-        # Post-hook: update MEMORY.md with latest reflection
         self._update_memory(thought)
-
-        # Log to swarm event system
         self._log_event(thought)
-
-        # Append to daily journal file
         self._write_journal(thought)
-
-        # Broadcast to WebSocket clients
         await self._broadcast(thought)
 
         logger.info(
             "Thought [%s] (%s): %s",
             thought.id[:8],
-            seed_type,
+            thought.seed_type,
             thought.content[:80],
         )
-        return thought
 
     def get_recent_thoughts(self, limit: int = 20) -> list[Thought]:
         """Retrieve the most recent thoughts."""
```
```diff
@@ -250,6 +250,99 @@ def test_continuity_includes_recent(tmp_path):
 # ---------------------------------------------------------------------------
 
 
+# ---------------------------------------------------------------------------
+# _generate_thought helper
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_generate_thought_returns_content_and_seed_type(tmp_path):
+    """_generate_thought should return (content, seed_type) on success."""
+    from timmy.thinking import SEED_TYPES
+
+    engine = _make_engine(tmp_path)
+
+    with patch.object(engine, "_call_agent", return_value="A novel idea."):
+        content, seed_type = await engine._generate_thought()
+
+    assert content == "A novel idea."
+    assert seed_type in SEED_TYPES
+
+
+@pytest.mark.asyncio
+async def test_generate_thought_with_prompt(tmp_path):
+    """_generate_thought(prompt=...) should use 'prompted' seed type."""
+    engine = _make_engine(tmp_path)
+
+    with patch.object(engine, "_call_agent", return_value="A prompted idea."):
+        content, seed_type = await engine._generate_thought(prompt="Reflect on joy")
+
+    assert content == "A prompted idea."
+    assert seed_type == "prompted"
+
+
+@pytest.mark.asyncio
+async def test_generate_thought_returns_none_on_agent_failure(tmp_path):
+    """_generate_thought should return (None, ...) when the agent fails."""
+    engine = _make_engine(tmp_path)
+
+    with patch.object(engine, "_call_agent", side_effect=Exception("Ollama down")):
+        content, seed_type = await engine._generate_thought()
+
+    assert content is None
+
+
+@pytest.mark.asyncio
+async def test_generate_thought_returns_none_on_empty(tmp_path):
+    """_generate_thought should return (None, ...) when agent returns empty."""
+    engine = _make_engine(tmp_path)
+
+    with patch.object(engine, "_call_agent", return_value=" "):
+        content, seed_type = await engine._generate_thought()
+
+    assert content is None
+
+
+# ---------------------------------------------------------------------------
+# _finalize_thought helper
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_finalize_thought_calls_all_hooks(tmp_path):
+    """_finalize_thought should call all post-hooks, log, journal, and broadcast."""
+    engine = _make_engine(tmp_path)
+    thought = engine._store_thought("Test finalize.", "freeform")
+
+    with (
+        patch.object(engine, "_maybe_check_memory") as m_mem,
+        patch.object(engine, "_maybe_distill", new_callable=AsyncMock) as m_distill,
+        patch.object(engine, "_maybe_file_issues", new_callable=AsyncMock) as m_issues,
+        patch.object(engine, "_check_workspace", new_callable=AsyncMock) as m_ws,
+        patch.object(engine, "_maybe_check_memory_status") as m_status,
+        patch.object(engine, "_update_memory") as m_update,
+        patch.object(engine, "_log_event") as m_log,
+        patch.object(engine, "_write_journal") as m_journal,
+        patch.object(engine, "_broadcast", new_callable=AsyncMock) as m_broadcast,
+    ):
+        await engine._finalize_thought(thought)
+
+    m_mem.assert_called_once()
+    m_distill.assert_awaited_once()
+    m_issues.assert_awaited_once()
+    m_ws.assert_awaited_once()
+    m_status.assert_called_once()
+    m_update.assert_called_once_with(thought)
+    m_log.assert_called_once_with(thought)
+    m_journal.assert_called_once_with(thought)
+    m_broadcast.assert_awaited_once_with(thought)
+
+
+# ---------------------------------------------------------------------------
+# think_once (async)
+# ---------------------------------------------------------------------------
+
+
 @pytest.mark.asyncio
 async def test_think_once_stores_thought(tmp_path):
     """think_once should store a thought in the DB."""
```
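Two testing details the new cases rely on, shown in a minimal standalone form: `@pytest.mark.asyncio` comes from the pytest-asyncio plugin, and `AsyncMock` from `unittest.mock` is what lets the patched coroutine hooks be awaited and then verified with `assert_awaited_once`. The `Engine` class below is a hypothetical stand-in, not the repo's `ThinkingEngine`:

```python
from unittest.mock import AsyncMock, patch

import pytest


class Engine:
    async def _maybe_distill(self) -> None: ...


@pytest.mark.asyncio
async def test_asyncmock_is_awaitable():
    engine = Engine()
    with patch.object(engine, "_maybe_distill", new_callable=AsyncMock) as m:
        await engine._maybe_distill()  # awaiting the mock records the call
    m.assert_awaited_once()
```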