refactor: break circular imports between packages (#164)
Some checks failed
Tests / lint (pull_request) Failing after 4s
Tests / test (pull_request) Has been skipped

- Move dashboard/store.py to infrastructure/chat_store.py (cross-cutting)
- dashboard/store.py now re-exports for backward compatibility
- Move _START_TIME to config.APP_START_TIME
- Replace infrastructure->timmy import with callback registration
- Zero timmy->dashboard imports, zero infrastructure->timmy imports

9 files changed, +42/-173 lines
This commit is contained in:
Alexander Whitestone
2026-03-15 12:51:56 -04:00
parent b3840238cb
commit 1a5555a216
10 changed files with 195 additions and 173 deletions

View File

@@ -5,6 +5,10 @@ from typing import Literal
from pydantic_settings import BaseSettings, SettingsConfigDict
from datetime import UTC, datetime as _datetime
APP_START_TIME: _datetime = _datetime.now(UTC)
class Settings(BaseSettings):
"""Central configuration — all env-var access goes through this class."""

View File

@@ -375,6 +375,15 @@ async def lifespan(app: FastAPI):
# Start chat integrations in background
chat_task = asyncio.create_task(_start_chat_integrations_background())
# Register session logger with error capture (breaks infrastructure → timmy circular dep)
try:
from infrastructure.error_capture import register_error_recorder
from timmy.session_logger import get_session_logger
register_error_recorder(get_session_logger().record_error)
except Exception:
pass
logger.info("✓ Dashboard ready for requests")
yield

View File

@@ -52,7 +52,7 @@ class HealthStatus(BaseModel):
# Simple uptime tracking
_START_TIME = datetime.now(UTC)
from config import APP_START_TIME as _START_TIME
# Ollama health cache (30-second TTL)
_ollama_cache: DependencyStatus | None = None

View File

@@ -1,153 +1,5 @@
"""Persistent chat message store backed by SQLite.
"""Backward-compatible re-export — canonical home is infrastructure.chat_store."""
Provides the same API as the original in-memory MessageLog so all callers
(dashboard routes, chat_api, thinking, briefing) work without changes.
from infrastructure.chat_store import DB_PATH, MAX_MESSAGES, Message, MessageLog, message_log
Data lives in ``data/chat.db`` — survives server restarts.
A configurable retention policy (default 500 messages) keeps the DB lean.
"""
import sqlite3
import threading
from collections.abc import Generator
from contextlib import closing, contextmanager
from dataclasses import dataclass
from pathlib import Path
# ── Data dir — resolved relative to repo root (two levels up from this file) ──
_REPO_ROOT = Path(__file__).resolve().parents[2]
DB_PATH: Path = _REPO_ROOT / "data" / "chat.db"
# Maximum messages to retain (oldest pruned on append)
MAX_MESSAGES: int = 500
@dataclass
class Message:
role: str # "user" | "agent" | "error"
content: str
timestamp: str
source: str = "browser" # "browser" | "api" | "telegram" | "discord" | "system"
@contextmanager
def _get_conn(db_path: Path | None = None) -> Generator[sqlite3.Connection, None, None]:
"""Open (or create) the chat database and ensure schema exists."""
path = db_path or DB_PATH
path.parent.mkdir(parents=True, exist_ok=True)
with closing(sqlite3.connect(str(path), check_same_thread=False)) as conn:
conn.row_factory = sqlite3.Row
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("""
CREATE TABLE IF NOT EXISTS chat_messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
role TEXT NOT NULL,
content TEXT NOT NULL,
timestamp TEXT NOT NULL,
source TEXT NOT NULL DEFAULT 'browser'
)
""")
conn.commit()
yield conn
class MessageLog:
"""SQLite-backed chat history — drop-in replacement for the old in-memory list."""
def __init__(self, db_path: Path | None = None) -> None:
self._db_path = db_path or DB_PATH
self._lock = threading.Lock()
self._conn: sqlite3.Connection | None = None
# Lazy connection — opened on first use, not at import time.
def _ensure_conn(self) -> sqlite3.Connection:
if self._conn is None:
# Open a persistent connection for the class instance
path = self._db_path or DB_PATH
path.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(path), check_same_thread=False)
conn.row_factory = sqlite3.Row
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("""
CREATE TABLE IF NOT EXISTS chat_messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
role TEXT NOT NULL,
content TEXT NOT NULL,
timestamp TEXT NOT NULL,
source TEXT NOT NULL DEFAULT 'browser'
)
""")
conn.commit()
self._conn = conn
return self._conn
def append(self, role: str, content: str, timestamp: str, source: str = "browser") -> None:
with self._lock:
conn = self._ensure_conn()
conn.execute(
"INSERT INTO chat_messages (role, content, timestamp, source) VALUES (?, ?, ?, ?)",
(role, content, timestamp, source),
)
conn.commit()
self._prune(conn)
def all(self) -> list[Message]:
with self._lock:
conn = self._ensure_conn()
rows = conn.execute(
"SELECT role, content, timestamp, source FROM chat_messages ORDER BY id"
).fetchall()
return [
Message(
role=r["role"], content=r["content"], timestamp=r["timestamp"], source=r["source"]
)
for r in rows
]
def recent(self, limit: int = 50) -> list[Message]:
"""Return the *limit* most recent messages (oldest-first)."""
with self._lock:
conn = self._ensure_conn()
rows = conn.execute(
"SELECT role, content, timestamp, source FROM chat_messages "
"ORDER BY id DESC LIMIT ?",
(limit,),
).fetchall()
return [
Message(
role=r["role"], content=r["content"], timestamp=r["timestamp"], source=r["source"]
)
for r in reversed(rows)
]
def clear(self) -> None:
with self._lock:
conn = self._ensure_conn()
conn.execute("DELETE FROM chat_messages")
conn.commit()
def _prune(self, conn: sqlite3.Connection) -> None:
"""Keep at most MAX_MESSAGES rows, deleting the oldest."""
count = conn.execute("SELECT COUNT(*) FROM chat_messages").fetchone()[0]
if count > MAX_MESSAGES:
excess = count - MAX_MESSAGES
conn.execute(
"DELETE FROM chat_messages WHERE id IN "
"(SELECT id FROM chat_messages ORDER BY id LIMIT ?)",
(excess,),
)
conn.commit()
def close(self) -> None:
if self._conn is not None:
self._conn.close()
self._conn = None
def __len__(self) -> int:
with self._lock:
conn = self._ensure_conn()
return conn.execute("SELECT COUNT(*) FROM chat_messages").fetchone()[0]
# Module-level singleton shared across the app
message_log = MessageLog()
__all__ = ["DB_PATH", "MAX_MESSAGES", "Message", "MessageLog", "message_log"]

View File

@@ -0,0 +1,153 @@
"""Persistent chat message store backed by SQLite.
Provides the same API as the original in-memory MessageLog so all callers
(dashboard routes, chat_api, thinking, briefing) work without changes.
Data lives in ``data/chat.db`` — survives server restarts.
A configurable retention policy (default 500 messages) keeps the DB lean.
"""
import sqlite3
import threading
from collections.abc import Generator
from contextlib import closing, contextmanager
from dataclasses import dataclass
from pathlib import Path
# ── Data dir — resolved relative to repo root (three levels up from this file) ──
# NOTE(review): the previous dashboard/store.py used parents[2]; parents[3] is
# correct only if this module sits one directory deeper — confirm, otherwise
# DB_PATH resolves outside the repository.
_REPO_ROOT: Path = Path(__file__).resolve().parents[3]
# Database file lives under <repo>/data/ so it survives server restarts.
DB_PATH: Path = _REPO_ROOT / "data" / "chat.db"

# Maximum messages to retain (oldest pruned on append) — read dynamically by
# MessageLog._prune, so tests can patch it at runtime.
MAX_MESSAGES: int = 500
@dataclass
class Message:
    """One chat message as stored in (and read back from) the chat_messages table."""

    role: str  # "user" | "agent" | "error"
    content: str  # message body, stored verbatim
    timestamp: str  # caller-supplied string; stored as TEXT, never parsed here
    source: str = "browser"  # "browser" | "api" | "telegram" | "discord" | "system"
@contextmanager
def _get_conn(db_path: Path | None = None) -> Generator[sqlite3.Connection, None, None]:
"""Open (or create) the chat database and ensure schema exists."""
path = db_path or DB_PATH
path.parent.mkdir(parents=True, exist_ok=True)
with closing(sqlite3.connect(str(path), check_same_thread=False)) as conn:
conn.row_factory = sqlite3.Row
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("""
CREATE TABLE IF NOT EXISTS chat_messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
role TEXT NOT NULL,
content TEXT NOT NULL,
timestamp TEXT NOT NULL,
source TEXT NOT NULL DEFAULT 'browser'
)
""")
conn.commit()
yield conn
class MessageLog:
"""SQLite-backed chat history — drop-in replacement for the old in-memory list."""
def __init__(self, db_path: Path | None = None) -> None:
self._db_path = db_path or DB_PATH
self._lock = threading.Lock()
self._conn: sqlite3.Connection | None = None
# Lazy connection — opened on first use, not at import time.
def _ensure_conn(self) -> sqlite3.Connection:
if self._conn is None:
# Open a persistent connection for the class instance
path = self._db_path or DB_PATH
path.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(path), check_same_thread=False)
conn.row_factory = sqlite3.Row
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("""
CREATE TABLE IF NOT EXISTS chat_messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
role TEXT NOT NULL,
content TEXT NOT NULL,
timestamp TEXT NOT NULL,
source TEXT NOT NULL DEFAULT 'browser'
)
""")
conn.commit()
self._conn = conn
return self._conn
def append(self, role: str, content: str, timestamp: str, source: str = "browser") -> None:
with self._lock:
conn = self._ensure_conn()
conn.execute(
"INSERT INTO chat_messages (role, content, timestamp, source) VALUES (?, ?, ?, ?)",
(role, content, timestamp, source),
)
conn.commit()
self._prune(conn)
def all(self) -> list[Message]:
with self._lock:
conn = self._ensure_conn()
rows = conn.execute(
"SELECT role, content, timestamp, source FROM chat_messages ORDER BY id"
).fetchall()
return [
Message(
role=r["role"], content=r["content"], timestamp=r["timestamp"], source=r["source"]
)
for r in rows
]
def recent(self, limit: int = 50) -> list[Message]:
"""Return the *limit* most recent messages (oldest-first)."""
with self._lock:
conn = self._ensure_conn()
rows = conn.execute(
"SELECT role, content, timestamp, source FROM chat_messages "
"ORDER BY id DESC LIMIT ?",
(limit,),
).fetchall()
return [
Message(
role=r["role"], content=r["content"], timestamp=r["timestamp"], source=r["source"]
)
for r in reversed(rows)
]
def clear(self) -> None:
with self._lock:
conn = self._ensure_conn()
conn.execute("DELETE FROM chat_messages")
conn.commit()
def _prune(self, conn: sqlite3.Connection) -> None:
"""Keep at most MAX_MESSAGES rows, deleting the oldest."""
count = conn.execute("SELECT COUNT(*) FROM chat_messages").fetchone()[0]
if count > MAX_MESSAGES:
excess = count - MAX_MESSAGES
conn.execute(
"DELETE FROM chat_messages WHERE id IN "
"(SELECT id FROM chat_messages ORDER BY id LIMIT ?)",
(excess,),
)
conn.commit()
def close(self) -> None:
if self._conn is not None:
self._conn.close()
self._conn = None
def __len__(self) -> int:
with self._lock:
conn = self._ensure_conn()
return conn.execute("SELECT COUNT(*) FROM chat_messages").fetchone()[0]
# Module-level singleton shared across the app.
# Safe at import time: MessageLog defers opening the database until first use.
message_log = MessageLog()

View File

@@ -22,6 +22,14 @@ logger = logging.getLogger(__name__)
# In-memory dedup cache: hash -> last_seen timestamp
_dedup_cache: dict[str, datetime] = {}
_error_recorder = None
def register_error_recorder(fn):
"""Register a callback for recording errors to session log."""
global _error_recorder
_error_recorder = fn
def _stack_hash(exc: Exception) -> str:
"""Create a stable hash of the exception type + traceback locations.
@@ -220,17 +228,14 @@ def capture_error(
logger.warning("Bug report notification error: %s", exc)
pass
# 4. Record in session logger
try:
from timmy.session_logger import get_session_logger
session_logger = get_session_logger()
session_logger.record_error(
error=f"{type(exc).__name__}: {str(exc)}",
context=source,
)
except Exception as exc:
logger.warning("Bug report session logging error: %s", exc)
pass
# 4. Record in session logger (via registered callback)
if _error_recorder is not None:
try:
_error_recorder(
error=f"{type(exc).__name__}: {str(exc)}",
context=source,
)
except Exception as log_exc:
logger.warning("Bug report session logging error: %s", log_exc)
return task_id

View File

@@ -192,7 +192,7 @@ def _gather_task_queue_summary() -> str:
def _gather_chat_summary(since: datetime) -> str:
"""Pull recent chat messages from the in-memory log."""
try:
from dashboard.store import message_log
from infrastructure.chat_store import message_log
messages = message_log.all()
# Filter to messages in the briefing window (best-effort: no timestamps)

View File

@@ -617,7 +617,7 @@ class ThinkingEngine:
# Recent chat activity (in-memory, no I/O)
try:
from dashboard.store import message_log
from infrastructure.chat_store import message_log
messages = message_log.all()
if messages:

View File

@@ -298,9 +298,9 @@ def get_live_system_status() -> dict[str, Any]:
# Uptime
try:
from dashboard.routes.health import _START_TIME
from config import APP_START_TIME
uptime = (datetime.now(UTC) - _START_TIME).total_seconds()
uptime = (datetime.now(UTC) - APP_START_TIME).total_seconds()
result["uptime_seconds"] = int(uptime)
except Exception as exc:
logger.debug("Uptime calculation failed: %s", exc)

View File

@@ -1,6 +1,7 @@
"""Tests for SQLite-backed chat persistence (issue #46)."""
from dashboard.store import Message, MessageLog
import infrastructure.chat_store as _chat_store
def test_persistence_across_instances(tmp_path):
@@ -24,10 +25,8 @@ def test_persistence_across_instances(tmp_path):
def test_retention_policy(tmp_path):
"""Oldest messages are pruned when count exceeds MAX_MESSAGES."""
import dashboard.store as store_mod
original_max = store_mod.MAX_MESSAGES
store_mod.MAX_MESSAGES = 5 # Small limit for testing
original_max = _chat_store.MAX_MESSAGES
_chat_store.MAX_MESSAGES = 5 # Small limit for testing
try:
db = tmp_path / "chat.db"
@@ -42,7 +41,7 @@ def test_retention_policy(tmp_path):
assert msgs[-1].content == "msg-7"
log.close()
finally:
store_mod.MAX_MESSAGES = original_max
_chat_store.MAX_MESSAGES = original_max
def test_clear_removes_all(tmp_path):