feat: enable SQLite WAL mode for all databases (AGI ticket #1) (#153)

This commit is contained in:
Alexander Whitestone
2026-03-08 16:07:02 -04:00
committed by GitHub
parent 11ba21418a
commit 82fb2417e3
31 changed files with 1042 additions and 170 deletions

View File

@@ -93,16 +93,19 @@ class UnifiedMemory:
conn = sqlite3.connect(str(self.db_path))
try:
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA busy_timeout=5000")
conn.executescript(_LOCAL_SCHEMA)
conn.commit()
logger.info("Brain local DB initialized at %s", self.db_path)
logger.info("Brain local DB initialized at %s (WAL mode)", self.db_path)
finally:
conn.close()
def _get_conn(self) -> sqlite3.Connection:
    """Open a connection to the local DB with Row access and a busy timeout.

    WAL journal mode is persistent (set once at init time), so only the
    per-connection busy_timeout pragma is applied here.
    """
    connection = sqlite3.connect(str(self.db_path))
    connection.row_factory = sqlite3.Row
    connection.execute("PRAGMA busy_timeout=5000")
    return connection
def _get_embedder(self):

View File

@@ -1,3 +1,6 @@
import logging as _logging
import os
import sys
from typing import Literal
from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -339,37 +342,50 @@ def get_effective_ollama_model() -> str:
# ── Startup validation ───────────────────────────────────────────────────────
# Enforce security requirements — fail fast in production.
import logging as _logging
import sys
_startup_logger = _logging.getLogger("config")
_startup_validated = False
# Production mode: require secrets to be set
if settings.timmy_env == "production":
_missing = []
if not settings.l402_hmac_secret:
_missing.append("L402_HMAC_SECRET")
if not settings.l402_macaroon_secret:
_missing.append("L402_MACAROON_SECRET")
if _missing:
_startup_logger.error(
"PRODUCTION SECURITY ERROR: The following secrets must be set: %s\n"
'Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"\n'
"Set in .env file or environment variables.",
", ".join(_missing),
)
sys.exit(1)
_startup_logger.info("Production mode: security secrets validated ✓")
else:
# Development mode: warn but continue
if not settings.l402_hmac_secret:
_startup_logger.warning(
"SEC: L402_HMAC_SECRET is not set — "
"set a unique secret in .env before deploying to production."
)
if not settings.l402_macaroon_secret:
_startup_logger.warning(
"SEC: L402_MACAROON_SECRET is not set — "
"set a unique secret in .env before deploying to production."
)
def validate_startup(*, force: bool = False) -> None:
    """Enforce security requirements — call from app entry points, not import.

    Skipped in test mode (TIMMY_TEST_MODE=1) unless force=True.
    In production: sys.exit(1) if required secrets are missing.
    In development: log warnings only.
    """
    global _startup_validated
    if _startup_validated and not force:
        return
    if not force and os.environ.get("TIMMY_TEST_MODE") == "1":
        _startup_validated = True
        return

    # Secrets checked in a fixed order so log output is deterministic.
    required = (
        ("L402_HMAC_SECRET", settings.l402_hmac_secret),
        ("L402_MACAROON_SECRET", settings.l402_macaroon_secret),
    )
    absent = [name for name, value in required if not value]

    if settings.timmy_env == "production":
        if absent:
            _startup_logger.error(
                "PRODUCTION SECURITY ERROR: The following secrets must be set: %s\n"
                'Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"\n'
                "Set in .env file or environment variables.",
                ", ".join(absent),
            )
            sys.exit(1)
        _startup_logger.info("Production mode: security secrets validated ✓")
    else:
        # Development mode: warn but continue.
        for name in absent:
            _startup_logger.warning(
                f"SEC: {name} is not set — "
                "set a unique secret in .env before deploying to production."
            )
    _startup_validated = True

View File

@@ -9,7 +9,6 @@ Key improvements:
import asyncio
import logging
import os
from contextlib import asynccontextmanager
from pathlib import Path
@@ -200,13 +199,23 @@ async def _discord_token_watcher() -> None:
async def lifespan(app: FastAPI):
"""Application lifespan manager with non-blocking startup."""
# Validate security config (no-op in test mode)
from config import validate_startup
validate_startup()
# Enable event persistence (unified EventBus + swarm event_log)
from infrastructure.events.bus import init_event_bus_persistence
init_event_bus_persistence()
# Create all background tasks without waiting for them
briefing_task = asyncio.create_task(_briefing_scheduler())
# Initialize Spark Intelligence engine
from spark.engine import spark_engine
from spark.engine import get_spark_engine
if spark_engine.enabled:
if get_spark_engine().enabled:
logger.info("Spark Intelligence active — event capture enabled")
# Auto-prune old vector store memories on startup

View File

@@ -116,9 +116,9 @@ class SecurityHeadersMiddleware(BaseHTTPMiddleware):
# HTTPS enforcement (production only)
if self.production:
response.headers[
"Strict-Transport-Security"
] = "max-age=31536000; includeSubDomains; preload"
response.headers["Strict-Transport-Security"] = (
"max-age=31536000; includeSubDomains; preload"
)
async def dispatch(self, request: Request, call_next) -> Response:
"""Add security headers to the response.

View File

@@ -40,8 +40,7 @@ def _get_db() -> sqlite3.Connection:
DB_PATH.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(DB_PATH))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("""
CREATE TABLE IF NOT EXISTS tasks (
id TEXT PRIMARY KEY,
title TEXT NOT NULL,
@@ -54,8 +53,7 @@ def _get_db() -> sqlite3.Connection:
created_at TEXT DEFAULT (datetime('now')),
completed_at TEXT
)
"""
)
""")
conn.commit()
return conn

View File

@@ -26,8 +26,7 @@ def _get_db() -> sqlite3.Connection:
DB_PATH.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(DB_PATH))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("""
CREATE TABLE IF NOT EXISTS work_orders (
id TEXT PRIMARY KEY,
title TEXT NOT NULL,
@@ -42,8 +41,7 @@ def _get_db() -> sqlite3.Connection:
created_at TEXT DEFAULT (datetime('now')),
completed_at TEXT
)
"""
)
""")
conn.commit()
return conn

View File

@@ -1,14 +1,18 @@
"""Async Event Bus for inter-agent communication.
Agents publish and subscribe to events for loose coupling.
Events are typed and carry structured data.
Events are typed and carry structured data. Optionally persists
events to SQLite for durability and replay.
"""
import asyncio
import json
import logging
import sqlite3
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Callable, Coroutine
from pathlib import Path
from typing import Any, Callable, Coroutine, Optional
logger = logging.getLogger(__name__)
@@ -27,32 +31,181 @@ class Event:
# Type alias for event handlers
EventHandler = Callable[[Event], Coroutine[Any, Any, None]]
# Schema for the unified events table
_EVENTS_SCHEMA = """
CREATE TABLE IF NOT EXISTS events (
id TEXT PRIMARY KEY,
event_type TEXT NOT NULL,
source TEXT DEFAULT '',
task_id TEXT DEFAULT '',
agent_id TEXT DEFAULT '',
data TEXT DEFAULT '{}',
timestamp TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_events_type ON events(event_type);
CREATE INDEX IF NOT EXISTS idx_events_source ON events(source);
CREATE INDEX IF NOT EXISTS idx_events_task ON events(task_id);
CREATE INDEX IF NOT EXISTS idx_events_ts ON events(timestamp);
"""
class EventBus:
"""Async event bus for publish/subscribe pattern.
Supports optional SQLite persistence via enable_persistence().
When enabled, all published events are durably stored and can be
replayed via the replay() method.
Usage:
bus = EventBus()
bus.enable_persistence(Path("data/events.db"))
# Subscribe to events
@bus.subscribe("agent.task.*")
async def handle_task(event: Event):
print(f"Task event: {event.data}")
# Publish events
await bus.publish(Event(
type="agent.task.assigned",
source="default",
data={"task_id": "123", "agent": "forge"}
))
# Replay persisted events
events = bus.replay(event_type="task.created")
"""
def __init__(self) -> None:
    """Set up an empty subscriber table, a bounded in-memory history, and
    persistence disabled until enable_persistence() is called."""
    self._persistence_db_path: Optional[Path] = None
    self._max_history = 1000
    self._history: list[Event] = []
    self._subscribers: dict[str, list[EventHandler]] = {}
    logger.info("EventBus initialized")
# ── Persistence ──────────────────────────────────────────────────────
def enable_persistence(self, db_path: Path) -> None:
    """Switch on durable SQLite storage for every subsequently published event.

    Args:
        db_path: Path to the SQLite database file.
    """
    # Record the path first — _init_persistence_db() reads it.
    self._persistence_db_path = db_path
    self._init_persistence_db()
    logger.info("EventBus persistence enabled at %s", db_path)
def _init_persistence_db(self) -> None:
    """Create the events table and indexes (idempotent).

    No-op when persistence has not been enabled.
    """
    db_path = self._persistence_db_path
    if db_path is None:
        return
    db_path.parent.mkdir(parents=True, exist_ok=True)
    conn = sqlite3.connect(str(db_path))
    try:
        # WAL + busy_timeout so concurrent readers/writers don't error out.
        conn.execute("PRAGMA journal_mode=WAL")
        conn.execute("PRAGMA busy_timeout=5000")
        conn.executescript(_EVENTS_SCHEMA)
        conn.commit()
    finally:
        conn.close()
def _get_persistence_conn(self) -> Optional[sqlite3.Connection]:
    """Open a Row-factory connection to the persistence DB.

    Returns None when persistence is disabled.
    """
    if self._persistence_db_path is None:
        return None
    connection = sqlite3.connect(str(self._persistence_db_path))
    connection.row_factory = sqlite3.Row
    connection.execute("PRAGMA busy_timeout=5000")
    return connection
def _persist_event(self, event: Event) -> None:
    """Best-effort write of one event to the persistence database.

    Duplicate ids are ignored; any failure is logged at debug level so
    persistence can never break publishing.
    """
    conn = self._get_persistence_conn()
    if conn is None:
        return
    try:
        row = (
            event.id,
            event.type,
            event.source,
            event.data.get("task_id", ""),
            event.data.get("agent_id", ""),
            json.dumps(event.data),
            event.timestamp,
        )
        conn.execute(
            "INSERT OR IGNORE INTO events "
            "(id, event_type, source, task_id, agent_id, data, timestamp) "
            "VALUES (?, ?, ?, ?, ?, ?, ?)",
            row,
        )
        conn.commit()
    except Exception as exc:
        logger.debug("Failed to persist event: %s", exc)
    finally:
        conn.close()
# ── Replay ───────────────────────────────────────────────────────────
def replay(
    self,
    event_type: Optional[str] = None,
    source: Optional[str] = None,
    task_id: Optional[str] = None,
    limit: int = 100,
) -> list[Event]:
    """Replay persisted events from SQLite with optional filters.

    Args:
        event_type: Filter by exact event type.
        source: Filter by event source.
        task_id: Filter by task_id.
        limit: Max events to return (most recent first).

    Returns:
        List of Event objects from persistent storage; empty list when
        persistence is disabled or the query fails.
    """
    conn = self._get_persistence_conn()
    if conn is None:
        return []
    try:
        clauses: list[str] = []
        args: list = []
        for column, value in (
            ("event_type", event_type),
            ("source", source),
            ("task_id", task_id),
        ):
            if value:
                clauses.append(f"{column} = ?")
                args.append(value)
        where = " AND ".join(clauses) or "1=1"
        sql = f"SELECT * FROM events WHERE {where} ORDER BY timestamp DESC LIMIT ?"
        args.append(limit)
        return [
            Event(
                id=row["id"],
                type=row["event_type"],
                source=row["source"],
                data=json.loads(row["data"]) if row["data"] else {},
                timestamp=row["timestamp"],
            )
            for row in conn.execute(sql, args).fetchall()
        ]
    except Exception as exc:
        logger.debug("Failed to replay events: %s", exc)
        return []
    finally:
        conn.close()
# ── Subscribe / Publish ──────────────────────────────────────────────
def subscribe(self, event_pattern: str) -> Callable[[EventHandler], EventHandler]:
"""Decorator to subscribe to events matching a pattern.
@@ -87,10 +240,15 @@ class EventBus:
async def publish(self, event: Event) -> int:
"""Publish an event to all matching subscribers.
If persistence is enabled, the event is also written to SQLite.
Returns:
Number of handlers invoked
"""
# Store in history
# Persist to SQLite (graceful — never crash on persistence failure)
self._persist_event(event)
# Store in in-memory history
self._history.append(event)
if len(self._history) > self._max_history:
self._history = self._history[-self._max_history :]
@@ -105,7 +263,8 @@ class EventBus:
# Invoke handlers concurrently
if handlers:
await asyncio.gather(
*[self._invoke_handler(h, event) for h in handlers], return_exceptions=True
*[self._invoke_handler(h, event) for h in handlers],
return_exceptions=True,
)
logger.debug("Published event '%s' to %d handlers", event.type, len(handlers))
@@ -147,18 +306,39 @@ class EventBus:
return events[-limit:]
def clear_history(self) -> None:
    """Empty the in-memory event history buffer in place."""
    del self._history[:]
# Module-level singleton
event_bus = EventBus()
# ── Lazy singleton ────────────────────────────────────────────────────────────
_event_bus: EventBus | None = None
def get_event_bus() -> EventBus:
    """Return the module-level EventBus, creating it on first access."""
    global _event_bus
    if _event_bus is not None:
        return _event_bus
    _event_bus = EventBus()
    return _event_bus
def init_event_bus_persistence(db_path: Optional[Path] = None) -> None:
    """Enable persistence on the module-level EventBus singleton.

    Call this during app startup to enable durable event storage.
    If db_path is not provided, uses `data/events.db`.
    No-op when persistence was already enabled.
    """
    bus = get_event_bus()
    if bus._persistence_db_path is None:
        bus.enable_persistence(db_path or Path("data/events.db"))
# Convenience functions
async def emit(event_type: str, source: str, data: dict) -> int:
"""Quick emit an event."""
return await event_bus.publish(
return await get_event_bus().publish(
Event(
type=event_type,
source=source,
@@ -169,4 +349,11 @@ async def emit(event_type: str, source: str, data: dict) -> int:
def on(event_pattern: str) -> Callable[[EventHandler], EventHandler]:
"""Quick subscribe decorator."""
return event_bus.subscribe(event_pattern)
return get_event_bus().subscribe(event_pattern)
def __getattr__(name: str):
    """Module-level __getattr__ for lazy backward-compatible access to event_bus."""
    if name != "event_bus":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return get_event_bus()

View File

@@ -67,8 +67,9 @@ def _get_conn() -> sqlite3.Connection:
DB_PATH.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(DB_PATH))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA busy_timeout=5000")
conn.execute("""
CREATE TABLE IF NOT EXISTS custom_models (
name TEXT PRIMARY KEY,
format TEXT NOT NULL,
@@ -81,18 +82,15 @@ def _get_conn() -> sqlite3.Connection:
default_temperature REAL NOT NULL DEFAULT 0.7,
max_tokens INTEGER NOT NULL DEFAULT 2048
)
"""
)
conn.execute(
"""
""")
conn.execute("""
CREATE TABLE IF NOT EXISTS agent_model_assignments (
agent_id TEXT PRIMARY KEY,
model_name TEXT NOT NULL,
assigned_at TEXT NOT NULL,
FOREIGN KEY (model_name) REFERENCES custom_models(name)
)
"""
)
""")
conn.commit()
return conn

View File

@@ -31,8 +31,7 @@ logger = logging.getLogger(__name__)
class Orchestrator(Protocol):
"""Anything with an ``execute_task`` matching Timmy's orchestrator."""
async def execute_task(self, task_id: str, description: str, context: dict) -> Any:
...
async def execute_task(self, task_id: str, description: str, context: dict) -> Any: ...
def _wrap_orchestrator(orch: Orchestrator) -> Callable:

View File

@@ -44,8 +44,9 @@ def _get_conn() -> sqlite3.Connection:
DB_PATH.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(DB_PATH))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA busy_timeout=5000")
conn.execute("""
CREATE TABLE IF NOT EXISTS spark_predictions (
id TEXT PRIMARY KEY,
task_id TEXT NOT NULL,
@@ -56,8 +57,7 @@ def _get_conn() -> sqlite3.Connection:
created_at TEXT NOT NULL,
evaluated_at TEXT
)
"""
)
""")
conn.execute("CREATE INDEX IF NOT EXISTS idx_pred_task ON spark_predictions(task_id)")
conn.execute("CREATE INDEX IF NOT EXISTS idx_pred_type ON spark_predictions(prediction_type)")
conn.commit()
@@ -277,8 +277,7 @@ def get_predictions(
def get_accuracy_stats() -> dict:
"""Return aggregate accuracy statistics for the EIDOS loop."""
conn = _get_conn()
row = conn.execute(
"""
row = conn.execute("""
SELECT
COUNT(*) AS total_predictions,
COUNT(evaluated_at) AS evaluated,
@@ -286,8 +285,7 @@ def get_accuracy_stats() -> dict:
MIN(CASE WHEN accuracy IS NOT NULL THEN accuracy END) AS min_accuracy,
MAX(CASE WHEN accuracy IS NOT NULL THEN accuracy END) AS max_accuracy
FROM spark_predictions
"""
).fetchone()
""").fetchone()
conn.close()
return {

View File

@@ -6,7 +6,8 @@ memory consolidation, and the advisory system.
Usage
-----
from spark.engine import spark_engine
from spark.engine import get_spark_engine
spark_engine = get_spark_engine()
# Capture a swarm event
spark_engine.on_task_posted(task_id, description)
@@ -346,14 +347,31 @@ class SparkEngine:
return spark_eidos.get_predictions(limit=limit)
# Module-level singleton — respects SPARK_ENABLED config
def _create_engine() -> SparkEngine:
    """Construct the SparkEngine, honoring the SPARK_ENABLED config flag.

    Any failure loading config (or constructing with it) falls back to an
    enabled engine so Spark is never silently unavailable.
    """
    try:
        from config import settings

        return SparkEngine(enabled=settings.spark_enabled)
    except Exception:
        return SparkEngine(enabled=True)
# ── Lazy singleton ────────────────────────────────────────────────────────────
_spark_engine: SparkEngine | None = None
spark_engine = _create_engine()
def get_spark_engine() -> SparkEngine:
    """Return the module-level SparkEngine, creating it on first access."""
    global _spark_engine
    if _spark_engine is not None:
        return _spark_engine
    try:
        from config import settings

        _spark_engine = SparkEngine(enabled=settings.spark_enabled)
    except Exception:
        # Config unavailable — default to an enabled engine.
        _spark_engine = SparkEngine(enabled=True)
    return _spark_engine
def reset_spark_engine() -> None:
    """Drop the cached SparkEngine so tests start from a clean slate."""
    global _spark_engine
    _spark_engine = None
def __getattr__(name: str):
    """Module-level __getattr__ for lazy backward-compatible access to spark_engine."""
    if name != "spark_engine":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return get_spark_engine()

View File

@@ -57,8 +57,9 @@ def _get_conn() -> sqlite3.Connection:
DB_PATH.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(DB_PATH))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA busy_timeout=5000")
conn.execute("""
CREATE TABLE IF NOT EXISTS spark_events (
id TEXT PRIMARY KEY,
event_type TEXT NOT NULL,
@@ -69,10 +70,8 @@ def _get_conn() -> sqlite3.Connection:
importance REAL NOT NULL DEFAULT 0.5,
created_at TEXT NOT NULL
)
"""
)
conn.execute(
"""
""")
conn.execute("""
CREATE TABLE IF NOT EXISTS spark_memories (
id TEXT PRIMARY KEY,
memory_type TEXT NOT NULL,
@@ -83,8 +82,7 @@ def _get_conn() -> sqlite3.Connection:
created_at TEXT NOT NULL,
expires_at TEXT
)
"""
)
""")
conn.execute("CREATE INDEX IF NOT EXISTS idx_events_type ON spark_events(event_type)")
conn.execute("CREATE INDEX IF NOT EXISTS idx_events_agent ON spark_events(agent_id)")
conn.execute("CREATE INDEX IF NOT EXISTS idx_events_task ON spark_events(task_id)")

View File

@@ -2,8 +2,12 @@
Provides EventType enum, EventLogEntry dataclass, and log_event() function
used by error_capture, thinking engine, and the event broadcaster.
Events are persisted to SQLite and also published to the unified EventBus
(infrastructure.events.bus) for subscriber notification.
"""
import json
import logging
import sqlite3
import uuid
@@ -73,8 +77,9 @@ def _ensure_db() -> sqlite3.Connection:
DB_PATH.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(DB_PATH))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA busy_timeout=5000")
conn.execute("""
CREATE TABLE IF NOT EXISTS events (
id TEXT PRIMARY KEY,
event_type TEXT NOT NULL,
@@ -84,12 +89,45 @@ def _ensure_db() -> sqlite3.Connection:
data TEXT DEFAULT '{}',
timestamp TEXT NOT NULL
)
"""
)
""")
conn.commit()
return conn
def _publish_to_event_bus(entry: EventLogEntry) -> None:
    """Publish an event to the unified EventBus (non-blocking).

    This bridges the synchronous log_event() callers to the async EventBus
    so subscribers get notified of all events regardless of origin.

    Never raises: import errors, missing event loop, or bad payloads are
    all swallowed so event logging cannot crash the caller.
    """
    try:
        import asyncio

        from infrastructure.events.bus import Event, event_bus

        event = Event(
            id=entry.id,
            type=entry.event_type.value,
            source=entry.source,
            data={
                **entry.data,
                "task_id": entry.task_id,
                "agent_id": entry.agent_id,
            },
            timestamp=entry.timestamp,
        )
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No event loop running — skip async publish
            return
        # BUG FIX: asyncio keeps only a weak reference to tasks, so a
        # fire-and-forget create_task() result can be garbage-collected
        # before it runs. Hold a strong reference until the task completes.
        pending = _publish_to_event_bus.__dict__.setdefault("_pending", set())
        task = asyncio.create_task(event_bus.publish(event))
        pending.add(task)
        task.add_done_callback(pending.discard)
    except Exception:
        # Graceful degradation — never crash on EventBus integration
        pass
def log_event(
event_type: EventType,
source: str = "",
@@ -99,11 +137,9 @@ def log_event(
) -> EventLogEntry:
"""Record an event and return the entry.
Also broadcasts to WebSocket clients via the event broadcaster
(lazy import to avoid circular deps).
Persists to SQLite, publishes to EventBus for subscribers,
and broadcasts to WebSocket clients.
"""
import json
entry = EventLogEntry(
id=str(uuid.uuid4()),
event_type=event_type,
@@ -137,6 +173,9 @@ def log_event(
except Exception as exc:
logger.debug("Failed to persist event: %s", exc)
# Publish to unified EventBus (non-blocking)
_publish_to_event_bus(entry)
# Broadcast to WebSocket clients (non-blocking)
try:
from infrastructure.events.broadcaster import event_broadcaster
@@ -150,8 +189,6 @@ def log_event(
def get_task_events(task_id: str, limit: int = 50) -> list[EventLogEntry]:
"""Retrieve events for a specific task."""
import json
db = _ensure_db()
try:
rows = db.execute(

View File

@@ -29,8 +29,9 @@ def _ensure_db() -> sqlite3.Connection:
DB_PATH.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(DB_PATH))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA busy_timeout=5000")
conn.execute("""
CREATE TABLE IF NOT EXISTS tasks (
id TEXT PRIMARY KEY,
title TEXT NOT NULL,
@@ -43,8 +44,7 @@ def _ensure_db() -> sqlite3.Connection:
created_at TEXT DEFAULT (datetime('now')),
completed_at TEXT
)
"""
)
""")
conn.commit()
return conn

View File

@@ -328,9 +328,7 @@ class TimmyOrchestrator(BaseAgent):
# Build session-specific context block for the prompt
recent_changes = self._session_context.get("git_log_oneline", "")
if recent_changes and recent_changes != "Git log unavailable":
self._session_context[
"recent_changes_block"
] = f"""
self._session_context["recent_changes_block"] = f"""
## Recent Changes to Your Codebase (last 15 commits):
```
{recent_changes}

View File

@@ -48,8 +48,7 @@ def _get_conn(db_path: Path = _DEFAULT_DB) -> sqlite3.Connection:
db_path.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(db_path))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("""
CREATE TABLE IF NOT EXISTS approval_items (
id TEXT PRIMARY KEY,
title TEXT NOT NULL,
@@ -59,8 +58,7 @@ def _get_conn(db_path: Path = _DEFAULT_DB) -> sqlite3.Connection:
created_at TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'pending'
)
"""
)
""")
conn.commit()
return conn

View File

@@ -63,8 +63,7 @@ def _get_cache_conn(db_path: Path = _DEFAULT_DB) -> sqlite3.Connection:
db_path.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(db_path))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("""
CREATE TABLE IF NOT EXISTS briefings (
id INTEGER PRIMARY KEY AUTOINCREMENT,
generated_at TEXT NOT NULL,
@@ -72,8 +71,7 @@ def _get_cache_conn(db_path: Path = _DEFAULT_DB) -> sqlite3.Connection:
period_end TEXT NOT NULL,
summary TEXT NOT NULL
)
"""
)
""")
conn.commit()
return conn

View File

@@ -122,8 +122,7 @@ def _get_conn() -> sqlite3.Connection:
_has_vss = False
# Create tables
conn.execute(
"""
conn.execute("""
CREATE TABLE IF NOT EXISTS memory_entries (
id TEXT PRIMARY KEY,
content TEXT NOT NULL,
@@ -136,8 +135,7 @@ def _get_conn() -> sqlite3.Connection:
embedding TEXT, -- JSON array of floats
timestamp TEXT NOT NULL
)
"""
)
""")
# Create indexes
conn.execute("CREATE INDEX IF NOT EXISTS idx_memory_agent ON memory_entries(agent_id)")
@@ -193,8 +191,8 @@ def store_memory(
conn = _get_conn()
conn.execute(
"""
INSERT INTO memory_entries
(id, content, source, context_type, agent_id, task_id, session_id,
INSERT INTO memory_entries
(id, content, source, context_type, agent_id, task_id, session_id,
metadata, embedding, timestamp)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
@@ -505,7 +503,7 @@ def prune_memories(older_than_days: int = 90, keep_facts: bool = True) -> int:
if keep_facts:
cursor = conn.execute(
"""
DELETE FROM memory_entries
DELETE FROM memory_entries
WHERE timestamp < ? AND context_type != 'fact'
""",
(cutoff,),

View File

@@ -10,7 +10,6 @@ Handoff Protocol:
- Inject into next session automatically
"""
import hashlib
import logging
import re
from datetime import datetime, timezone
@@ -85,9 +84,9 @@ class HotMemory:
## Current Status
**Agent State:** Operational
**Mode:** Development
**Active Tasks:** 0
**Agent State:** Operational
**Mode:** Development
**Active Tasks:** 0
**Pending Decisions:** None
---
@@ -112,7 +111,7 @@ class HotMemory:
## User Profile
**Name:** (not set)
**Name:** (not set)
**Interests:** (to be learned)
---
@@ -160,9 +159,9 @@ class VaultMemory:
filepath = self.path / namespace / filename
# Add header
full_content = f"""# {name.replace('_', ' ').title()}
full_content = f"""# {name.replace("_", " ").title()}
> Created: {datetime.now(timezone.utc).isoformat()}
> Created: {datetime.now(timezone.utc).isoformat()}
> Namespace: {namespace}
---
@@ -236,8 +235,8 @@ class VaultMemory:
## Basic Information
**Name:** (unknown)
**Location:** (unknown)
**Name:** (unknown)
**Location:** (unknown)
**Occupation:** (unknown)
## Interests & Expertise
@@ -256,9 +255,7 @@ class VaultMemory:
---
*Last updated: {date}*
""".format(
date=datetime.now(timezone.utc).strftime("%Y-%m-%d")
)
""".format(date=datetime.now(timezone.utc).strftime("%Y-%m-%d"))
profile_path.write_text(default)
@@ -280,7 +277,7 @@ class HandoffProtocol:
"""Write handoff at session end."""
content = f"""# Last Session Handoff
**Session End:** {datetime.now(timezone.utc).isoformat()}
**Session End:** {datetime.now(timezone.utc).isoformat()}
**Duration:** (calculated on read)
## Summary
@@ -462,5 +459,26 @@ class MemorySystem:
return "\n\n---\n\n".join(context_parts)
# Module-level singleton
memory_system = MemorySystem()
# ── Lazy singleton ────────────────────────────────────────────────────────────
_memory_system: MemorySystem | None = None
def get_memory_system() -> MemorySystem:
    """Return the module-level MemorySystem, creating it on first access."""
    global _memory_system
    if _memory_system is not None:
        return _memory_system
    _memory_system = MemorySystem()
    return _memory_system
def reset_memory_system() -> None:
    """Drop the cached MemorySystem so tests start from a clean slate."""
    global _memory_system
    _memory_system = None
def __getattr__(name: str):
    """Module-level __getattr__ for lazy backward-compatible access."""
    if name != "memory_system":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return get_memory_system()

View File

@@ -115,8 +115,7 @@ class SemanticMemory:
"""Initialize SQLite with vector storage."""
self.db_path.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(self.db_path))
conn.execute(
"""
conn.execute("""
CREATE TABLE IF NOT EXISTS chunks (
id TEXT PRIMARY KEY,
source TEXT NOT NULL,
@@ -125,8 +124,7 @@ class SemanticMemory:
created_at TEXT NOT NULL,
source_hash TEXT NOT NULL
)
"""
)
""")
conn.execute("CREATE INDEX IF NOT EXISTS idx_source ON chunks(source)")
conn.commit()
conn.close()

View File

@@ -88,8 +88,7 @@ def _get_conn(db_path: Path = _DEFAULT_DB) -> sqlite3.Connection:
db_path.parent.mkdir(parents=True, exist_ok=True)
conn = sqlite3.connect(str(db_path))
conn.row_factory = sqlite3.Row
conn.execute(
"""
conn.execute("""
CREATE TABLE IF NOT EXISTS thoughts (
id TEXT PRIMARY KEY,
content TEXT NOT NULL,
@@ -97,8 +96,7 @@ def _get_conn(db_path: Path = _DEFAULT_DB) -> sqlite3.Connection:
parent_id TEXT,
created_at TEXT NOT NULL
)
"""
)
""")
conn.execute("CREATE INDEX IF NOT EXISTS idx_thoughts_time ON thoughts(created_at)")
conn.commit()
return conn