Remove unused deps from poetry build, speed test suite to ~16s (#130)

This commit is contained in:
Alexander Whitestone
2026-03-05 18:07:59 -05:00
committed by GitHub
parent f2dacf4ee0
commit e8f1dea3ec
8 changed files with 29 additions and 22 deletions

View File

@@ -7,7 +7,6 @@ from __future__ import annotations
import json
import logging
import numpy as np
from typing import List, Union
logger = logging.getLogger(__name__)
@@ -47,7 +46,7 @@ class LocalEmbedder:
logger.error("sentence-transformers not installed. Run: pip install sentence-transformers")
raise
def encode(self, text: Union[str, List[str]]) -> np.ndarray:
def encode(self, text: Union[str, List[str]]):
"""Encode text to embedding vector(s).
Args:
@@ -64,20 +63,22 @@ class LocalEmbedder:
def encode_single(self, text: str) -> bytes:
    """Encode one text into raw float32 bytes suitable for SQLite storage.

    Args:
        text: The text to embed.

    Returns:
        The embedding serialized as float32 bytes.
    """
    # numpy is imported lazily so the module stays importable when
    # embeddings are disabled (e.g. the TIMMY_SKIP_EMBEDDINGS=1 test mode).
    import numpy as np

    vector = self.encode(text)
    # encode() may hand back a 2-D batch; keep only the first row.
    if vector.ndim > 1:
        vector = vector[0]
    return vector.astype(np.float32).tobytes()
def similarity(self, a: np.ndarray, b: np.ndarray) -> float:
def similarity(self, a, b) -> float:
    """Cosine similarity between two embedding vectors.

    The vectors are expected to already be normalized by encode(), so
    the cosine reduces to a plain dot product.
    """
    # Lazy import keeps numpy off the module-import path.
    import numpy as np

    dot = np.dot(a, b)
    return float(dot)

View File

@@ -106,6 +106,8 @@ class UnifiedMemory:
def _get_embedder(self):
"""Lazy-load the embedding model."""
if self._embedder is None:
if os.environ.get("TIMMY_SKIP_EMBEDDINGS") == "1":
return None
try:
from brain.embeddings import LocalEmbedder
self._embedder = LocalEmbedder()

View File

@@ -29,14 +29,14 @@ def _get_model():
if _model is not None:
return _model
import os
# In test mode or low-memory environments, skip embedding model load
if os.environ.get("TIMMY_SKIP_EMBEDDINGS") == "1":
_has_embeddings = False
return None
try:
from sentence_transformers import SentenceTransformer
import os
# In test mode or low-memory environments, we might want to skip this
if os.environ.get("TIMMY_SKIP_EMBEDDINGS") == "1":
_has_embeddings = False
return None
_model = SentenceTransformer('all-MiniLM-L6-v2')
_has_embeddings = True
return _model

View File

@@ -37,6 +37,10 @@ def _get_embedding_model():
"""Lazy-load embedding model."""
global EMBEDDING_MODEL
if EMBEDDING_MODEL is None:
import os
if os.environ.get("TIMMY_SKIP_EMBEDDINGS") == "1":
EMBEDDING_MODEL = False
return EMBEDDING_MODEL
try:
from sentence_transformers import SentenceTransformer
EMBEDDING_MODEL = SentenceTransformer('all-MiniLM-L6-v2')