Features:
- tools/graph_store.py: Sovereign triple-store with Gitea persistence
- agent/symbolic_memory.py: Neural-to-symbolic bridge with multi-hop search
- skills/memory/intersymbolic_graph.py: Graph query skill
- Integrated into KnowledgeIngester for automatic symbolic extraction

Tests added:
- tests/tools/test_graph_store.py (127 lines)
- tests/agent/test_symbolic_memory.py (144 lines)

Reviewed and merged by Allegro (BURN MODE).

157 lines | 4.9 KiB | Python
"""Tests for Knowledge Graph Store.
|
|
|
|
Generated by Allegro during PR #9 review.
|
|
"""
|
|
|
|
import base64
import json
from unittest.mock import MagicMock, patch

import pytest
|
|
|
|
|
|
class TestGraphStore:
    """Test suite for tools/graph_store.py."""

    @pytest.fixture
    def mock_gitea(self):
        """Yield a MagicMock standing in for GiteaClient, patched into tools.graph_store."""
        with patch('tools.graph_store.GiteaClient') as MockGitea:
            mock = MagicMock()
            MockGitea.return_value = mock
            yield mock

    @pytest.fixture
    def store(self, mock_gitea):
        """Create a GraphStore wired to the mocked Gitea client."""
        # Imported lazily so the patch in mock_gitea is active before construction.
        from tools.graph_store import GraphStore
        return GraphStore()

    @staticmethod
    def _seed_graph(mock_gitea, triples):
        """Make the mocked Gitea return a stored graph containing *triples*.

        Mirrors the Gitea contents API: file content arrives base64-encoded.
        Factored out because four tests previously duplicated this setup.
        """
        payload = {"triples": triples, "entities": {}}
        mock_gitea.get_file.return_value = {
            "content": base64.b64encode(json.dumps(payload).encode()).decode()
        }

    def test_load_empty_graph(self, store, mock_gitea):
        """Should return empty graph when file doesn't exist."""
        mock_gitea.get_file.side_effect = Exception("404")

        graph = store._load_graph()

        assert graph == {"triples": [], "entities": {}}

    def test_add_triples_new(self, store, mock_gitea):
        """Should add new triples."""
        mock_gitea.get_file.side_effect = Exception("404")  # New file

        triples = [
            {"s": "Timmy", "p": "is_a", "o": "AI"},
            {"s": "Timmy", "p": "works_at", "o": "Foundation"},
        ]

        count = store.add_triples(triples)

        assert count == 2
        mock_gitea.create_file.assert_called_once()

    def test_add_triples_deduplication(self, store, mock_gitea):
        """Should not add duplicate triples."""
        self._seed_graph(mock_gitea, [{"s": "Timmy", "p": "is_a", "o": "AI"}])

        # Try to add same triple again
        count = store.add_triples([{"s": "Timmy", "p": "is_a", "o": "AI"}])

        assert count == 0  # No new triples added

    def test_query_by_subject(self, store, mock_gitea):
        """Should filter by subject."""
        self._seed_graph(mock_gitea, [
            {"s": "Timmy", "p": "is_a", "o": "AI"},
            {"s": "Allegro", "p": "is_a", "o": "AI"},
            {"s": "Timmy", "p": "works_at", "o": "Foundation"},
        ])

        results = store.query(subject="Timmy")

        assert len(results) == 2
        assert all(r["s"] == "Timmy" for r in results)

    def test_query_by_predicate(self, store, mock_gitea):
        """Should filter by predicate."""
        self._seed_graph(mock_gitea, [
            {"s": "Timmy", "p": "is_a", "o": "AI"},
            {"s": "Allegro", "p": "is_a", "o": "AI"},
            {"s": "Timmy", "p": "works_at", "o": "Foundation"},
        ])

        results = store.query(predicate="is_a")

        assert len(results) == 2
        assert all(r["p"] == "is_a" for r in results)

    def test_query_by_object(self, store, mock_gitea):
        """Should filter by object."""
        self._seed_graph(mock_gitea, [
            {"s": "Timmy", "p": "is_a", "o": "AI"},
            {"s": "Allegro", "p": "is_a", "o": "AI"},
            {"s": "Timmy", "p": "works_at", "o": "Foundation"},
        ])

        # NOTE(review): GraphStore.query appears to take a keyword named
        # `object` (shadows the builtin) — confirm against the implementation.
        results = store.query(object="AI")

        assert len(results) == 2
        assert all(r["o"] == "AI" for r in results)

    def test_query_combined_filters(self, store, mock_gitea):
        """Should support combined filters."""
        self._seed_graph(mock_gitea, [
            {"s": "Timmy", "p": "is_a", "o": "AI"},
            {"s": "Timmy", "p": "works_at", "o": "Foundation"},
        ])

        results = store.query(subject="Timmy", predicate="is_a")

        assert len(results) == 1
        assert results[0]["o"] == "AI"
|
class TestGraphStoreRaceCondition:
    """Document race condition behavior."""

    def test_concurrent_writes_risk(self):
        """Document that concurrent writes may lose triples.

        The store uses a read-modify-write cycle against Gitea, so two
        writers racing on the same file can silently drop each other's
        triples. Accepted for the MVP; a later iteration should add file
        locking or an atomic Gitea operation.
        """
        pass  # Intentionally empty: exists only to record the limitation.
|
|
|
|
|
if __name__ == "__main__":
|
|
pytest.main([__file__, "-v"])
|