@@ -1274,6 +1274,72 @@ class MnemosyneArchive:
|
||||
"unchanged": unchanged,
|
||||
}
|
||||
|
||||
def resonance(
    self,
    threshold: float = 0.3,
    limit: int = 20,
    topic: Optional[str] = None,
) -> list[dict]:
    """Surface latent connections: entry pairs that are similar but unlinked.

    The holographic linker wires up entries above its own threshold when
    they are ingested. This method walks every unordered pair that the
    linker did *not* connect and reports the ones whose similarity is
    still at or above ``threshold`` — the hidden potential edges of the
    graph, typically pairs ingested at different times or sitting just
    below the linker's cutoff.

    Args:
        threshold: Minimum similarity score to surface a pair (default 0.3).
            Already-linked pairs are excluded regardless of score.
        limit: Maximum number of pairs to return (default 20).
        topic: If set, restrict candidates to entries carrying this topic
            (case-insensitive). Both entries of a pair must match.

    Returns:
        List of dicts, sorted by ``score`` descending::

            {
                "entry_a": {"id": str, "title": str, "topics": list[str]},
                "entry_b": {"id": str, "title": str, "topics": list[str]},
                "score": float,  # similarity in [0, 1]
            }
    """
    pool = list(self._entries.values())

    if topic:
        wanted = topic.lower()
        pool = [e for e in pool if wanted in [t.lower() for t in e.topics]]

    def _describe(entry) -> dict:
        # Compact public view of an entry for the result payload.
        return {"id": entry.id, "title": entry.title, "topics": entry.topics}

    found: list[dict] = []

    for i in range(len(pool) - 1):
        first = pool[i]
        for second in pool[i + 1:]:
            # An existing link in either direction disqualifies the pair.
            already_linked = (
                second.id in first.links or first.id in second.links
            )
            if already_linked:
                continue

            score = self.linker.compute_similarity(first, second)
            if score >= threshold:
                found.append({
                    "entry_a": _describe(first),
                    "entry_b": _describe(second),
                    "score": round(score, 4),
                })

    found.sort(key=lambda pair: pair["score"], reverse=True)
    return found[:limit]
|
||||
|
||||
def rebuild_links(self, threshold: Optional[float] = None) -> int:
|
||||
"""Recompute all links from scratch.
|
||||
|
||||
@@ -1308,66 +1374,36 @@ class MnemosyneArchive:
|
||||
|
||||
self._save()
|
||||
return total_links
|
||||
def resonance(
|
||||
self,
|
||||
min_similarity: float = 0.25,
|
||||
max_similarity: float = 1.0,
|
||||
limit: int = 20,
|
||||
topic: Optional[str] = None,
|
||||
) -> list[dict]:
|
||||
"""Find latent connections — semantically similar entry pairs that are NOT linked.
|
||||
|
||||
Surfaces pairs that the holographic linker didn't connect (typically because
|
||||
their similarity fell between the resonance floor and the linker's threshold,
|
||||
or because they were added at different times). These "almost connected" pairs
|
||||
can reveal hidden thematic patterns.
|
||||
|
||||
Args:
|
||||
min_similarity: Minimum similarity score to include (default 0.25).
|
||||
max_similarity: Maximum similarity score to include.
|
||||
limit: Maximum number of pairs to return.
|
||||
topic: If set, only consider entries that have this topic.
|
||||
|
||||
Returns:
|
||||
List of dicts with keys: entry_a, entry_b, title_a, title_b, similarity.
|
||||
Sorted by similarity descending.
|
||||
"""
|
||||
entries = list(self._entries.values())
|
||||
|
||||
# Filter by topic if specified
|
||||
if topic:
|
||||
entries = [e for e in entries if topic in e.topics]
|
||||
|
||||
if len(entries) < 2:
|
||||
return []
|
||||
|
||||
# Build set of existing links for fast lookup
|
||||
linked_pairs: set[tuple[str, str]] = set()
|
||||
for entry in entries:
|
||||
for linked_id in entry.links:
|
||||
pair = tuple(sorted([entry.id, linked_id]))
|
||||
linked_pairs.add(pair)
|
||||
|
||||
# Compute similarity for all pairs
|
||||
# ─── Discovery ──────────────────────────────────────────────
|
||||
def discover(self, count=5, prefer_fading=True, topic=None):
|
||||
import random
|
||||
candidates = list(self._entries.values())
|
||||
if topic: candidates = [e for e in candidates if topic.lower() in [t.lower() for t in e.topics]]
|
||||
if not candidates: return []
|
||||
scored = [(e, self._compute_vitality(e)) for e in candidates]
|
||||
weights = [max(0.01, 1.0 - v) if prefer_fading else max(0.01, v) for _, v in scored]
|
||||
selected = random.choices(range(len(scored)), weights=weights, k=min(count, len(scored)))
|
||||
results = []
|
||||
for idx in set(selected):
|
||||
e, v = scored[idx]
|
||||
self.touch(e.id)
|
||||
results.append({"entry_id": e.id, "title": e.title, "topics": e.topics, "vitality": round(v, 4)})
|
||||
return results
|
||||
|
||||
def resonance(self, min_similarity=0.25, max_similarity=1.0, limit=20, topic=None):
    """Find latent connections — similar entry pairs that are NOT yet linked.

    Surfaces pairs the holographic linker didn't connect (typically
    because their similarity fell between the resonance floor and the
    linker's threshold, or because they were added at different times).

    Args:
        min_similarity: Lowest similarity score to include (default 0.25).
        max_similarity: Highest similarity score to include (default 1.0).
        limit: Maximum number of pairs to return (default 20).
        topic: If set, only entries carrying this exact topic string are
            considered. NOTE(review): this match is case-sensitive,
            unlike the case-insensitive topic filters elsewhere in this
            class — confirm whether that asymmetry is intended.

    Returns:
        List of dicts with keys ``entry_a``, ``entry_b``, ``title_a``,
        ``title_b``, ``similarity`` (rounded to 4 places), sorted by
        similarity descending, truncated to ``limit``.
    """
    entries = list(self._entries.values())

    if topic:
        entries = [e for e in entries if topic in e.topics]

    # Fewer than two candidates means no pairs to examine.
    if len(entries) < 2:
        return []

    # Existing links as order-independent id pairs, for O(1) lookup.
    linked: set[tuple] = set()
    for entry in entries:
        for other_id in entry.links:
            linked.add(tuple(sorted((entry.id, other_id))))

    results = []
    for i, a in enumerate(entries):
        for b in entries[i + 1:]:
            if tuple(sorted((a.id, b.id))) in linked:
                continue  # already connected — not a latent pair

            score = self.linker.compute_similarity(a, b)
            if min_similarity <= score <= max_similarity:
                results.append({
                    "entry_a": a.id,
                    "entry_b": b.id,
                    "title_a": a.title,
                    "title_b": b.title,
                    "similarity": round(score, 4),
                })

    results.sort(key=lambda item: item["similarity"], reverse=True)
    return results[:limit]
|
||||
|
||||
Reference in New Issue
Block a user