Compare commits


1 Commit

Author: Alexander Whitestone
SHA1: 3f4515db38
Message: feat(memory): add grounded observation synthesis layer
Date: 2026-04-22 11:20:42 -04:00
Checks: all successful; Lint / lint (pull_request) passed in 8s
8 changed files with 564 additions and 1742 deletions

View File

@@ -1,100 +0,0 @@
# Issue #954 Verification — maps skill guest_house / camp_site / bakery
Status: PASS
## Drift noted
Issue #954 asked for validation on `upstream/main` (commit `c5a814b23`).
A fresh `forge/main` checkout did not contain `skills/productivity/maps/`, so the forge branch was behind upstream for this feature cluster.
This branch ports the upstream maps skill files into the forge checkout and adds regression coverage.
## Automated verification
Command:
```bash
pytest -q tests/skills/test_maps_client.py
```
Result:
- 5 passed
Coverage added:
- maps skill files exist in the repo
- `guest_house` category maps to `tourism=guest_house`
- `camp_site` category maps to `tourism=camp_site`
- `bakery` expands to both `shop=bakery` and `amenity=bakery`
- dual-key bakery results dedupe correctly
- skill documentation lists the new categories and supersedes `find-nearby`
## Manual evidence
### 1) guest_house lookup
Command:
```bash
python3 skills/productivity/maps/scripts/maps_client.py nearby --near "Bath, United Kingdom" --category guest_house --limit 3
```
Observed results:
- Henrietta House — 390.3 m
- The Windsor — 437.2 m
- The Old Rectory Bed & Breakfast — 495.7 m
All returned `tourism=guest_house` in the raw tags.
### 2) camp_site lookup
Command:
```bash
python3 skills/productivity/maps/scripts/maps_client.py nearby --near "Yosemite Valley, California" --category camp_site --limit 5
```
Observed result:
- Yellow Pine Administrative Campground — 90.3 m
Returned `tourism=camp_site` in the raw tags.
### 3) bakery lookup via `shop=bakery`
Command:
```bash
python3 skills/productivity/maps/scripts/maps_client.py nearby --near "Lawrenceville, New Jersey" --category bakery --radius 5000 --limit 10
```
Observed results:
- The Gingered Peach — 713.8 m
- WildFlour Bakery — 741.9 m
Both returned `shop=bakery` in the raw tags.
### 4) bakery lookup via `amenity=bakery`
Command:
```bash
python3 skills/productivity/maps/scripts/maps_client.py nearby --near "20735 Stevens Creek Boulevard, Cupertino, CA" --category bakery --radius 600 --limit 5
```
Observed result:
- Paris Baguette — 28.6 m
Returned `amenity=bakery` in the raw tags (the result also carries `shop=bakery`), confirming that the dual-key union query reaches amenity-tagged bakeries as well.
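For context, a minimal sketch of how that dual-key expansion is exercised (helper names are taken from the regression tests added on this branch; the repo-relative path is an assumption):
```python
import importlib.util
from pathlib import Path

# Sketch only; loads the client the same way the regression tests do.
script = Path("skills/productivity/maps/scripts/maps_client.py")
spec = importlib.util.spec_from_file_location("maps_client", script)
maps_client = importlib.util.module_from_spec(spec)
spec.loader.exec_module(maps_client)

# bakery expands to both supported keys...
tag_pairs = maps_client._tags_for("bakery")
assert tag_pairs == [("shop", "bakery"), ("amenity", "bakery")]

# ...and the generated Overpass query unions node/way selectors for each key.
query = maps_client.build_overpass_nearby(None, None, 37.32, -122.03, 600, 5, tag_pairs=tag_pairs)
assert 'node["shop"="bakery"]' in query and 'node["amenity"="bakery"]' in query
```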
## Conclusion
PASS.
- `guest_house` resolves correctly
- `camp_site` resolves correctly
- `bakery` resolves through both supported keys
- forge/main drift from upstream/main was real and is addressed on this branch

View File

@@ -26,6 +26,7 @@ from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
from .store import MemoryStore
from .retrieval import FactRetriever
from .observations import ObservationSynthesizer
logger = logging.getLogger(__name__)
@@ -37,28 +38,29 @@ logger = logging.getLogger(__name__)
FACT_STORE_SCHEMA = {
"name": "fact_store",
"description": (
"Deep structured memory with algebraic reasoning. "
"Deep structured memory with algebraic reasoning and grounded observation synthesis. "
"Use alongside the memory tool — memory for always-on context, "
"fact_store for deep recall and compositional queries.\n\n"
"fact_store for deep recall, compositional queries, and higher-order observations.\n\n"
"ACTIONS (simple → powerful):\n"
"• add — Store a fact the user would expect you to remember.\n"
"• search — Keyword lookup ('editor config', 'deploy process').\n"
"• probe — Entity recall: ALL facts about a person/thing.\n"
"• related — What connects to an entity? Structural adjacency.\n"
"• reason — Compositional: facts connected to MULTIPLE entities simultaneously.\n"
"• observe — Synthesized higher-order observations backed by supporting facts.\n"
"• contradict — Memory hygiene: find facts making conflicting claims.\n"
"• update/remove/list — CRUD operations.\n\n"
"IMPORTANT: Before answering questions about the user, ALWAYS probe or reason first."
"IMPORTANT: Before answering questions about the user, ALWAYS probe/reason/observe first."
),
"parameters": {
"type": "object",
"properties": {
"action": {
"type": "string",
"enum": ["add", "search", "probe", "related", "reason", "contradict", "update", "remove", "list"],
"enum": ["add", "search", "probe", "related", "reason", "observe", "contradict", "update", "remove", "list"],
},
"content": {"type": "string", "description": "Fact content (required for 'add')."},
"query": {"type": "string", "description": "Search query (required for 'search')."},
"query": {"type": "string", "description": "Search query (required for 'search'/'observe')."},
"entity": {"type": "string", "description": "Entity name for 'probe'/'related'."},
"entities": {"type": "array", "items": {"type": "string"}, "description": "Entity names for 'reason'."},
"fact_id": {"type": "integer", "description": "Fact ID for 'update'/'remove'."},
@@ -66,6 +68,12 @@ FACT_STORE_SCHEMA = {
"tags": {"type": "string", "description": "Comma-separated tags."},
"trust_delta": {"type": "number", "description": "Trust adjustment for 'update'."},
"min_trust": {"type": "number", "description": "Minimum trust filter (default: 0.3)."},
"min_confidence": {"type": "number", "description": "Minimum observation confidence (default: 0.6)."},
"observation_type": {
"type": "string",
"enum": ["recurring_preference", "stable_direction", "behavioral_pattern"],
"description": "Optional observation type filter for 'observe'.",
},
"limit": {"type": "integer", "description": "Max results (default: 10)."},
},
"required": ["action"],
@@ -118,7 +126,9 @@ class HolographicMemoryProvider(MemoryProvider):
self._config = config or _load_plugin_config()
self._store = None
self._retriever = None
self._observation_synth = None
self._min_trust = float(self._config.get("min_trust_threshold", 0.3))
self._observation_min_confidence = float(self._config.get("observation_min_confidence", 0.6))
@property
def name(self) -> str:
@@ -177,6 +187,7 @@ class HolographicMemoryProvider(MemoryProvider):
hrr_weight=hrr_weight,
hrr_dim=hrr_dim,
)
self._observation_synth = ObservationSynthesizer(self._store)
self._session_id = session_id
def system_prompt_block(self) -> str:
@@ -193,30 +204,76 @@ class HolographicMemoryProvider(MemoryProvider):
"# Holographic Memory\n"
"Active. Empty fact store — proactively add facts the user would expect you to remember.\n"
"Use fact_store(action='add') to store durable structured facts about people, projects, preferences, decisions.\n"
"Use fact_store(action='observe') to synthesize higher-order observations with evidence.\n"
"Use fact_feedback to rate facts after using them (trains trust scores)."
)
return (
f"# Holographic Memory\n"
f"Active. {total} facts stored with entity resolution and trust scoring.\n"
f"Use fact_store to search, probe entities, reason across entities, or add facts.\n"
f"Use fact_store to search, probe entities, reason across entities, or synthesize observations.\n"
f"Use fact_feedback to rate facts after using them (trains trust scores)."
)
def prefetch(self, query: str, *, session_id: str = "") -> str:
if not self._retriever or not query:
if not query:
return ""
parts = []
raw_results = []
try:
results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
if not results:
return ""
if self._retriever:
raw_results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
except Exception as e:
logger.debug("Holographic prefetch fact search failed: %s", e)
raw_results = []
observations = []
try:
if self._observation_synth:
observations = self._observation_synth.observe(
query,
min_confidence=self._observation_min_confidence,
limit=3,
refresh=True,
)
except Exception as e:
logger.debug("Holographic prefetch observation search failed: %s", e)
observations = []
if not raw_results and observations:
seen_fact_ids = set()
evidence_backfill = []
for observation in observations:
for evidence in observation.get("evidence", []):
fact_id = evidence.get("fact_id")
if fact_id in seen_fact_ids:
continue
seen_fact_ids.add(fact_id)
evidence_backfill.append(evidence)
raw_results = evidence_backfill[:5]
if raw_results:
lines = []
for r in results:
for r in raw_results:
trust = r.get("trust_score", r.get("trust", 0))
lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
return "## Holographic Memory\n" + "\n".join(lines)
except Exception as e:
logger.debug("Holographic prefetch failed: %s", e)
return ""
parts.append("## Holographic Memory\n" + "\n".join(lines))
if observations:
lines = []
for observation in observations:
evidence_ids = ", ".join(
f"#{item['fact_id']}" for item in observation.get("evidence", [])[:3]
) or "none"
lines.append(
f"- [{observation.get('confidence', 0.0):.2f}] "
f"{observation.get('observation_type', 'observation')}: "
f"{observation.get('summary', '')} "
f"(evidence: {evidence_ids})"
)
parts.append("## Holographic Observations\n" + "\n".join(lines))
return "\n\n".join(parts)
def sync_turn(self, user_content: str, assistant_content: str, *, session_id: str = "") -> None:
# Holographic memory stores explicit facts via tools, not auto-sync.
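With a couple of matching facts stored, the prefetch block assembled above would look roughly like this (fact contents, trust scores, and confidence are illustrative):

```
## Holographic Memory
- [0.5] User prefers concise status updates
- [0.5] User wants result-only replies with no fluff

## Holographic Observations
- [0.75] recurring_preference: Recurring preference: communication style trends toward concise, result-first based on 2 supporting facts. (evidence: #1, #2)
```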
@@ -252,6 +309,7 @@ class HolographicMemoryProvider(MemoryProvider):
def shutdown(self) -> None:
self._store = None
self._retriever = None
self._observation_synth = None
# -- Tool handlers -------------------------------------------------------
@@ -305,6 +363,19 @@ class HolographicMemoryProvider(MemoryProvider):
)
return json.dumps({"results": results, "count": len(results)})
elif action == "observe":
synthesizer = self._observation_synth
if not synthesizer:
return tool_error("Observation synthesizer is not initialized")
observations = synthesizer.observe(
args.get("query", ""),
observation_type=args.get("observation_type"),
min_confidence=float(args.get("min_confidence", self._observation_min_confidence)),
limit=int(args.get("limit", 10)),
refresh=True,
)
return json.dumps({"observations": observations, "count": len(observations)})
elif action == "contradict":
results = retriever.contradict(
category=args.get("category"),

View File

@@ -0,0 +1,249 @@
"""Higher-order observation synthesis for holographic memory.
Builds grounded observations from accumulated facts and keeps them in a
separate retrieval layer with explicit evidence links back to supporting facts.
"""
from __future__ import annotations
import re
from typing import Any
from .store import MemoryStore
_TOKEN_RE = re.compile(r"[a-z0-9_]+")
_HIGHER_ORDER_CUES = {
"prefer",
"preference",
"preferences",
"style",
"pattern",
"patterns",
"behavior",
"behaviour",
"habit",
"habits",
"workflow",
"direction",
"trajectory",
"strategy",
"tend",
"usually",
}
_OBSERVATION_PATTERNS = [
{
"observation_type": "recurring_preference",
"subject": "communication_style",
"categories": {"user_pref", "general"},
"labels": {
"concise": ["concise", "terse", "brief", "short", "no fluff"],
"result_first": ["result-only", "result only", "outcome only", "quick", "quickly"],
"silent_ops": ["silent", "no status", "no repetitive status", "no questions"],
},
"summary_prefix": "Recurring preference",
},
{
"observation_type": "stable_direction",
"subject": "project_direction",
"categories": {"project", "general", "tool"},
"labels": {
"local_first": ["local-first", "local first", "local-only", "local only", "ollama", "own hardware"],
"gitea_first": ["gitea-first", "gitea first", "forge", "pull request", "pr flow", "issue flow"],
"ansible": ["ansible", "playbook", "role", "deploy via ansible"],
},
"summary_prefix": "Stable direction",
},
{
"observation_type": "behavioral_pattern",
"subject": "operator_workflow",
"categories": {"general", "project", "tool", "user_pref"},
"labels": {
"commit_early": ["commit early", "commits early", "commit after", "wip commit"],
"pr_first": ["open pr", "push a pr", "pull request", "pr immediately", "create pr"],
"dedup_guard": ["no dupes", "no duplicates", "avoid duplicate", "existing pr"],
},
"summary_prefix": "Behavioral pattern",
},
]
_TYPE_QUERY_HINTS = {
"recurring_preference": {"prefer", "preference", "style", "communication", "likes", "wants"},
"stable_direction": {"direction", "trajectory", "strategy", "project", "roadmap", "moving"},
"behavioral_pattern": {"pattern", "behavior", "workflow", "habit", "operator", "agent", "usually"},
}
class ObservationSynthesizer:
"""Synthesizes grounded observations from facts and retrieves them by query."""
def __init__(self, store: MemoryStore):
self.store = store
def synthesize(
self,
*,
persist: bool = True,
min_confidence: float = 0.6,
limit: int = 10,
) -> list[dict[str, Any]]:
facts = self.store.list_facts(min_trust=0.0, limit=1000)
observations: list[dict[str, Any]] = []
for pattern in _OBSERVATION_PATTERNS:
candidate = self._build_candidate(pattern, facts, min_confidence=min_confidence)
if not candidate:
continue
if persist:
candidate["observation_id"] = self.store.upsert_observation(
candidate["observation_type"],
candidate["subject"],
candidate["summary"],
candidate["confidence"],
candidate["evidence_fact_ids"],
metadata=candidate["metadata"],
)
candidate["evidence"] = self._expand_evidence(candidate["evidence_fact_ids"])
candidate["evidence_count"] = len(candidate["evidence"])
candidate.pop("evidence_fact_ids", None)
observations.append(candidate)
observations.sort(
key=lambda item: (item["confidence"], item.get("evidence_count", 0)),
reverse=True,
)
return observations[:limit]
def observe(
self,
query: str = "",
*,
observation_type: str | None = None,
min_confidence: float = 0.6,
limit: int = 10,
refresh: bool = True,
) -> list[dict[str, Any]]:
if refresh:
self.synthesize(persist=True, min_confidence=min_confidence, limit=limit)
observations = self.store.list_observations(
observation_type=observation_type,
min_confidence=min_confidence,
limit=max(limit * 4, 20),
)
if not observations:
return []
if not query:
return observations[:limit]
query_tokens = self._tokenize(query)
is_higher_order = bool(query_tokens & _HIGHER_ORDER_CUES)
ranked: list[dict[str, Any]] = []
for item in observations:
searchable = " ".join(
[
item.get("summary", ""),
item.get("subject", ""),
item.get("observation_type", ""),
" ".join(item.get("metadata", {}).get("labels", [])),
]
)
overlap = self._overlap_score(query_tokens, self._tokenize(searchable))
type_bonus = self._type_bonus(query_tokens, item.get("observation_type", ""))
if overlap <= 0 and type_bonus <= 0 and not is_higher_order:
continue
ranked_item = dict(item)
ranked_item["score"] = round(item.get("confidence", 0.0) + overlap + type_bonus, 3)
ranked.append(ranked_item)
if not ranked and is_higher_order:
ranked = [
{**item, "score": round(float(item.get("confidence", 0.0)), 3)}
for item in observations
]
ranked.sort(
key=lambda item: (item.get("score", 0.0), item.get("confidence", 0.0), item.get("evidence_count", 0)),
reverse=True,
)
return ranked[:limit]
def _build_candidate(
self,
pattern: dict[str, Any],
facts: list[dict[str, Any]],
*,
min_confidence: float,
) -> dict[str, Any] | None:
matched_fact_ids: set[int] = set()
matched_labels: dict[str, set[int]] = {label: set() for label in pattern["labels"]}
for fact in facts:
if fact.get("category") not in pattern["categories"]:
continue
haystack = f"{fact.get('content', '')} {fact.get('tags', '')}".lower()
local_match = False
for label, keywords in pattern["labels"].items():
if any(keyword in haystack for keyword in keywords):
matched_labels[label].add(int(fact["fact_id"]))
local_match = True
if local_match:
matched_fact_ids.add(int(fact["fact_id"]))
if len(matched_fact_ids) < 2:
return None
active_labels = sorted(label for label, ids in matched_labels.items() if ids)
confidence = min(0.95, 0.35 + 0.12 * len(matched_fact_ids) + 0.08 * len(active_labels))
confidence = round(confidence, 3)
if confidence < min_confidence:
return None
label_summary = ", ".join(label.replace("_", "-") for label in active_labels)
subject_text = pattern["subject"].replace("_", " ")
summary = (
f"{pattern['summary_prefix']}: {subject_text} trends toward {label_summary} "
f"based on {len(matched_fact_ids)} supporting facts."
)
return {
"observation_type": pattern["observation_type"],
"subject": pattern["subject"],
"summary": summary,
"confidence": confidence,
"metadata": {
"labels": active_labels,
"evidence_count": len(matched_fact_ids),
},
"evidence_fact_ids": sorted(matched_fact_ids),
}
def _expand_evidence(self, fact_ids: list[int]) -> list[dict[str, Any]]:
facts_by_id = {
fact["fact_id"]: fact
for fact in self.store.list_facts(min_trust=0.0, limit=1000)
}
return [facts_by_id[fact_id] for fact_id in fact_ids if fact_id in facts_by_id]
@staticmethod
def _tokenize(text: str) -> set[str]:
return set(_TOKEN_RE.findall(text.lower()))
@staticmethod
def _overlap_score(query_tokens: set[str], text_tokens: set[str]) -> float:
if not query_tokens or not text_tokens:
return 0.0
overlap = query_tokens & text_tokens
if not overlap:
return 0.0
return round(len(overlap) / max(len(query_tokens), 1), 3)
@staticmethod
def _type_bonus(query_tokens: set[str], observation_type: str) -> float:
hints = _TYPE_QUERY_HINTS.get(observation_type, set())
if not hints:
return 0.0
return 0.25 if query_tokens & hints else 0.0
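End to end, a minimal usage sketch of the synthesizer (db path and fact contents are illustrative; the import path assumes the plugin package layout used in the tests):

```python
from plugins.memory.holographic.store import MemoryStore
from plugins.memory.holographic.observations import ObservationSynthesizer

# Illustrative facts; the categories match the pattern tables above.
store = MemoryStore(db_path="/tmp/memory.db", default_trust=0.5)
store.add_fact("User prefers concise status updates", category="user_pref")
store.add_fact("User wants result-only replies with no fluff", category="user_pref")

synth = ObservationSynthesizer(store)
for obs in synth.observe("what communication style does the user prefer?", min_confidence=0.6, limit=5):
    # Each observation carries its type, confidence, summary, and expanded evidence facts.
    print(obs["observation_type"], obs["confidence"], obs["summary"])
    print("evidence:", [e["fact_id"] for e in obs["evidence"]])
```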

View File

@@ -3,6 +3,7 @@ SQLite-backed fact store with entity resolution and trust scoring.
Single-user Hermes memory store plugin.
"""
import json
import re
import sqlite3
import threading
@@ -73,6 +74,28 @@ CREATE TABLE IF NOT EXISTS memory_banks (
fact_count INTEGER DEFAULT 0,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS observations (
observation_id INTEGER PRIMARY KEY AUTOINCREMENT,
observation_type TEXT NOT NULL,
subject TEXT NOT NULL,
summary TEXT NOT NULL,
confidence REAL DEFAULT 0.0,
metadata_json TEXT DEFAULT '{}',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
UNIQUE(observation_type, subject)
);
CREATE TABLE IF NOT EXISTS observation_evidence (
observation_id INTEGER REFERENCES observations(observation_id) ON DELETE CASCADE,
fact_id INTEGER REFERENCES facts(fact_id) ON DELETE CASCADE,
evidence_weight REAL DEFAULT 1.0,
PRIMARY KEY (observation_id, fact_id)
);
CREATE INDEX IF NOT EXISTS idx_observations_type ON observations(observation_type);
CREATE INDEX IF NOT EXISTS idx_observations_confidence ON observations(confidence DESC);
"""
# Trust adjustment constants
@@ -128,6 +151,7 @@ class MemoryStore:
def _init_db(self) -> None:
"""Create tables, indexes, and triggers if they do not exist. Enable WAL mode."""
self._conn.execute("PRAGMA journal_mode=WAL")
self._conn.execute("PRAGMA foreign_keys=ON")
self._conn.executescript(_SCHEMA)
# Migrate: add hrr_vector column if missing (safe for existing databases)
columns = {row[1] for row in self._conn.execute("PRAGMA table_info(facts)").fetchall()}
@@ -346,6 +370,115 @@ class MemoryStore:
rows = self._conn.execute(sql, params).fetchall()
return [self._row_to_dict(r) for r in rows]
def upsert_observation(
self,
observation_type: str,
subject: str,
summary: str,
confidence: float,
evidence_fact_ids: list[int],
metadata: dict | None = None,
) -> int:
"""Create or update a synthesized observation and its evidence links."""
with self._lock:
metadata_json = json.dumps(metadata or {}, sort_keys=True)
self._conn.execute(
"""
INSERT INTO observations (
observation_type, subject, summary, confidence, metadata_json
)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT(observation_type, subject) DO UPDATE SET
summary = excluded.summary,
confidence = excluded.confidence,
metadata_json = excluded.metadata_json,
updated_at = CURRENT_TIMESTAMP
""",
(observation_type, subject, summary, confidence, metadata_json),
)
row = self._conn.execute(
"""
SELECT observation_id
FROM observations
WHERE observation_type = ? AND subject = ?
""",
(observation_type, subject),
).fetchone()
observation_id = int(row["observation_id"])
self._conn.execute(
"DELETE FROM observation_evidence WHERE observation_id = ?",
(observation_id,),
)
unique_fact_ids = sorted({int(fid) for fid in evidence_fact_ids})
if unique_fact_ids:
self._conn.executemany(
"""
INSERT OR IGNORE INTO observation_evidence (observation_id, fact_id)
VALUES (?, ?)
""",
[(observation_id, fact_id) for fact_id in unique_fact_ids],
)
self._conn.commit()
return observation_id
def list_observations(
self,
observation_type: str | None = None,
min_confidence: float = 0.0,
limit: int = 50,
) -> list[dict]:
"""List synthesized observations with expanded supporting evidence."""
with self._lock:
params: list = [min_confidence]
observation_clause = ""
if observation_type is not None:
observation_clause = "AND observation_type = ?"
params.append(observation_type)
params.append(limit)
rows = self._conn.execute(
f"""
SELECT observation_id, observation_type, subject, summary, confidence,
metadata_json, created_at, updated_at,
(
SELECT COUNT(*)
FROM observation_evidence oe
WHERE oe.observation_id = observations.observation_id
) AS evidence_count
FROM observations
WHERE confidence >= ?
{observation_clause}
ORDER BY confidence DESC, updated_at DESC
LIMIT ?
""",
params,
).fetchall()
results = []
for row in rows:
item = dict(row)
try:
item["metadata"] = json.loads(item.pop("metadata_json") or "{}")
except json.JSONDecodeError:
item["metadata"] = {}
item["evidence"] = self._get_observation_evidence(int(item["observation_id"]))
results.append(item)
return results
def _get_observation_evidence(self, observation_id: int) -> list[dict]:
rows = self._conn.execute(
"""
SELECT f.fact_id, f.content, f.category, f.tags, f.trust_score,
f.retrieval_count, f.helpful_count, f.created_at, f.updated_at
FROM observation_evidence oe
JOIN facts f ON f.fact_id = oe.fact_id
WHERE oe.observation_id = ?
ORDER BY f.trust_score DESC, f.updated_at DESC
""",
(observation_id,),
).fetchall()
return [self._row_to_dict(row) for row in rows]
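A minimal round-trip sketch of the two new store methods (db path and fact text illustrative); note that the upsert is keyed on `(observation_type, subject)` and re-running it replaces the summary, confidence, metadata, and evidence links:

```python
from plugins.memory.holographic.store import MemoryStore

store = MemoryStore(db_path="/tmp/memory.db", default_trust=0.5)
f1 = store.add_fact("Project is moving to a local-first deployment model", category="project")
f2 = store.add_fact("Project direction stays Gitea-first for issue and PR flow", category="project")

obs_id = store.upsert_observation(
    "stable_direction",
    "project_direction",
    "Stable direction: project direction trends toward gitea-first, local-first based on 2 supporting facts.",
    0.75,
    [f1, f2],
    metadata={"labels": ["gitea_first", "local_first"], "evidence_count": 2},
)

rows = store.list_observations(observation_type="stable_direction", min_confidence=0.6)
assert rows[0]["observation_id"] == obs_id and rows[0]["evidence_count"] == 2
```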
def record_feedback(self, fact_id: int, helpful: bool) -> dict:
"""Record user feedback and adjust trust asymmetrically.

View File

@@ -1,199 +0,0 @@
---
name: maps
description: >
Location intelligence — geocode a place, reverse-geocode coordinates,
find nearby places (46 POI categories), driving/walking/cycling
distance + time, turn-by-turn directions, timezone lookup, bounding
box + area for a named place, and POI search within a rectangle.
Uses OpenStreetMap + Overpass + OSRM. Free, no API key.
version: 1.2.0
author: Mibayy
license: MIT
metadata:
hermes:
tags: [maps, geocoding, places, routing, distance, directions, nearby, location, openstreetmap, nominatim, overpass, osrm]
category: productivity
requires_toolsets: [terminal]
supersedes: [find-nearby]
---
# Maps Skill
Location intelligence using free, open data sources. 8 commands, 46 POI categories, zero dependencies (Python stdlib only), no API key required.
Data sources: OpenStreetMap/Nominatim, Overpass API, OSRM, TimeAPI.io.
This skill supersedes the old `find-nearby` skill — all of find-nearby's
functionality is covered by the `nearby` command below, with the same
`--near "<place>"` shortcut and multi-category support.
## When to Use
- User sends a Telegram location pin (latitude/longitude in the message) → `nearby`
- User wants coordinates for a place name → `search`
- User has coordinates and wants the address → `reverse`
- User asks for nearby restaurants, hospitals, pharmacies, hotels, etc. → `nearby`
- User wants driving/walking/cycling distance or travel time → `distance`
- User wants turn-by-turn directions between two places → `directions`
- User wants timezone information for a location → `timezone`
- User wants to search for POIs within a geographic area → `area` + `bbox`
## Prerequisites
Python 3.8+ (stdlib only — no pip installs needed).
Script path: `~/.hermes/skills/maps/scripts/maps_client.py`
## Commands
```bash
MAPS=~/.hermes/skills/maps/scripts/maps_client.py
```
### search — Geocode a place name
```bash
python3 $MAPS search "Eiffel Tower"
python3 $MAPS search "1600 Pennsylvania Ave, Washington DC"
```
Returns: lat, lon, display name, type, bounding box, importance score.
### reverse — Coordinates to address
```bash
python3 $MAPS reverse 48.8584 2.2945
```
Returns: full address breakdown (street, city, state, country, postcode).
### nearby — Find places by category
```bash
# By coordinates (from a Telegram location pin, for example)
python3 $MAPS nearby 48.8584 2.2945 restaurant --limit 10
python3 $MAPS nearby 40.7128 -74.0060 hospital --radius 2000
# By address / city / zip / landmark — --near auto-geocodes
python3 $MAPS nearby --near "Times Square, New York" --category cafe
python3 $MAPS nearby --near "90210" --category pharmacy
# Multiple categories merged into one query
python3 $MAPS nearby --near "downtown austin" --category restaurant --category bar --limit 10
```
46 categories: restaurant, cafe, bar, hospital, pharmacy, hotel, guest_house,
camp_site, supermarket, atm, gas_station, parking, museum, park, school,
university, bank, police, fire_station, library, airport, train_station,
bus_stop, church, mosque, synagogue, dentist, doctor, cinema, theatre, gym,
swimming_pool, post_office, convenience_store, bakery, bookshop, laundry,
car_wash, car_rental, bicycle_rental, taxi, veterinary, zoo, playground,
stadium, nightclub.
Each result includes: `name`, `address`, `lat`/`lon`, `distance_m`,
`maps_url` (clickable Google Maps link), `directions_url` (Google Maps
directions from the search point), and promoted tags when available —
`cuisine`, `hours` (opening_hours), `phone`, `website`.
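An illustrative single result (field values, including the URL shapes, are made up for the example; only the field names follow the description above):
```json
{
  "name": "The Gingered Peach",
  "address": "Gordon Avenue, Lawrenceville, NJ",
  "lat": 40.2973,
  "lon": -74.7293,
  "distance_m": 713.8,
  "maps_url": "https://www.google.com/maps?q=40.2973,-74.7293",
  "directions_url": "https://www.google.com/maps/dir/?api=1&destination=40.2973,-74.7293",
  "cuisine": "bakery",
  "hours": "Tu-Sa 07:30-18:00",
  "phone": "+1-609-555-0147",
  "website": "https://example.com"
}
```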
### distance — Travel distance and time
```bash
python3 $MAPS distance "Paris" --to "Lyon"
python3 $MAPS distance "New York" --to "Boston" --mode driving
python3 $MAPS distance "Big Ben" --to "Tower Bridge" --mode walking
```
Modes: driving (default), walking, cycling. Returns road distance, duration,
and straight-line distance for comparison.
### directions — Turn-by-turn navigation
```bash
python3 $MAPS directions "Eiffel Tower" --to "Louvre Museum" --mode walking
python3 $MAPS directions "JFK Airport" --to "Times Square" --mode driving
```
Returns numbered steps with instruction, distance, duration, road name, and
maneuver type (turn, depart, arrive, etc.).
### timezone — Timezone for coordinates
```bash
python3 $MAPS timezone 48.8584 2.2945
python3 $MAPS timezone 35.6762 139.6503
```
Returns timezone name, UTC offset, and current local time.
### area — Bounding box and area for a place
```bash
python3 $MAPS area "Manhattan, New York"
python3 $MAPS area "London"
```
Returns bounding box coordinates, width/height in km, and approximate area.
Useful as input for the bbox command.
### bbox — Search within a bounding box
```bash
python3 $MAPS bbox 40.75 -74.00 40.77 -73.98 restaurant --limit 20
```
Finds POIs within a geographic rectangle. Use `area` first to get the
bounding box coordinates for a named place.
## Working With Telegram Location Pins
When a user sends a location pin, the message contains `latitude:` and
`longitude:` fields. Extract those and pass them straight to `nearby`:
```bash
# User sent a pin at 36.17, -115.14 and asked "find cafes nearby"
python3 $MAPS nearby 36.17 -115.14 cafe --radius 1500
```
Present results as a numbered list with names, distances, and the
`maps_url` field so the user gets a tap-to-open link in chat. For "open
now?" questions, check the `hours` field; if missing or unclear, verify
with `web_search` since OSM hours are community-maintained and not always
current.
## Workflow Examples
**"Find Italian restaurants near the Colosseum":**
1. `nearby --near "Colosseum Rome" --category restaurant --radius 500`
— one command, auto-geocoded
**"What's near this location pin they sent?":**
1. Extract lat/lon from the Telegram message
2. `nearby LAT LON cafe --radius 1500`
**"How do I walk from hotel to conference center?":**
1. `directions "Hotel Name" --to "Conference Center" --mode walking`
**"What restaurants are in downtown Seattle?":**
1. `area "Downtown Seattle"` → get bounding box
2. `bbox S W N E restaurant --limit 30`
## Pitfalls
- Nominatim ToS: max 1 req/s (handled automatically by the script)
- `nearby` requires lat/lon OR `--near "<address>"` — one of the two is needed
- OSRM routing coverage is best for Europe and North America
- Overpass API can be slow during peak hours; the script automatically
falls back between mirrors (overpass-api.de → overpass.kumi.systems)
- `distance` and `directions` use `--to` flag for the destination (not positional)
- If a zip code alone gives ambiguous results globally, include country/state
## Verification
```bash
python3 ~/.hermes/skills/maps/scripts/maps_client.py search "Statue of Liberty"
# Should return lat ~40.689, lon ~-74.044
python3 ~/.hermes/skills/maps/scripts/maps_client.py nearby --near "Times Square" --category restaurant --limit 3
# Should return a list of restaurants within ~500m of Times Square
```

File diff suppressed because it is too large.

View File

@@ -0,0 +1,96 @@
import json
import pytest
from plugins.memory.holographic import HolographicMemoryProvider
from plugins.memory.holographic.store import MemoryStore
@pytest.fixture()
def store(tmp_path):
db_path = tmp_path / "memory.db"
s = MemoryStore(db_path=str(db_path), default_trust=0.5)
yield s
s.close()
@pytest.fixture()
def provider(tmp_path):
p = HolographicMemoryProvider(
config={
"db_path": str(tmp_path / "memory.db"),
"default_trust": 0.5,
}
)
p.initialize(session_id="test-session")
yield p
if p._store:
p._store.close()
class TestObservationSynthesis:
def test_observe_action_persists_observation_with_evidence_links(self, provider):
fact_ids = [
provider._store.add_fact('User prefers concise status updates', category='user_pref'),
provider._store.add_fact('User wants result-only replies with no fluff', category='user_pref'),
]
result = json.loads(
provider.handle_tool_call(
'fact_store',
{
'action': 'observe',
'query': 'What communication style does the user prefer?',
'limit': 5,
},
)
)
assert result['count'] == 1
observation = result['observations'][0]
assert observation['observation_type'] == 'recurring_preference'
assert observation['confidence'] >= 0.6
assert sorted(item['fact_id'] for item in observation['evidence']) == sorted(fact_ids)
stored = provider._store.list_observations(limit=10)
assert len(stored) == 1
assert stored[0]['observation_type'] == 'recurring_preference'
assert stored[0]['evidence_count'] == 2
assert len(provider._store.list_facts(limit=10)) == 2
def test_observe_action_synthesizes_three_observation_types(self, provider):
provider._store.add_fact('User prefers concise updates', category='user_pref')
provider._store.add_fact('User wants result-only communication', category='user_pref')
provider._store.add_fact('Project is moving to a local-first deployment model', category='project')
provider._store.add_fact('Project direction stays Gitea-first for issue and PR flow', category='project')
provider._store.add_fact('Operator always commits early before moving on', category='general')
provider._store.add_fact('Operator pushes a PR immediately after each meaningful fix', category='general')
result = json.loads(provider.handle_tool_call('fact_store', {'action': 'observe', 'limit': 10}))
types = {item['observation_type'] for item in result['observations']}
assert {'recurring_preference', 'stable_direction', 'behavioral_pattern'} <= types
def test_single_fact_does_not_create_overconfident_observation(self, provider):
provider._store.add_fact('User prefers concise updates', category='user_pref')
result = json.loads(
provider.handle_tool_call(
'fact_store',
{'action': 'observe', 'query': 'What does the user prefer?', 'limit': 5},
)
)
assert result['count'] == 0
assert provider._store.list_observations(limit=10) == []
def test_prefetch_surfaces_observations_as_separate_layer(self, provider):
provider._store.add_fact('User prefers concise updates', category='user_pref')
provider._store.add_fact('User wants result-only communication', category='user_pref')
prefetch = provider.prefetch('What communication style does the user prefer?')
assert '## Holographic Observations' in prefetch
assert '## Holographic Memory' in prefetch
assert 'recurring_preference' in prefetch
assert 'evidence' in prefetch.lower()

View File

@@ -1,135 +0,0 @@
"""Regression tests for the bundled maps skill."""
from __future__ import annotations
import importlib.util
from pathlib import Path
from types import SimpleNamespace
SCRIPT_PATH = (
Path(__file__).resolve().parents[2]
/ "skills/productivity/maps/scripts/maps_client.py"
)
SKILL_PATH = (
Path(__file__).resolve().parents[2]
/ "skills/productivity/maps/SKILL.md"
)
def load_module():
assert SCRIPT_PATH.exists(), f"missing maps client script: {SCRIPT_PATH}"
spec = importlib.util.spec_from_file_location("maps_client_test", SCRIPT_PATH)
module = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(module)
return module
def test_maps_skill_files_exist():
assert SCRIPT_PATH.exists()
assert SKILL_PATH.exists()
def test_category_tags_cover_guest_house_camp_site_and_dual_key_bakery():
module = load_module()
assert module.CATEGORY_TAGS["guest_house"] == ("tourism", "guest_house")
assert module.CATEGORY_TAGS["camp_site"] == ("tourism", "camp_site")
assert module.CATEGORY_TAGS["bakery"] == [
("shop", "bakery"),
("amenity", "bakery"),
]
assert module._tags_for("bakery") == [
("shop", "bakery"),
("amenity", "bakery"),
]
def test_build_overpass_queries_include_all_supported_tags():
module = load_module()
bakery_query = module.build_overpass_nearby(
None,
None,
40.0,
-74.0,
500,
10,
tag_pairs=module._tags_for("bakery"),
)
assert 'node["shop"="bakery"]' in bakery_query
assert 'way["shop"="bakery"]' in bakery_query
assert 'node["amenity"="bakery"]' in bakery_query
assert 'way["amenity"="bakery"]' in bakery_query
guest_house_query = module.build_overpass_nearby(
None,
None,
40.0,
-74.0,
500,
10,
tag_pairs=module._tags_for("guest_house"),
)
assert 'node["tourism"="guest_house"]' in guest_house_query
assert 'way["tourism"="guest_house"]' in guest_house_query
camp_site_bbox = module.build_overpass_bbox(
None,
None,
39.0,
-75.0,
41.0,
-73.0,
10,
tag_pairs=module._tags_for("camp_site"),
)
assert 'node["tourism"="camp_site"]' in camp_site_bbox
assert 'way["tourism"="camp_site"]' in camp_site_bbox
def test_cmd_nearby_dedupes_dual_tag_bakery_results(monkeypatch, capsys):
module = load_module()
duplicate_bakery = {
"elements": [
{
"type": "node",
"id": 101,
"lat": 40.0,
"lon": -74.0,
"tags": {"name": "Wild Flour", "shop": "bakery"},
},
{
"type": "node",
"id": 101,
"lat": 40.0,
"lon": -74.0,
"tags": {"name": "Wild Flour", "amenity": "bakery"},
},
]
}
monkeypatch.setattr(module, "overpass_query", lambda query: duplicate_bakery)
args = SimpleNamespace(
lat="40.0",
lon="-74.0",
near=None,
category="bakery",
category_list=[],
radius=500,
limit=10,
)
module.cmd_nearby(args)
out = capsys.readouterr().out
assert '"count": 1' in out
assert '"Wild Flour"' in out
def test_skill_doc_lists_new_categories_and_supersession():
text = SKILL_PATH.read_text(encoding="utf-8")
assert "guest_house" in text
assert "camp_site" in text
assert "bakery" in text
assert "supersedes: [find-nearby]" in text