Compare commits


1 commit

Author: Alexander Whitestone
SHA1: 0a814f5bef
Message: fix: vendor vision benchmark fixtures (#868)
Date: 2026-04-22 11:37:04 -04:00
Checks: Lint / lint (pull_request) successful in 11s
30 changed files with 343 additions and 181 deletions

benchmarks/test_images.json

@@ -1,194 +1,354 @@
 [
   {
     "id": "screenshot_github_home",
-    "url": "https://github.githubassets.com/images/modules/logos_page/GitHub-Mark.png",
+    "url": "test_images/screenshot_github_home.png",
     "category": "screenshot",
-    "expected_keywords": ["github", "logo", "mark"],
+    "expected_keywords": [
+      "github",
+      "logo",
+      "mark"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "diagram_mermaid_flow",
-    "url": "https://mermaid.ink/img/pako:eNpdkE9PwzAMxb-K5VOl7gc7sAOIIDuAw9gptnRaSJLSJttQStmXs9LCH-ymBOI1ef_42U6cUSae4IkDxbAAWtB6siSZXVhjQTlgl1nigHg5fRBOzSfebopROCu_cytObSfgLSE1ANOeZWkO2IH5upZxYot8m1hqAdpD_63WRl0xdUG1jdl9kPiOb_EWk2JBtPaiKkF4eVIYgO0EtkW-RSgC4gJ6HJYRG1UNdN0HNVd0Bftjj7X8P92qPj-F8l8T3w",
+    "url": "test_images/diagram_mermaid_flow.png",
     "category": "diagram",
-    "expected_keywords": ["flow", "diagram", "process"],
+    "expected_keywords": [
+      "flow",
+      "diagram",
+      "process"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": false
+    }
   },
   {
     "id": "photo_random_1",
-    "url": "https://picsum.photos/seed/vision1/400/300",
+    "url": "test_images/photo_random_1.png",
     "category": "photo",
     "expected_keywords": [],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "photo_random_2",
-    "url": "https://picsum.photos/seed/vision2/400/300",
+    "url": "test_images/photo_random_2.png",
     "category": "photo",
     "expected_keywords": [],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "chart_simple_bar",
-    "url": "https://quickchart.io/chart?c={type:'bar',data:{labels:['Q1','Q2','Q3','Q4'],datasets:[{label:'Revenue',data:[100,150,200,250]}]}}",
+    "url": "test_images/chart_simple_bar.png",
     "category": "chart",
-    "expected_keywords": ["bar", "chart", "revenue"],
+    "expected_keywords": [
+      "bar",
+      "chart",
+      "revenue"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": true}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": true
+    }
   },
   {
     "id": "chart_pie",
-    "url": "https://quickchart.io/chart?c={type:'pie',data:{labels:['A','B','C'],datasets:[{data:[30,50,20]}]}}",
+    "url": "test_images/chart_pie.png",
     "category": "chart",
-    "expected_keywords": ["pie", "chart", "percentage"],
+    "expected_keywords": [
+      "pie",
+      "chart",
+      "percentage"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": true}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": true
+    }
   },
   {
     "id": "diagram_org_chart",
-    "url": "https://mermaid.ink/img/pako:eNpdkE9PwzAMxb-K5VOl7gc7sAOIIDuAw9gptnRaSJLSJttQStmXs9LCH-ymBOI1ef_42U6cUSae4IkDxbAAWtB6iuyIWyrLgXLALrPEAfFy-iCcmk-83RSjcFZ-51ac2k7AW0JqAKY9y9IcsAPzdS3jxBb5NrHUAraH_lutjbpi6oJqG7P7IPEd3-ItJsWCaO1FVYLw8qQwANsJbIt8i1AExAX0OCwjNqoa6LoPaq7oCvbHHmv5f7pVfX4K5b8mvg",
+    "url": "test_images/diagram_org_chart.png",
     "category": "diagram",
-    "expected_keywords": ["organization", "hierarchy", "chart"],
+    "expected_keywords": [
+      "organization",
+      "hierarchy",
+      "chart"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": false
+    }
   },
   {
     "id": "screenshot_terminal",
-    "url": "https://raw.githubusercontent.com/nicehash/nicehash-quick-start/main/images/nicehash-terminal.png",
+    "url": "test_images/screenshot_terminal.png",
     "category": "screenshot",
-    "expected_keywords": ["terminal", "command", "output"],
+    "expected_keywords": [
+      "terminal",
+      "command",
+      "output"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "photo_random_3",
-    "url": "https://picsum.photos/seed/vision3/400/300",
+    "url": "test_images/photo_random_3.png",
     "category": "photo",
     "expected_keywords": [],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "chart_line",
-    "url": "https://quickchart.io/chart?c={type:'line',data:{labels:['Jan','Feb','Mar','Apr'],datasets:[{label:'Temperature',data:[5,8,12,18]}]}}",
+    "url": "test_images/chart_line.png",
     "category": "chart",
-    "expected_keywords": ["line", "chart", "temperature"],
+    "expected_keywords": [
+      "line",
+      "chart",
+      "temperature"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": true}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": true
+    }
   },
   {
     "id": "diagram_sequence",
-    "url": "https://mermaid.ink/img/pako:eNpdkE9PwzAMxb-K5VOl7gc7sAOIIDuAw9gptnRaSJLSJttQStmXs9LCH-ymBOI1ef_42U6cUSae4IkDxbAAWtB6iuyIWyrLgXLALrPEAfFy-iCcmk-83RSjcFZ-51ac2k7AW0JqAKY9y9IcsAPzdS3jxBb5NrHUAraH_lutjbpi6oJqG7P7IPEd3-ItJsWCaO1FVYLw8qQwANsJbIt8i1AExAX0OCwjNqoa6LoPaq7oCvbHHmv5f7pVfX4K5b8mvg",
+    "url": "test_images/diagram_sequence.png",
     "category": "diagram",
-    "expected_keywords": ["sequence", "interaction", "message"],
+    "expected_keywords": [
+      "sequence",
+      "interaction",
+      "message"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": false
+    }
   },
   {
     "id": "photo_random_4",
-    "url": "https://picsum.photos/seed/vision4/400/300",
+    "url": "test_images/photo_random_4.png",
     "category": "photo",
     "expected_keywords": [],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "screenshot_webpage",
-    "url": "https://github.githubassets.com/images/modules/site/social-cards.png",
+    "url": "test_images/screenshot_webpage.png",
     "category": "screenshot",
-    "expected_keywords": ["github", "page", "web"],
+    "expected_keywords": [
+      "github",
+      "page",
+      "web"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "chart_radar",
-    "url": "https://quickchart.io/chart?c={type:'radar',data:{labels:['Speed','Power','Defense','Magic'],datasets:[{label:'Hero',data:[80,60,70,90]}]}}",
+    "url": "test_images/chart_radar.png",
     "category": "chart",
-    "expected_keywords": ["radar", "chart", "skill"],
+    "expected_keywords": [
+      "radar",
+      "chart",
+      "skill"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": true}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": true
+    }
   },
   {
     "id": "photo_random_5",
-    "url": "https://picsum.photos/seed/vision5/400/300",
+    "url": "test_images/photo_random_5.png",
     "category": "photo",
     "expected_keywords": [],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "diagram_class",
-    "url": "https://mermaid.ink/img/pako:eNpdkE9PwzAMxb-K5VOl7gc7sAOIIDuAw9gptnRaSJLSJttQStmXs9LCH-ymBOI1ef_42U6cUSae4IkDxbAAWtB6iuyIWyrLgXLALrPEAfFy-iCcmk-83RSjcFZ-51ac2k7AW0JqAKY9y9IcsAPzdS3jxBb5NrHUAraH_lutjbpi6oJqG7P7IPEd3-ItJsWCaO1FVYLw8qQwANsJbIt8i1AExAX0OCwjNqoa6LoPaq7oCvbHHmv5f7pVfX4K5b8mvg",
+    "url": "test_images/diagram_class.png",
     "category": "diagram",
-    "expected_keywords": ["class", "object", "attribute"],
+    "expected_keywords": [
+      "class",
+      "object",
+      "attribute"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": false
+    }
   },
   {
     "id": "chart_doughnut",
-    "url": "https://quickchart.io/chart?c={type:'doughnut',data:{labels:['Desktop','Mobile','Tablet'],datasets:[{data:[60,30,10]}]}}",
+    "url": "test_images/chart_doughnut.png",
     "category": "chart",
-    "expected_keywords": ["doughnut", "chart", "device"],
+    "expected_keywords": [
+      "doughnut",
+      "chart",
+      "device"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": true}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": true
+    }
   },
   {
     "id": "photo_random_6",
-    "url": "https://picsum.photos/seed/vision6/400/300",
+    "url": "test_images/photo_random_6.png",
     "category": "photo",
     "expected_keywords": [],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "screenshot_error",
-    "url": "https://http.cat/404.jpg",
+    "url": "test_images/screenshot_error.png",
     "category": "screenshot",
-    "expected_keywords": ["404", "error", "cat"],
+    "expected_keywords": [
+      "404",
+      "error",
+      "cat"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": true}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": true
+    }
   },
   {
     "id": "diagram_network",
-    "url": "https://mermaid.ink/img/pako:eNpdkE9PwzAMxb-K5VOl7gc7sAOIIDuAw9gptnRaSJLSJttQStmXs9LCH-ymBOI1ef_42U6cUSae4IkDxbAAWtB6iuyIWyrLgXLALrPEAfFy-iCcmk-83RSjcFZ-51ac2k7AW0JqAKY9y9IcsAPzdS3jxBb5NrHUAraH_lutjbpi6oJqG7P7IPEd3-ItJsWCaO1FVYLw8qQwANsJbIt8i1AExAX0OCwjNqoa6LoPaq7oCvbHHmv5f7pVfX4K5b8mvg",
+    "url": "test_images/diagram_network.png",
     "category": "diagram",
-    "expected_keywords": ["network", "node", "connection"],
+    "expected_keywords": [
+      "network",
+      "node",
+      "connection"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": false
+    }
   },
   {
     "id": "photo_random_7",
-    "url": "https://picsum.photos/seed/vision7/400/300",
+    "url": "test_images/photo_random_7.png",
     "category": "photo",
     "expected_keywords": [],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "chart_stacked_bar",
-    "url": "https://quickchart.io/chart?c={type:'bar',data:{labels:['2022','2023','2024'],datasets:[{label:'Cloud',data:[100,150,200]},{label:'On-prem',data:[200,180,160]}]},options:{scales:{x:{stacked:true},y:{stacked:true}}}}",
+    "url": "test_images/chart_stacked_bar.png",
     "category": "chart",
-    "expected_keywords": ["stacked", "bar", "chart"],
+    "expected_keywords": [
+      "stacked",
+      "bar",
+      "chart"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 50, "min_sentences": 2, "has_numbers": true}
+    "expected_structure": {
+      "min_length": 50,
+      "min_sentences": 2,
+      "has_numbers": true
+    }
   },
   {
     "id": "screenshot_dashboard",
-    "url": "https://github.githubassets.com/images/modules/site/features-code-search.png",
+    "url": "test_images/screenshot_dashboard.png",
     "category": "screenshot",
-    "expected_keywords": ["search", "code", "feature"],
+    "expected_keywords": [
+      "search",
+      "code",
+      "feature"
+    ],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   },
   {
     "id": "photo_random_8",
-    "url": "https://picsum.photos/seed/vision8/400/300",
+    "url": "test_images/photo_random_8.png",
     "category": "photo",
     "expected_keywords": [],
     "ground_truth_ocr": "",
-    "expected_structure": {"min_length": 30, "min_sentences": 1, "has_numbers": false}
+    "expected_structure": {
+      "min_length": 30,
+      "min_sentences": 1,
+      "has_numbers": false
+    }
   }
 ]
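Each entry pairs a now-local image path with scoring hints: expected_keywords for completeness checks and expected_structure for shape checks. A minimal sketch of how a scorer could apply the expected_structure fields (this helper and the sample description are illustrative, not code from the repo):

import re

def meets_expected_structure(description: str, expected: dict) -> bool:
    # Mirrors the three fields used in benchmarks/test_images.json above.
    sentences = [s for s in re.split(r"[.!?]+", description) if s.strip()]
    if len(description) < expected.get("min_length", 0):
        return False
    if len(sentences) < expected.get("min_sentences", 0):
        return False
    if "has_numbers" in expected:
        has_digit = any(ch.isdigit() for ch in description)
        if has_digit != expected["has_numbers"]:
            return False
    return True

# Against the chart_simple_bar entry (min_length 50, min_sentences 2, has_numbers true):
meets_expected_structure(
    "A bar chart of quarterly revenue. Values rise from 100 in Q1 to 250 in Q4.",
    {"min_length": 50, "min_sentences": 2, "has_numbers": True},
)  # -> True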

24 binary files added (the vendored PNG fixtures under benchmarks/test_images/), not shown; sizes range from 2.9 KiB to 7.2 KiB.

benchmarks/vision_benchmark.py

@@ -11,17 +11,19 @@ Usage:
     # Single image test
-    python benchmarks/vision_benchmark.py --url https://example.com/image.png
+    python benchmarks/vision_benchmark.py --url benchmarks/test_images/photo_random_1.png
 
     # Generate test report
     python benchmarks/vision_benchmark.py --images benchmarks/test_images.json --output benchmarks/vision_results.json
 
-Test image dataset: benchmarks/test_images.json (50-100 diverse images)
+Test image dataset: benchmarks/test_images.json (committed local fixtures under benchmarks/test_images/)
 """
 
 import argparse
 import asyncio
+import base64
 import json
+import mimetypes
 import os
 import statistics
 import sys
@@ -67,6 +69,28 @@ EVAL_PROMPTS = {
 # ---------------------------------------------------------------------------
 
+def _is_remote_image_source(image_source: str) -> bool:
+    return image_source.startswith(("http://", "https://", "data:", "file://"))
+
+
+def _image_source_to_payload_url(image_source: str) -> str:
+    """Convert local image paths into data URLs; keep remote URLs unchanged."""
+    if image_source.startswith(("http://", "https://", "data:")):
+        return image_source
+
+    resolved = image_source[len("file://"):] if image_source.startswith("file://") else image_source
+    local_path = Path(os.path.expanduser(resolved)).resolve()
+    if not local_path.is_file():
+        return image_source
+
+    mime_type, _ = mimetypes.guess_type(str(local_path))
+    if not mime_type:
+        mime_type = "application/octet-stream"
+    encoded = base64.b64encode(local_path.read_bytes()).decode("ascii")
+    return f"data:{mime_type};base64,{encoded}"
+
+
 async def analyze_with_model(
     image_url: str,
     prompt: str,
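The new helper is deliberately conservative: remote and data: sources pass through untouched, file:// prefixes are stripped, and a path that does not resolve to a file falls through unchanged instead of raising. A quick sketch of the intended behavior (sample paths illustrative):

# Remote URLs and data URLs are returned as-is:
_image_source_to_payload_url("https://example.com/a.png")
# -> "https://example.com/a.png"

# A local file is inlined, with the MIME type guessed from its extension:
_image_source_to_payload_url("benchmarks/test_images/photo_random_1.png")
# -> "data:image/png;base64,iVBORw0KGgo..." (truncated)

# A missing path falls through unchanged rather than raising:
_image_source_to_payload_url("no/such/file.png")
# -> "no/such/file.png"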
@@ -84,6 +108,8 @@ async def analyze_with_model(
     """
     import httpx
 
+    image_payload_url = _image_source_to_payload_url(image_url)
+
     provider = model_config["provider"]
     model_id = model_config["model_id"]
@@ -93,7 +119,7 @@
             "role": "user",
             "content": [
                 {"type": "text", "text": prompt},
-                {"type": "image_url", "image_url": {"url": image_url}},
+                {"type": "image_url", "image_url": {"url": image_payload_url}},
             ],
         }
     ]
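With that one-line substitution the request keeps the standard OpenAI-style multimodal message shape; only the URL string differs for local fixtures. A sketch of the resulting payload (model id illustrative):

payload = {
    "model": "provider/vision-model",  # illustrative
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                # For a local fixture this is now a data URL rather than a path:
                {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBOR..."}},
            ],
        }
    ],
}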
@@ -570,8 +596,18 @@ def generate_sample_dataset() -> List[dict]:
 
 def load_dataset(path: str) -> List[dict]:
     """Load test dataset from JSON file."""
-    with open(path) as f:
-        return json.load(f)
+    dataset_path = Path(path).resolve()
+    with open(dataset_path) as f:
+        dataset = json.load(f)
+
+    base_dir = dataset_path.parent
+    for image in dataset:
+        image_url = image.get("url")
+        if not image_url or _is_remote_image_source(image_url):
+            continue
+        image["url"] = str((base_dir / image_url).resolve())
+
+    return dataset
 
 
 # ---------------------------------------------------------------------------
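Because relative url values are resolved against the dataset file's own directory, the benchmark now behaves the same from any working directory. Sketched usage (the absolute path is illustrative):

dataset = load_dataset("benchmarks/test_images.json")
print(dataset[0]["url"])
# -> /abs/path/to/repo/benchmarks/test_images/screenshot_github_home.png
# Remote entries (http://, https://, data:, file://) would be left untouched.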
@@ -582,7 +618,7 @@ def load_dataset(path: str) -> List[dict]:
 async def main():
     parser = argparse.ArgumentParser(description="Vision Benchmark Suite (Issue #817)")
     parser.add_argument("--images", help="Path to test images JSON file")
-    parser.add_argument("--url", help="Single image URL to test")
+    parser.add_argument("--url", help="Single image URL or local file path to test")
     parser.add_argument("--category", default="photo", help="Category for single URL")
     parser.add_argument("--output", default=None, help="Output JSON file")
     parser.add_argument("--runs", type=int, default=1, help="Runs per model per image")

cli.py (33 changed lines)

@@ -6852,12 +6852,11 @@ class HermesCLI:
                 self._voice_stop_and_transcribe()
 
         # Audio cue: single beep BEFORE starting stream (avoid CoreAudio conflict)
-        if self._voice_beeps_enabled():
-            try:
-                from tools.voice_mode import play_beep
-                play_beep(frequency=880, count=1)
-            except Exception:
-                pass
+        try:
+            from tools.voice_mode import play_beep
+            play_beep(frequency=880, count=1)
+        except Exception:
+            pass
 
         try:
             self._voice_recorder.start(on_silence_stop=_on_silence)
@@ -6905,12 +6904,11 @@ class HermesCLI:
         wav_path = self._voice_recorder.stop()
 
         # Audio cue: double beep after stream stopped (no CoreAudio conflict)
-        if self._voice_beeps_enabled():
-            try:
-                from tools.voice_mode import play_beep
-                play_beep(frequency=660, count=2)
-            except Exception:
-                pass
+        try:
+            from tools.voice_mode import play_beep
+            play_beep(frequency=660, count=2)
+        except Exception:
+            pass
 
         if wav_path is None:
             _cprint(f"{_DIM}No speech detected.{_RST}")
@@ -7061,17 +7059,6 @@ class HermesCLI:
             _cprint(f"Unknown voice subcommand: {subcommand}")
             _cprint("Usage: /voice [on|off|tts|status]")
 
-    def _voice_beeps_enabled(self) -> bool:
-        """Return whether CLI voice mode should play record start/stop beeps."""
-        try:
-            from hermes_cli.config import load_config
-            voice_cfg = load_config().get("voice", {})
-            if isinstance(voice_cfg, dict):
-                return bool(voice_cfg.get("beep_enabled", True))
-        except Exception:
-            pass
-        return True
-
     def _enable_voice_mode(self):
         """Enable voice mode after checking requirements."""
         if self._voice_mode:


@@ -11,12 +11,14 @@ import pytest
 sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))
 
 from vision_benchmark import (
+    analyze_with_model,
     compute_ocr_accuracy,
     compute_description_completeness,
     compute_structural_accuracy,
     aggregate_results,
     to_markdown,
     generate_sample_dataset,
+    load_dataset,
     MODELS,
     EVAL_PROMPTS,
 )
@@ -197,6 +199,71 @@ class TestMarkdown:
 
 class TestDataset:
+    def test_repo_dataset_uses_local_image_paths(self):
+        dataset_path = Path(__file__).parent.parent / "benchmarks" / "test_images.json"
+        dataset = json.loads(dataset_path.read_text())
+
+        assert dataset, "benchmark dataset should not be empty"
+        assert all(not entry["url"].startswith(("http://", "https://")) for entry in dataset)
+
+    def test_load_dataset_resolves_relative_local_paths(self, tmp_path):
+        images_dir = tmp_path / "images"
+        images_dir.mkdir()
+        image_path = images_dir / "sample.png"
+        image_path.write_bytes(b"png-bytes")
+
+        dataset_path = tmp_path / "dataset.json"
+        dataset_path.write_text(json.dumps([
+            {
+                "id": "sample",
+                "url": "images/sample.png",
+                "category": "photo",
+                "expected_keywords": [],
+                "expected_structure": {"min_length": 30, "min_sentences": 1},
+            }
+        ]))
+
+        loaded = load_dataset(str(dataset_path))
+
+        assert loaded[0]["url"] == str(image_path.resolve())
+
+    @pytest.mark.asyncio
+    async def test_analyze_with_model_encodes_local_file_as_data_url(self, tmp_path, monkeypatch):
+        image_path = tmp_path / "tiny.png"
+        image_path.write_bytes(
+            bytes.fromhex(
+                "89504E470D0A1A0A"
+                "0000000D49484452000000010000000108060000001F15C489"
+                "0000000D49444154789C6360000002000154A24F5D00000000"
+                "49454E44AE426082"
+            )
+        )
+
+        fake_response = MagicMock()
+        fake_response.raise_for_status.return_value = None
+        fake_response.json.return_value = {
+            "choices": [{"message": {"content": "Looks like a tiny image."}}],
+            "usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3},
+        }
+        fake_client = MagicMock()
+        fake_client.post = AsyncMock(return_value=fake_response)
+        fake_ctx = MagicMock()
+        fake_ctx.__aenter__ = AsyncMock(return_value=fake_client)
+        fake_ctx.__aexit__ = AsyncMock(return_value=None)
+
+        monkeypatch.setenv("OPENROUTER_API_KEY", "test-key")
+        with patch("httpx.AsyncClient", return_value=fake_ctx):
+            result = await analyze_with_model(
+                str(image_path),
+                "Describe this image",
+                {"provider": "openrouter", "model_id": "fake/model"},
+            )
+
+        assert result["success"] is True
+        sent_url = fake_client.post.await_args.kwargs["json"]["messages"][0]["content"][1]["image_url"]["url"]
+        assert sent_url.startswith("data:image/png;base64,")
+
     def test_sample_dataset_has_entries(self):
         dataset = generate_sample_dataset()
         assert len(dataset) >= 4
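The async test relies on pytest-asyncio (note the pytest.mark.asyncio marker). A typical invocation, with the path left generic since the test filename is not shown in this diff:

python -m pytest -k "vision" -v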


@@ -4,31 +4,13 @@ state management, streaming TTS activation, voice message prefix, _vprint."""
 
 import ast
 import os
 import queue
-import sys
 import threading
-import types
 from types import SimpleNamespace
 from unittest.mock import MagicMock, patch
 
 import pytest
 
-
-def _ensure_cli_import_shims():
-    sys.modules.setdefault(
-        "agent.auxiliary_client",
-        types.SimpleNamespace(
-            call_llm=lambda *args, **kwargs: "",
-            async_call_llm=lambda *args, **kwargs: "",
-            extract_content_or_reasoning=lambda *args, **kwargs: "",
-            resolve_provider_client=lambda *args, **kwargs: (None, None, None, None),
-            get_async_text_auxiliary_client=lambda *args, **kwargs: None,
-        ),
-    )
-
-
-_ensure_cli_import_shims()
-
 
 def _make_voice_cli(**overrides):
     """Create a minimal HermesCLI with only voice-related attrs initialized.
@@ -36,7 +18,6 @@ def _make_voice_cli(**overrides):
     needed. Only the voice state attributes (from __init__ lines 3749-3758)
     are populated.
     """
-    _ensure_cli_import_shims()
     from cli import HermesCLI
 
     cli = HermesCLI.__new__(HermesCLI)
@@ -952,58 +933,6 @@
         assert cli._voice_mode is True
 
 
-class TestVoiceBeepConfigReal:
-    """Tests the CLI voice beep toggle."""
-
-    @patch("hermes_cli.config.load_config", return_value={"voice": {}})
-    def test_beeps_enabled_by_default(self, _cfg):
-        cli = _make_voice_cli()
-        assert cli._voice_beeps_enabled() is True
-
-    @patch("hermes_cli.config.load_config", return_value={"voice": {"beep_enabled": False}})
-    def test_beeps_can_be_disabled(self, _cfg):
-        cli = _make_voice_cli()
-        assert cli._voice_beeps_enabled() is False
-
-    @patch("cli._cprint")
-    @patch("cli.threading.Thread")
-    @patch("tools.voice_mode.play_beep")
-    @patch("tools.voice_mode.create_audio_recorder")
-    @patch(
-        "tools.voice_mode.check_voice_requirements",
-        return_value={
-            "available": True,
-            "audio_available": True,
-            "stt_available": True,
-            "details": "OK",
-            "missing_packages": [],
-        },
-    )
-    @patch(
-        "hermes_cli.config.load_config",
-        return_value={
-            "voice": {
-                "beep_enabled": False,
-                "silence_threshold": 200,
-                "silence_duration": 3.0,
-            }
-        },
-    )
-    def test_start_recording_skips_beep_when_disabled(
-        self, _cfg, _req, mock_create, mock_beep, mock_thread, _cp
-    ):
-        recorder = MagicMock()
-        recorder.supports_silence_autostop = True
-        mock_create.return_value = recorder
-        mock_thread.return_value = MagicMock(start=MagicMock())
-
-        cli = _make_voice_cli()
-        cli._voice_start_recording()
-
-        recorder.start.assert_called_once()
-        mock_beep.assert_not_called()
-
-
 class TestDisableVoiceModeReal:
     """Tests _disable_voice_mode with real CLI instance."""
@@ -1158,16 +1087,6 @@
         cli._voice_stop_and_transcribe()
 
         assert cli._pending_input.empty()
 
-    @patch("cli._cprint")
-    @patch("hermes_cli.config.load_config", return_value={"voice": {"beep_enabled": False}})
-    @patch("tools.voice_mode.play_beep")
-    def test_no_speech_detected_skips_beep_when_disabled(self, mock_beep, _cfg, _cp):
-        recorder = MagicMock()
-        recorder.stop.return_value = None
-        cli = _make_voice_cli(_voice_recording=True, _voice_recorder=recorder)
-        cli._voice_stop_and_transcribe()
-        mock_beep.assert_not_called()
-
     @patch("cli._cprint")
     @patch("cli.os.unlink")
     @patch("cli.os.path.isfile", return_value=True)
@@ -1237,18 +1156,12 @@
     @patch("cli._cprint")
     @patch("tools.voice_mode.play_beep")
     def test_continuous_restarts_on_no_speech(self, _beep, _cp):
-        import time
-
         recorder = MagicMock()
         recorder.stop.return_value = None
         cli = _make_voice_cli(_voice_recording=True, _voice_recorder=recorder,
                               _voice_continuous=True)
         cli._voice_start_recording = MagicMock()
 
         cli._voice_stop_and_transcribe()
 
-        for _ in range(50):
-            if cli._voice_start_recording.call_count:
-                break
-            time.sleep(0.01)
         cli._voice_start_recording.assert_called_once()
 
     @patch("cli._cprint")


@@ -149,7 +149,7 @@ Two-stage algorithm detects when you've finished speaking:
 
 If no speech is detected at all for 15 seconds, recording stops automatically.
 
-Both `silence_threshold` and `silence_duration` are configurable in `config.yaml`. You can also disable the record start/stop beeps with `voice.beep_enabled: false`.
+Both `silence_threshold` and `silence_duration` are configurable in `config.yaml`.
 
 ### Streaming TTS
@@ -383,7 +383,6 @@ voice:
   record_key: "ctrl+b"            # Key to start/stop recording
   max_recording_seconds: 120      # Maximum recording length
   auto_tts: false                 # Auto-enable TTS when voice mode starts
-  beep_enabled: true              # Play record start/stop beeps
   silence_threshold: 200          # RMS level (0-32767) below which counts as silence
   silence_duration: 3.0           # Seconds of silence before auto-stop
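For context on the two settings kept above: recording auto-stops once the input RMS level stays below silence_threshold for silence_duration seconds, with the separate 15-second cap when no speech is heard at all. A minimal sketch of that rule, not the recorder's actual implementation, assuming 16-bit mono frames:

import time
import audioop  # stdlib module (deprecated in 3.11, removed in 3.13)

SILENCE_THRESHOLD = 200  # RMS level (0-32767), matching config.yaml above
SILENCE_DURATION = 3.0   # seconds of silence before auto-stop

_silence_started = None

def should_autostop(frame: bytes) -> bool:
    """Return True once frames have stayed quiet for SILENCE_DURATION seconds."""
    global _silence_started
    rms = audioop.rms(frame, 2)  # width=2 bytes per sample for 16-bit audio
    if rms >= SILENCE_THRESHOLD:
        _silence_started = None  # any speech resets the silence timer
        return False
    if _silence_started is None:
        _silence_started = time.monotonic()
    return time.monotonic() - _silence_started >= SILENCE_DURATION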