1
0
This repository has been archived on 2026-03-24. You can view files and clone it. You cannot open issues or pull requests or push a commit.
Files
Timmy-time-dashboard/src/dashboard/templates/voice_button.html
Alexander Whitestone 622a6a9204 polish: extract inline CSS, add connection status, panel macro, favicon, ollama cache, toast system (#164)
Major:
- Extract all inline <style> blocks from 22 Jinja2 templates into
  static/css/mission-control.css — single cacheable stylesheet
- Add tox lint check that fails on inline <style> in templates

Minor:
1. Connection status indicator in topbar (green/amber/red dot) reflecting
   WebSocket + Ollama reachability, with auto-reconnect
2. Jinja2 {% macro panel(title) %} in macros.html — eliminates repeated
   .card.mc-panel markup; index.html converted as example
3. SVG favicon (purple T + orange dot)
4. 30-second TTL cache on _check_ollama() to avoid blocking the event loop
   on every health poll (asyncio.to_thread was already in place)
5. Toast notification system (McToast.show) for transient status messages —
   wired into connection status for Ollama/WebSocket state changes

Enforcement:
- CLAUDE.md updated with conventions 11-14 (no inline CSS, use panel macro,
  use toasts, never block the event loop)
- tox lint + pre-push environments now fail on inline <style> blocks

https://claude.ai/code/session_014FQ785MQdyJQ4BAXrRSo9w

Co-authored-by: Claude <noreply@anthropic.com>
2026-03-11 09:52:57 -04:00

126 lines
4.3 KiB
HTML

{% extends "base.html" %}
{% block title %}{{ page_title }}{% endblock %}
{% block extra_styles %}{% endblock %}
{% block content %}
<div class="voice-page py-3">
<div class="card mc-panel">
<div class="card-header mc-panel-header" style="text-align:center;">// VOICE CONTROL</div>
<div class="card-body">
<p style="color: var(--text-dim); font-size: 0.85rem; margin-bottom: 0;">Hold the button and speak to Timmy</p>
{# aria-live: this element is rewritten by JS (Listening/Processing/Error),
   so announce those changes to screen readers as they happen. #}
<div class="voice-status" id="voice-status" aria-live="polite">Tap and hold to speak</div>
{# type="button" avoids the implicit submit default; the emoji-only content
   needs aria-label for an accessible name. event.preventDefault() on the
   touch handlers stops the browser from also synthesizing mousedown/mouseup,
   which would start/stop recognition twice on touch devices. #}
<button class="voice-button" id="voice-btn" type="button" aria-label="Hold to talk to Timmy"
onmousedown="startListening()"
onmouseup="stopListening()"
ontouchstart="event.preventDefault(); startListening()"
ontouchend="event.preventDefault(); stopListening()">
&#x1F3A4;
</button>
<div id="voice-result" class="voice-result" style="display: none;">
<div class="voice-transcript">
<strong style="color:var(--text-dim);">You said:</strong> <span id="transcript-text"></span>
</div>
<div class="voice-response">
<strong>Timmy:</strong> <span id="response-text"></span>
</div>
</div>
<div class="voice-tips">
<h3>Try saying:</h3>
<ul>
<li>"What's the status?"</li>
<li>"Launch a research agent"</li>
<li>"Create a task to find Bitcoin news"</li>
<li>"Show me the marketplace"</li>
<li>"Emergency stop"</li>
</ul>
</div>
</div>
</div>
</div>
<script>
// --- Speech recognition setup -------------------------------------------
// Uses the Web Speech API (prefixed as webkitSpeechRecognition in
// Chrome/Safari). When unsupported, the UI degrades to a disabled button
// with an explanatory status message.
var recognition = null;
var isListening = false;
// When true, onend restores the idle prompt. Cleared by onresult/onerror so
// their status messages ("Processing...", "Error: ...") are not immediately
// clobbered by onend, which always fires last in the event sequence.
var pendingReset = true;

if ('webkitSpeechRecognition' in window || 'SpeechRecognition' in window) {
  var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
  recognition = new SpeechRecognition();
  recognition.continuous = false;      // stop after a single utterance
  recognition.interimResults = false;  // deliver only final transcripts
  recognition.lang = 'en-US';

  recognition.onstart = function() {
    isListening = true;
    pendingReset = true;
    document.getElementById('voice-status').textContent = 'Listening...';
    document.getElementById('voice-btn').classList.add('listening');
  };

  recognition.onresult = function(event) {
    // processVoiceCommand owns the status line from here on (it schedules
    // its own reset), so tell onend not to wipe its "Processing..." text.
    pendingReset = false;
    var transcript = event.results[0][0].transcript;
    processVoiceCommand(transcript);
  };

  recognition.onerror = function(event) {
    console.error('Speech recognition error:', event.error);
    // Bug fix: resetButton() used to run here (and again in onend),
    // overwriting the error text before the user could read it. Show the
    // error, then restore the idle prompt after a short delay instead.
    pendingReset = false;
    document.getElementById('voice-status').textContent = 'Error: ' + event.error;
    document.getElementById('voice-btn').classList.remove('listening');
    setTimeout(resetButton, 2000);
  };

  recognition.onend = function() {
    isListening = false;
    document.getElementById('voice-btn').classList.remove('listening');
    // Only restore the idle prompt when no result/error handler took over
    // (e.g. the user released the button without saying anything).
    if (pendingReset) { resetButton(); }
  };
} else {
  document.getElementById('voice-status').textContent = 'Speech recognition not supported in this browser';
  document.getElementById('voice-btn').disabled = true;
}
// Begin a capture session; no-op when the API is unavailable or a session
// is already in progress (guards against repeated mousedown/touchstart).
function startListening() {
  if (!recognition || isListening) {
    return;
  }
  recognition.start();
}
// End the current capture session; no-op when nothing is listening
// (guards against stray mouseup/touchend events).
function stopListening() {
  if (!recognition || !isListening) {
    return;
  }
  recognition.stop();
}
// Restore the idle UI: default prompt text, no "listening" highlight.
function resetButton() {
  var statusEl = document.getElementById('voice-status');
  var buttonEl = document.getElementById('voice-btn');
  statusEl.textContent = 'Tap and hold to speak';
  buttonEl.classList.remove('listening');
}
// Send a transcribed command to the backend and render Timmy's reply.
// POSTs the text to /voice/command as form data, shows the transcript and
// response, and (when supported) speaks the reply aloud. Always restores
// the idle button state 2 s after completion, success or failure.
async function processVoiceCommand(text) {
  document.getElementById('transcript-text').textContent = text;
  document.getElementById('voice-status').textContent = 'Processing...';
  try {
    var response = await fetch('/voice/command', {
      method: 'POST',
      headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
      body: 'text=' + encodeURIComponent(text)
    });
    // Bug fix: fetch() only rejects on network failure, so a 4xx/5xx reply
    // previously fell through to .json() and could render "undefined".
    if (!response.ok) {
      throw new Error('HTTP ' + response.status);
    }
    var data = await response.json();
    // Guard against a well-formed reply that lacks the expected payload.
    if (!data.command || typeof data.command.response !== 'string') {
      throw new Error('Malformed /voice/command payload');
    }
    document.getElementById('response-text').textContent = data.command.response;
    document.getElementById('voice-result').style.display = 'block';
    document.getElementById('voice-status').textContent = 'Done!';
    if ('speechSynthesis' in window) {
      // Cancel any utterance still in flight so rapid back-to-back
      // commands don't queue up and talk over each other.
      window.speechSynthesis.cancel();
      var utterance = new SpeechSynthesisUtterance(data.command.response);
      utterance.rate = 1.1;
      window.speechSynthesis.speak(utterance);
    }
  } catch (e) {
    // Log the underlying cause instead of swallowing it silently.
    console.error('Voice command failed:', e);
    document.getElementById('response-text').textContent = 'Sorry, I had trouble processing that.';
    document.getElementById('voice-result').style.display = 'block';
    document.getElementById('voice-status').textContent = 'Error';
  }
  setTimeout(resetButton, 2000);
}
</script>
{% endblock %}