forked from Rockachopa/Timmy-time-dashboard
feat: Mission Control v2 — swarm, L402, voice, marketplace, React dashboard
Major expansion of the Timmy Time Dashboard: Backend modules: - Swarm subsystem: registry, manager, bidder, coordinator, agent_runner, swarm_node, tasks, comms - L402/Lightning: payment_handler, l402_proxy with HMAC macaroons - Voice NLU: regex-based intent detection (chat, status, swarm, task, help, voice) - Notifications: push notifier for swarm events - Shortcuts: Siri Shortcuts iOS integration endpoints - WebSocket: live dashboard event manager - Inter-agent: agent-to-agent messaging layer Dashboard routes: - /swarm/* — swarm management and agent registry - /marketplace — agent catalog with sat pricing - /voice/* — voice command processing - /mobile — mobile status endpoint - /swarm/live — WebSocket live feed React web dashboard (dashboard-web/): - Sovereign Terminal design — dark theme with Bitcoin orange accents - Three-column layout: status sidebar, workspace tabs, context panel - Chat, Swarm, Tasks, Marketplace tab views - JetBrains Mono typography, terminal aesthetic - Framer Motion animations throughout Tests: 228 passing (expanded from 93) Includes Kimi's additional templates and QA work.
This commit is contained in:
193
src/dashboard/templates/voice_button.html
Normal file
193
src/dashboard/templates/voice_button.html
Normal file
@@ -0,0 +1,193 @@
|
||||
{% extends "base.html" %}

{% block title %}{{ page_title }}{% endblock %}

{# Styles for the push-to-talk voice control page. #}
{% block extra_styles %}
<style>
/* Large circular push-to-talk button, centered below the card header. */
.voice-button {
    width: 200px;
    height: 200px;
    border-radius: 50%;
    background: linear-gradient(135deg, var(--accent), var(--accent-dim));
    border: none;
    color: var(--bg-primary);
    font-size: 4rem;
    cursor: pointer;
    transition: all 0.2s;
    display: flex;
    align-items: center;
    justify-content: center;
    margin: 40px auto;
    box-shadow: 0 0 40px rgba(0, 255, 136, 0.3);
}

.voice-button:hover {
    transform: scale(1.05);
    box-shadow: 0 0 60px rgba(0, 255, 136, 0.5);
}

/* Pressed/recording state: shrink slightly, switch to the danger color,
   and pulse. `.listening` is toggled from JS by the recognition handlers. */
.voice-button:active, .voice-button.listening {
    transform: scale(0.95);
    background: var(--danger);
    box-shadow: 0 0 60px rgba(255, 68, 68, 0.5);
    animation: pulse-red 1s infinite;
}

@keyframes pulse-red {
    0%, 100% { box-shadow: 0 0 40px rgba(255, 68, 68, 0.5); }
    50% { box-shadow: 0 0 80px rgba(255, 68, 68, 0.8); }
}

/* One-line status text shown above the button ("Listening...", "Done!", ...). */
.voice-status {
    text-align: center;
    font-size: 1.25rem;
    color: var(--text-secondary);
    margin-bottom: 20px;
}

/* Result card shown after a command round-trips to the server. */
.voice-result {
    background: var(--bg-tertiary);
    border-radius: 12px;
    padding: 20px;
    margin-top: 20px;
}

.voice-transcript {
    font-size: 1.125rem;
    margin-bottom: 12px;
}

.voice-response {
    color: var(--accent);
    font-style: italic;
}
</style>
{% endblock %}
|
||||
|
||||
{% block content %}
<div class="card" style="max-width: 600px; margin: 0 auto;">
    <div class="card-header" style="text-align: center;">
        <h2 class="card-title">🎙️ Voice Control</h2>
        <p style="color: var(--text-secondary);">Hold the button and speak to Timmy</p>
    </div>

    <!-- Live status line, updated by the speech-recognition handlers in the script below. -->
    <div class="voice-status" id="voice-status">Tap and hold to speak</div>

    <!-- Push-to-talk button. Mouse AND touch handlers are wired; note that on
         touch devices a single press fires both touchstart and mousedown. -->
    <button class="voice-button" id="voice-btn"
            onmousedown="startListening()"
            onmouseup="stopListening()"
            ontouchstart="startListening()"
            ontouchend="stopListening()">
        🎤
    </button>

    <!-- Hidden until a command completes; filled with the transcript and Timmy's reply. -->
    <div id="voice-result" class="voice-result" style="display: none;">
        <div class="voice-transcript">
            <strong>You said:</strong> <span id="transcript-text"></span>
        </div>
        <div class="voice-response">
            <strong>Timmy:</strong> <span id="response-text"></span>
        </div>
    </div>

    <!-- Static example phrases; presumably these match the backend voice NLU
         intents (status, swarm, task, marketplace, stop) — confirm against /voice/command. -->
    <div style="margin-top: 30px; padding: 20px; background: var(--bg-tertiary); border-radius: 8px;">
        <h3 style="margin-bottom: 12px;">Try saying:</h3>
        <ul style="color: var(--text-secondary); line-height: 2;">
            <li>"What's the status?"</li>
            <li>"Launch a research agent"</li>
            <li>"Create a task to find Bitcoin news"</li>
            <li>"Show me the marketplace"</li>
            <li>"Emergency stop"</li>
        </ul>
    </div>
</div>
|
||||
|
||||
<script>
|
||||
// Module-level recognition state shared by the handler functions below.
// `recognition` stays null when the browser lacks the Web Speech API.
let recognition = null;
let isListening = false;

// Initialize Web Speech API
// Feature-detect both the standard and the webkit-prefixed constructor
// (Safari/Chrome ship the prefixed one); otherwise disable the button.
if ('webkitSpeechRecognition' in window || 'SpeechRecognition' in window) {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    recognition = new SpeechRecognition();
    recognition.continuous = false;      // one utterance per button press
    recognition.interimResults = false;  // deliver only the final transcript
    recognition.lang = 'en-US';

    // Capture actually started: flip state and show the "listening" UI.
    recognition.onstart = function() {
        isListening = true;
        document.getElementById('voice-status').textContent = 'Listening...';
        document.getElementById('voice-btn').classList.add('listening');
    };

    // Final transcript received: forward it to the backend command endpoint.
    recognition.onresult = function(event) {
        const transcript = event.results[0][0].transcript;
        processVoiceCommand(transcript);
    };

    // Recognition failed (no-speech, not-allowed, network, ...): surface the
    // error code and restore the idle UI. `onend` fires afterwards as well,
    // so resetButton() may run twice — it is idempotent.
    recognition.onerror = function(event) {
        console.error('Speech recognition error:', event.error);
        document.getElementById('voice-status').textContent = 'Error: ' + event.error;
        resetButton();
    };

    // Fires after every session, success or failure: clear state and UI.
    recognition.onend = function() {
        isListening = false;
        resetButton();
    };
} else {
    // No Web Speech API: explain why and disable the push-to-talk button.
    document.getElementById('voice-status').textContent = 'Speech recognition not supported in this browser';
    document.getElementById('voice-btn').disabled = true;
}
|
||||
|
||||
/**
 * Begin speech capture while the button is held.
 *
 * No-op when the Web Speech API is unavailable (recognition === null) or a
 * capture session is already active.
 */
function startListening() {
    if (!recognition || isListening) {
        return;
    }
    try {
        recognition.start();
    } catch (err) {
        // On touch devices a single press fires BOTH touchstart and mousedown,
        // and isListening only flips once the async onstart callback runs —
        // so the second call can reach start() while capture is pending and
        // throw InvalidStateError. Swallow that duplicate-start case only.
        if (err.name !== 'InvalidStateError') {
            throw err;
        }
    }
}
|
||||
|
||||
/**
 * End speech capture when the button is released.
 *
 * Does nothing unless a recognizer exists and a capture session is
 * currently active.
 */
function stopListening() {
    const captureActive = recognition && isListening;
    if (!captureActive) {
        return;
    }
    recognition.stop();
}
|
||||
|
||||
/**
 * Restore the idle UI: default status prompt, no "listening" highlight.
 * Safe to call repeatedly (both onerror and onend invoke it).
 */
function resetButton() {
    const statusEl = document.getElementById('voice-status');
    const buttonEl = document.getElementById('voice-btn');
    statusEl.textContent = 'Tap and hold to speak';
    buttonEl.classList.remove('listening');
}
|
||||
|
||||
/**
 * Send a transcribed utterance to the backend and render the reply.
 *
 * POSTs the text to /voice/command as form-encoded data, shows the
 * transcript and Timmy's response in the result card, and speaks the
 * response aloud when the browser supports speech synthesis. Any failure
 * (network error, non-2xx status, malformed payload) shows a generic error
 * message instead. The button is reset to idle two seconds after either
 * outcome.
 *
 * @param {string} text - The recognized utterance.
 */
async function processVoiceCommand(text) {
    document.getElementById('transcript-text').textContent = text;
    document.getElementById('voice-status').textContent = 'Processing...';

    try {
        const response = await fetch('/voice/command', {
            method: 'POST',
            headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
            body: `text=${encodeURIComponent(text)}`
        });

        // fetch() resolves on HTTP errors too — treat non-2xx as failure
        // rather than parsing an error page as JSON.
        if (!response.ok) {
            throw new Error(`voice endpoint returned ${response.status}`);
        }

        const data = await response.json();

        // Guard against a malformed payload instead of crashing on
        // data.command.response when `command` is absent.
        const reply = data?.command?.response;
        if (typeof reply !== 'string') {
            throw new Error('malformed voice response payload');
        }

        document.getElementById('response-text').textContent = reply;
        document.getElementById('voice-result').style.display = 'block';
        document.getElementById('voice-status').textContent = 'Done!';

        // Speak response if supported
        if ('speechSynthesis' in window) {
            const utterance = new SpeechSynthesisUtterance(reply);
            utterance.rate = 1.1;
            window.speechSynthesis.speak(utterance);
        }

    } catch (e) {
        // Log the underlying cause; the UI only shows a friendly message.
        console.error('Voice command failed:', e);
        document.getElementById('response-text').textContent = 'Sorry, I had trouble processing that.';
        document.getElementById('voice-result').style.display = 'block';
        document.getElementById('voice-status').textContent = 'Error';
    }

    setTimeout(resetButton, 2000);
}
|
||||
</script>
{# End of voice-control page content. #}
{% endblock %}
|
||||
Reference in New Issue
Block a user