ilia bdbf09a9ac feat: Implement voice I/O services (TICKET-006, TICKET-010, TICKET-014)
 TICKET-006: Wake-word Detection Service
- Implemented wake-word detection using openWakeWord
- HTTP/WebSocket server on port 8002
- Real-time detection with configurable threshold
- Event emission for ASR integration
- Location: home-voice-agent/wake-word/

 TICKET-010: ASR Service
- Implemented ASR using faster-whisper
- HTTP endpoint for file transcription
- WebSocket endpoint for streaming transcription
- Support for multiple audio formats
- Auto language detection
- GPU acceleration support
- Location: home-voice-agent/asr/

 TICKET-014: TTS Service
- Implemented TTS using Piper
- HTTP endpoint for text-to-speech synthesis
- Low-latency processing (< 500ms)
- Multiple voice support
- WAV audio output
- Location: home-voice-agent/tts/

 TICKET-047: Updated Hardware Purchases
- Marked Pi5 kit, SSD, microphone, and speakers as purchased
- Updated progress log with purchase status

📚 Documentation:
- Added VOICE_SERVICES_README.md with complete testing guide
- Each service includes README.md with usage instructions
- All services ready for Pi5 deployment

🧪 Testing:
- Created test files for each service
- All imports validated
- FastAPI apps created successfully
- Code passes syntax validation

🚀 Ready for:
- Pi5 deployment
- End-to-end voice flow testing
- Integration with MCP server

Files Added:
- wake-word/detector.py
- wake-word/server.py
- wake-word/requirements.txt
- wake-word/README.md
- wake-word/test_detector.py
- asr/service.py
- asr/server.py
- asr/requirements.txt
- asr/README.md
- asr/test_service.py
- tts/service.py
- tts/server.py
- tts/requirements.txt
- tts/README.md
- tts/test_service.py
- VOICE_SERVICES_README.md

Files Modified:
- tickets/done/TICKET-047_hardware-purchases.md

Files Moved:
- tickets/backlog/TICKET-006_prototype-wake-word-node.md → tickets/done/
- tickets/backlog/TICKET-010_streaming-asr-service.md → tickets/done/
- tickets/backlog/TICKET-014_tts-service.md → tickets/done/
2026-01-12 22:22:38 -05:00

376 lines
12 KiB
Python

"""
Dashboard API endpoints for web interface.
Extends MCP server with dashboard-specific endpoints.
"""
from fastapi import APIRouter, HTTPException
from fastapi.responses import JSONResponse
from typing import List, Dict, Any, Optional
from pathlib import Path
import sqlite3
import json
from datetime import datetime
# All dashboard routes hang off /api/dashboard and share the "dashboard" tag.
router = APIRouter(prefix="/api/dashboard", tags=["dashboard"])
# Database paths
# NOTE(review): every path resolves three directories above this module into a
# shared "data" tree — confirm this matches the deployed repository layout.
CONVERSATIONS_DB = Path(__file__).parent.parent.parent / "data" / "conversations.db"  # sessions + messages
TIMERS_DB = Path(__file__).parent.parent.parent / "data" / "timers.db"  # timers and reminders (split by "type" column)
MEMORY_DB = Path(__file__).parent.parent.parent / "data" / "memory.db"  # only its existence is reported (see /status)
TASKS_DIR = Path(__file__).parent.parent.parent / "data" / "tasks" / "home"  # Kanban columns of .md files
NOTES_DIR = Path(__file__).parent.parent.parent / "data" / "notes" / "home"  # not referenced in this chunk
@router.get("/status")
async def get_system_status():
    """Get overall system status.

    Reports which databases exist plus counts of recorded conversations,
    active timers, and pending tasks (the todo / in-progress / review
    Kanban columns).

    Raises:
        HTTPException: 500 on any database or filesystem error.
    """
    try:
        # Check if databases exist
        conversations_exist = CONVERSATIONS_DB.exists()
        timers_exist = TIMERS_DB.exists()
        memory_exist = MEMORY_DB.exists()

        # Count conversations
        conversation_count = 0
        if conversations_exist:
            conn = sqlite3.connect(str(CONVERSATIONS_DB))
            try:
                conversation_count = conn.execute(
                    "SELECT COUNT(*) FROM sessions"
                ).fetchone()[0]
            finally:
                # Close even when the query raises so the handle never leaks.
                conn.close()

        # Count active timers
        timer_count = 0
        if timers_exist:
            conn = sqlite3.connect(str(TIMERS_DB))
            try:
                timer_count = conn.execute(
                    "SELECT COUNT(*) FROM timers WHERE status = 'active'"
                ).fetchone()[0]
            finally:
                conn.close()

        # Pending tasks are markdown files in the non-terminal columns.
        task_count = 0
        if TASKS_DIR.exists():
            for status_dir in ["todo", "in-progress", "review"]:
                status_path = TASKS_DIR / status_dir
                if status_path.exists():
                    task_count += len(list(status_path.glob("*.md")))

        return {
            "status": "operational",
            "databases": {
                "conversations": conversations_exist,
                "timers": timers_exist,
                "memory": memory_exist
            },
            "counts": {
                "conversations": conversation_count,
                "active_timers": timer_count,
                "pending_tasks": task_count
            }
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/conversations")
async def list_conversations(limit: int = 20, offset: int = 0):
    """List recent conversations.

    Args:
        limit: Maximum number of sessions to return (page size).
        offset: Number of sessions to skip (for paging).

    Returns:
        Dict with the page of sessions ordered by most recent activity,
        the total session count, and the paging parameters echoed back.

    Raises:
        HTTPException: 500 on any database error.
    """
    # A missing database just means nothing has been recorded yet.
    if not CONVERSATIONS_DB.exists():
        return {"conversations": [], "total": 0}
    try:
        conn = sqlite3.connect(str(CONVERSATIONS_DB))
        conn.row_factory = sqlite3.Row  # access columns by name
        try:
            # Get total count
            total = conn.execute("SELECT COUNT(*) FROM sessions").fetchone()[0]
            # Get one page of conversations
            rows = conn.execute("""
                SELECT session_id, agent_type, created_at, last_activity
                FROM sessions
                ORDER BY last_activity DESC
                LIMIT ? OFFSET ?
            """, (limit, offset)).fetchall()
        finally:
            # Always release the handle, even when a query raises.
            conn.close()
        conversations = [
            {
                "session_id": row["session_id"],
                "agent_type": row["agent_type"],
                "created_at": row["created_at"],
                "last_activity": row["last_activity"]
            }
            for row in rows
        ]
        return {
            "conversations": conversations,
            "total": total,
            "limit": limit,
            "offset": offset
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/conversations/{session_id}")
async def get_conversation(session_id: str):
    """Get conversation details.

    Args:
        session_id: Identifier of the session to fetch.

    Returns:
        Session metadata plus its messages in chronological order.
        ``tool_calls`` / ``tool_results`` are stored as JSON text and
        decoded here when present.

    Raises:
        HTTPException: 404 if the session does not exist, 500 on DB errors.
    """
    if not CONVERSATIONS_DB.exists():
        raise HTTPException(status_code=404, detail="Conversation not found")
    try:
        conn = sqlite3.connect(str(CONVERSATIONS_DB))
        conn.row_factory = sqlite3.Row
        try:
            # Get session
            session_row = conn.execute("""
                SELECT session_id, agent_type, created_at, last_activity
                FROM sessions
                WHERE session_id = ?
            """, (session_id,)).fetchone()
            if not session_row:
                raise HTTPException(status_code=404, detail="Conversation not found")
            # Get messages
            message_rows = conn.execute("""
                SELECT role, content, timestamp, tool_calls, tool_results
                FROM messages
                WHERE session_id = ?
                ORDER BY timestamp ASC
            """, (session_id,)).fetchall()
        finally:
            # Runs on success, on the 404 above, and on query failure alike,
            # so the connection can never leak.
            conn.close()
        messages = []
        for row in message_rows:
            msg = {
                "role": row["role"],
                "content": row["content"],
                "timestamp": row["timestamp"]
            }
            # Optional JSON columns: decode only when non-empty.
            if row["tool_calls"]:
                msg["tool_calls"] = json.loads(row["tool_calls"])
            if row["tool_results"]:
                msg["tool_results"] = json.loads(row["tool_results"])
            messages.append(msg)
        return {
            "session_id": session_row["session_id"],
            "agent_type": session_row["agent_type"],
            "created_at": session_row["created_at"],
            "last_activity": session_row["last_activity"],
            "messages": messages
        }
    except HTTPException:
        raise  # preserve the deliberate 404 instead of wrapping it in a 500
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.delete("/conversations/{session_id}")
async def delete_conversation(session_id: str):
    """Delete a conversation.

    Removes the session row and all of its messages in one transaction.

    Raises:
        HTTPException: 404 if no such session existed, 500 on DB errors.
    """
    if not CONVERSATIONS_DB.exists():
        raise HTTPException(status_code=404, detail="Conversation not found")
    try:
        conn = sqlite3.connect(str(CONVERSATIONS_DB))
        try:
            # Delete messages first, then the session itself.
            conn.execute("DELETE FROM messages WHERE session_id = ?", (session_id,))
            cursor = conn.execute("DELETE FROM sessions WHERE session_id = ?", (session_id,))
            conn.commit()
            # rowcount of the sessions DELETE tells us whether the id existed.
            deleted = cursor.rowcount > 0
        finally:
            # Close even when an execute raises so the handle never leaks.
            conn.close()
        if not deleted:
            raise HTTPException(status_code=404, detail="Conversation not found")
        return {"success": True, "message": "Conversation deleted"}
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/tasks")
async def list_tasks(status: Optional[str] = None):
    """List tasks from the Kanban board.

    Each task is a markdown file with YAML-style frontmatter delimited by
    "---" lines. Files without frontmatter, or that fail to read/parse,
    are skipped. Pass ``status`` to restrict the scan to one column.
    """
    if not TASKS_DIR.exists():
        return {"tasks": []}
    try:
        columns = [status] if status else ["backlog", "todo", "in-progress", "review", "done"]
        collected = []
        for column in columns:
            column_path = TASKS_DIR / column
            if not column_path.exists():
                continue
            for md_file in column_path.glob("*.md"):
                try:
                    text = md_file.read_text()
                    # Only files with "---"-delimited frontmatter are tasks.
                    if not text.startswith("---"):
                        continue
                    pieces = text.split("---", 2)
                    if len(pieces) < 3:
                        continue
                    header, body = pieces[1], pieces[2].strip()
                    # Minimal frontmatter parse: "key: value" lines only.
                    meta = {}
                    for raw_line in header.split("\n"):
                        if ":" not in raw_line:
                            continue
                        key, _, raw_value = raw_line.partition(":")
                        meta[key.strip()] = raw_value.strip().strip('"').strip("'")
                    collected.append({
                        "id": md_file.stem,
                        "title": meta.get("title", md_file.stem),
                        "status": column,
                        "description": body,
                        "created": meta.get("created", ""),
                        "updated": meta.get("updated", ""),
                        "priority": meta.get("priority", "medium")
                    })
                except Exception:
                    continue  # best-effort: one bad file never breaks the board
        return {"tasks": collected}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/timers")
async def list_timers():
    """List active timers and reminders.

    Fetches every row with status 'active' and splits them into two lists
    by the row's "type" column ("timer" vs "reminder"); rows with any
    other type are dropped. Timer rows carry ``duration_seconds``,
    reminder rows carry ``target_time``; ``message`` is attached when set.

    Raises:
        HTTPException: 500 on any database error.
    """
    if not TIMERS_DB.exists():
        return {"timers": [], "reminders": []}
    try:
        conn = sqlite3.connect(str(TIMERS_DB))
        conn.row_factory = sqlite3.Row
        try:
            # Get active timers and reminders
            rows = conn.execute("""
                SELECT id, name, duration_seconds, target_time, created_at, status, type, message
                FROM timers
                WHERE status = 'active'
                ORDER BY created_at DESC
            """).fetchall()
        finally:
            # Always release the handle, even when the query raises.
            conn.close()
        timers = []
        reminders = []
        for row in rows:
            item = {
                "id": row["id"],
                "name": row["name"],
                "status": row["status"],
                "created_at": row["created_at"]
            }
            # Timer-specific field
            if row["duration_seconds"] is not None:
                item["duration_seconds"] = row["duration_seconds"]
            # Reminder-specific field
            if row["target_time"] is not None:
                item["target_time"] = row["target_time"]
            # Optional message
            if row["message"]:
                item["message"] = row["message"]
            # Categorize by type
            if row["type"] == "timer":
                timers.append(item)
            elif row["type"] == "reminder":
                reminders.append(item)
        return {
            "timers": timers,
            "reminders": reminders
        }
    except Exception as e:
        # Match the sibling endpoints: report only the message. The previous
        # version sent a full traceback to the HTTP client, leaking internal
        # file paths and code structure.
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/logs")
async def search_logs(
    limit: int = 50,
    level: Optional[str] = None,
    agent_type: Optional[str] = None,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Search logs.

    Scans the most recent ``llm_*.log`` file (JSON-lines format) and
    returns up to ``limit`` matching entries, newest first.

    Args:
        limit: Maximum number of entries to return.
        level: Keep only entries with this level (compared case-insensitively).
        agent_type: Keep only entries from this agent.
        start_date: Inclusive lower bound on the entry "timestamp" string.
        end_date: Inclusive upper bound on the entry "timestamp" string.

    Raises:
        HTTPException: 500 on unexpected I/O errors.
    """
    log_dir = Path(__file__).parent.parent.parent / "data" / "logs"
    if not log_dir.exists():
        return {"logs": []}
    try:
        # Pick the most recent log file (names sort chronologically).
        log_files = sorted(log_dir.glob("llm_*.log"), reverse=True)
        if not log_files:
            return {"logs": []}
        logs = []
        # Walk the newest file from the END so the latest entries come back
        # first. The previous version took the first `limit` matches from
        # the top of the file — i.e. the OLDEST entries — which contradicts
        # selecting the newest file in the first place.
        for line in reversed(log_files[0].read_text().splitlines()):
            if len(logs) >= limit:
                break
            try:
                log_entry = json.loads(line)
                # Apply filters
                if level and log_entry.get("level") != level.upper():
                    continue
                if agent_type and log_entry.get("agent_type") != agent_type:
                    continue
                if start_date and log_entry.get("timestamp", "") < start_date:
                    continue
                if end_date and log_entry.get("timestamp", "") > end_date:
                    continue
                logs.append(log_entry)
            except Exception:
                continue  # skip malformed lines
        return {
            "logs": logs,
            "total": len(logs)
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))