PR for #1017: Feature: Agent "Voice" Customization UI #1024
110
.env.example
110
.env.example
@@ -1,100 +1,14 @@
|
||||
# Timmy Time — Mission Control
|
||||
# Copy this file to .env and uncomment lines you want to override.
|
||||
# .env is gitignored and never committed.
|
||||
#
|
||||
# For cloud deployment, deploy/setup.sh generates this automatically.
|
||||
# GEMINI_API_KEY: Required for Gemini AI API calls.
|
||||
# AI Studio automatically injects this at runtime from user secrets.
|
||||
# Users configure this via the Secrets panel in the AI Studio UI.
|
||||
GEMINI_API_KEY="MY_GEMINI_API_KEY"
|
||||
|
||||
# ── Cloud / Production ──────────────────────────────────────────────────────
|
||||
# Your domain for automatic HTTPS via Let's Encrypt.
|
||||
# Set to your actual domain (e.g., timmy.example.com) for HTTPS.
|
||||
# Leave as "localhost" for IP-only HTTP access.
|
||||
# DOMAIN=localhost
|
||||
# APP_URL: The URL where this applet is hosted.
|
||||
# AI Studio automatically injects this at runtime with the Cloud Run service URL.
|
||||
# Used for self-referential links, OAuth callbacks, and API endpoints.
|
||||
APP_URL="MY_APP_URL"
|
||||
|
||||
# Ollama host (default: http://localhost:11434)
|
||||
# In production (docker-compose.prod.yml), this is set to http://ollama:11434 automatically.
|
||||
# OLLAMA_URL=http://localhost:11434
|
||||
|
||||
# LLM model to use via Ollama (default: qwen3:30b)
|
||||
# OLLAMA_MODEL=qwen3:30b
|
||||
|
||||
# Ollama context window size (default: 4096 tokens)
|
||||
# Set higher for more context, lower to save RAM. 0 = model default.
|
||||
# qwen3:30b + 4096 ctx ≈ 19GB VRAM; default ctx ≈ 45GB.
|
||||
# OLLAMA_NUM_CTX=4096
|
||||
|
||||
# Enable FastAPI interactive docs at /docs and /redoc (default: false)
|
||||
# DEBUG=true
|
||||
|
||||
# ── AirLLM / big-brain backend ───────────────────────────────────────────────
|
||||
# Inference backend: "ollama" (default) | "airllm" | "auto"
|
||||
# "auto" → uses AirLLM on Apple Silicon if installed, otherwise Ollama.
|
||||
# Requires: pip install ".[bigbrain]"
|
||||
# TIMMY_MODEL_BACKEND=ollama
|
||||
|
||||
# AirLLM model size (default: 70b).
|
||||
# 8b ~16 GB RAM | 70b ~140 GB RAM | 405b ~810 GB RAM
|
||||
# AIRLLM_MODEL_SIZE=70b
|
||||
|
||||
# ── Grok (xAI) — premium cloud augmentation ──────────────────────────────────
|
||||
# Enable Grok as an opt-in premium backend for frontier reasoning.
|
||||
# Local-first ethos is preserved — Grok only activates when explicitly enabled.
|
||||
# GROK_ENABLED=false
|
||||
# XAI_API_KEY=xai-...
|
||||
# GROK_DEFAULT_MODEL=grok-3-fast
|
||||
# GROK_MAX_SATS_PER_QUERY=200
|
||||
# GROK_FREE=false
|
||||
|
||||
# ── L402 Lightning secrets ───────────────────────────────────────────────────
|
||||
# HMAC secret for invoice verification. MUST be changed in production.
|
||||
# Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"
|
||||
# L402_HMAC_SECRET=<your-secret-here>
|
||||
|
||||
# HMAC secret for macaroon signing. MUST be changed in production.
|
||||
# L402_MACAROON_SECRET=<your-secret-here>
|
||||
|
||||
# Lightning backend: "mock" (default) | "lnd"
|
||||
# LIGHTNING_BACKEND=mock
|
||||
|
||||
# ── Environment & Privacy ───────────────────────────────────────────────────
|
||||
# Environment mode: "development" (default) | "production"
|
||||
# In production, security secrets MUST be set or the app will refuse to start.
|
||||
# TIMMY_ENV=development
|
||||
|
||||
# Disable Agno telemetry for sovereign/air-gapped deployments.
|
||||
# Default is false (disabled) to align with local-first AI vision.
|
||||
# TELEMETRY_ENABLED=false
|
||||
|
||||
# ── Telegram bot ──────────────────────────────────────────────────────────────
|
||||
# Bot token from @BotFather on Telegram.
|
||||
# Alternatively, configure via the /telegram/setup dashboard endpoint at runtime.
|
||||
# Requires: pip install ".[telegram]"
|
||||
# TELEGRAM_TOKEN=
|
||||
|
||||
# ── Discord bot ──────────────────────────────────────────────────────────────
|
||||
# Bot token from https://discord.com/developers/applications
|
||||
# Alternatively, configure via the /discord/setup dashboard endpoint at runtime.
|
||||
# Requires: pip install ".[discord]"
|
||||
# Optional: pip install pyzbar Pillow (for QR code invite detection from screenshots)
|
||||
# DISCORD_TOKEN=
|
||||
|
||||
# ── Autoresearch — autonomous ML experiment loops ────────────────────────────
|
||||
# Enable autonomous experiment loops (Karpathy autoresearch pattern).
|
||||
# AUTORESEARCH_ENABLED=false
|
||||
# AUTORESEARCH_WORKSPACE=data/experiments
|
||||
# AUTORESEARCH_TIME_BUDGET=300
|
||||
# AUTORESEARCH_MAX_ITERATIONS=100
|
||||
# AUTORESEARCH_METRIC=val_bpb
|
||||
|
||||
# ── Auth Gate (nginx auth_request) ─────────────────────────────────────────
|
||||
# Required when running auth-gate.py for nginx auth_request.
|
||||
# Generate secret with: python3 -c "import secrets; print(secrets.token_hex(32))"
|
||||
# AUTH_GATE_SECRET=<your-secret-here>
|
||||
# AUTH_GATE_USER=<your-username>
|
||||
# AUTH_GATE_PASS=<your-password>
|
||||
|
||||
# ── Docker Production ────────────────────────────────────────────────────────
|
||||
# When deploying with docker-compose.prod.yml:
|
||||
# - Containers run as non-root user "timmy" (defined in Dockerfile)
|
||||
# - No source bind mounts — code is baked into the image
|
||||
# - Set TIMMY_ENV=production to enforce security checks
|
||||
# - All secrets below MUST be set before production deployment
|
||||
# Gitea Configuration — replace the MY_* placeholders with your own instance's values
|
||||
GITEA_URL="MY_GITEA_URL"
|
||||
GITEA_TOKEN="MY_GITEA_TOKEN"
|
||||
GITEA_USERNAME="gemini"
|
||||
|
||||
106
.gitignore
vendored
106
.gitignore
vendored
@@ -1,104 +1,8 @@
|
||||
# Python
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*.pyo
|
||||
.Python
|
||||
node_modules/
|
||||
build/
|
||||
dist/
|
||||
*.egg-info/
|
||||
.eggs/
|
||||
|
||||
# Virtual envs
|
||||
.venv/
|
||||
venv/
|
||||
env/
|
||||
|
||||
# Secrets / local config — commit only .env.example (the template)
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
discord_credentials.txt
|
||||
|
||||
# Backup / temp files
|
||||
*~
|
||||
\#*\#
|
||||
*.backup
|
||||
*.tar.gz
|
||||
|
||||
# SQLite — never commit databases or WAL/SHM artifacts
|
||||
*.db
|
||||
*.db-shm
|
||||
*.db-wal
|
||||
*.db-journal
|
||||
|
||||
# Runtime PID files
|
||||
.watchdog.pid
|
||||
|
||||
# Chat platform state files (contain bot tokens)
|
||||
telegram_state.json
|
||||
discord_state.json
|
||||
|
||||
# Testing
|
||||
.pytest_cache/
|
||||
.coverage
|
||||
coverage.xml
|
||||
htmlcov/
|
||||
reports/
|
||||
|
||||
# Self-modify reports (auto-generated)
|
||||
data/self_modify_reports/
|
||||
|
||||
# Error logs (auto-generated)
|
||||
logs/
|
||||
src/data/
|
||||
|
||||
# Handoff context (session-scoped)
|
||||
.handoff/
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
*.swo
|
||||
.claude/
|
||||
|
||||
# Local content — user-specific or generated
|
||||
MEMORY.md
|
||||
memory/self/*
|
||||
!memory/self/soul.md
|
||||
TIMMYTIME
|
||||
introduction.txt
|
||||
messages.txt
|
||||
morning_briefing.txt
|
||||
markdown_report.md
|
||||
data/timmy_soul.jsonl
|
||||
scripts/migrate_to_zeroclaw.py
|
||||
workspace/
|
||||
|
||||
# Loop orchestration state
|
||||
.loop/
|
||||
|
||||
# Legacy junk from old Timmy sessions (one-word fragments, cruft)
|
||||
Hi
|
||||
Im Timmy*
|
||||
his
|
||||
keep
|
||||
clean
|
||||
directory
|
||||
my_name_is_timmy*
|
||||
timmy_read_me_*
|
||||
issue_12_proposal.md
|
||||
|
||||
# Memory notes (session-scoped, not committed)
|
||||
memory/notes/
|
||||
|
||||
# Gitea Actions runner state
|
||||
.runner
|
||||
|
||||
# macOS
|
||||
coverage/
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
.Spotlight-V100
|
||||
.Trashes
|
||||
.timmy_gitea_token
|
||||
*.log
|
||||
.env*
|
||||
!.env.example
|
||||
|
||||
Reference in New Issue
Block a user