# File: the-nexus/intelligence/deepdive/docker-compose.yml

# Deep Dive — Full Containerized Deployment
# Issue: #830 — Sovereign NotebookLM Daily Briefing
#
# Usage:
# docker compose up -d # Start stack
# docker compose run --rm deepdive --dry-run # Test pipeline
# docker compose run --rm deepdive --today # Live run
#
# For daily automation, use systemd timer or host cron calling:
# docker compose -f /path/to/docker-compose.yml run --rm deepdive --today
services:
  # Pipeline container: built from the local Dockerfile, run on demand
  # (see usage comments above) rather than as a long-lived daemon.
  deepdive:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: deepdive
    image: deepdive:latest
    volumes:
      # Mount your config from host (read-only so the container cannot edit it)
      - ./config.yaml:/app/config.yaml:ro
      # Persist cache and outputs across runs via named volumes
      - deepdive-cache:/app/cache
      - deepdive-output:/app/output
    environment:
      # All secrets come from the host environment; `:-` defaults each to
      # empty so `docker compose config` does not warn when a key is unset.
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - ELEVENLABS_API_KEY=${ELEVENLABS_API_KEY:-}
      - TELEGRAM_BOT_TOKEN=${TELEGRAM_BOT_TOKEN:-}
      - TELEGRAM_HOME_CHANNEL=${TELEGRAM_HOME_CHANNEL:-}
      - DEEPDIVE_CACHE_DIR=/app/cache
    # Safe default: a plain `docker compose up` performs a dry run only.
    # Live runs override this on the CLI (e.g. `run --rm deepdive --today`).
    command: ["--dry-run"]
    # Optional: attach to Ollama for local LLM inference
    # networks:
    #   - deepdive-net

  # Optional: Local LLM backend (uncomment if using local inference)
  # ollama:
  #   image: ollama/ollama:latest
  #   container_name: deepdive-ollama
  #   volumes:
  #     - ollama-models:/root/.ollama
  #   ports:
  #     - "11434:11434"
  #   networks:
  #     - deepdive-net
# Named volumes so cache and output survive container re-creation.
volumes:
  deepdive-cache:
  deepdive-output:
  # ollama-models:

# Optional network shared with the local Ollama backend (uncomment with it).
# networks:
#   deepdive-net: