Files
the-nexus/intelligence/deepdive/docker-compose.yml
Ezra (Archivist) d2f103654f
Some checks failed
Deploy Nexus / deploy (push) Has been cancelled
intelligence(deepdive): Docker deployment scaffold for #830
- Add Dockerfile for production containerized pipeline
- Add docker-compose.yml for full stack deployment
- Add .dockerignore for clean builds
- Add deploy.sh: one-command build, test, and systemd timer install

This provides a sovereign, reproducible deployment path for the
Deep Dive daily briefing pipeline.
2026-04-05 20:40:58 +00:00

55 lines
1.5 KiB
YAML

# Deep Dive — Full Containerized Deployment
# Issue: #830 — Sovereign NotebookLM Daily Briefing
#
# Usage:
#   docker compose up -d                          # Start stack
#   docker compose run --rm deepdive --dry-run    # Test pipeline
#   docker compose run --rm deepdive --today      # Live run
#
# For daily automation, use systemd timer or host cron calling:
#   docker compose -f /path/to/docker-compose.yml run --rm deepdive --today

services:
  deepdive:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: deepdive
    # Tag the locally built image so it can also be referenced by
    # ad-hoc `docker run deepdive:latest`.
    image: deepdive:latest
    volumes:
      # Mount your config from host (read-only).
      - ./config.yaml:/app/config.yaml:ro
      # Persist cache and outputs across container recreations.
      - deepdive-cache:/app/cache
      - deepdive-output:/app/output
    environment:
      # API keys pass through from the host environment; the `:-` default
      # keeps Compose from warning when a variable is unset.
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - ELEVENLABS_API_KEY=${ELEVENLABS_API_KEY:-}
      - TELEGRAM_BOT_TOKEN=${TELEGRAM_BOT_TOKEN:-}
      - TELEGRAM_HOME_CHANNEL=${TELEGRAM_HOME_CHANNEL:-}
      - DEEPDIVE_CACHE_DIR=/app/cache
    # Safe default: dry run. Override positionally for live runs, e.g.
    # `docker compose run --rm deepdive --today`.
    command: ["--dry-run"]
    # Optional: attach to Ollama for local LLM inference
    # networks:
    #   - deepdive-net

  # Optional: Local LLM backend (uncomment if using local inference)
  # ollama:
  #   image: ollama/ollama:latest
  #   container_name: deepdive-ollama
  #   volumes:
  #     - ollama-models:/root/.ollama
  #   ports:
  #     - "11434:11434"
  #   networks:
  #     - deepdive-net

# Named volumes; bare keys use the default local driver — the idiomatic
# Compose form for "no custom options".
volumes:
  deepdive-cache:
  deepdive-output:
  # ollama-models:

# networks:
#   deepdive-net: