# syntax=docker/dockerfile:1
# Deep Dive Intelligence Pipeline — Production Container
# Issue: #830 — Sovereign NotebookLM Daily Briefing
#
# Build:
#   docker build -t deepdive:latest .
# Run dry-run:
#   docker run --rm -v $(pwd)/config.yaml:/app/config.yaml deepdive:latest --dry-run

# NOTE(review): for fully reproducible production builds, pin by digest
# (python:3.11-slim@sha256:...) instead of a floating tag.
FROM python:3.11-slim

# System dependencies (alphabetized, one per line for diffability).
# --no-install-recommends + list cleanup in the same layer keep the image small.
# git is kept in case requirements.txt contains VCS installs (git+https://...) — TODO confirm;
# likewise wget AND curl are both installed — presumably one is enough, verify callers.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        ffmpeg \
        git \
        wget \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Install Python dependencies first so source-code changes don't invalidate this layer.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Dedicated non-root runtime user with a stable UID/GID (lets Kubernetes
# runAsNonRoot verify it). Cache/output dirs are created and owned in the
# same layer so the runtime user can write to them.
RUN groupadd --system --gid 10001 deepdive \
    && useradd --system --uid 10001 --gid deepdive --home /app deepdive \
    && mkdir -p /app/cache /app/output /app/.cache/huggingface \
    && chown -R deepdive:deepdive /app

# Point all HF/sentence-transformers caching at an app-owned path BEFORE the
# pre-download below. Without this the model lands in root's /root/.cache,
# which is unreadable after the USER switch and would force a re-download at
# runtime — defeating the cold-start optimization.
ENV HF_HOME=/app/.cache/huggingface \
    SENTENCE_TRANSFORMERS_HOME=/app/.cache/huggingface

USER deepdive

# Pre-download embedding model for faster cold starts (runs as the runtime
# user so the cached files carry the right ownership).
RUN python3 -c "from sentence_transformers import SentenceTransformer; SentenceTransformer('all-MiniLM-L6-v2')"

# Copy application code; --chown avoids a follow-up chown layer that would
# double the size of these files in the image.
COPY --chown=deepdive:deepdive pipeline.py tts_engine.py fleet_context.py telegram_command.py quality_eval.py ./
COPY --chown=deepdive:deepdive prompts/ ./prompts/
COPY --chown=deepdive:deepdive tests/ ./tests/
COPY --chown=deepdive:deepdive Makefile README.md QUICKSTART.md OPERATIONAL_READINESS.md ./

# Runtime configuration (grouped; key=value form).
ENV DEEPDIVE_CACHE_DIR=/app/cache \
    PYTHONUNBUFFERED=1

# Default: run pipeline with mounted config. ENTRYPOINT is the fixed binary +
# config; CMD supplies default args the operator can override at `docker run`.
ENTRYPOINT ["python3", "pipeline.py", "--config", "/app/config.yaml"]
CMD ["--dry-run"]