# ── Development Compose ───────────────────────────────────────────────────────
#
# Services
#   dashboard       FastAPI app (always on)
#   celery-worker   (behind 'celery' profile)
#   openfang        (behind 'openfang' profile)
#
# Usage
#   make docker-build   build the image
#   make docker-up      start dashboard
#   make docker-down    stop everything
#   make docker-logs    tail logs
#
# ── Security note ─────────────────────────────────────────────────────────────
# Override user per-environment — see docker-compose.dev.yml / docker-compose.prod.yml
#
# ── Ollama host access ────────────────────────────────────────────────────────
# By default OLLAMA_URL points to http://host.docker.internal:11434 which
# reaches Ollama running on the Docker host (macOS/Windows native).
#
# Linux: The extra_hosts entry maps host.docker.internal → host-gateway,
# which resolves to the host IP on Docker 20.10+.

services:
  # ── Dashboard (FastAPI) ──────────────────────────────────────────────────
  dashboard:
    build: .
    image: timmy-time:latest
    container_name: timmy-dashboard
    # NOTE(review): an empty user is a placeholder meant to be overridden by
    # the per-environment override files (see security note above) — confirm
    # every environment supplies one, as some Docker versions reject "".
    user: ""  # see security note above
    ports:
      - "8000:8000"
    volumes:
      - timmy-data:/app/data
      - ./src:/app/src        # live-reload: source changes reflect immediately
      - ./static:/app/static  # live-reload: CSS/asset changes reflect immediately
    environment:
      DEBUG: "true"
      OLLAMA_URL: "${OLLAMA_URL:-http://host.docker.internal:11434}"
      # Grok (xAI) — opt-in premium cloud backend
      GROK_ENABLED: "${GROK_ENABLED:-false}"
      XAI_API_KEY: "${XAI_API_KEY:-}"
      GROK_DEFAULT_MODEL: "${GROK_DEFAULT_MODEL:-grok-3-fast}"
      # vLLM backend — set TIMMY_LLM_BACKEND=vllm to activate.
      # NOTE(review): the localhost default is only reachable from the host
      # network, not from inside this container — when using the bundled vllm
      # service, set VLLM_URL=http://vllm:8001 (see the vllm stanza below).
      TIMMY_LLM_BACKEND: "${TIMMY_LLM_BACKEND:-ollama}"
      VLLM_URL: "${VLLM_URL:-http://localhost:8001}"
      VLLM_MODEL: "${VLLM_MODEL:-Qwen/Qwen2.5-14B-Instruct}"
    extra_hosts:
      - "host.docker.internal:host-gateway"  # Linux: maps to host IP
    networks:
      - timmy-net
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s

  # ── Celery Worker — background task processing ──────────────────────────
  celery-worker:
    build: .
    image: timmy-time:latest
    container_name: timmy-celery-worker
    user: ""  # placeholder — overridden per-environment (see security note)
    command: ["celery", "-A", "infrastructure.celery.app", "worker", "--loglevel=info", "--concurrency=2"]
    volumes:
      - timmy-data:/app/data
      - ./src:/app/src
    environment:
      OLLAMA_URL: "${OLLAMA_URL:-http://host.docker.internal:11434}"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - timmy-net
    restart: unless-stopped
    profiles:
      - celery

  # ── vLLM — high-throughput inference server (GPU optional) ──────────────
  # Requires the 'vllm' profile: docker compose --profile vllm up
  #
  # GPU (NVIDIA): set VLLM_MODEL and ensure nvidia-container-toolkit is installed.
  # CPU-only: add --device cpu to VLLM_EXTRA_ARGS (slower, but works anywhere).
  #
  # The dashboard reaches vLLM at http://vllm:8001 (inside timmy-net).
  # Set VLLM_URL=http://vllm:8001 in the dashboard environment when using this service.
  vllm:
    image: vllm/vllm-openai:latest
    container_name: timmy-vllm
    profiles:
      - vllm
    ports:
      - "8001:8001"
    environment:
      # Model to load — override with VLLM_MODEL env var
      VLLM_MODEL: "${VLLM_MODEL:-Qwen/Qwen2.5-7B-Instruct}"
    # >- folds to a single line and strips the trailing newline, so the
    # entrypoint receives one clean argument string.
    command: >-
      --model ${VLLM_MODEL:-Qwen/Qwen2.5-7B-Instruct}
      --port 8001
      --host 0.0.0.0
      ${VLLM_EXTRA_ARGS:-}
    volumes:
      - vllm-cache:/root/.cache/huggingface
    networks:
      - timmy-net
    restart: unless-stopped
    # NOTE(review): this healthcheck assumes curl exists in the
    # vllm/vllm-openai image — verify, or switch to a python-based probe.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8001/health"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 120s
    # GPU support — uncomment to enable NVIDIA GPU passthrough
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]

  # ── OpenFang — vendored agent runtime sidecar ────────────────────────────
  openfang:
    build:
      context: .
      dockerfile: docker/Dockerfile.openfang
    image: timmy-openfang:latest
    container_name: timmy-openfang
    profiles:
      - openfang
    environment:
      OLLAMA_URL: "${OLLAMA_URL:-http://host.docker.internal:11434}"
      OPENFANG_DATA_DIR: "/app/data"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - openfang-data:/app/data
    networks:
      - timmy-net
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s

# ── Volumes ──────────────────────────────────────────────────────────────────
volumes:
  # Bind-mounted app data.
  # NOTE(review): ${PWD} is shell-provided and may be unset (e.g. on Windows
  # or under some CI runners) — confirm, or anchor to the compose file dir.
  timmy-data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "${PWD}/data"
  openfang-data:
    driver: local
  vllm-cache:
    driver: local

# ── Internal network ─────────────────────────────────────────────────────────
networks:
  timmy-net:
    driver: bridge