# ── Timmy Time — Microservices Architecture ──────────────────────────────────
#
# Clean separation of concerns with independent, scalable services:
#   - ollama     LLM inference engine
#   - dashboard  FastAPI coordinator + UI
#   - timmy      Sovereign AI agent
#   - workers    Swarm worker pool (scale with --scale worker=N)
#
# Usage:
#   docker compose -f docker-compose.microservices.yml up -d
#   docker compose -f docker-compose.microservices.yml logs -f dashboard
#   docker compose -f docker-compose.microservices.yml up --scale worker=4

services:
  # ── Ollama LLM Service ────────────────────────────────────────────────────
  ollama:
    build:
      context: .
      dockerfile: docker/Dockerfile.ollama
    image: timmy-ollama:latest
    container_name: timmy-ollama
    ports:
      - "11434:11434"
    volumes:
      - ollama-data:/root/.ollama
    environment:
      # Bind on all interfaces so other services on timmy-net can reach it.
      OLLAMA_HOST: "0.0.0.0:11434"
    networks:
      - timmy-net
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s

  # ── Dashboard Service ─────────────────────────────────────────────────────
  dashboard:
    build:
      context: .
      dockerfile: docker/Dockerfile.dashboard
    image: timmy-dashboard:latest
    container_name: timmy-dashboard
    ports:
      - "8000:8000"
    volumes:
      - timmy-data:/app/data
      # Live-mount source and static assets for development iteration.
      - ./src:/app/src
      - ./static:/app/static
    environment:
      DEBUG: "true"
      OLLAMA_URL: "http://ollama:11434"
      # Grok integration is opt-in via host environment; defaults keep it off.
      GROK_ENABLED: "${GROK_ENABLED:-false}"
      XAI_API_KEY: "${XAI_API_KEY:-}"
      GROK_DEFAULT_MODEL: "${GROK_DEFAULT_MODEL:-grok-3-fast}"
    networks:
      - timmy-net
    depends_on:
      ollama:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s

  # ── Timmy Agent Service ───────────────────────────────────────────────────
  timmy:
    build:
      context: .
      dockerfile: docker/Dockerfile.agent
    image: timmy-agent:latest
    container_name: timmy-agent
    volumes:
      - timmy-data:/app/data
      - ./src:/app/src
    environment:
      COORDINATOR_URL: "http://dashboard:8000"
      OLLAMA_URL: "http://ollama:11434"
      TIMMY_AGENT_ID: "timmy"
    networks:
      - timmy-net
    depends_on:
      dashboard:
        condition: service_healthy
      ollama:
        condition: service_healthy
    restart: unless-stopped

  # ── OpenFang — vendored agent runtime sidecar ─────────────────────────────
  # Rust binary providing real tool execution (browser, OSINT, forecasting).
  # Timmy's coordinator delegates tool execution here via REST API.
  # Opt-in: started only with `--profile openfang`.
  openfang:
    build:
      context: .
      dockerfile: docker/Dockerfile.openfang
    image: timmy-openfang:latest
    container_name: timmy-openfang
    profiles:
      - openfang
    environment:
      OLLAMA_URL: "http://ollama:11434"
      OPENFANG_DATA_DIR: "/app/data"
    volumes:
      - openfang-data:/app/data
    networks:
      - timmy-net
    depends_on:
      ollama:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      # Internal-only endpoint — no host port is published for this service.
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s

  # ── Swarm Worker Pool (Template) ──────────────────────────────────────────
  # Scale: docker compose -f docker-compose.microservices.yml up --scale worker=4
  # Deliberately no container_name — a fixed name would break --scale.
  worker:
    build:
      context: .
      dockerfile: docker/Dockerfile.agent
    image: timmy-agent:latest
    profiles:
      - workers
    volumes:
      - timmy-data:/app/data
      - ./src:/app/src
    environment:
      COORDINATOR_URL: "http://dashboard:8000"
      OLLAMA_URL: "http://ollama:11434"
      AGENT_NAME: "Worker"
      AGENT_CAPABILITIES: "general,reasoning,coding"
    # `$$` escapes Compose interpolation so a literal `$(hostname)` reaches the
    # container shell, giving every scaled replica a unique agent id.
    command: ["sh", "-c", "python -m swarm.agent_runner --agent-id worker-$$(hostname) --name Worker"]
    networks:
      - timmy-net
    depends_on:
      dashboard:
        condition: service_healthy
      ollama:
        condition: service_healthy
    restart: unless-stopped

# ── Volumes ───────────────────────────────────────────────────────────────────
volumes:
  # Bind-mounted onto ./data on the host; the directory must exist before
  # `up` — a `type: none` bind mount does not create it.
  timmy-data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "${PWD}/data"
  ollama-data:
    driver: local
  openfang-data:
    driver: local

# ── Network ───────────────────────────────────────────────────────────────────
networks:
  timmy-net:
    driver: bridge