Adds 3 new personas (Pixel, Lyra, Reel) and 5 new tool modules: - Git/DevOps tools (GitPython): clone, status, diff, log, blame, branch, add, commit, push, pull, stash — wired to Forge and Helm personas - Image generation (FLUX via diffusers): text-to-image, storyboards, variations — Pixel persona - Music generation (ACE-Step 1.5): full songs with vocals+instrumentals, instrumental tracks, vocal-only tracks — Lyra persona - Video generation (Wan 2.1 via diffusers): text-to-video, image-to-video clips — Reel persona - Creative Director pipeline: multi-step orchestration that chains storyboard → music → video → assembly into 3+ minute final videos - Video assembler (MoviePy + FFmpeg): stitch clips, overlay audio, title cards, subtitles, final export Also includes: - Spark Intelligence tool-level + creative pipeline event capture - Creative Studio dashboard page (/creative/ui) with 4 tabs - Config settings for all new models and output directories - pyproject.toml creative optional extra for GPU dependencies - 107 new tests covering all modules (624 total, all passing) https://claude.ai/code/session_01KJm6jQkNi3aA3yoQJn636c
67 lines
3.1 KiB
Python
from typing import Literal
|
|
|
|
from pydantic_settings import BaseSettings, SettingsConfigDict
|
|
|
|
|
|
class Settings(BaseSettings):
    """Central application configuration.

    Values are resolved by pydantic-settings in precedence order:
    environment variables first, then the ``.env`` file declared in
    ``model_config``, then the defaults below.  Unrecognized env/.env
    entries are ignored (``extra="ignore"``).
    """

    # Ollama host — override with OLLAMA_URL env var or .env file
    ollama_url: str = "http://localhost:11434"

    # LLM model passed to Agno/Ollama — override with OLLAMA_MODEL
    ollama_model: str = "llama3.2"

    # Set DEBUG=true to enable /docs and /redoc (disabled by default)
    debug: bool = False

    # Telegram bot token — set via TELEGRAM_TOKEN env var or the /telegram/setup endpoint
    telegram_token: str = ""

    # ── AirLLM / backend selection ───────────────────────────────────────────
    # "ollama" — always use Ollama (default, safe everywhere)
    # "airllm" — always use AirLLM (requires pip install ".[bigbrain]")
    # "auto"   — use AirLLM on Apple Silicon if airllm is installed,
    #            fall back to Ollama otherwise
    timmy_model_backend: Literal["ollama", "airllm", "auto"] = "ollama"

    # AirLLM model size when backend is airllm or auto.
    # Larger = smarter, but needs more RAM / disk.
    # 8b ~16 GB | 70b ~140 GB | 405b ~810 GB
    airllm_model_size: Literal["8b", "70b", "405b"] = "70b"

    # ── Spark Intelligence ────────────────────────────────────────────────
    # Enable/disable the Spark cognitive layer.
    # When enabled, Spark captures swarm events, runs EIDOS predictions,
    # consolidates memories, and generates advisory recommendations.
    spark_enabled: bool = True

    # ── Git / DevOps ──────────────────────────────────────────────────────
    # Base directory for cloned repositories.  NOTE(review): "~" is not
    # expanded here — presumably consumers call os.path.expanduser; verify.
    git_default_repo_dir: str = "~/repos"

    # ── Creative — Image Generation (Pixel) ───────────────────────────────
    # Model identifier for the FLUX text-to-image pipeline.
    flux_model_id: str = "black-forest-labs/FLUX.1-schnell"
    # Directory where generated images are written (relative to CWD).
    image_output_dir: str = "data/images"
    # Default number of inference steps for image generation.
    image_default_steps: int = 4

    # ── Creative — Music Generation (Lyra) ────────────────────────────────
    # Directory where generated audio tracks are written.
    music_output_dir: str = "data/music"
    # Model identifier for the ACE-Step music-generation pipeline.
    ace_step_model: str = "ace-step/ACE-Step-v1.5"

    # ── Creative — Video Generation (Reel) ────────────────────────────────
    # Directory where generated video clips are written.
    video_output_dir: str = "data/video"
    # Model identifier for the Wan text-to-video pipeline.
    wan_model_id: str = "Wan-AI/Wan2.1-T2V-1.3B"
    # Default output resolution label passed to the video tools.
    video_default_resolution: str = "480p"

    # ── Creative — Pipeline / Assembly ────────────────────────────────────
    # Directory for assembled pipeline outputs (final videos, etc.).
    creative_output_dir: str = "data/creative"
    # Transition length between stitched clips — presumably seconds; confirm
    # against the video assembler's usage.
    video_transition_duration: float = 1.0
    # FFmpeg codec name used for the final video export.
    default_video_codec: str = "libx264"

    # pydantic-settings behavior: read ".env" (UTF-8) and silently ignore
    # any keys that do not map to a field above.
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )
|
|
|
|
|
|
# Module-level singleton, evaluated at import time; other modules should
# import this instance rather than constructing their own Settings().
settings = Settings()
|