---
# MLX LoRA Training Config — Apple Silicon
# Replaces: autolora/train_modal.py for local training
#
# Usage:
#   mlx_lm.lora --config training/mlx-lora.yaml
#
# Runs on Mac M-series. No cloud needed for small models.
# v0.1 was trained this way: 1,214 samples, 7.8 GB peak, 48 tok/s on M3 Max.

# 4-bit quantized base model pulled from the MLX community hub.
model: mlx-community/Hermes-3-Llama-3.1-8B-4bit

# Directory containing train/valid JSONL splits in mlx_lm format.
data: data/mlx_curated

train: true
iters: 1000
batch_size: 2

# Written as 2.0e-6, not 2e-6: PyYAML's YAML 1.1 resolver only recognizes
# exponent floats that contain a decimal point, so a bare `2e-6` is loaded
# as the STRING "2e-6" and breaks numeric handling in the trainer.
learning_rate: 2.0e-6

# Number of transformer layers (from the top) that receive LoRA adapters.
lora_layers: 16

steps_per_report: 10
steps_per_eval: 100
save_every: 200

# Where adapter checkpoints are written during/after training.
adapter_path: ./output/mlx-adapters