Files
timmy-home/scripts/provision-timmy-vps.sh

261 lines
6.5 KiB
Bash
Raw Permalink Normal View History

#!/bin/bash
# Timmy VPS Provisioning Script
# Transforms fresh Ubuntu 22.04+ VPS into sovereign local-first wizard
#
# Tunables (override via environment):
#   TIMMY_USER  - account the services run as      (default: root)
#   TIMMY_HOME  - home directory of that account   (default: /root)
#   REPO_URL    - git remote for the timmy-home repo
#   MODEL_URL   - GGUF model weights to download
#   MODEL_NAME  - local filename for the model
#
# Strict mode: -e aborts on command failure, -u rejects unset variables,
# and pipefail stops pipelines like `apt-get ... | tail` from silently
# hiding a failed left-hand command.
set -euo pipefail

TIMMY_USER="${TIMMY_USER:-root}"
TIMMY_HOME="${TIMMY_HOME:-/root}"
TIMMY_DIR="$TIMMY_HOME/timmy"
REPO_URL="${REPO_URL:-http://143.198.27.163:3000/Timmy_Foundation/timmy-home.git}"
MODEL_URL="${MODEL_URL:-https://huggingface.co/TheBloke/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/hermes-3-llama-3.1-8b.Q4_K_M.gguf}"
MODEL_NAME="${MODEL_NAME:-hermes-3-8b.Q4_K_M.gguf}"

echo "========================================"
echo " Timmy VPS Provisioning"
echo "========================================"
echo ""
# ANSI color codes for tagged log output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Shared emitter: prints "<color><tag><reset> <message>", letting echo -e
# expand the escape sequences.
_say() {
echo -e "${1}${2}${NC} ${3}"
}

# Informational progress message (green tag).
log() {
_say "$GREEN" "[TIMMY]" "$1"
}

# Non-fatal warning (yellow tag); execution continues.
warn() {
_say "$YELLOW" "[WARN]" "$1"
}

# Error report (red tag); the caller decides whether to exit.
error() {
_say "$RED" "[ERROR]" "$1"
}
# Refuse to run unprivileged: every later step writes under /etc and /root.
if [[ "$EUID" -ne 0 ]]; then
error "Please run as root"
exit 1
fi

# Warn (but still proceed) on distributions this script was not tested on.
if ! grep -q -e "Ubuntu 22.04" -e "Ubuntu 24.04" /etc/os-release; then
warn "Not Ubuntu 22.04/24.04 - may not work correctly"
fi
log "Step 1/8: Installing system dependencies..."
export DEBIAN_FRONTEND=noninteractive
apt-get update -qq

# Toolchain for llama.cpp, Python for the agent, plus firewall/CLI utilities.
packages=(
build-essential
cmake
git
curl
wget
python3
python3-pip
python3-venv
libopenblas-dev
pkg-config
ufw
jq
sqlite3
libsqlite3-dev
)
# `| tail -5` keeps the output short, but under plain `set -e` the pipe's
# exit status comes from tail, masking an apt failure. Check PIPESTATUS
# explicitly so a broken install aborts provisioning instead of failing later.
apt-get install -y -qq "${packages[@]}" 2>&1 | tail -5
if [ "${PIPESTATUS[0]}" -ne 0 ]; then
error "apt-get install failed"
exit 1
fi
log "Step 2/8: Setting up directory structure..."
# Working tree for Timmy plus a user-level systemd config directory.
for subdir in soul scripts logs shared models configs; do
mkdir -p "$TIMMY_DIR/$subdir"
done
mkdir -p "$TIMMY_HOME/.config/systemd/user"
log "Step 3/8: Building llama.cpp from source..."
# Build only once: a previous run leaves llama-server in $TIMMY_DIR.
if [ ! -f "$TIMMY_DIR/llama-server" ]; then
cd /tmp
# `|| true`: the clone fails harmlessly when /tmp/llama.cpp already exists
# from an earlier attempt. NOTE(review): in that case the stale checkout is
# reused as-is rather than updated - confirm that is acceptable.
git clone --depth 1 https://github.com/ggerganov/llama.cpp.git 2>/dev/null || true
cd llama.cpp
# Build with OpenBLAS for CPU optimization
cmake -B build \
-DGGML_BLAS=ON \
-DGGML_BLAS_VENDOR=OpenBLAS \
-DLLAMA_BUILD_TESTS=OFF \
-DLLAMA_BUILD_EXAMPLES=OFF \
-DCMAKE_BUILD_TYPE=Release
cmake --build build --config Release -j$(nproc)
# Copy binaries
cp build/bin/llama-server "$TIMMY_DIR/"
cp build/bin/llama-cli "$TIMMY_DIR/"
log "llama.cpp built successfully"
else
log "llama.cpp already exists, skipping build"
fi
log "Step 4/8: Downloading model weights..."
if [ ! -f "$TIMMY_DIR/models/$MODEL_NAME" ]; then
cd "$TIMMY_DIR/models"
# Download to a temporary name and rename only on success: `wget -O` creates
# the output file even when the transfer fails, and the original code both
# logged "Model downloaded" on failure and left a partial file that made
# every later run skip the download.
if wget -q --show-progress "$MODEL_URL" -O "$MODEL_NAME.part"; then
mv "$MODEL_NAME.part" "$MODEL_NAME"
log "Model downloaded"
else
rm -f "$MODEL_NAME.part"
error "Failed to download model. Continuing anyway..."
fi
else
log "Model already exists, skipping download"
fi
log "Step 5/8: Setting up llama-server systemd service..."
# System unit for the inference server. The heredoc delimiter is unquoted,
# so $TIMMY_* and $MODEL_NAME expand now, at provisioning time; each `\\`
# becomes a single line-continuation backslash in the written unit file.
# Flags: -c 8192 context size, -np 1 single slot, -ngl 0 (CPU only),
# --jinja enables the model's chat template. Bound to 127.0.0.1 so the
# server is never reachable from outside (see the firewall step).
cat > /etc/systemd/system/llama-server.service << EOF
[Unit]
Description=llama.cpp inference server for Timmy
After=network.target
[Service]
Type=simple
User=$TIMMY_USER
WorkingDirectory=$TIMMY_DIR
ExecStart=$TIMMY_DIR/llama-server \\
-m $TIMMY_DIR/models/$MODEL_NAME \\
--host 127.0.0.1 \\
--port 8081 \\
-c 8192 \\
-np 1 \\
--jinja \\
-ngl 0
Restart=always
RestartSec=10
Environment="HOME=$TIMMY_HOME"
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
# Enable for boot; the actual start happens at the end of provisioning.
systemctl enable llama-server.service
log "Step 6/8: Cloning timmy-home repo and setting up agent..."
# Clone is best-effort: a missing repo only produces a warning, the agent
# service is still installed (it will fail until the repo appears).
if [ ! -d "$TIMMY_DIR/timmy-home" ]; then
cd "$TIMMY_DIR"
git clone "$REPO_URL" timmy-home 2>/dev/null || warn "Could not clone repo"
fi
# Create minimal Python environment for agent
if [ ! -d "$TIMMY_DIR/venv" ]; then
python3 -m venv "$TIMMY_DIR/venv"
# `| tail -3` trims pip's chatter but its exit status (tail's) masks a pip
# failure under `set -e`; check PIPESTATUS so a broken venv is reported now
# instead of surfacing as an agent crash later.
"$TIMMY_DIR/venv/bin/pip" install -q requests pyyaml 2>&1 | tail -3
if [ "${PIPESTATUS[0]}" -ne 0 ]; then
warn "pip install failed - agent dependencies may be missing"
fi
fi
log "Step 7/8: Setting up Timmy agent systemd service..."
# System unit for the agent daemon. Requires= ties it to llama-server:
# the agent is stopped if the inference service goes away. Unquoted
# delimiter, so paths expand at provisioning time. The agent reaches the
# model via TIMMY_MODEL_URL on the loopback interface.
cat > /etc/systemd/system/timmy-agent.service << EOF
[Unit]
Description=Timmy Agent Harness
After=llama-server.service
Requires=llama-server.service
[Service]
Type=simple
User=$TIMMY_USER
WorkingDirectory=$TIMMY_DIR
ExecStart=$TIMMY_DIR/venv/bin/python $TIMMY_DIR/timmy-home/agent/agent_daemon.py
Restart=always
RestartSec=30
Environment="HOME=$TIMMY_HOME"
Environment="TIMMY_MODEL_URL=http://127.0.0.1:8081"
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
# Enabled for boot; NOTE(review): the agent is never started in this run,
# only llama-server is - confirm that is intentional.
systemctl enable timmy-agent.service
log "Step 8/8: Configuring firewall..."
# Start from a clean slate: drop all inbound, permit all outbound.
ufw --force reset 2>/dev/null || true
ufw default deny incoming
ufw default allow outgoing
# Inbound allowances: SSH (22/tcp), Syncthing sync protocol
# (22000/tcp+udp), Syncthing local discovery (21027/udp).
for rule in 22/tcp 22000/tcp 22000/udp 21027/udp; do
ufw allow "$rule"
done
# Note: llama-server on 8081 is NOT exposed (localhost only)
ufw --force enable
log "Starting services..."
systemctl start llama-server.service || warn "llama-server failed to start (may need model)"
# Poll the health endpoint for up to 60s (30 tries x 2s). The original
# loop fell through silently on timeout, so provisioning looked successful
# even when the server never came up; track readiness and warn explicitly.
log "Waiting for llama-server to be ready..."
healthy=0
for _ in {1..30}; do
if curl -s http://127.0.0.1:8081/health >/dev/null 2>&1; then
healthy=1
log "llama-server is healthy!"
break
fi
sleep 2
done
if [ "$healthy" -ne 1 ]; then
warn "llama-server not healthy after 60s - check: journalctl -u llama-server"
fi
# Create status script
# Quoted 'EOF' delimiter: nothing expands here; the script is written
# verbatim and $HOME etc. are evaluated only when status.sh runs later.
cat > "$TIMMY_DIR/scripts/status.sh" << 'EOF'
#!/bin/bash
echo "=== Timmy VPS Status ==="
echo ""
echo "Services:"
systemctl is-active llama-server.service && echo " llama-server: RUNNING" || echo " llama-server: STOPPED"
systemctl is-active timmy-agent.service && echo " timmy-agent: RUNNING" || echo " timmy-agent: STOPPED"
echo ""
echo "Inference Health:"
curl -s http://127.0.0.1:8081/health | jq . 2>/dev/null || echo " Not responding"
echo ""
echo "Disk Usage:"
df -h $HOME | tail -1
echo ""
echo "Memory:"
free -h | grep Mem
EOF
chmod +x "$TIMMY_DIR/scripts/status.sh"
# Create README
# Unquoted delimiter: $TIMMY_DIR expands now, so the README records the
# concrete paths used on this machine.
cat > "$TIMMY_DIR/README.txt" << EOF
Timmy Sovereign Wizard VPS
==========================
Quick Commands:
$TIMMY_DIR/scripts/status.sh - Check system status
systemctl status llama-server - Check inference service
systemctl status timmy-agent - Check agent service
Directories:
$TIMMY_DIR/models/ - AI model weights
$TIMMY_DIR/soul/ - SOUL.md and conscience files
$TIMMY_DIR/logs/ - Agent logs
$TIMMY_DIR/shared/ - Syncthing shared folder
Inference Endpoint:
http://127.0.0.1:8081 (localhost only)
Provisioning complete!
EOF
# Final banner: print a status snapshot and the remaining manual steps.
echo ""
echo "========================================"
log "Provisioning Complete!"
echo "========================================"
echo ""
echo "Status:"
# Run the status script generated above (services, health, disk, memory).
"$TIMMY_DIR/scripts/status.sh"
echo ""
echo "Next steps:"
echo " 1. Run syncthing setup: curl -sL $REPO_URL/raw/branch/main/scripts/setup-syncthing.sh | bash"
echo " 2. Check inference: curl http://127.0.0.1:8081/health"
echo " 3. Review logs: journalctl -u llama-server -f"
echo ""