- Add EPIC.md with resurrection plan
- Create Hermes profile with Bezalel persona
- Add llama-server.sh for Gemma 4 inference
- Update start_bezalel.sh with stack checks
- Add README with quick start guide

Backend: llama.cpp
Model: Gemma 4 26B MoE (Apache 2.0)
Frontend: Hermes profile

No OpenAI. No cloud. Pure sovereign stack.
42 lines
1.0 KiB
Bash
Executable File
42 lines
1.0 KiB
Bash
Executable File
#!/bin/bash
# Bezalel Startup Script — Gemma 4 + llama.cpp
# Master Craftsman Resurrection
#
# Verifies the llama.cpp inference server is reachable (offering to launch
# it if not), then replaces this shell with the Hermes gateway process.

set -euo pipefail

# Isolated HOME so Bezalel's state never leaks into the host user's dotfiles;
# the agent venv is prepended so its `hermes` binary wins PATH lookup.
export HERMES_HOME=/root/wizards/bezalel/home
export HOME=/root/wizards/bezalel
export PATH=/root/wizards/bezalel/hermes-agent/.venv/bin:$PATH

# ANSI colors for status output
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'

# llama.cpp server health endpoint (default port)
readonly HEALTH_URL=http://localhost:8080/health

echo -e "${GREEN}=== BEZALEL RESURRECTION ===${NC}"
echo "Backend: llama.cpp"
echo "Model: Gemma 4 26B MoE"
echo "Frontend: Hermes"
echo ""

# Check llama.cpp server is running; offer to launch it if not.
if ! curl -s "$HEALTH_URL" > /dev/null 2>&1; then
  echo -e "${YELLOW}WARNING: llama.cpp server not detected${NC}"
  echo "Start server first with:"
  echo "  ./llama-server.sh"
  echo ""
  read -p "Start server now? (y/n) " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    ./llama-server.sh &
    # Poll the health endpoint rather than a blind sleep; a large model can
    # take a while to load. Give up after ~30s so we never hang forever.
    for (( i = 0; i < 30; i++ )); do
      if curl -s "$HEALTH_URL" > /dev/null 2>&1; then
        break
      fi
      sleep 1
    done
    if ! curl -s "$HEALTH_URL" > /dev/null 2>&1; then
      echo "ERROR: llama.cpp server did not become healthy in time" >&2
      exit 1
    fi
  else
    echo "Exiting. Start llama-server first."
    exit 1
  fi
fi

echo -e "${GREEN}Starting Bezalel Hermes Gateway...${NC}"
cd /root/wizards/bezalel/hermes-agent
# exec: replace this shell so signals reach the gateway directly.
exec hermes gateway run --replace