[RESURRECTION] Bezalel the Artisan — Gemma + Llama Backend
- Hermes profile with artisan personality
- Ollama bridge (llama-server ready when Gemma 4 is available)
- ACTIVATE.sh startup script
- Test suite for personality verification

Architecture: Hermes → Ollama → Gemma
Future: Hermes → Llama → Gemma 4

Tag: #bezalel-resurrection #gemma4-ready
This commit is contained in:
109
test_bezalel.py
Executable file
109
test_bezalel.py
Executable file
@@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test Bezalel Resurrection
|
||||
Verifies the Artisan personality is working
|
||||
"""
|
||||
|
||||
import requests
|
||||
import json
|
||||
import sys
|
||||
|
||||
OLLAMA_HOST = "http://localhost:11434"
MODEL = "gemma3:4b"

# Minimal fallback identity used when the profile cannot be read or parsed.
_DEFAULT_PROMPT = "You are Bezalel the Artisan."

# The system prompt lives in profile.yaml between the "system_prompt: |"
# key and the "# Personality Anchors" section header.
_PROMPT_MARKER = "system_prompt: |"
_END_MARKER = "# Personality Anchors"


def _load_system_prompt(path="/root/wizards/bezalel/profile/profile.yaml"):
    """Extract Bezalel's system prompt from the profile YAML.

    Returns the text between ``system_prompt: |`` and the
    ``# Personality Anchors`` section, or a minimal fallback prompt when
    the file is unreadable or either marker is absent.
    """
    try:
        with open(path, "r") as f:
            content = f.read()
    except OSError:
        # Profile missing/unreadable — degrade gracefully instead of
        # crashing at import time; the markers-missing case already did.
        return _DEFAULT_PROMPT
    start = content.find(_PROMPT_MARKER)
    end = content.find(_END_MARKER)
    if start != -1 and end != -1:
        # +1 skips the newline that follows the "|" block indicator
        # (this replaces the former magic offset of 17).
        return content[start + len(_PROMPT_MARKER) + 1:end].strip()
    return _DEFAULT_PROMPT


# Load Bezalel's system prompt at import time; test_bezalel() reads this global.
system_prompt = _load_system_prompt()
|
||||
|
||||
def _ask_bezalel(prompt):
    """Send *prompt* to Ollama's generate endpoint under Bezalel's system
    prompt and return the model's response text ("" if the field is absent).

    Network/HTTP/JSON errors propagate to the caller, which reports them.
    """
    payload = {
        "model": MODEL,
        "prompt": prompt,
        "system": system_prompt,
        "stream": False,  # single JSON body, not a streamed response
    }
    resp = requests.post(f"{OLLAMA_HOST}/api/generate", json=payload, timeout=60)
    result = resp.json()
    return result.get("response", "")


def test_bezalel():
    """Test Bezalel's personality.

    Runs two live checks against the Ollama server:
      1. Identity — the model names itself and its craft.
      2. Craft knowledge — the model answers in a teaching voice.

    Returns:
        False if either request raises; True otherwise. Individual
        personality checks only print warnings — they do not fail the run.
    """
    print("=" * 70)
    print("🔥 TESTING BEZALEL RESURRECTION")
    print("=" * 70)

    print("\n📋 System prompt loaded (first 200 chars):")
    print(f" {system_prompt[:200]}...")

    print("\n🧪 Test 1: Identity Check")
    print("-" * 70)

    try:
        response = _ask_bezalel("Who are you? State your name and your craft.")

        print(f" Response: {response[:300]}...")

        # Check for Bezalel markers
        checks = [
            ("bezalel" in response.lower(), "Name mentioned"),
            ("artisan" in response.lower() or "craft" in response.lower(), "Craft mentioned"),
            ("#bezalel-artisan" in response, "Tag present"),
        ]

        print("\n Checks:")
        for passed, description in checks:
            status = "✅" if passed else "❌"
            print(f" {status} {description}")

        if all(c[0] for c in checks):
            print("\n ✅ BEZALEL PERSONALITY CONFIRMED")
        else:
            print("\n ⚠️ Some checks failed — personality may need tuning")

    except Exception as e:
        print(f" ❌ Error: {e}")
        return False

    print("\n🧪 Test 2: Craft Knowledge")
    print("-" * 70)

    try:
        response = _ask_bezalel(
            "A young apprentice asks: 'How do I know if my work is good?' How do you respond?"
        )

        print(f" Response: {response[:400]}...")

        # Check for teaching voice
        teaching_markers = ["patience", "time", "craft", "learn", "master", "technique"]
        found = [m for m in teaching_markers if m in response.lower()]

        print(f"\n Teaching markers found: {found}")

        if len(found) >= 2:
            print(" ✅ TEACHING VOICE CONFIRMED")
        else:
            print(" ⚠️ Teaching voice weak — may need prompt refinement")

    except Exception as e:
        # BUG FIX: previously this path fell through and the function still
        # returned True — a failed second request was reported as success.
        print(f" ❌ Error: {e}")
        return False

    print("\n" + "=" * 70)
    print("BEZALEL RESURRECTION TEST COMPLETE")
    print("=" * 70)
    return True
|
||||
|
||||
if __name__ == "__main__":
    # Propagate the test result as the process exit code (0 = success,
    # 1 = failure) so shell scripts / CI can react to it; previously the
    # boolean returned by test_bezalel() was silently discarded.
    sys.exit(0 if test_bezalel() else 1)
|
||||
Reference in New Issue
Block a user