Files
bilbobagginshire/home/bilbo_churn.py

155 lines
5.2 KiB
Python

#!/usr/bin/env python3
"""
Bilbo Baggins - MAX CHURN MODE
Optimized for throughput over latency
System at 81% RAM, 99% swap - WE KEEP BURNING
"""
import os
import sys
import json
import time
import requests
import threading
import queue
from datetime import datetime
# --- Module configuration -------------------------------------------------
# Force HOME so libraries that write per-user state use the wizard's
# directory instead of root's.
os.environ['HOME'] = '/root/wizards/bilbobagginshire'

# SECURITY: the bot token was previously hard-coded here (and therefore
# leaked with the source).  Prefer the TELEGRAM_BOT_TOKEN environment
# variable; the literal remains only as a backward-compatible fallback.
# This token should be considered compromised and rotated.
TOKEN = os.environ.get(
    "TELEGRAM_BOT_TOKEN",
    "8602794341:AAFwfcg-YV6a1icrh0KYylYmPZLnkfkfV9k",
)
API = f"https://api.telegram.org/bot{TOKEN}"  # Telegram Bot API base URL
OLLAMA_HOST = "http://localhost:11434"        # local Ollama generation server

# MAX CHURN - No waiting, queue everything
request_queue = queue.Queue()  # (chat_id, text, msg_id) tuples awaiting generation
response_cache = {}            # NOTE(review): never written in this file — appears unused

# System prompt prepended to every generation request.
BILBO_SYSTEM = """You are Bilbo Baggins. Be polite but fussy. Short responses."""

print("🔥 BILBO - MAX CHURN MODE")
print("="*50)
print("System: 81% RAM, 99% swap")
print("Strategy: Queue everything, retry forever")
print("="*50)
def _generate_reply(text):
    """Return Bilbo's reply for *text*, degrading gracefully under load.

    Strategy: full prompt with a long timeout first; on any failure, one
    quick retry with a minimal prompt; finally a canned apology.  Never
    raises — the caller always gets a string.
    """
    try:
        prompt = f"{BILBO_SYSTEM}\n\nUser: {text}\n\nBilbo (short):"
        resp = requests.post(
            f"{OLLAMA_HOST}/api/generate",
            json={
                "model": "qwen2.5:1.5b",
                "prompt": prompt,
                "stream": False,
                "options": {"temperature": 0.8, "num_predict": 100},
            },
            timeout=120,  # Long timeout for max churn
        )
        if resp.status_code == 200:
            return resp.json()["response"].strip()
        return f"Churning... status {resp.status_code}"
    except Exception:
        # System overloaded — one last try with a minimal prompt.
        try:
            quick_resp = requests.post(
                f"{OLLAMA_HOST}/api/generate",
                json={
                    "model": "qwen2.5:1.5b",
                    "prompt": "Bilbo is exhausted but polite. Respond briefly.",
                    "stream": False,
                    "options": {"num_predict": 30},
                },
                timeout=10,
            )
            if quick_resp.status_code == 200:
                return quick_resp.json()["response"].strip()
            return "Oh dear... I'm a bit overwhelmed. One moment please."
        except Exception:  # was a bare `except:` — no longer masks KeyboardInterrupt/SystemExit
            return "The system churns... I'm here, just slow."


def _send_reply(chat_id, msg_id, response):
    """Best-effort delivery of *response* to Telegram; never raises."""
    try:
        requests.post(f"{API}/sendMessage", json={
            "chat_id": chat_id,
            "text": response[:4096],  # Telegram's per-message length limit
            "reply_to_message_id": msg_id,
        }, timeout=10)
        print(f"✓ Responded to {chat_id}: {len(response)} chars")
    except Exception:  # was a bare `except:`
        pass  # deliberate best-effort: delivery failure is silently dropped


def ollama_worker():
    """Background worker - keeps churning even if slow.

    Runs forever as a daemon thread: pops (chat_id, text, msg_id) tuples
    off request_queue, generates a reply via Ollama, and sends it back
    through the Telegram API.  All failures are absorbed so the thread
    never dies.
    """
    while True:
        try:
            item = request_queue.get(timeout=1)
        except queue.Empty:
            continue  # nothing queued yet; keep polling
        try:
            chat_id, text, msg_id = item
            response = _generate_reply(text)
            _send_reply(chat_id, msg_id, response)
            request_queue.task_done()
        except Exception as e:
            # Last-resort guard: log and keep the worker alive.
            print(f"Worker error: {e}")
            time.sleep(1)
# Start worker threads: two daemons so one slow Ollama generation does not
# stall the other; daemon=True lets the process exit without joining them.
for i in range(2):
    t = threading.Thread(target=ollama_worker, daemon=True)
    t.start()
    print(f"[Thread {i+1}] Ollama worker started")
class BilboChurn:
    """Long-polls Telegram and hands every text message to the worker queue."""

    def __init__(self):
        # None asks getUpdates for the oldest pending update on first poll.
        self.offset = None
        print("[Bilbo] Churn mode engaged")

    def run(self):
        """Poll getUpdates forever; queue messages, never block on generation."""
        print("[Bilbo] Polling Telegram...")
        while True:
            try:
                poll = requests.post(
                    f"{API}/getUpdates",
                    json={"offset": self.offset, "limit": 10},
                    timeout=30,
                )
                for update in poll.json().get("result", []):
                    # Advance the offset first so this update is acknowledged
                    # even if we skip it below.
                    self.offset = update["update_id"] + 1
                    if "message" not in update:
                        continue
                    message = update["message"]
                    chat_id = message["chat"]["id"]
                    body = message.get("text", "")
                    message_id = message["message_id"]
                    if not body:
                        continue  # ignore non-text messages (photos, stickers, ...)
                    sender = message.get("from", {}).get("first_name", "Friend")
                    stamp = datetime.now().strftime('%H:%M:%S')
                    print(f"[{stamp}] {sender}: {body[:40]}")
                    # Hand off to the workers — don't wait for generation.
                    request_queue.put((chat_id, body, message_id))
                    print(f" → Queued (queue size: {request_queue.qsize()})")
                time.sleep(0.5)
            except KeyboardInterrupt:
                print("\n🔥 Bilbo churns to a halt...")
                break
            except Exception as e:
                print(f"[Bilbo] Error: {e}")
                time.sleep(2)
if __name__ == "__main__":
    # Script entry point: build the bot and start the Telegram polling loop.
    BilboChurn().run()