Compare commits
6 Commits
fix/issue-
...
fix/680-py
| Author | SHA1 | Date |
|---|---|---|
| | 4910b74d62 | |
| | d120526244 | |
| | 8596ff761b | |
| | 7553fd4f3e | |
| | 71082fe06f | |
| | 6d678e938e | |
@@ -1,3 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Full Nostr agent-to-agent communication demo - FINAL WORKING
|
||||
"""
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Soul Eval Gate — The Conscience of the Training Pipeline
|
||||
|
||||
|
||||
@@ -1,53 +1,6 @@
|
||||
"""Sovereign orchestration — Huey replaces 3,843 lines of homebrew."""

import json
import os
from datetime import datetime, timezone
from pathlib import Path

# Merged the two partially-duplicated `from huey import ...` lines
# (SqliteHuey was imported twice) into a single grouped import.
from huey import SqliteHuey, crontab, signals

# Single SQLite-backed task queue; the DB lives under the user's ~/.hermes dir.
huey = SqliteHuey(filename=str(Path.home() / ".hermes" / "orchestration.db"))

# === Token Tracking ===
# Append-only JSONL log of per-task token usage (one JSON object per line).
TOKEN_LOG = Path.home() / ".hermes" / "token_usage.jsonl"
def log_token_usage(task_name, result):
    """Log token usage from a completed pipeline task.

    Reads input_tokens/output_tokens from the agent result dict.
    Auto-detects pipeline name from task context.
    Appends to JSONL for downstream analysis.

    Args:
        task_name: Huey task function name, e.g. ``"soul_eval_task"``.
        result: Return value of the task. Only dicts carrying token
            counts are logged; anything else is silently ignored.
    """
    # Tasks may return None or non-dict values — nothing to log then.
    if not isinstance(result, dict):
        return

    input_tokens = result.get("input_tokens", 0)
    output_tokens = result.get("output_tokens", 0)

    # Skip entries that carry no usage data at all.
    if input_tokens == 0 and output_tokens == 0:
        return

    # Auto-detect pipeline name from task function name
    # ("soul_eval_task" -> "soul-eval").
    pipeline = task_name.replace("_task", "").replace("_", "-")

    entry = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "pipeline": pipeline,
        "input_tokens": input_tokens,
        "output_tokens": output_tokens,
        "total_tokens": input_tokens + output_tokens,
        "task": task_name,
    }

    TOKEN_LOG.parent.mkdir(parents=True, exist_ok=True)
    # Explicit UTF-8: the JSONL content must not depend on the
    # platform's default locale encoding.
    with open(TOKEN_LOG, "a", encoding="utf-8") as f:
        f.write(json.dumps(entry) + "\n")
@huey.signal(signals.SIGNAL_COMPLETE)
def on_task_complete(signal, task, task_value=None, **kwargs):
    """Huey hook: log token usage after each pipeline task completes."""
    # Fall back to "unknown" when the task object carries no name attribute.
    log_token_usage(getattr(task, "name", "unknown"), task_value)
@@ -1,3 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
from hermes_tools import browser_navigate, browser_vision
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
from hermes_tools import browser_navigate, browser_vision
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
from hermes_tools import browser_navigate, browser_vision
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
MODEL ?= timmy:v0.1-q4
|
||||
BASELINE ?= hermes3:latest
|
||||
OLLAMA_URL ?= http://localhost:11434
|
||||
PYTHON ?= python3
|
||||
OUTPUT ?= output
|
||||
|
||||
# ── Training ──────────────────────────────────────────────────────────
|
||||
@@ -23,7 +24,7 @@ train-cloud: ## QLoRA fine-tune on cloud GPU (Axolotl)
|
||||
axolotl train axolotl.yaml
|
||||
|
||||
train-local: ## LoRA fine-tune on Apple Silicon (MLX)
|
||||
python -m mlx_lm.lora --config mlx-lora.yaml
|
||||
$(PYTHON) -m mlx_lm.lora --config mlx-lora.yaml
|
||||
|
||||
# ── Evaluation ────────────────────────────────────────────────────────
|
||||
|
||||
@@ -45,7 +46,7 @@ vibes: ## Run vibes check — hand-picked prompts, human review
|
||||
@echo "Date: $$(date '+%Y-%m-%d %H:%M')" > $(OUTPUT)/vibes-$(MODEL).md
|
||||
@echo "Model: $(MODEL)" >> $(OUTPUT)/vibes-$(MODEL).md
|
||||
@echo "" >> $(OUTPUT)/vibes-$(MODEL).md
|
||||
@python -c "\
|
||||
@$(PYTHON) -c "\
|
||||
import yaml, subprocess, sys; \
|
||||
prompts = yaml.safe_load(open('data/prompts_vibes.yaml'))['prompts']; \
|
||||
f = open('$(OUTPUT)/vibes-$(MODEL).md', 'a'); \
|
||||
@@ -69,19 +70,19 @@ vibes: ## Run vibes check — hand-picked prompts, human review
|
||||
# ── Data Pipeline ─────────────────────────────────────────────────────
|
||||
|
||||
ingest: ## Pull heartbeat trajectories into training data
|
||||
python ingest_trajectories.py \
|
||||
$(PYTHON) ingest_trajectories.py \
|
||||
--trajectories ~/.nexus/trajectories/ \
|
||||
--curated data/curated_dataset.jsonl \
|
||||
--output data/merged_training_data.jsonl
|
||||
@echo "Merged dataset ready. Convert for MLX with: make convert"
|
||||
|
||||
curated: ## Regenerate curated exemplar dataset
|
||||
python build_curated.py
|
||||
$(PYTHON) build_curated.py
|
||||
@echo "Curated dataset regenerated."
|
||||
|
||||
convert: ## Convert merged dataset to MLX format (train/valid split)
|
||||
@mkdir -p data/mlx_curated
|
||||
python -c "\
|
||||
$(PYTHON) -c "\
|
||||
import json; \
|
||||
lines = open('data/merged_training_data.jsonl').readlines(); \
|
||||
sessions = [json.loads(l) for l in lines]; \
|
||||
|
||||
Reference in New Issue
Block a user