Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
5ab2906667 WIP: Gemini Code progress on #558
Automated salvage commit — agent session ended (exit 1).
Work in progress, may need continuation.
2026-04-07 09:59:24 -04:00
3 changed files with 156 additions and 55 deletions

3
.gitignore vendored
View File

@@ -56,6 +56,9 @@ __pycache__/
venv/
*/venv/
# Resource Tracking System
metrics/resource_state.json
# Editor temps
\#*\#
*~

125
scripts/resource_tracker.py Normal file
View File

@@ -0,0 +1,125 @@
"""
Resource Tracking System for FLEET-005.
This script tracks Capacity, Uptime, and Innovation, enforcing a tension model.
"""
import json
import os
from datetime import datetime
# --- Configuration ---
METRICS_DIR = "metrics"  # directory where persisted metrics state lives
RESOURCE_STATE_FILE = os.path.join(METRICS_DIR, "resource_state.json")  # on-disk state path
CAPACITY_THRESHOLD_INNOVATION = 70.0 # Innovation generates when capacity < 70%
# --- Helper Functions ---
def load_resource_state():
    """Return the persisted resource state, or a fresh baseline.

    When no state file exists yet, a default dict with full capacity,
    full uptime and zero innovation is returned instead of raising.
    """
    baseline = {
        "capacity": 100.0,
        "uptime": 100.0,
        "innovation": 0.0,
        "last_run": None,
    }
    if not os.path.exists(RESOURCE_STATE_FILE):
        return baseline
    with open(RESOURCE_STATE_FILE, "r") as state_file:
        return json.load(state_file)
def save_resource_state(state):
    """Persist *state* as pretty-printed JSON, creating the metrics dir first."""
    os.makedirs(METRICS_DIR, exist_ok=True)
    serialized = json.dumps(state, indent=4)
    with open(RESOURCE_STATE_FILE, "w") as out:
        out.write(serialized)
def calculate_fibonacci_milestone(current_uptime):
    """Return the next uptime milestone strictly above *current_uptime*.

    Returns None when every milestone has already been met or exceeded.

    NOTE(review): despite the name, the ladder below is hand-picked, not a
    Fibonacci sequence — confirm intended progression with the author.
    """
    # Candidate milestones, lowest first; can be expanded later.
    ladder = (95.0, 95.5, 96.0, 97.0, 98.0, 99.0, 99.9)
    return next((m for m in ladder if current_uptime < m), None)
# --- Main Tracking Logic ---
def track_resources(fleet_improvements_cost, healthy_utilization_gain, service_uptime_percent):
    """Apply one tracking cycle to the persisted resource state.

    Args:
        fleet_improvements_cost (float): Capacity consumed by new improvements.
        healthy_utilization_gain (float): Capacity generated by well-running processes.
        service_uptime_percent (float): Current uptime of services (0-100%).

    Returns:
        dict: The updated (and persisted) resource state.
    """
    state = load_resource_state()

    # Capacity moves by the net of gain minus cost, clamped to [0, 100].
    net_capacity = state["capacity"] + healthy_utilization_gain - fleet_improvements_cost
    state["capacity"] = min(100.0, max(0.0, net_capacity))

    # Uptime is taken directly from the monitoring input.
    state["uptime"] = service_uptime_percent

    # Tension model: innovation accrues only while capacity sits below the
    # threshold, scaling linearly with the shortfall (placeholder logic).
    shortfall = CAPACITY_THRESHOLD_INNOVATION - state["capacity"]
    if shortfall > 0:
        state["innovation"] += shortfall * 0.1

    state["last_run"] = datetime.now().isoformat()
    save_resource_state(state)
    return state
def generate_dashboard_report(state):
    """Generate a simple text-based dashboard report.

    Args:
        state (dict): Resource state carrying "capacity", "uptime" and
            "innovation" floats, and optionally "last_run".

    Returns:
        str: Multi-line, human-readable dashboard text.
    """
    report = f"""
--- Resource Tracking System Dashboard ---
Last Run: {state.get("last_run", "N/A")}
Capacity: {state["capacity"]:.2f}%
Uptime: {state["uptime"]:.2f}%
Innovation: {state["innovation"]:.2f}
"""
    fib_milestone = calculate_fibonacci_milestone(state["uptime"])
    if fib_milestone:
        # FIX: the salvaged copy had a raw line break inside each string
        # literal below (a SyntaxError); restored the intended \n escapes.
        report += f"Next Uptime Milestone: {fib_milestone:.2f}%\n"
    else:
        report += "All Uptime Milestones Achieved!\n"
    # Innovation gates Phase 3 work (see main()).
    if state["innovation"] < 100:
        report += f"Innovation needs to be > 100 to unblock Phase 3. Current: {state['innovation']:.2f}\n"
    else:
        report += "Phase 3 is unblocked (Innovation > 100)!\n"
    report += "------------------------------------------"
    return report
def main():
    """Run one tracking cycle with placeholder inputs and print the dashboard."""
    # Placeholder values for daily inputs; in a real system these would
    # come from other monitoring systems or configurations.
    daily_fleet_improvements_cost = 5.0   # Example: 5% capacity consumed daily
    daily_healthy_utilization_gain = 3.0  # Example: 3% capacity generated daily
    current_service_uptime = 96.5         # Example: 96.5% current uptime

    print("Running resource tracker...")
    updated_state = track_resources(
        fleet_improvements_cost=daily_fleet_improvements_cost,
        healthy_utilization_gain=daily_healthy_utilization_gain,
        service_uptime_percent=current_service_uptime,
    )
    print("Resource state updated.")
    print(generate_dashboard_report(updated_state))

    # Check for blocking Phase 3.
    # FIX: the salvaged copy had a raw line break inside each print string
    # below (a SyntaxError); restored the intended \n escapes.
    if updated_state["innovation"] < 100:
        print("\nWARNING: Phase 3 work is currently BLOCKED due to insufficient Innovation.")
    else:
        print("\nPhase 3 work is UNBLOCKED!")


if __name__ == "__main__":
    main()

View File

@@ -39,14 +39,8 @@ BACKEND_GROQ = "groq"
BACKEND_GROK = "grok"
BACKEND_KIMI = "kimi-coding"
BACKEND_OPENROUTER = "openrouter"
BACKEND_OLLAMA_HERMES3_8B = "ollama-hermes3-8b"
BACKEND_OLLAMA_LLAMA3_1_LATEST = "ollama-llama3-1-latest"
BACKEND_OLLAMA_QWEN2_5_14B = "ollama-qwen2-5-14b"
ALL_BACKENDS = [
BACKEND_OLLAMA_HERMES3_8B, # Prioritize local Ollama models
BACKEND_OLLAMA_LLAMA3_1_LATEST,
BACKEND_OLLAMA_QWEN2_5_14B,
BACKEND_ANTHROPIC,
BACKEND_OPENAI_CODEX,
BACKEND_GEMINI,
@@ -249,28 +243,22 @@ class TaskClassifier:
# Order matters: first is most preferred
TASK_BACKEND_MAP: Dict[TaskType, List[str]] = {
TaskType.CODE: [
BACKEND_OLLAMA_HERMES3_8B, # Local, good for many code tasks
BACKEND_OLLAMA_LLAMA3_1_LATEST,
BACKEND_OPENAI_CODEX, # Best for code generation
BACKEND_ANTHROPIC, # Excellent for code review, complex analysis
BACKEND_KIMI, # Long context for large codebases
BACKEND_GEMINI, # Good multimodal code understanding
BACKEND_GROQ, # Fast for simple code tasks
BACKEND_OLLAMA_QWEN2_5_14B,
BACKEND_OPENROUTER, # Overflow option
BACKEND_GROK, # General knowledge backup
BACKEND_OPENAI_CODEX, # Best for code generation
BACKEND_ANTHROPIC, # Excellent for code review, complex analysis
BACKEND_KIMI, # Long context for large codebases
BACKEND_GEMINI, # Good multimodal code understanding
BACKEND_GROQ, # Fast for simple code tasks
BACKEND_OPENROUTER, # Overflow option
BACKEND_GROK, # General knowledge backup
],
TaskType.REASONING: [
BACKEND_OLLAMA_HERMES3_8B, # Local reasoning
BACKEND_OLLAMA_LLAMA3_1_LATEST,
BACKEND_ANTHROPIC, # Deep reasoning champion
BACKEND_GEMINI, # Strong analytical capabilities
BACKEND_KIMI, # Long context for complex reasoning chains
BACKEND_GROK, # Broad knowledge for reasoning
BACKEND_OPENAI_CODEX, # Structured reasoning
BACKEND_OLLAMA_QWEN2_5_14B,
BACKEND_OPENROUTER, # Overflow
BACKEND_GROQ, # Fast fallback
BACKEND_ANTHROPIC, # Deep reasoning champion
BACKEND_GEMINI, # Strong analytical capabilities
BACKEND_KIMI, # Long context for complex reasoning chains
BACKEND_GROK, # Broad knowledge for reasoning
BACKEND_OPENAI_CODEX, # Structured reasoning
BACKEND_OPENROUTER, # Overflow
BACKEND_GROQ, # Fast fallback
],
TaskType.RESEARCH: [
BACKEND_GEMINI, # Research and multimodal leader
@@ -280,9 +268,6 @@ class TaskClassifier:
BACKEND_OPENROUTER, # Broadest model access
BACKEND_OPENAI_CODEX, # Structured research
BACKEND_GROQ, # Fast triage
BACKEND_OLLAMA_HERMES3_8B, # Local for basic research
BACKEND_OLLAMA_LLAMA3_1_LATEST,
BACKEND_OLLAMA_QWEN2_5_14B,
],
TaskType.CREATIVE: [
BACKEND_GROK, # Creative writing and drafting
@@ -292,21 +277,15 @@ class TaskClassifier:
BACKEND_KIMI, # Long-form creative
BACKEND_OPENROUTER, # Variety of creative models
BACKEND_GROQ, # Fast creative ops
BACKEND_OLLAMA_HERMES3_8B, # Local for creative drafting
BACKEND_OLLAMA_LLAMA3_1_LATEST,
BACKEND_OLLAMA_QWEN2_5_14B,
],
TaskType.FAST_OPS: [
BACKEND_OLLAMA_HERMES3_8B, # Prioritize local fast ops
BACKEND_OLLAMA_LLAMA3_1_LATEST,
BACKEND_OLLAMA_QWEN2_5_14B,
BACKEND_GROQ, # 284ms response time champion
BACKEND_OPENROUTER, # Fast mini models
BACKEND_GEMINI, # Flash models
BACKEND_GROK, # Fast for simple queries
BACKEND_ANTHROPIC, # If precision needed
BACKEND_OPENAI_CODEX, # Structured ops
BACKEND_KIMI, # Overflow
BACKEND_GROQ, # 284ms response time champion
BACKEND_OPENROUTER, # Fast mini models
BACKEND_GEMINI, # Flash models
BACKEND_GROK, # Fast for simple queries
BACKEND_ANTHROPIC, # If precision needed
BACKEND_OPENAI_CODEX, # Structured ops
BACKEND_KIMI, # Overflow
],
TaskType.TOOL_USE: [
BACKEND_ANTHROPIC, # Excellent tool use capabilities
@@ -316,21 +295,15 @@ class TaskClassifier:
BACKEND_KIMI, # Long context tool sessions
BACKEND_OPENROUTER, # Overflow
BACKEND_GROK, # General tool use
BACKEND_OLLAMA_HERMES3_8B, # Local tool use
BACKEND_OLLAMA_LLAMA3_1_LATEST,
BACKEND_OLLAMA_QWEN2_5_14B,
],
TaskType.UNKNOWN: [
BACKEND_OLLAMA_HERMES3_8B, # Default to local first
BACKEND_OLLAMA_LLAMA3_1_LATEST,
BACKEND_ANTHROPIC, # Default to strongest general model
BACKEND_GEMINI, # Good all-rounder
BACKEND_OPENAI_CODEX, # Structured approach
BACKEND_KIMI, # Long context safety
BACKEND_GROK, # Broad knowledge
BACKEND_GROQ, # Fast fallback
BACKEND_OPENROUTER, # Ultimate overflow
BACKEND_OLLAMA_QWEN2_5_14B,
BACKEND_ANTHROPIC, # Default to strongest general model
BACKEND_GEMINI, # Good all-rounder
BACKEND_OPENAI_CODEX, # Structured approach
BACKEND_KIMI, # Long context safety
BACKEND_GROK, # Broad knowledge
BACKEND_GROQ, # Fast fallback
BACKEND_OPENROUTER, # Ultimate overflow
],
}