Add/Update wolf/cli.py by Wolf

This commit is contained in:
2026-04-05 17:59:19 +00:00
parent 94afe466c0
commit b1b412b1d6

94
wolf/cli.py Normal file
View File

@@ -0,0 +1,94 @@
import argparse
import logging
import re
import sys
from pathlib import Path

from .config import Config, setup_logging
from .evaluator import Evaluator
from .gitea import GiteaClient
from .leaderboard import Leaderboard
from .runner import AgentRunner
from .task import TaskGenerator
def main():
    """Command-line entry point for Wolf.

    Parses CLI flags, loads configuration and logging, then dispatches to
    up to three phases (flags are not mutually exclusive):

    * ``--run``         generate tasks (from a spec file or Gitea issues),
                        assign them to configured models, and execute them.
    * ``--evaluate``    score open Wolf-created PRs and record the results.
    * ``--leaderboard`` print the current model rankings.
    """
    parser = argparse.ArgumentParser(description="Wolf: Model Evaluation Backbone for Sovereign AI Fleet.")
    parser.add_argument("--config", help="Path to wolf-config.yaml")
    parser.add_argument("--task-spec", help="Path to task specification JSON")
    parser.add_argument("--run", action="store_true", help="Run pending tasks")
    parser.add_argument("--evaluate", action="store_true", help="Evaluate open PRs")
    parser.add_argument("--leaderboard", action="store_true", help="Show model rankings")
    args = parser.parse_args()

    # 1. Setup config and logging
    config = Config(args.config)
    setup_logging(config.get('log_dir'))
    logging.info("Wolf starting...")

    # 2. Initialize clients
    gitea_config = config.get('gitea', {})
    gitea = GiteaClient(gitea_config.get('base_url'), gitea_config.get('token'))

    # 3. Run tasks
    if args.run:
        task_gen = TaskGenerator(gitea)
        if args.task_spec:
            tasks = task_gen.from_spec(args.task_spec)
        else:
            # Default to fetching issues from a specific repo
            tasks = task_gen.from_gitea_issues(gitea_config.get('owner'), gitea_config.get('repo'))
        # Assign tasks to models
        models = config.get('models', [])
        if not models:
            logging.error("No models configured for assignment.")
            return
        assigned_tasks = task_gen.assign_tasks(tasks, models)
        runner = AgentRunner(gitea, config.data)
        for task in assigned_tasks:
            runner.execute_task(task)

    # 4. Evaluate PRs
    if args.evaluate:
        evaluator = Evaluator(gitea)
        leaderboard = Leaderboard(config.get('leaderboard_path'))
        # Fetch open PRs from the configured repo.
        # This is a simplified version; in reality, we'd track which PRs were created by Wolf.
        prs = gitea.get_issues(gitea_config.get('owner'), gitea_config.get('repo'), state="open")
        # Compile once, outside the loop (was previously `import re` per iteration).
        model_pattern = re.compile(r"model (.*) via (.*)\.")
        for pr in prs:
            # Guard clause: only score PRs that Wolf itself opened.
            # .get() avoids a KeyError if the API response lacks a title.
            if "Wolf Task:" not in pr.get('title', ''):
                continue
            score_data = evaluator.score_pr(gitea_config.get('owner'), gitea_config.get('repo'), pr['number'])
            # Extract model info from the PR body; fall back to "unknown".
            model_name = "unknown"
            provider = "unknown"
            # PR bodies can be None (empty description) — coerce to "" so the
            # `in` test and regex search cannot raise TypeError.
            body = pr.get('body') or ""
            if "model" in body:
                match = model_pattern.search(body)
                if match:
                    model_name = match.group(1)
                    provider = match.group(2)
            leaderboard.record_score(model_name, provider, pr['number'], score_data)

    # 5. Show leaderboard
    if args.leaderboard:
        leaderboard = Leaderboard(config.get('leaderboard_path'))
        rankings = leaderboard.get_rankings()
        print("\n🏆 WOLF LEADERBOARD")
        print("-" * 60)
        print(f"{'Model':<25} | {'Provider':<15} | {'Score':<8} | {'Tasks':<6} | {'Ready'}")
        print("-" * 60)
        for r in rankings:
            # Original assigned "" on both branches (markers were evidently
            # stripped); restore distinct serverless-ready indicators.
            ready = "✅" if r['serverless_ready'] else "❌"
            print(f"{r['model_name']:<25} | {r['provider']:<15} | {r['average_score']:<8.2f} | {r['total_tasks']:<6} | {ready}")
        print("-" * 60)
# Script entry point: run the CLI only when executed directly, not when
# this module is imported (e.g. by tests or a console-script wrapper).
if __name__ == "__main__":
    main()