# forked from Rockachopa/Timmy-time-dashboard
# Co-authored-by: Claude (Opus 4.6) <claude@hermes.local>
# Co-committed-by: Claude (Opus 4.6) <claude@hermes.local>
"""MCP Bridge for Qwen3 via Ollama.

Provides a lightweight bridge between Ollama's native tool-calling API
and MCP tool servers (Gitea, Filesystem, Shell). Unlike the Agno-based
agent loop, this bridge talks directly to the Ollama ``/api/chat``
endpoint, translating MCP tool schemas into Ollama tool definitions and
executing tool calls in a loop until the model produces a final response.

Designed for Qwen3 models, which have first-class tool-calling support.

Usage::

    from timmy.mcp_bridge import MCPBridge

    bridge = MCPBridge()
    async with bridge:
        result = await bridge.run("List open issues in Timmy-time-dashboard")
        print(result.content)

The bridge evaluates available options in order of preference:

1. Direct Ollama /api/chat with native tool_calls (selected — best fit)
2. qwen-agent MCP (requires separate qwen-agent install)
3. ollmcp / mcphost / ollama-mcp-bridge (external binaries)

Option 1 was selected because:

- Zero additional dependencies (uses httpx already in the project)
- Native Qwen3 tool-calling support via Ollama's OpenAI-compatible API
- Full control over the tool-call loop and error handling
- Consistent with the project's graceful-degradation pattern
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import logging
|
|
import time
|
|
from dataclasses import dataclass, field
|
|
from typing import Any
|
|
|
|
import httpx
|
|
|
|
from config import settings
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
# Maximum tool-call round-trips before aborting (safety valve).
# Each round is one /api/chat request plus execution of any requested tools.
_MAX_TOOL_ROUNDS = 10
|
|
|
|
|
|
@dataclass
class BridgeResult:
    """Result from an MCP bridge run.

    Attributes:
        content: Final text response from the model (empty on failure).
        tool_calls_made: Audit trail of executed tool calls, one dict per
            call (round, tool, arguments, truncated result).
        rounds: Number of chat round-trips performed.
        latency_ms: Wall-clock duration of the run in milliseconds.
        model: Name of the Ollama model that produced the response.
        error: Error description; empty string when the run succeeded.
    """

    content: str
    tool_calls_made: list[dict] = field(default_factory=list)
    rounds: int = 0
    latency_ms: float = 0.0
    model: str = ""
    error: str = ""
|
|
|
|
|
|
@dataclass
class MCPToolDef:
    """An MCP tool definition translated for Ollama.

    Attributes:
        name: Tool name as exposed to the model.
        description: Model-readable description of what the tool does.
        parameters: JSON-schema-style parameter definition; either a full
            ``{"type": "object", ...}`` schema or a bare property map.
        handler: Async callable invoked as ``handler(**kwargs)``; expected
            to return a string result.
    """

    name: str
    description: str
    parameters: dict[str, Any]
    handler: Any  # async callable(**kwargs) -> str
|
|
|
|
|
|
def _mcp_schema_to_ollama_tool(tool: MCPToolDef) -> dict:
|
|
"""Convert an MCPToolDef into Ollama's tool format.
|
|
|
|
Ollama uses OpenAI-compatible tool definitions::
|
|
|
|
{
|
|
"type": "function",
|
|
"function": {
|
|
"name": "...",
|
|
"description": "...",
|
|
"parameters": { "type": "object", "properties": {...}, "required": [...] }
|
|
}
|
|
}
|
|
"""
|
|
# Normalise parameters — ensure it has "type": "object" wrapper.
|
|
params = tool.parameters
|
|
if params.get("type") != "object":
|
|
params = {
|
|
"type": "object",
|
|
"properties": params,
|
|
"required": list(params.keys()),
|
|
}
|
|
|
|
return {
|
|
"type": "function",
|
|
"function": {
|
|
"name": tool.name,
|
|
"description": tool.description,
|
|
"parameters": params,
|
|
},
|
|
}
|
|
|
|
|
|
def _build_shell_tool() -> MCPToolDef | None:
    """Build the shell execution tool backed by the local ShellHand.

    Returns:
        The tool definition, or ``None`` when the shell hand cannot be
        set up (graceful degradation).
    """
    try:
        from infrastructure.hands.shell import shell_hand

        async def _handle_shell(**kwargs: Any) -> str:
            # Delegate to ShellHand, which enforces the command allow-list.
            cmd = kwargs.get("command", "")
            result = await shell_hand.run(cmd, timeout=kwargs.get("timeout"))
            if not result.success:
                return f"[error] exit={result.exit_code} {result.error or result.stderr}"
            return result.stdout or "(no output)"

        schema = {
            "type": "object",
            "properties": {
                "command": {
                    "type": "string",
                    "description": "Shell command to execute (must match allow-list)",
                },
                "timeout": {
                    "type": "integer",
                    "description": "Timeout in seconds (default 60)",
                },
            },
            "required": ["command"],
        }
        return MCPToolDef(
            name="shell_exec",
            description=(
                "Execute a shell command in a sandboxed environment. "
                "Commands are validated against an allow-list. "
                "Returns stdout, stderr, and exit code."
            ),
            parameters=schema,
            handler=_handle_shell,
        )
    except Exception as exc:
        logger.debug("Shell tool unavailable: %s", exc)
        return None
|
|
|
|
|
|
def _build_list_issues_tool(base_url: str, token: str, owner: str, repo: str) -> MCPToolDef:
    """Build the list_issues tool for a specific Gitea repo.

    Args:
        base_url: Gitea base URL.
        token: Gitea API token.
        owner: Repository owner.
        repo: Repository name.
    """

    async def _list_issues(**kwargs: Any) -> str:
        issue_state = kwargs.get("state", "open")
        max_items = kwargs.get("limit", 10)

        def _line(issue: dict) -> str:
            # One summary line per issue, with labels appended when present.
            labels = ", ".join(lb["name"] for lb in issue.get("labels", []))
            tag = f" [{labels}]" if labels else ""
            return f"#{issue['number']}: {issue['title']}{tag}"

        try:
            async with httpx.AsyncClient(timeout=15) as client:
                resp = await client.get(
                    f"{base_url}/api/v1/repos/{owner}/{repo}/issues",
                    headers={"Authorization": f"token {token}"},
                    params={"state": issue_state, "limit": max_items, "type": "issues"},
                )
                resp.raise_for_status()
                issues = resp.json()
                if not issues:
                    return f"No {issue_state} issues found."
                return "\n".join(_line(issue) for issue in issues)
        except Exception as exc:
            return f"Error listing issues: {exc}"

    schema = {
        "type": "object",
        "properties": {
            "state": {
                "type": "string",
                "description": "Filter by state: open, closed, or all (default: open)",
            },
            "limit": {
                "type": "integer",
                "description": "Maximum number of issues to return (default: 10)",
            },
        },
        "required": [],
    }
    return MCPToolDef(
        name="list_issues",
        description="List issues in the Gitea repository. Returns issue numbers and titles.",
        parameters=schema,
        handler=_list_issues,
    )
|
|
|
|
|
|
def _build_create_issue_tool(base_url: str, token: str, owner: str, repo: str) -> MCPToolDef:
    """Build the create_issue tool for a specific Gitea repo.

    Args:
        base_url: Gitea base URL.
        token: Gitea API token.
        owner: Repository owner.
        repo: Repository name.
    """

    async def _create_issue(**kwargs: Any) -> str:
        title = kwargs.get("title", "")
        if not title:
            return "Error: title is required"
        payload = {"title": title, "body": kwargs.get("body", "")}
        headers = {
            "Authorization": f"token {token}",
            "Content-Type": "application/json",
        }
        try:
            async with httpx.AsyncClient(timeout=15) as client:
                resp = await client.post(
                    f"{base_url}/api/v1/repos/{owner}/{repo}/issues",
                    headers=headers,
                    json=payload,
                )
                resp.raise_for_status()
                data = resp.json()
                return f"Created issue #{data['number']}: {data['title']}"
        except Exception as exc:
            return f"Error creating issue: {exc}"

    schema = {
        "type": "object",
        "properties": {
            "title": {
                "type": "string",
                "description": "Issue title (required)",
            },
            "body": {
                "type": "string",
                "description": "Issue body in markdown (optional)",
            },
        },
        "required": ["title"],
    }
    return MCPToolDef(
        name="create_issue",
        description="Create a new issue in the Gitea repository.",
        parameters=schema,
        handler=_create_issue,
    )
|
|
|
|
|
|
def _build_read_issue_tool(base_url: str, token: str, owner: str, repo: str) -> MCPToolDef:
    """Build the read_issue tool for a specific Gitea repo.

    Args:
        base_url: Gitea base URL.
        token: Gitea API token.
        owner: Repository owner.
        repo: Repository name.
    """

    async def _read_issue(**kwargs: Any) -> str:
        number = kwargs.get("number")
        if not number:
            return "Error: issue number is required"
        try:
            async with httpx.AsyncClient(timeout=15) as client:
                resp = await client.get(
                    f"{base_url}/api/v1/repos/{owner}/{repo}/issues/{number}",
                    headers={"Authorization": f"token {token}"},
                )
                resp.raise_for_status()
                issue = resp.json()
                labels = ", ".join(lb["name"] for lb in issue.get("labels", []))
                parts = [
                    f"#{issue['number']}: {issue['title']}",
                    f"State: {issue['state']}",
                ]
                if labels:
                    parts.append(f"Labels: {labels}")
                body = issue.get("body")
                if body:
                    parts.append(f"\n{body}")
                return "\n".join(parts)
        except Exception as exc:
            return f"Error reading issue: {exc}"

    schema = {
        "type": "object",
        "properties": {
            "number": {
                "type": "integer",
                "description": "Issue number to read",
            },
        },
        "required": ["number"],
    }
    return MCPToolDef(
        name="read_issue",
        description="Read details of a specific issue by number.",
        parameters=schema,
        handler=_read_issue,
    )
|
|
|
|
|
|
def _build_gitea_tools() -> list[MCPToolDef]:
    """Build Gitea MCP tool definitions for direct Ollama bridge use.

    These tools call the Gitea REST API directly via httpx rather than
    spawning an MCP server subprocess, keeping the bridge lightweight.

    Returns:
        Tool definitions for the configured repo, or an empty list when
        Gitea is disabled or no token is available.
    """
    if not (settings.gitea_enabled and settings.gitea_token):
        return []

    owner, repo = settings.gitea_repo.split("/", 1)
    builder_args = (settings.gitea_url, settings.gitea_token, owner, repo)
    builders = (
        _build_list_issues_tool,
        _build_create_issue_tool,
        _build_read_issue_tool,
    )
    return [build(*builder_args) for build in builders]
|
|
|
|
|
|
class MCPBridge:
    """Bridge between Ollama's tool-calling API and MCP tools.

    Manages a set of tool definitions and executes a chat loop with
    tool calling against a Qwen3 model via Ollama.

    The bridge:
    1. Registers available tools (Gitea, shell, custom)
    2. Sends prompts to Ollama with tool definitions
    3. Executes tool calls when the model requests them
    4. Returns tool results to the model for the next round
    5. Repeats until the model produces a final text response

    Must be used as an async context manager — :meth:`run` requires the
    HTTP client opened by ``__aenter__``.

    Attributes:
        model: Ollama model name (default from settings).
        ollama_url: Ollama API base URL (default from settings).
        tools: Registered tool definitions.
    """

    def __init__(
        self,
        model: str | None = None,
        ollama_url: str | None = None,
        *,
        include_gitea: bool = True,
        include_shell: bool = True,
        extra_tools: list[MCPToolDef] | None = None,
        max_rounds: int = _MAX_TOOL_ROUNDS,
    ) -> None:
        """Initialise the bridge and register tools.

        Args:
            model: Ollama model name; falls back to ``settings.ollama_model``.
            ollama_url: Ollama base URL; falls back to
                ``settings.normalized_ollama_url``.
            include_gitea: Register the Gitea issue tools when configured.
            include_shell: Register the sandboxed shell tool when available.
            extra_tools: Additional tool definitions to register.
            max_rounds: Safety cap on tool-call round-trips.
        """
        self.model = model or settings.ollama_model
        self.ollama_url = ollama_url or settings.normalized_ollama_url
        self.max_rounds = max_rounds
        self._tools: dict[str, MCPToolDef] = {}
        self._client: httpx.AsyncClient | None = None

        # Register built-in tools
        if include_gitea:
            for tool in _build_gitea_tools():
                self._tools[tool.name] = tool

        if include_shell:
            shell = _build_shell_tool()
            if shell:
                self._tools[shell.name] = shell

        # Register extra tools (last writer wins on a name collision).
        if extra_tools:
            for tool in extra_tools:
                self._tools[tool.name] = tool

        logger.info(
            "MCPBridge initialised: model=%s, tools=%s",
            self.model,
            list(self._tools.keys()),
        )

    async def __aenter__(self) -> MCPBridge:
        """Open the shared HTTP client; required before calling :meth:`run`."""
        self._client = httpx.AsyncClient(timeout=settings.mcp_bridge_timeout)
        return self

    async def __aexit__(self, *exc: Any) -> None:
        """Close and discard the shared HTTP client."""
        if self._client:
            await self._client.aclose()
            self._client = None

    @property
    def tool_names(self) -> list[str]:
        """Return names of all registered tools."""
        return list(self._tools.keys())

    def _build_ollama_tools(self) -> list[dict]:
        """Convert registered tools to Ollama tool format."""
        return [_mcp_schema_to_ollama_tool(t) for t in self._tools.values()]

    async def _chat(self, messages: list[dict], tools: list[dict]) -> dict:
        """Send a chat request to Ollama and return the parsed response.

        Uses the ``/api/chat`` endpoint with tool definitions.

        Args:
            messages: Full conversation history to send.
            tools: Ollama-format tool definitions (omitted from the
                payload when empty).

        Returns:
            The decoded JSON response body.

        Raises:
            RuntimeError: If the bridge was not entered as a context manager.
            httpx.HTTPStatusError: On a non-success HTTP response.
        """
        if not self._client:
            raise RuntimeError("MCPBridge must be used as async context manager")

        payload: dict[str, Any] = {
            "model": self.model,
            "messages": messages,
            "stream": False,  # single JSON body; streaming is not used here
        }
        if tools:
            payload["tools"] = tools

        # Set num_ctx (context window size) only when explicitly configured.
        if settings.ollama_num_ctx > 0:
            payload["options"] = {"num_ctx": settings.ollama_num_ctx}

        resp = await self._client.post(
            f"{self.ollama_url}/api/chat",
            json=payload,
        )
        resp.raise_for_status()
        return resp.json()

    async def _execute_tool_call(self, tool_call: dict) -> str:
        """Execute a single tool call and return the result string.

        Unknown tools and handler exceptions are reported as error strings
        rather than raised, so the model can see and react to the failure.
        """
        func = tool_call.get("function", {})
        name = func.get("name", "")
        arguments = func.get("arguments", {})

        tool = self._tools.get(name)
        if not tool:
            return f"Error: unknown tool '{name}'"

        try:
            result = await tool.handler(**arguments)
            return str(result)
        except Exception as exc:
            logger.warning("Tool '%s' execution failed: %s", name, exc)
            return f"Error executing {name}: {exc}"

    @staticmethod
    def _build_initial_messages(prompt: str, system_prompt: str | None) -> list[dict]:
        """Build the initial message list for a run."""
        messages: list[dict] = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt})
        return messages

    async def _process_round_tool_calls(
        self,
        messages: list[dict],
        model_tool_calls: list[dict],
        rounds: int,
        tool_calls_made: list[dict],
    ) -> None:
        """Execute all tool calls in one round, appending results to messages.

        Args:
            messages: Conversation history; mutated in place with one
                role="tool" message per executed call.
            model_tool_calls: Tool calls requested by the model this round.
            rounds: Current round number (logging/audit only).
            tool_calls_made: Audit list; mutated in place with one record
                per executed call.
        """
        for tc in model_tool_calls:
            func = tc.get("function", {})
            tool_name = func.get("name", "unknown")
            tool_args = func.get("arguments", {})
            logger.info(
                "Bridge tool call [round %d]: %s(%s)",
                rounds,
                tool_name,
                tool_args,
            )
            result = await self._execute_tool_call(tc)
            tool_calls_made.append(
                {
                    "round": rounds,
                    "tool": tool_name,
                    "arguments": tool_args,
                    "result": result[:500],  # Truncate for logging
                }
            )
            # NOTE(review): the tool result carries no tool name/id; Ollama
            # accepts bare role="tool" messages, but confirm against the
            # target Ollama version if tool attribution matters.
            messages.append({"role": "tool", "content": result})

    async def _run_tool_loop(
        self, messages: list[dict], tools: list[dict]
    ) -> tuple[str, list[dict], int, str]:
        """Run the tool-call loop until final response or max rounds reached.

        Returns:
            Tuple of (content, tool_calls_made, rounds, error).
        """
        tool_calls_made: list[dict] = []
        rounds = 0

        for round_num in range(self.max_rounds):
            rounds = round_num + 1
            response = await self._chat(messages, tools)
            msg = response.get("message", {})
            model_tool_calls = msg.get("tool_calls", [])

            # No tool calls means the model produced its final answer.
            if not model_tool_calls:
                return msg.get("content", ""), tool_calls_made, rounds, ""

            # Echo the assistant message (including its tool_calls) back into
            # the history before appending the tool results for the next turn.
            messages.append(msg)
            await self._process_round_tool_calls(
                messages, model_tool_calls, rounds, tool_calls_made
            )

        error = f"Exceeded maximum of {self.max_rounds} tool-call rounds"
        return "(max tool-call rounds reached)", tool_calls_made, rounds, error

    async def run(
        self,
        prompt: str,
        *,
        system_prompt: str | None = None,
    ) -> BridgeResult:
        """Run a prompt through the MCP bridge with tool calling.

        Sends the prompt to the Ollama model with tool definitions.
        If the model requests tool calls, executes them and feeds
        results back until the model produces a final text response.

        Never raises: connection, HTTP, and unexpected failures are
        captured in the result's ``error`` field with empty ``content``.

        Args:
            prompt: User message to send.
            system_prompt: Optional system prompt override.

        Returns:
            BridgeResult with the final response and tool call history.
        """
        start = time.time()
        messages = self._build_initial_messages(prompt, system_prompt)
        tools = self._build_ollama_tools()
        tool_calls_made: list[dict] = []
        rounds = 0
        error_msg = ""

        try:
            content, tool_calls_made, rounds, error_msg = await self._run_tool_loop(messages, tools)
        except httpx.ConnectError as exc:
            logger.warning("Ollama connection failed: %s", exc)
            error_msg = f"Ollama connection failed: {exc}"
            content = ""
        except httpx.HTTPStatusError as exc:
            logger.warning("Ollama HTTP error: %s", exc)
            error_msg = f"Ollama HTTP error: {exc.response.status_code}"
            content = ""
        except Exception as exc:
            logger.error("MCPBridge run failed: %s", exc)
            error_msg = str(exc)
            content = ""

        return BridgeResult(
            content=content,
            tool_calls_made=tool_calls_made,
            rounds=rounds,
            latency_ms=(time.time() - start) * 1000,
            model=self.model,
            error=error_msg,
        )

    def status(self) -> dict:
        """Return bridge status for the dashboard."""
        return {
            "model": self.model,
            "ollama_url": self.ollama_url,
            "tools": self.tool_names,
            "max_rounds": self.max_rounds,
            "connected": self._client is not None,
        }
|