Compare commits
1 Commit
gemini/iss
...
claude/iss
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7bc6e39a93 |
@@ -399,6 +399,74 @@ class MCPBridge:
|
|||||||
logger.warning("Tool '%s' execution failed: %s", name, exc)
|
logger.warning("Tool '%s' execution failed: %s", name, exc)
|
||||||
return f"Error executing {name}: {exc}"
|
return f"Error executing {name}: {exc}"
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _build_initial_messages(
|
||||||
|
prompt: str, system_prompt: str | None
|
||||||
|
) -> list[dict]:
|
||||||
|
"""Build the initial message list for a run."""
|
||||||
|
messages: list[dict] = []
|
||||||
|
if system_prompt:
|
||||||
|
messages.append({"role": "system", "content": system_prompt})
|
||||||
|
messages.append({"role": "user", "content": prompt})
|
||||||
|
return messages
|
||||||
|
|
||||||
|
async def _process_round_tool_calls(
    self,
    messages: list[dict],
    model_tool_calls: list[dict],
    rounds: int,
    tool_calls_made: list[dict],
) -> None:
    """Execute every tool call requested in a single round.

    Each call is logged, dispatched through ``_execute_tool_call``,
    recorded (result truncated) in ``tool_calls_made``, and its full
    result appended to ``messages`` as a ``tool``-role entry.
    """
    for call in model_tool_calls:
        fn_spec = call.get("function", {})
        name = fn_spec.get("name", "unknown")
        args = fn_spec.get("arguments", {})
        logger.info(
            "Bridge tool call [round %d]: %s(%s)", rounds, name, args
        )
        outcome = await self._execute_tool_call(call)
        record = {
            "round": rounds,
            "tool": name,
            "arguments": args,
            # Keep only the first 500 chars so the history stays compact.
            "result": outcome[:500],
        }
        tool_calls_made.append(record)
        messages.append({"role": "tool", "content": outcome})
|
||||||
|
|
||||||
|
async def _run_tool_loop(
|
||||||
|
self, messages: list[dict], tools: list[dict]
|
||||||
|
) -> tuple[str, list[dict], int, str]:
|
||||||
|
"""Run the tool-call loop until final response or max rounds reached.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (content, tool_calls_made, rounds, error).
|
||||||
|
"""
|
||||||
|
tool_calls_made: list[dict] = []
|
||||||
|
rounds = 0
|
||||||
|
|
||||||
|
for round_num in range(self.max_rounds):
|
||||||
|
rounds = round_num + 1
|
||||||
|
response = await self._chat(messages, tools)
|
||||||
|
msg = response.get("message", {})
|
||||||
|
model_tool_calls = msg.get("tool_calls", [])
|
||||||
|
|
||||||
|
if not model_tool_calls:
|
||||||
|
return msg.get("content", ""), tool_calls_made, rounds, ""
|
||||||
|
|
||||||
|
messages.append(msg)
|
||||||
|
await self._process_round_tool_calls(
|
||||||
|
messages, model_tool_calls, rounds, tool_calls_made
|
||||||
|
)
|
||||||
|
|
||||||
|
error = f"Exceeded maximum of {self.max_rounds} tool-call rounds"
|
||||||
|
return "(max tool-call rounds reached)", tool_calls_made, rounds, error
|
||||||
|
|
||||||
async def run(
|
async def run(
|
||||||
self,
|
self,
|
||||||
prompt: str,
|
prompt: str,
|
||||||
@@ -419,115 +487,37 @@ class MCPBridge:
|
|||||||
BridgeResult with the final response and tool call history.
|
BridgeResult with the final response and tool call history.
|
||||||
"""
|
"""
|
||||||
start = time.time()
|
start = time.time()
|
||||||
messages: list[dict] = []
|
messages = self._build_initial_messages(prompt, system_prompt)
|
||||||
|
|
||||||
if system_prompt:
|
|
||||||
messages.append({"role": "system", "content": system_prompt})
|
|
||||||
|
|
||||||
messages.append({"role": "user", "content": prompt})
|
|
||||||
|
|
||||||
tools = self._build_ollama_tools()
|
tools = self._build_ollama_tools()
|
||||||
tool_calls_made: list[dict] = []
|
tool_calls_made: list[dict] = []
|
||||||
rounds = 0
|
rounds = 0
|
||||||
|
error_msg = ""
|
||||||
|
|
||||||
try:
|
try:
|
||||||
for round_num in range(self.max_rounds):
|
content, tool_calls_made, rounds, error_msg = await self._run_tool_loop(
|
||||||
rounds = round_num + 1
|
messages, tools
|
||||||
response = await self._chat(messages, tools)
|
|
||||||
msg = response.get("message", {})
|
|
||||||
|
|
||||||
# Check if model made tool calls
|
|
||||||
model_tool_calls = msg.get("tool_calls", [])
|
|
||||||
if not model_tool_calls:
|
|
||||||
# Final text response — done.
|
|
||||||
content = msg.get("content", "")
|
|
||||||
latency = (time.time() - start) * 1000
|
|
||||||
return BridgeResult(
|
|
||||||
content=content,
|
|
||||||
tool_calls_made=tool_calls_made,
|
|
||||||
rounds=rounds,
|
|
||||||
latency_ms=latency,
|
|
||||||
model=self.model,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Append the assistant message (with tool_calls) to history
|
|
||||||
messages.append(msg)
|
|
||||||
|
|
||||||
# Execute each tool call and add results
|
|
||||||
for tc in model_tool_calls:
|
|
||||||
func = tc.get("function", {})
|
|
||||||
tool_name = func.get("name", "unknown")
|
|
||||||
tool_args = func.get("arguments", {})
|
|
||||||
|
|
||||||
logger.info(
|
|
||||||
"Bridge tool call [round %d]: %s(%s)",
|
|
||||||
rounds,
|
|
||||||
tool_name,
|
|
||||||
tool_args,
|
|
||||||
)
|
|
||||||
|
|
||||||
result = await self._execute_tool_call(tc)
|
|
||||||
tool_calls_made.append(
|
|
||||||
{
|
|
||||||
"round": rounds,
|
|
||||||
"tool": tool_name,
|
|
||||||
"arguments": tool_args,
|
|
||||||
"result": result[:500], # Truncate for logging
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
# Add tool result to message history
|
|
||||||
messages.append(
|
|
||||||
{
|
|
||||||
"role": "tool",
|
|
||||||
"content": result,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
# Hit max rounds
|
|
||||||
latency = (time.time() - start) * 1000
|
|
||||||
return BridgeResult(
|
|
||||||
content="(max tool-call rounds reached)",
|
|
||||||
tool_calls_made=tool_calls_made,
|
|
||||||
rounds=rounds,
|
|
||||||
latency_ms=latency,
|
|
||||||
model=self.model,
|
|
||||||
error=f"Exceeded maximum of {self.max_rounds} tool-call rounds",
|
|
||||||
)
|
)
|
||||||
|
|
||||||
except httpx.ConnectError as exc:
|
except httpx.ConnectError as exc:
|
||||||
latency = (time.time() - start) * 1000
|
|
||||||
logger.warning("Ollama connection failed: %s", exc)
|
logger.warning("Ollama connection failed: %s", exc)
|
||||||
return BridgeResult(
|
error_msg = f"Ollama connection failed: {exc}"
|
||||||
content="",
|
content = ""
|
||||||
tool_calls_made=tool_calls_made,
|
|
||||||
rounds=rounds,
|
|
||||||
latency_ms=latency,
|
|
||||||
model=self.model,
|
|
||||||
error=f"Ollama connection failed: {exc}",
|
|
||||||
)
|
|
||||||
except httpx.HTTPStatusError as exc:
|
except httpx.HTTPStatusError as exc:
|
||||||
latency = (time.time() - start) * 1000
|
|
||||||
logger.warning("Ollama HTTP error: %s", exc)
|
logger.warning("Ollama HTTP error: %s", exc)
|
||||||
return BridgeResult(
|
error_msg = f"Ollama HTTP error: {exc.response.status_code}"
|
||||||
content="",
|
content = ""
|
||||||
tool_calls_made=tool_calls_made,
|
|
||||||
rounds=rounds,
|
|
||||||
latency_ms=latency,
|
|
||||||
model=self.model,
|
|
||||||
error=f"Ollama HTTP error: {exc.response.status_code}",
|
|
||||||
)
|
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
latency = (time.time() - start) * 1000
|
|
||||||
logger.error("MCPBridge run failed: %s", exc)
|
logger.error("MCPBridge run failed: %s", exc)
|
||||||
return BridgeResult(
|
error_msg = str(exc)
|
||||||
content="",
|
content = ""
|
||||||
tool_calls_made=tool_calls_made,
|
|
||||||
rounds=rounds,
|
return BridgeResult(
|
||||||
latency_ms=latency,
|
content=content,
|
||||||
model=self.model,
|
tool_calls_made=tool_calls_made,
|
||||||
error=str(exc),
|
rounds=rounds,
|
||||||
)
|
latency_ms=(time.time() - start) * 1000,
|
||||||
|
model=self.model,
|
||||||
|
error=error_msg,
|
||||||
|
)
|
||||||
|
|
||||||
def status(self) -> dict:
|
def status(self) -> dict:
|
||||||
"""Return bridge status for the dashboard."""
|
"""Return bridge status for the dashboard."""
|
||||||
|
|||||||
Reference in New Issue
Block a user