[loop-cycle-66] fix: replace language-model with inference-backend in error messages (#334)

This commit is contained in:
2026-03-18 20:27:06 -04:00
parent bcc7b068a4
commit 22e0d2d4b3
3 changed files with 6 additions and 6 deletions

View File

@@ -547,9 +547,7 @@ class DiscordVendor(ChatPlatform):
response = "Sorry, that took too long. Please try a simpler request."
except Exception as exc:
logger.error("Discord: chat_with_tools() failed: %s", exc)
- response = (
-     "I'm having trouble reaching my language model right now. Please try again shortly."
- )
+ response = "I'm having trouble reaching my inference backend right now. Please try again shortly."
# Check if Agno paused the run for tool confirmation
if run_output is not None:

View File

@@ -101,7 +101,9 @@ async def chat(message: str, session_id: str | None = None) -> str:
logger.error("Session: agent.arun() failed: %s", exc)
session_logger.record_error(str(exc), context="chat")
session_logger.flush()
- return "I'm having trouble reaching my language model right now. Please try again shortly."
+ return (
+     "I'm having trouble reaching my inference backend right now. Please try again shortly."
+ )
# Post-processing: clean up any leaked tool calls or chain-of-thought
response_text = _clean_response(response_text)
@@ -175,7 +177,7 @@ async def chat_with_tools(message: str, session_id: str | None = None):
session_logger.flush()
# Return a duck-typed object that callers can handle uniformly
return _ErrorRunOutput(
- "I'm having trouble reaching my language model right now. Please try again shortly."
+ "I'm having trouble reaching my inference backend right now. Please try again shortly."
)

View File

@@ -310,6 +310,6 @@ class TestSessionDisconnect:
result = await session.chat("test message")
- assert "I'm having trouble reaching my language model" in result
+ assert "I'm having trouble reaching my inference backend" in result
# Should NOT have Ollama disconnected message
assert "Ollama appears to be disconnected" not in result