1
0
* polish: streamline nav, extract inline styles, improve tablet UX

- Restructure desktop nav from 8+ flat links + overflow dropdown into
  5 grouped dropdowns (Core, Agents, Intel, System, More) matching
  the mobile menu structure to reduce decision fatigue
- Extract all inline styles from mission_control.html and base.html
  notification elements into mission-control.css with semantic classes
- Replace JS-built innerHTML with secure DOM construction in
  notification loader and chat history
- Add CONNECTING state to connection indicator (amber) instead of
  showing OFFLINE before WebSocket connects
- Add tablet breakpoint (1024px) with larger touch targets for
  Apple Pencil / stylus use and safe-area padding for iPad toolbar
- Add active-link highlighting in desktop dropdown menus
- Rename "Mission Control" page title to "System Overview" to
  disambiguate from the chat home page
- Add "Home — Timmy Time" page title to index.html

https://claude.ai/code/session_015uPUoKyYa8M2UAcyk5Gt6h

* fix(security): move auth-gate credentials to environment variables

Hardcoded username, password, and HMAC secret in auth-gate.py replaced
with os.environ lookups. Startup now refuses to run if any variable is
unset. Added AUTH_GATE_SECRET/USER/PASS to .env.example.

https://claude.ai/code/session_015uPUoKyYa8M2UAcyk5Gt6h

* refactor(tooling): migrate from black+isort+bandit to ruff

Replace three separate linting/formatting tools with a single ruff
invocation. Updates tox.ini (lint, format, pre-push, pre-commit envs),
.pre-commit-config.yaml, and CI workflow. Fixes all ruff errors
including unused imports, missing raise-from, and undefined names.
Ruff config maps existing bandit skips to equivalent S-rules.

https://claude.ai/code/session_015uPUoKyYa8M2UAcyk5Gt6h

---------

Co-authored-by: Claude <noreply@anthropic.com>
This commit is contained in:
Alexander Whitestone
2026-03-11 12:23:35 -04:00
committed by GitHub
parent 708c8a2477
commit 9d78eb31d1
149 changed files with 884 additions and 962 deletions

View File

@@ -32,8 +32,9 @@ async def test_multistep_chain_completes_all_steps():
         ]
     )
-    with patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent), patch(
-        "timmy.agentic_loop._broadcast_progress", new_callable=AsyncMock
+    with (
+        patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
+        patch("timmy.agentic_loop._broadcast_progress", new_callable=AsyncMock),
     ):
         result = await run_agentic_loop("Search AI news and write summary to file")
@@ -57,8 +58,9 @@ async def test_multistep_chain_adapts_on_failure():
         ]
     )
-    with patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent), patch(
-        "timmy.agentic_loop._broadcast_progress", new_callable=AsyncMock
+    with (
+        patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
+        patch("timmy.agentic_loop._broadcast_progress", new_callable=AsyncMock),
     ):
         result = await run_agentic_loop("Update config timeout to 60")
@@ -79,8 +81,9 @@ async def test_max_steps_enforced():
         ]
     )
-    with patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent), patch(
-        "timmy.agentic_loop._broadcast_progress", new_callable=AsyncMock
+    with (
+        patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
+        patch("timmy.agentic_loop._broadcast_progress", new_callable=AsyncMock),
     ):
         result = await run_agentic_loop("Do 5 things", max_steps=2)
@@ -106,8 +109,9 @@ async def test_progress_events_fire():
         ]
     )
-    with patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent), patch(
-        "timmy.agentic_loop._broadcast_progress", new_callable=AsyncMock
+    with (
+        patch("timmy.agentic_loop._get_loop_agent", return_value=mock_agent),
+        patch("timmy.agentic_loop._broadcast_progress", new_callable=AsyncMock),
     ):
         await run_agentic_loop("Do A and B", on_progress=on_progress)

View File

@@ -4,8 +4,6 @@ These tests verify that Ollama models are correctly loaded, Timmy can interact
with them, and fallback mechanisms work as expected.
"""
from unittest.mock import MagicMock, patch
import pytest
from config import settings
@@ -78,9 +76,9 @@ async def test_timmy_chat_with_simple_query():
         assert response is not None, "Response should not be None"
         assert isinstance(response, str), "Response should be a string"
         assert len(response) > 0, "Response should not be empty"
-        assert (
-            "Timmy" in response or "agent" in response.lower()
-        ), "Response should mention Timmy or agent"
+        assert "Timmy" in response or "agent" in response.lower(), (
+            "Response should mention Timmy or agent"
+        )
     except Exception as e:
         pytest.skip(f"Chat failed: {e}")
@@ -91,17 +89,17 @@ async def test_model_supports_tools():
     from timmy.agent import _model_supports_tools
     # Small models should not support tools
-    assert _model_supports_tools("llama3.2") == False, "llama3.2 should not support tools"
-    assert _model_supports_tools("llama3.2:3b") == False, "llama3.2:3b should not support tools"
+    assert not _model_supports_tools("llama3.2"), "llama3.2 should not support tools"
+    assert not _model_supports_tools("llama3.2:3b"), "llama3.2:3b should not support tools"
     # Larger models should support tools
-    assert _model_supports_tools("llama3.1") == True, "llama3.1 should support tools"
-    assert (
-        _model_supports_tools("llama3.1:8b-instruct") == True
-    ), "llama3.1:8b-instruct should support tools"
+    assert _model_supports_tools("llama3.1"), "llama3.1 should support tools"
+    assert _model_supports_tools("llama3.1:8b-instruct"), (
+        "llama3.1:8b-instruct should support tools"
+    )
     # Unknown models default to True
-    assert _model_supports_tools("unknown-model") == True, "Unknown models should default to True"
+    assert _model_supports_tools("unknown-model"), "Unknown models should default to True"
 @pytest.mark.asyncio