forked from Rockachopa/Timmy-time-dashboard
* feat: set qwen3.5:latest as default model - Make qwen3.5:latest the primary default model for faster inference - Move llama3.1:8b-instruct to fallback chain - Update text fallback chain to prioritize qwen3.5:latest Retains full backward compatibility via cascade fallback. * test: remove ~55 brittle, duplicate, and useless tests Audit of all 100 test files identified tests that provided no real regression protection. Removed: - 4 files deleted entirely: test_setup_script (always skipped), test_csrf_bypass (tautological assertions), test_input_validation (accepts 200-500 status codes), test_security_regression (fragile source-pattern checks redundant with rendering tests) - Duplicate test classes (TestToolTracking, TestCalculatorExtended) - Mock-only tests that just verify mock wiring, not behavior - Structurally broken tests (TestCreateToolFunctions patches after import) - Empty/pass-body tests and meaningless assertions (len > 20) - Flaky subprocess tests (aider tool calling real binary) All 1328 remaining tests pass. Net: -699 lines, zero coverage loss. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: prevent test pollution from autoresearch_enabled mutation test_autoresearch_perplexity.py was setting settings.autoresearch_enabled = True but never restoring it in the finally block — polluting subsequent tests. When pytest-randomly ordered it before test_experiments_page_shows_disabled_when_off, the victim test saw enabled=True and failed to find "Disabled" in the page. Fix both sides: - Restore autoresearch_enabled in the finally block (root cause) - Mock settings explicitly in the victim test (defense in depth) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> --------- Co-authored-by: Trip T <trip@local> Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
141 lines
5.7 KiB
Python
141 lines
5.7 KiB
Python
"""Tests for the Paperclip API client."""
|
|
|
|
from unittest.mock import AsyncMock, patch
|
|
|
|
import pytest
|
|
|
|
from integrations.paperclip.client import PaperclipClient
|
|
from integrations.paperclip.models import CreateIssueRequest
|
|
|
|
|
|
@pytest.fixture
def client():
    """Provide a PaperclipClient aimed at a dummy endpoint; the network is never hit."""
    fake = PaperclipClient(base_url="http://fake:3100", api_key="test-key")
    return fake
|
|
|
|
|
|
# ── health ───────────────────────────────────────────────────────────────────
|
|
|
|
|
|
async def test_healthy_returns_true_on_success(client):
    """healthy() reports True when the health endpoint answers with a payload."""
    fake_get = AsyncMock(return_value={"status": "ok"})
    with patch.object(client, "_get", fake_get):
        result = await client.healthy()
    assert result is True
|
|
|
|
|
|
async def test_healthy_returns_false_on_failure(client):
    """healthy() reports False when the health check yields nothing."""
    fake_get = AsyncMock(return_value=None)
    with patch.object(client, "_get", fake_get):
        result = await client.healthy()
    assert result is False
|
|
|
|
|
|
# ── agents ───────────────────────────────────────────────────────────────────
|
|
|
|
|
|
async def test_list_agents_returns_list(client):
    """A raw agent payload from the API is parsed into typed agent models."""
    payload = [{"id": "a1", "name": "Codex", "role": "engineer", "status": "active"}]
    with patch.object(client, "_get", AsyncMock(return_value=payload)), \
            patch("integrations.paperclip.client.settings") as cfg:
        cfg.paperclip_company_id = "comp-1"
        result = await client.list_agents(company_id="comp-1")
    assert len(result) == 1
    assert result[0].name == "Codex"
|
|
|
|
|
|
async def test_list_agents_graceful_on_none(client):
    """When the GET fails (None), list_agents degrades to an empty list."""
    with patch.object(client, "_get", AsyncMock(return_value=None)):
        result = await client.list_agents(company_id="comp-1")
    assert result == []
|
|
|
|
|
|
# ── issues ───────────────────────────────────────────────────────────────────
|
|
|
|
|
|
async def test_list_issues(client):
    """list_issues parses the raw issue payload into models."""
    payload = [{"id": "i1", "title": "Fix bug"}]
    with patch.object(client, "_get", AsyncMock(return_value=payload)):
        result = await client.list_issues(company_id="comp-1")
    assert len(result) == 1
    assert result[0].title == "Fix bug"
|
|
|
|
|
|
async def test_get_issue(client):
    """get_issue returns a parsed issue model when the API responds."""
    payload = {"id": "i1", "title": "Fix bug", "description": "It's broken"}
    with patch.object(client, "_get", AsyncMock(return_value=payload)):
        found = await client.get_issue("i1")
    assert found is not None
    assert found.id == "i1"
|
|
|
|
|
|
async def test_get_issue_not_found(client):
    """get_issue maps a missing record (None from GET) to None."""
    with patch.object(client, "_get", AsyncMock(return_value=None)):
        found = await client.get_issue("nonexistent")
    assert found is None
|
|
|
|
|
|
async def test_create_issue(client):
    """create_issue posts the request and parses the created issue."""
    payload = {"id": "i2", "title": "New feature"}
    with patch.object(client, "_post", AsyncMock(return_value=payload)):
        request = CreateIssueRequest(title="New feature")
        created = await client.create_issue(request, company_id="comp-1")
    assert created is not None
    assert created.id == "i2"
|
|
|
|
|
|
async def test_create_issue_no_company_id(client):
    """Without a company id (neither argument nor settings), create_issue yields None."""
    with patch("integrations.paperclip.client.settings") as cfg:
        cfg.paperclip_company_id = ""
        created = await client.create_issue(CreateIssueRequest(title="Test"))
    assert created is None
|
|
|
|
|
|
async def test_delete_issue(client):
    """delete_issue relays the truthy outcome of the DELETE call."""
    with patch.object(client, "_delete", AsyncMock(return_value=True)):
        outcome = await client.delete_issue("i1")
    assert outcome is True
|
|
|
|
|
|
# ── comments ─────────────────────────────────────────────────────────────────
|
|
|
|
|
|
async def test_add_comment(client):
    """add_comment posts the text and parses the created comment."""
    payload = {"id": "c1", "issue_id": "i1", "content": "Done"}
    with patch.object(client, "_post", AsyncMock(return_value=payload)):
        created = await client.add_comment("i1", "Done")
    assert created is not None
    assert created.content == "Done"
|
|
|
|
|
|
async def test_list_comments(client):
    """list_comments parses each raw comment entry into a model."""
    payload = [{"id": "c1", "issue_id": "i1", "content": "LGTM"}]
    with patch.object(client, "_get", AsyncMock(return_value=payload)):
        result = await client.list_comments("i1")
    assert len(result) == 1
|
|
|
|
|
|
# ── goals ────────────────────────────────────────────────────────────────────
|
|
|
|
|
|
async def test_list_goals(client):
    """list_goals parses the raw goal payload into models."""
    payload = [{"id": "g1", "title": "Ship MVP"}]
    with patch.object(client, "_get", AsyncMock(return_value=payload)):
        result = await client.list_goals(company_id="comp-1")
    assert len(result) == 1
    assert result[0].title == "Ship MVP"
|
|
|
|
|
|
async def test_create_goal(client):
    """create_goal returns a parsed goal when the POST succeeds."""
    payload = {"id": "g2", "title": "Scale to 1000 users"}
    with patch.object(client, "_post", AsyncMock(return_value=payload)):
        created = await client.create_goal("Scale to 1000 users", company_id="comp-1")
    assert created is not None
|
|
|
|
|
# ── heartbeat runs ───────────────────────────────────────────────────────────
|
|
|
|
|
|
async def test_list_heartbeat_runs(client):
    """list_heartbeat_runs parses each raw run entry into a model."""
    payload = [{"id": "r1", "agent_id": "a1", "status": "running"}]
    with patch.object(client, "_get", AsyncMock(return_value=payload)):
        result = await client.list_heartbeat_runs(company_id="comp-1")
    assert len(result) == 1
|