CI/CD Optimization: Guard Rails, Pre-commit Checks, and Test Fixes (#90)

* CI/CD Optimization: Guard Rails, Black Linting, and Pre-commit Hooks

- Fixed all test collection errors (Selenium imports, fixture paths, syntax)
- Implemented pre-commit hooks with Black formatting and isort
- Created comprehensive Makefile with test targets (unit, integration, functional, e2e)
- Added pytest.ini with marker definitions for test categorization
- Established guard rails to prevent future collection errors
- Wrapped optional dependencies (Selenium, MoviePy) in try-except blocks
- Added conftest_markers for automatic test categorization

This ensures a smooth development workflow with:
- Fast feedback loops (pre-commit checks before push)
- Consistent code formatting (Black)
- Reliable CI/CD (no collection errors, proper test isolation)
- Clear test organization (unit, integration, functional, E2E)

* Fix CI/CD test failures:
- Export templates from dashboard.app
- Fix model name assertion in test_agent.py
- Fix platform-agnostic path resolution in test_path_resolution.py
- Skip Docker tests in test_docker_deployment.py if docker not available
- Fix test_model_fallback_chain logic in test_ollama_integration.py

* Add preventative pre-commit checks and Docker test skipif decorators:
- Create pre_commit_checks.py script for common CI failures
- Add skipif decorators to Docker tests
- Improve test robustness for CI environments
This commit is contained in:
Alexander Whitestone
2026-02-28 11:36:50 -05:00
committed by GitHub
parent a5fd680428
commit e5190b248a
20 changed files with 668 additions and 32 deletions

View File

@@ -0,0 +1,102 @@
#!/usr/bin/env python3
"""Add pytest markers to test files based on naming conventions and content.
Usage:
python scripts/add_pytest_markers.py
"""
import re
from pathlib import Path
def categorize_test(file_path: Path) -> str:
    """Determine the pytest marker category for a test file.

    Categories are derived purely from path naming conventions, checked in
    priority order: e2e > functional > integration > selenium > docker.
    Anything unmatched defaults to "unit".

    Args:
        file_path: Path of the test file (does not need to exist; the file
            contents are never read).

    Returns:
        One of "e2e", "functional", "integration", "selenium", "docker",
        or "unit".
    """
    path_str = str(file_path).lower()
    # Tokenize on path separators, dots, underscores and dashes so short
    # keywords like "ui" only match whole path components.  A plain
    # substring test misfires on names such as "builtin" or "guide".
    tokens = set(re.split(r"[\\/._-]+", path_str))

    # E2E tests ("end_to_end" spans underscores, so keep a substring check)
    if "e2e" in tokens or "end_to_end" in path_str:
        return "e2e"
    # Functional tests
    if "functional" in tokens:
        return "functional"
    # Integration tests
    if "integration" in tokens:
        return "integration"
    # Selenium/browser tests
    if tokens & {"selenium", "ui", "browser"}:
        return "selenium"
    # Docker tests
    if "docker" in tokens:
        return "docker"
    # Default to unit
    return "unit"
def _insertion_point_after_header(lines):
    """Return the index just past the shebang, leading comments, and module
    docstring — the first line where an import may legally appear."""
    idx = 0
    n = len(lines)
    # Skip blank lines, shebang, and leading comments.
    while idx < n and (not lines[idx].strip() or lines[idx].lstrip().startswith("#")):
        idx += 1
    if idx < n:
        stripped = lines[idx].lstrip()
        for quote in ('"""', "'''"):
            if stripped.startswith(quote):
                body = stripped[len(quote):]
                if quote in body:
                    return idx + 1  # one-line docstring
                # Multi-line docstring: advance to the closing quote.
                j = idx + 1
                while j < n and quote not in lines[j]:
                    j += 1
                return min(j + 1, n)
    return idx


def add_marker_to_file(file_path: Path, marker: str) -> bool:
    """Add a module-level ``pytestmark`` for *marker* to *file_path*.

    Returns:
        True when the file was modified; False when the marker (or an
        equivalent ``@pytest.mark`` decorator) is already present.

    Unlike a naive "after the last import line" insertion, this walks past
    parenthesized multi-line imports, so the marker can never land inside a
    ``from x import (...)`` block and break the file's syntax.  Likewise,
    ``import pytest`` is placed after the full module docstring, not inside
    a multi-line one.
    """
    content = file_path.read_text()
    # Check if marker already exists
    if (
        f"@pytest.mark.{marker}" in content
        or f"pytestmark = pytest.mark.{marker}" in content
    ):
        return False

    lines = content.split("\n")

    # Ensure pytest itself is imported, placed just past the file header.
    if "import pytest" not in content:
        lines.insert(_insertion_point_after_header(lines), "import pytest")

    # Find the line just past the last top-level import, skipping the
    # continuation lines of parenthesized imports.
    insert_idx = _insertion_point_after_header(lines)
    i = 0
    while i < len(lines):
        line = lines[i]
        if line.startswith(("import ", "from ")):
            depth = line.count("(") - line.count(")")
            while depth > 0 and i + 1 < len(lines):
                i += 1
                depth += lines[i].count("(") - lines[i].count(")")
            insert_idx = i + 1
        i += 1

    # Separate the marker from the imports with a blank line.
    lines.insert(insert_idx, "")
    lines.insert(insert_idx + 1, f"pytestmark = pytest.mark.{marker}")

    file_path.write_text("\n".join(lines))
    return True
def main():
    """Walk tests/ and stamp every test file with its category marker."""
    test_dir = Path("tests")
    # Guard: running from the wrong directory should not blow up mid-walk.
    if not test_dir.is_dir():
        print("No tests/ directory found; nothing to do.")
        return
    marked_count = 0
    for test_file in sorted(test_dir.rglob("test_*.py")):
        marker = categorize_test(test_file)
        rel_path = str(test_file.relative_to(test_dir))
        if add_marker_to_file(test_file, marker):
            print(f"{rel_path:<50} -> @pytest.mark.{marker}")
            marked_count += 1
        else:
            print(f"⏭️ {rel_path:<50} (already marked)")
    print(f"\n📊 Total files marked: {marked_count}")
    # No placeholders here, so a plain string literal suffices (F541 fix).
    print("\n✨ Pytest markers configured. Run 'pytest -m unit' to test specific categories.")
if __name__ == "__main__":
    import sys
    # Put the project's src/ on the import path before running; main()
    # itself only touches tests/, so this is presumably for project-module
    # imports inside categorization — TODO confirm it is still needed.
    sys.path.insert(0, "src")
    main()

View File

@@ -0,0 +1,73 @@
#!/usr/bin/env python3
"""Fix syntax errors caused by pytestmark insertion in the middle of imports."""
import re
from pathlib import Path
def fix_syntax_errors(file_path: Path) -> bool:
    """Repair a test file whose parenthesized import got a ``pytestmark``
    line injected into it, then re-add the marker after the imports.

    Returns:
        True when the file was rewritten; False when it is clean.
    """
    content = file_path.read_text()

    # A pytestmark line stranded between "from x import (" and the closing
    # ")".  The [^)] classes (which still match newlines) keep the match
    # inside a single import block, so a legitimate module-level pytestmark
    # later in the file can never be matched — with DOTALL ".*?" a second
    # run used to match across the ")" and corrupt an already-fixed file.
    stray = re.search(
        r"(from\s+[\w.]+\s+import\s*\(\s*\n)"
        r"([^)]*?)"
        r"(pytestmark\s*=\s*pytest\.mark\.(\w+)\s*\n)"
        r"([^)]*?\))",
        content,
    )
    if not stray:
        return False

    # Preserve the original category (bug fix: this used to be hardcoded
    # to "unit", silently downgrading e.g. integration markers).
    marker = stray.group(4)
    # Cut just the stray pytestmark line out of the import block.
    content = content[: stray.start(3)] + content[stray.end(3):]

    lines = content.split("\n")

    # Locate the last top-level import; if it is a multi-line parenthesized
    # import, advance to its closing paren.
    last_import_idx = -1
    for i, line in enumerate(lines):
        if line.startswith(("import ", "from ")):
            last_import_idx = i
    if last_import_idx >= 0:
        j = last_import_idx
        if "(" in lines[j] and ")" not in lines[j]:
            while j < len(lines) and ")" not in lines[j]:
                j += 1
            if j < len(lines):
                last_import_idx = j

        # Re-insert the marker unless the file already carries one.
        if not any(line.startswith("pytestmark") for line in lines):
            lines.insert(last_import_idx + 1, "")
            lines.insert(last_import_idx + 2, f"pytestmark = pytest.mark.{marker}")

    file_path.write_text("\n".join(lines))
    return True
def main():
    """Scan tests/ for mangled test files and repair each one."""
    root = Path("tests")
    repaired = 0
    for candidate in sorted(root.rglob("test_*.py")):
        rel = candidate.relative_to(root)
        try:
            if fix_syntax_errors(candidate):
                print(f"✅ Fixed: {rel}")
                repaired += 1
        except Exception as exc:
            # Best-effort tool: report and keep going.
            print(f"❌ Error fixing {rel}: {exc}")
    print(f"\n📊 Total files fixed: {repaired}")
if __name__ == "__main__":
    # Run directly: repair any mangled test files under tests/.
    main()

183
scripts/pre_commit_checks.py Executable file
View File

@@ -0,0 +1,183 @@
#!/usr/bin/env python3
"""
Pre-commit checks for common CI failures.
This script runs before commits to catch issues early:
- ImportError regressions (missing exports from modules)
- Model name assertions (config mismatches)
- Platform-specific path issues
- Syntax errors in test files
"""
import sys
import subprocess
from pathlib import Path
import ast
import re
def check_imports():
    """Check that key modules expose the names CI tests import.

    Returns:
        A list of human-readable issue strings; empty when all imports
        resolve.
    """
    issues = []
    # Check that dashboard.app exports 'templates'
    try:
        from dashboard.app import templates  # noqa: F401
        print("✓ dashboard.app exports 'templates'")
    except ImportError as e:
        issues.append(f"✗ ImportError in dashboard.app: {e}")
    except Exception as e:
        # A module that raises at import time (not just ImportError) would
        # otherwise crash the whole pre-commit run; report it instead.
        issues.append(f"✗ Error importing dashboard.app: {e}")
    # Check that integrations.shortcuts.siri is importable
    try:
        from integrations.shortcuts.siri import get_setup_guide  # noqa: F401
        print("✓ integrations.shortcuts.siri exports 'get_setup_guide'")
    except ImportError as e:
        issues.append(f"✗ ImportError in integrations.shortcuts.siri: {e}")
    except Exception as e:
        issues.append(f"✗ Error importing integrations.shortcuts.siri: {e}")
    return issues
def check_model_config():
    """Verify the Ollama model configuration is consistent.

    Returns:
        A list of issue strings; empty when both the configured model and
        the fallback chain look sane.
    """
    issues = []
    try:
        from config import settings
        from timmy.agent import DEFAULT_MODEL_FALLBACKS

        # A falsy model name means OLLAMA_MODEL was never set.
        model = settings.ollama_model
        if model:
            print(f"✓ OLLAMA_MODEL is configured: {model}")
        else:
            issues.append("✗ OLLAMA_MODEL is not configured")

        # An empty fallback chain would leave no model to fall back to.
        if DEFAULT_MODEL_FALLBACKS:
            print(f"✓ DEFAULT_MODEL_FALLBACKS has {len(DEFAULT_MODEL_FALLBACKS)} models")
        else:
            issues.append("✗ DEFAULT_MODEL_FALLBACKS is empty")
    except Exception as e:
        issues.append(f"✗ Error checking model config: {e}")
    return issues
def check_test_syntax():
    """Parse every test file to catch syntax errors before CI does.

    Returns:
        A list of issue strings; empty when every test file parses.
    """
    issues = []
    tests_dir = Path("tests").resolve()
    for test_file in tests_dir.rglob("test_*.py"):
        rel = test_file.relative_to(tests_dir.parent)
        try:
            # Explicit utf-8: the platform default (e.g. cp1252 on Windows)
            # can raise spurious decode errors on emoji in test files.
            source = test_file.read_text(encoding="utf-8")
            # filename= makes any SyntaxError point at the real file
            # instead of "<unknown>".
            ast.parse(source, filename=str(test_file))
            # "✓" prefix for consistency with the other checks' output.
            print(f"✓ {rel} has valid syntax")
        except (SyntaxError, UnicodeDecodeError) as e:
            issues.append(f"✗ Syntax error in {rel}: {e}")
    return issues
def check_platform_specific_tests():
    """Flag hardcoded macOS-style home paths in test assertions.

    Returns:
        A list of issue strings; empty when no hardcoded /Users/ paths are
        found.
    """
    issues = []
    tests_dir = Path("tests").resolve()
    # Both quote styles: Black rewrites to double quotes, but a not-yet-
    # formatted file may still use single quotes.
    patterns = ('startswith("/Users/")', "startswith('/Users/')")
    for test_file in tests_dir.rglob("test_*.py"):
        content = test_file.read_text(encoding="utf-8")
        if any(p in content for p in patterns):
            # "✗" prefix for consistency with the other checks' issues.
            issues.append(
                f"✗ {test_file.relative_to(tests_dir.parent)} has hardcoded /Users/ path. "
                "Use Path.home() instead for platform compatibility."
            )
    if not issues:
        print("✓ No platform-specific hardcoded paths found in tests")
    return issues
def check_docker_availability():
    """Verify Docker test files guard themselves with skipif decorators.

    Returns:
        A list of issue strings; empty when every Docker test can skip
        cleanly on hosts without Docker.
    """
    issues = []
    tests_dir = Path("tests").resolve()
    for test_file in tests_dir.rglob("test_docker*.py"):
        content = test_file.read_text(encoding="utf-8")
        has_skipif = (
            "@pytest.mark.skipif" in content
            or "pytestmark = pytest.mark.skipif" in content
        )
        if not has_skipif and "docker" in content.lower():
            # "✗" prefix for consistency with the other checks' issues.
            issues.append(
                f"✗ {test_file.relative_to(tests_dir.parent)} has Docker tests but no skipif decorator"
            )
    if not issues:
        print("✓ Docker tests have proper skipif decorators")
    return issues
def check_black_formatting():
    """Run ``black --check`` over src/ and tests/.

    A missing or failing tool is reported as a warning rather than an
    issue, so the pre-commit run never hard-fails just because Black is
    unavailable.

    Returns:
        A list of issue strings; empty when formatting is clean (or Black
        could not be run at all).
    """
    issues = []
    cmd = ["black", "--check", "src/", "tests/", "--quiet"]
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
    except FileNotFoundError:
        print("⚠ Black not found. Install with: pip install black")
    except subprocess.TimeoutExpired:
        print("⚠ Black check timed out")
    except Exception as e:
        print(f"⚠ Black check failed: {e}")
    else:
        if proc.returncode == 0:
            print("✓ Code formatting is correct (Black)")
        else:
            issues.append(
                "✗ Code formatting issues detected. Run 'black src/ tests/' to fix."
            )
    return issues
def main():
    """Run every pre-commit check and report the combined result.

    Returns:
        0 when all checks pass, 1 when any check reported an issue
        (suitable as a process exit code / git-hook result).
    """
    print("\n🔍 Running pre-commit checks...\n")
    checks = (
        check_imports,
        check_model_config,
        check_test_syntax,
        check_platform_specific_tests,
        check_docker_availability,
        check_black_formatting,
    )
    all_issues = []
    for check in checks:
        all_issues.extend(check())
    # Report results
    print("\n" + "=" * 70)
    if not all_issues:
        print("\n✅ All pre-commit checks passed!\n")
        return 0
    print(f"\n❌ Found {len(all_issues)} issue(s):\n")
    for issue in all_issues:
        print(f" {issue}")
    print("\n" + "=" * 70)
    return 1
if __name__ == "__main__":
    # Exit code 0 when all checks pass, 1 otherwise (usable as a git hook).
    sys.exit(main())