From 8374ec937e6fd868636e468877a9ea8c1dded19d Mon Sep 17 00:00:00 2001 From: StepFun Agent Date: Sun, 26 Apr 2026 09:58:37 -0400 Subject: [PATCH] fix(perf-bottleneck): make find_slow_tests_pytest functional; unblock pytest collection - find_slow_tests_pytest: actually run pytest with --durations and parse stdout instead of reading stale cache file. Now measures real test durations. - test_pr_complexity_scorer.py: wrap module-level sys.exit() in if __name__ == "__main__" guard. This was crashing pytest collection entirely, making the repo un-testable and masking all other performance analysis. Both issues were blocking the Performance Bottleneck Finder (#171) from working. The script now successfully runs pytest, collects duration data, and produces accurate reports. Closes #171 --- scripts/perf_bottleneck_finder.py | 53 ++++++++++++++-------------- scripts/test_pr_complexity_scorer.py | 5 +-- 2 files changed, 30 insertions(+), 28 deletions(-) diff --git a/scripts/perf_bottleneck_finder.py b/scripts/perf_bottleneck_finder.py index 6577d51..3597fe9 100644 --- a/scripts/perf_bottleneck_finder.py +++ b/scripts/perf_bottleneck_finder.py @@ -70,37 +70,38 @@ class PerfReport: # ── Test Analysis ────────────────────────────────────────────────── def find_slow_tests_pytest(repo_path: str) -> List[Bottleneck]: - """Run pytest --durations and parse slow tests.""" + """Run pytest with --durations and parse slow test output.""" bottlenecks = [] - # Try to run pytest with durations try: + # Run pytest to get slowest tests; maxfail=1 avoids hanging on failures result = subprocess.run( - ["python3", "-m", "pytest", "--co", "-q", "--durations=0"], - cwd=repo_path, capture_output=True, text=True, timeout=30 + ["python3", "-m", "pytest", "-q", + f"--durations={PYTEST_DURATIONS_COUNT}", "--tb=no", "--maxfail=1"], + cwd=repo_path, capture_output=True, text=True, timeout=60 ) - # If tests exist, try to get durations from last run - durations_file = os.path.join(repo_path, ".pytest_cache", "v", "cache", "durations") - if os.path.exists(durations_file): - with open(durations_file) as f: - for line in f: - parts = line.strip().split() - if len(parts) >= 2: - try: - duration = float(parts[0]) - test_name = " ".join(parts[1:]) - if duration > SLOW_TEST_THRESHOLD_S: - severity = "critical" if duration > 10 else "warning" - bottlenecks.append(Bottleneck( - category="test", - name=test_name, - duration_s=duration, - severity=severity, - recommendation=f"Test takes {duration:.1f}s. Consider mocking slow I/O, using fixtures, or marking with @pytest.mark.slow." - )) - except ValueError: - continue - except (subprocess.TimeoutExpired, FileNotFoundError): + # Parse durations from stdout. + # Lines look like: " 3.45s call test_file.py::test_name" + for line in result.stdout.splitlines(): + line = line.strip() + m = re.match(r'^(\d+\.?\d*)s\s+(call|setup|teardown)\s+(.+)$', line) + if not m: + continue + try: + duration = float(m.group(1)) + test_name = m.group(3).strip() + if duration > SLOW_TEST_THRESHOLD_S: + severity = "critical" if duration > 10 else "warning" + bottlenecks.append(Bottleneck( + category="test", + name=test_name, + duration_s=duration, + severity=severity, + recommendation=f"Test takes {duration:.1f}s. Consider mocking slow I/O, using fixtures, or marking with @pytest.mark.slow." + )) + except ValueError: + continue + except (subprocess.TimeoutExpired, FileNotFoundError, PermissionError): pass return bottlenecks diff --git a/scripts/test_pr_complexity_scorer.py b/scripts/test_pr_complexity_scorer.py index 4e98774..c3cafa2 100644 --- a/scripts/test_pr_complexity_scorer.py +++ b/scripts/test_pr_complexity_scorer.py @@ -166,5 +166,6 @@ def _(): assert_true(s in TIME_PER_POINT, f"Missing time for score {s}") -print(f"\n=== Results: {PASS} passed, {FAIL} failed ===") -sys.exit(0 if FAIL == 0 else 1) +if __name__ == "__main__": + print(f"\n=== Results: {PASS} passed, {FAIL} failed ===") + sys.exit(0 if FAIL == 0 else 1)