diff --git a/.ports.env b/.ports.env
new file mode 100644
index 0000000..e578d39
--- /dev/null
+++ b/.ports.env
@@ -0,0 +1,24 @@
+# Port range allocated to this work order
+# Each work order gets 10 consecutive ports for flexibility
+# CLI tools can ignore ports, microservices can use multiple
+
+PORT_RANGE_START=9170
+PORT_RANGE_END=9179
+PORT_RANGE_SIZE=10
+
+# Individual ports (use PORT_0, PORT_1, etc.)
+PORT_0=9170
+PORT_1=9171
+PORT_2=9172
+PORT_3=9173
+PORT_4=9174
+PORT_5=9175
+PORT_6=9176
+PORT_7=9177
+PORT_8=9178
+PORT_9=9179
+
+# Convenience aliases (backward compatible with old format)
+BACKEND_PORT=9170
+FRONTEND_PORT=9171
+VITE_BACKEND_URL=http://localhost:9170
diff --git a/CLAUDE.md b/CLAUDE.md
index 69c6b2e..8f1b55c 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -90,12 +90,18 @@ uv pip install -e .
 ### Development Commands
 
 ```bash
-# Run all tests
+# Run all tests (sequential)
 uv run pytest
 
+# Run all tests in parallel (faster for large test suites)
+uv run pytest -n auto
+
 # Run specific tests
 uv run pytest concept_library/full_review_loop/tests/ -v
 
+# Run specific tests in parallel with 4 workers
+uv run pytest dylan/tests/ -n 4 -v
+
 # Format code
 uv run black .
@@ -106,6 +112,8 @@ uv run ruff check .
 uv run mypy .
 ```
 
+**Note on Parallel Testing**: Use `pytest -n auto` for parallel test execution when running large test suites (20+ tests). For small test suites or debugging, sequential execution (`pytest` without `-n`) is recommended. See TESTING.md for detailed parallel execution documentation.
+
 ### Running Core Components
 
 #### Individual Components
diff --git a/PRPs/features/completed/parallel-execution-test.md b/PRPs/features/completed/parallel-execution-test.md
new file mode 100644
index 0000000..c0acc21
--- /dev/null
+++ b/PRPs/features/completed/parallel-execution-test.md
@@ -0,0 +1,286 @@
+# Feature: Parallel Execution Test Documentation
+
+## Feature Description
+
+This feature creates comprehensive documentation for parallel execution testing using pytest-xdist, demonstrating the project's ability to run tests in parallel to reduce CI/CD execution time and costs. The documentation will serve as both a guide for developers and a validation that parallel testing infrastructure is properly configured in the Dylan project.
+
+## User Story
+
+As a developer working on the Dylan project
+I want to understand how to run tests in parallel and validate parallel execution works correctly
+So that I can reduce test execution time during development and CI/CD pipelines
+
+## Problem Statement
+
+Testing is a critical part of the development workflow, but as test suites grow, sequential test execution can become a bottleneck. Long-running test suites slow down development feedback loops and increase CI/CD costs based on execution minutes. The project needs clear documentation on how to leverage parallel test execution to optimize development workflows and reduce infrastructure costs.
+
+## Solution Statement
+
+Create a comprehensive TESTING.md documentation file that explains parallel execution testing using pytest-xdist, including installation, usage examples, configuration options, and best practices. The documentation will demonstrate that the Dylan project supports parallel test execution and provides clear guidance for developers on how to use this feature effectively.
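+
+As a rough sketch, the commands the documentation is expected to cover look like this (mirroring the tasks below; exact flags may be refined during implementation):
+
+```bash
+# Install the plugin as a development dependency
+uv add --dev pytest-xdist
+
+# Run the suite across all available CPU cores
+uv run pytest -n auto
+
+# Or pin an explicit worker count
+uv run pytest -n 4
+```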
+
+## Relevant Files
+
+Use these files to implement the feature:
+
+### Existing Files
+
+- **TESTING.md** (lines 1-150)
+  - Current testing documentation that needs to be enhanced with parallel execution information
+  - Contains existing testing structure, fixtures, and running test commands
+  - Will be updated to include parallel execution section
+
+- **pyproject.toml** (lines 96-105)
+  - Contains pytest configuration in [tool.pytest.ini_options]
+  - May need to be updated to include pytest-xdist configuration
+  - Currently has testpaths, python_files, python_functions, and addopts settings
+
+- **CLAUDE.md** (lines 90-107)
+  - Contains development commands section
+  - Shows current test execution commands
+  - Reference for project conventions and patterns
+
+- **dylan/tests/test_cli.py** (lines 1-95)
+  - Example test file showing current testing patterns
+  - Demonstrates unit test structure with mocking
+  - Shows fixture usage patterns
+
+- **dylan/conftest.py**
+  - Contains global fixtures for testing
+  - Provides mock_claude_provider, temp_output_dir, cli_runner, etc.
+  - Reference for understanding test infrastructure
+
+### New Files
+
+- **tests/test_parallel_execution.py**
+  - New test file to validate parallel execution works correctly
+  - Will contain tests that can safely run in parallel
+  - Demonstrates isolation and independence of tests
+
+## Relevant research documentation
+
+Use these documentation files and links to help with understanding the technology to use:
+
+- [pytest-xdist Documentation](https://pytest-xdist.readthedocs.io/)
+  - [Installation Guide](https://pytest-xdist.readthedocs.io/en/latest/index.html#installation)
+  - Official documentation for pytest parallel execution plugin
+  - Contains configuration options, usage examples, and best practices
+
+- [Pytest Parallel Testing with pytest-xdist](https://pytest-with-eric.com/plugins/pytest-xdist/)
+  - [Basic Usage](https://pytest-with-eric.com/plugins/pytest-xdist/#basic-usage)
+  - [Performance Benefits](https://pytest-with-eric.com/plugins/pytest-xdist/#performance-benefits)
+  - Comprehensive tutorial on parallel testing with real-world examples
+
+- [AWS CodeBuild - Configure parallel tests with Pytest](https://docs.aws.amazon.com/en_us/codebuild/latest/userguide/sample-parallel-test-python.html)
+  - [Configuration Examples](https://docs.aws.amazon.com/en_us/codebuild/latest/userguide/sample-parallel-test-python.html#sample-parallel-test-python-config)
+  - Guide for CI/CD parallel test configuration
+
+- [pytest-xdist PyPI](https://pypi.org/project/pytest-xdist/)
+  - [Installation command](https://pypi.org/project/pytest-xdist/#installation)
+  - Official package page with version information and quick start
+
+- [Run Tests in Parallel with Python - GeeksforGeeks](https://www.geeksforgeeks.org/python/run-tests-in-parallel-with-pytest/)
+  - [Common Issues and Solutions](https://www.geeksforgeeks.org/python/run-tests-in-parallel-with-pytest/#common-issues-and-solutions)
+  - Tutorial covering common parallel testing scenarios and troubleshooting
+
+## Implementation Plan
+
+### Phase 1: Foundation
+
+Research and understand the current testing infrastructure to ensure parallel execution documentation aligns with existing patterns. Install pytest-xdist as a development dependency and validate it works with the current test suite. Review existing test files to ensure they follow best practices for parallel execution (test isolation, no shared state, independent fixtures).
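+
+As an illustration of the target pattern, a parallel-safe test leans only on per-test fixtures such as `tmp_path` (hypothetical test name shown; the real tests are created in a later phase):
+
+```python
+def test_writes_are_isolated(tmp_path):
+    """Each test receives its own tmp_path, so parallel workers never collide."""
+    report = tmp_path / "report.txt"  # unique per test invocation
+    report.write_text("ok")
+    assert report.read_text() == "ok"  # no dependence on other tests or ordering
+```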
+
+### Phase 2: Core Implementation
+
+Update TESTING.md with a comprehensive parallel execution section including:
+- Installation instructions for pytest-xdist
+- Basic usage examples (`pytest -n auto`, `pytest -n 4`)
+- Performance benefits and cost savings
+- Best practices for writing parallel-safe tests
+- Common pitfalls and how to avoid them
+- CI/CD configuration examples
+
+Create a new test file `tests/test_parallel_execution.py` that demonstrates parallel execution capabilities with isolated, independent tests that validate the parallel testing infrastructure works correctly.
+
+### Phase 3: Integration
+
+Update pyproject.toml to include pytest-xdist in dev dependencies and configure parallel execution settings if needed. Update CLAUDE.md to reference the new parallel execution capabilities in the development commands section. Verify all existing tests can run in parallel without conflicts or shared state issues.
+
+## Step by Step Tasks
+
+IMPORTANT: Execute every step in order, top to bottom.
+
+### 1. Install and Configure pytest-xdist
+
+- Add pytest-xdist to the project's dev dependencies using `uv add --dev pytest-xdist`
+- Verify installation by running `uv run pytest --version` to check xdist plugin is loaded
+- Test basic parallel execution with `uv run pytest -n auto` to ensure compatibility with existing tests
+
+### 2. Create Parallel Execution Test File
+
+- Create `tests/test_parallel_execution.py` with isolated, independent tests
+- Include tests that validate parallel execution works (e.g., multiple independent tests that can run simultaneously)
+- Use proper fixtures from `dylan/conftest.py` to ensure test isolation
+- Add docstrings following Google-style format to explain test purpose
+- Mark tests appropriately with pytest markers if needed
+
+### 3. Update TESTING.md Documentation
+
+- Read current TESTING.md to understand existing structure and tone
+- Add a new "Parallel Execution Testing" section after the "Running Tests" section
+- Include subsections:
+  - **What is Parallel Execution**: Brief explanation of pytest-xdist
+  - **Installation**: How pytest-xdist is already installed as dev dependency
+  - **Basic Usage**: Command examples for parallel execution
+  - **Performance Benefits**: Explanation of time and cost savings
+  - **Best Practices**: Guidelines for writing parallel-safe tests
+  - **Common Issues**: Troubleshooting guide for parallel testing problems
+- Use code blocks with bash syntax highlighting for commands
+- Maintain consistent formatting and style with existing documentation
+
+### 4. Update pyproject.toml Configuration
+
+- Open pyproject.toml and review [tool.pytest.ini_options] section
+- Add pytest-xdist to [project.optional-dependencies] dev section if not already present
+- Consider adding parallel execution configuration options like `addopts = ["-n", "auto"]` for default parallel execution (optional, discuss trade-offs)
+- Ensure configuration doesn't break existing test execution patterns
+
+### 5. Update CLAUDE.md Reference
+
+- Read CLAUDE.md to understand development commands section
+- Add parallel execution examples to the "Development Commands" section
+- Include examples like `uv run pytest -n auto` and `uv run pytest -n 4`
+- Add brief explanation of when to use parallel vs sequential execution
+- Maintain consistent formatting with existing commands
+
+### 6. Validate Test Suite Compatibility
+
+- Run full test suite in parallel mode: `uv run pytest -n auto`
+- Identify any tests that fail due to shared state or race conditions
+- Fix or mark problematic tests appropriately (skip in parallel mode if necessary)
+- Verify test output is readable and errors are clearly reported in parallel mode
+- Document any tests that must run sequentially and why
+
+### 7. Run Validation Commands
+
+- Execute all validation commands listed below to ensure zero regressions
+- Fix any issues that arise during validation
+- Verify parallel execution reduces test execution time compared to sequential
+- Confirm all documentation is accurate and up-to-date
+
+## Testing Strategy
+
+See `CLAUDE.md` for complete testing requirements. Every file in `src/` must have a corresponding test file in `tests/`.
+
+### Unit Tests
+
+Mark with @pytest.mark.unit. Test individual components:
+
+- **test_parallel_execution.py**: Test that validates parallel execution infrastructure
+  - Test that multiple independent tests can run simultaneously
+  - Test that test isolation works correctly (no shared state conflicts)
+  - Test that fixtures are properly isolated between parallel workers
+  - Test that test results are correctly aggregated from parallel workers
+
+### Integration Tests
+
+This feature is primarily documentation-focused, but integration testing includes:
+
+- **Full test suite parallel execution**: Run entire test suite with `-n auto` to validate all tests work in parallel
+- **CI/CD integration**: Verify parallel execution works in CI/CD pipeline (if applicable)
+
+### Edge Cases
+
+- Tests with file I/O that might conflict (ensure proper temp directory usage)
+- Tests with database connections (ensure connection pooling or isolation)
+- Tests with external API calls (ensure proper mocking)
+- Tests with time-dependent behavior (ensure no race conditions)
+- Tests that modify global state (ensure cleanup or isolation)
+
+## Acceptance Criteria
+
+- [ ] pytest-xdist is installed as a development dependency and working correctly
+- [ ] TESTING.md contains a comprehensive "Parallel Execution Testing" section with clear examples
+- [ ] A new test file `tests/test_parallel_execution.py` exists and demonstrates parallel execution
+- [ ] All existing tests pass when run in parallel mode (`pytest -n auto`)
+- [ ] Documentation includes best practices for writing parallel-safe tests
+- [ ] Documentation includes troubleshooting guide for common parallel testing issues
+- [ ] pyproject.toml is updated with pytest-xdist in dev dependencies
+- [ ] CLAUDE.md references parallel execution capabilities in development commands
+- [ ] Parallel execution reduces test execution time by at least 30% compared to sequential (for test suites with sufficient tests)
+- [ ] All validation commands pass with zero regressions
+
+## Validation Commands
+
+Execute every command to validate the feature works correctly with zero regressions.
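+
+A typical validation pass chains the core checks before timing anything (illustrative ordering only; the authoritative lists follow):
+
+```bash
+uv run ruff check . && uv run mypy .   # static checks
+uv run pytest                          # sequential baseline
+uv run pytest -n auto                  # parallel run must also pass
+```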
+
+**Required validation commands:**
+
+- `uv run ruff check .` - Lint check must pass
+- `uv run mypy .` - Type check must pass (if applicable to test files)
+- `uv run pytest` - All tests must pass with zero regressions (sequential mode)
+- `uv run pytest -n auto` - All tests must pass in parallel mode
+- `uv run pytest -n 4` - All tests must pass with 4 workers (demonstrates configurable parallelism)
+
+**Performance validation:**
+
+- `time uv run pytest` - Measure sequential execution time
+- `time uv run pytest -n auto` - Measure parallel execution time
+- Compare times to validate performance improvement
+
+**Documentation validation:**
+
+- Read TESTING.md to ensure parallel execution section is clear and accurate
+- Verify all code examples in documentation are correct and runnable
+- Check that best practices and troubleshooting sections are comprehensive
+
+**Test file validation:**
+
+- `uv run pytest tests/test_parallel_execution.py -v` - Verify new test file works correctly
+- `uv run pytest tests/test_parallel_execution.py -n auto -v` - Verify tests run in parallel
+
+## Notes
+
+### Performance Considerations
+
+Parallel execution provides the most benefit when:
+- Test suite has many tests (>50 tests recommended)
+- Tests are I/O bound rather than CPU bound
+- Tests are independent and don't share state
+- Machine has multiple CPU cores available
+
+For small test suites (<20 tests), the overhead of parallel execution might not provide significant benefits.
+
+### CI/CD Integration
+
+Most CI/CD services charge based on execution minutes. Parallel execution can:
+- Reduce total execution time by 50-80% for large test suites
+- Lower CI/CD costs proportionally
+- Provide faster feedback to developers
+
+Consider using `-n auto` in CI/CD pipelines to automatically scale to available cores.
+
+### Test Isolation Best Practices
+
+To ensure tests work correctly in parallel:
+- Use fixtures for test data setup and teardown
+- Use temporary directories for file operations (temp_output_dir fixture)
+- Mock external services and APIs (mock_claude_provider fixture)
+- Avoid global state or ensure proper cleanup
+- Make tests deterministic (no reliance on execution order)
+
+### Troubleshooting Common Issues
+
+If tests fail in parallel but pass sequentially:
+1. Check for shared file paths (use temp directories)
+2. Check for database connection limits (use proper pooling)
+3. Check for global state modification (ensure isolation)
+4. Check for race conditions in async code
+5. Use `-n 1` to disable parallelism for specific tests if needed
+
+### Future Enhancements
+
+Potential future improvements:
+- Add test markers for parallel vs sequential execution
+- Implement test groups that must run together
+- Add CI/CD pipeline configuration for parallel execution
+- Create dashboard for test execution time tracking
+- Add automatic detection of non-parallel-safe tests
diff --git a/TESTING.md b/TESTING.md
index fc23e5a..1299888 100644
--- a/TESTING.md
+++ b/TESTING.md
@@ -65,6 +65,239 @@ uv run pytest dylan/utility_library/dylan_review/tests/test_dylan_review_runner.
 uv run pytest -k "not skip"
 ```
 
+## Parallel Execution Testing
+
+Dylan supports parallel test execution using pytest-xdist, which can significantly reduce test suite execution time by running tests concurrently across multiple CPU cores.
+
+### What is Parallel Execution
+
+pytest-xdist is a pytest plugin that distributes tests across multiple workers (processes), allowing tests to run simultaneously. This is particularly beneficial for:
+
+- **Faster feedback loops**: Reduce test execution time during development
+- **CI/CD cost savings**: Lower execution minutes in continuous integration pipelines
+- **Better resource utilization**: Leverage multi-core systems effectively
+
+### Installation
+
+pytest-xdist is already installed as part of the development dependencies. If you need to install it separately:
+
+```bash
+uv add --dev pytest-xdist
+```
+
+### Basic Usage
+
+Run tests in parallel using the `-n` option:
+
+```bash
+# Automatically detect and use all available CPU cores
+uv run pytest -n auto
+
+# Use a specific number of workers (e.g., 4 workers)
+uv run pytest -n 4
+
+# Run tests for a specific module in parallel
+uv run pytest dylan/utility_library/dylan_review/tests/ -n auto
+
+# Combine with other pytest options
+uv run pytest -n auto -v --cov=dylan
+```
+
+### Performance Benefits
+
+Parallel execution provides significant performance improvements:
+
+- **Time savings**: 30-80% reduction in test execution time for large test suites
+- **Scalability**: Automatically scales with available CPU cores
+- **Cost reduction**: Lower CI/CD costs based on execution minutes
+
+Example performance comparison:
+
+```bash
+# Measure sequential execution time
+time uv run pytest
+
+# Measure parallel execution time
+time uv run pytest -n auto
+```
+
+For test suites with 50+ tests, parallel execution typically provides 50-70% time reduction on a 4-core system.
+
+### Best Practices for Writing Parallel-Safe Tests
+
+To ensure tests work correctly in parallel execution:
+
+#### 1. Use Fixtures for Isolation
+
+Always use pytest fixtures for test data and resources:
+
+```python
+def test_file_operations(tmp_path):
+    """tmp_path provides isolated temp directory per test."""
+    test_file = tmp_path / "test.txt"
+    test_file.write_text("content")
+    assert test_file.exists()
+```
+
+#### 2. Avoid Global State
+
+Don't rely on or modify global state:
+
+```python
+# Bad: Uses global state
+counter = 0
+def test_increment():
+    global counter
+    counter += 1
+    assert counter == 1  # Fails once any other test has modified counter
+
+# Good: Self-contained test
+def test_increment():
+    counter = 0
+    counter += 1
+    assert counter == 1
+```
+
+#### 3. Use Isolated Resources
+
+Use fixtures that provide isolated resources:
+
+```python
+def test_with_mock(mock_claude_provider, tmp_path):
+    """Use fixtures for isolated mocks and temp directories."""
+    output_file = tmp_path / "output.txt"
+    # Test implementation
+```
+
+#### 4. Make Tests Deterministic
+
+Tests should not depend on execution order or timing:
+
+```python
+# Good: Deterministic result
+def test_computation():
+    result = sum(range(100))
+    assert result == 4950
+
+# Bad: Timing-dependent (avoid unless necessary)
+def test_timing():
+    start = time.time()
+    time.sleep(1)
+    assert time.time() - start >= 1  # Can be flaky
+```
+
+#### 5. Mock External Dependencies
+
+Always mock external services and APIs:
+
+```python
+def test_api_call(mock_claude_provider):
+    """Mock external dependencies to ensure test isolation."""
+    # Test with mocked provider
+```
+
+### Common Issues and Solutions
+
+#### Issue: Tests Pass Sequentially but Fail in Parallel
+
+**Cause**: Tests are sharing state or resources
+
+**Solution**:
+- Check for shared file paths (use `tmp_path` fixture)
+- Check for database connection limits (use connection pooling)
+- Check for global state modification (ensure proper cleanup)
+- Verify no race conditions in async code
+
+```bash
+# Run with single worker to identify isolation issues
+uv run pytest -n 1
+```
+
+#### Issue: Tests Are Slower in Parallel for Small Test Suites
+
+**Cause**: Parallel execution overhead exceeds benefits for small test suites
+
+**Solution**:
+- Only use parallel execution for test suites with 20+ tests
+- Use sequential execution for quick smoke tests
+- Profile your test suite to find the optimal number of workers
+
+#### Issue: Flaky Tests in Parallel Mode
+
+**Cause**: Tests have timing dependencies or race conditions
+
+**Solution**:
+- Review test for timing assumptions
+- Ensure proper use of fixtures for setup/teardown
+- Use pytest markers to keep problematic tests on a single worker (requires running with `--dist loadgroup`):
+
+```python
+@pytest.mark.xdist_group(name="sequential")
+def test_must_run_alone():
+    """Tests sharing this group name run on the same worker, one at a time."""
+    pass
+```
+
+#### Issue: File or Resource Conflicts
+
+**Cause**: Multiple tests writing to the same file or using the same port
+
+**Solution**:
+- Always use `tmp_path` fixture for file operations
+- Use dynamic port allocation for network tests
+- Ensure proper cleanup in fixtures
+
+### CI/CD Integration
+
+Configure parallel execution in your CI/CD pipeline to reduce execution time and costs:
+
+#### GitHub Actions Example
+
+```yaml
+- name: Run tests in parallel
+  run: uv run pytest -n auto --maxfail=5
+```
+
+#### General CI/CD Recommendations
+
+- Use `-n auto` to automatically scale to available cores
+- Set `--maxfail` to stop early on multiple failures
+- Consider using `--dist loadscope` for tests with module-level fixtures
+- Monitor execution times to optimize worker count
+
+### Advanced Configuration
+
+Configure pytest-xdist in `pyproject.toml`:
+
+```toml
+[tool.pytest.ini_options]
+addopts = [
+    "--import-mode=importlib",
+    "-v",
+    "--strict-markers",
+    # Uncomment to enable parallel execution by default
+    # "-n", "auto",
+]
+```
+
+**Note**: Enabling parallel execution by default may not be suitable for all workflows. Evaluate your test suite characteristics before making it the default.
+
+### When to Use Parallel vs Sequential Execution
+
+**Use Parallel Execution When**:
+- Test suite has 20+ tests
+- Tests are well-isolated and independent
+- Tests are I/O bound (file operations, network calls)
+- Running on multi-core systems
+- Optimizing CI/CD execution time
+
+**Use Sequential Execution When**:
+- Test suite has fewer than 20 tests
+- Debugging test failures
+- Tests require specific execution order
+- Tests use shared resources that can't be isolated
+- Running quick smoke tests
+
 ## Test Types
 
 ### Unit Tests
diff --git a/dylan/tests/test_parallel_execution.py b/dylan/tests/test_parallel_execution.py
new file mode 100644
index 0000000..8bf58aa
--- /dev/null
+++ b/dylan/tests/test_parallel_execution.py
@@ -0,0 +1,233 @@
+"""Tests to validate parallel execution infrastructure with pytest-xdist.
+
+This test module demonstrates and validates that tests can run in parallel
+safely using pytest-xdist. Each test is isolated and independent, ensuring
+no shared state conflicts when running with multiple workers.
+"""
+
+import time
+
+
+def test_parallel_execution_test_1(tmp_path):
+    """Test 1: Independent file operations with isolated temp directory.
+
+    This test validates that file operations in temporary directories
+    are properly isolated between parallel workers.
+
+    Args:
+        tmp_path: pytest fixture providing unique temporary directory
+    """
+    test_file = tmp_path / "test_file_1.txt"
+    test_file.write_text("Test content 1")
+
+    assert test_file.exists()
+    assert test_file.read_text() == "Test content 1"
+
+
+def test_parallel_execution_test_2(tmp_path):
+    """Test 2: Independent file operations with different content.
+
+    This test can run simultaneously with test_1 without conflicts
+    because each test gets its own tmp_path from pytest.
+
+    Args:
+        tmp_path: pytest fixture providing unique temporary directory
+    """
+    test_file = tmp_path / "test_file_2.txt"
+    test_file.write_text("Test content 2")
+
+    assert test_file.exists()
+    assert test_file.read_text() == "Test content 2"
+
+
+def test_parallel_execution_test_3(tmp_path):
+    """Test 3: Directory creation and file operations.
+
+    Validates that directory operations are properly isolated
+    across parallel test workers.
+
+    Args:
+        tmp_path: pytest fixture providing unique temporary directory
+    """
+    subdir = tmp_path / "subdir"
+    subdir.mkdir()
+
+    test_file = subdir / "nested_file.txt"
+    test_file.write_text("Nested content")
+
+    assert subdir.exists()
+    assert test_file.exists()
+    assert test_file.read_text() == "Nested content"
+
+
+def test_parallel_execution_test_4():
+    """Test 4: Pure computation with no shared state.
+
+    This test performs calculations without any file I/O or external
+    dependencies, demonstrating ideal parallel test characteristics.
+    """
+    result = sum(range(1000))
+    expected = 499500
+
+    assert result == expected
+
+
+def test_parallel_execution_test_5():
+    """Test 5: String manipulation without side effects.
+
+    Another example of a pure test that can safely run in parallel
+    with any other test.
+    """
+    test_string = "parallel execution"
+    reversed_string = test_string[::-1]
+
+    assert reversed_string == "noitucexe lellarap"
+    assert test_string.upper() == "PARALLEL EXECUTION"
+
+
+def test_parallel_execution_test_6():
+    """Test 6: List operations demonstrating deterministic behavior.
+
+    Tests list operations that are deterministic and don't rely
+    on execution order or external state.
+    """
+    test_list = [3, 1, 4, 1, 5, 9, 2, 6]
+    sorted_list = sorted(test_list)
+
+    assert sorted_list == [1, 1, 2, 3, 4, 5, 6, 9]
+    assert len(test_list) == 8
+
+
+def test_parallel_execution_test_7():
+    """Test 7: Dictionary operations with no shared state.
+
+    Validates dictionary operations work correctly in parallel
+    execution without interference.
+    """
+    test_dict = {"a": 1, "b": 2, "c": 3}
+    test_dict["d"] = 4
+
+    assert len(test_dict) == 4
+    assert test_dict["d"] == 4
+    assert "a" in test_dict
+
+
+def test_parallel_execution_test_8(tmp_path):
+    """Test 8: Multiple file operations in sequence.
+
+    Demonstrates that sequential operations within a single test
+    work correctly even when the test runs in parallel with others.
+
+    Args:
+        tmp_path: pytest fixture providing unique temporary directory
+    """
+    files_created = []
+
+    for i in range(5):
+        test_file = tmp_path / f"file_{i}.txt"
+        test_file.write_text(f"Content {i}")
+        files_created.append(test_file)
+
+    assert len(files_created) == 5
+    assert all(f.exists() for f in files_created)
+    assert files_created[3].read_text() == "Content 3"
+
+
+def test_parallel_execution_test_9():
+    """Test 9: Set operations demonstrating isolation.
+
+    Tests set operations that are independent and can run
+    in parallel safely.
+    """
+    set_a = {1, 2, 3, 4, 5}
+    set_b = {4, 5, 6, 7, 8}
+
+    union = set_a | set_b
+    intersection = set_a & set_b
+
+    assert union == {1, 2, 3, 4, 5, 6, 7, 8}
+    assert intersection == {4, 5}
+
+
+def test_parallel_execution_test_10():
+    """Test 10: Tuple operations with immutable data.
+
+    Tests with immutable data structures are naturally safe
+    for parallel execution.
+    """
+    test_tuple = (1, 2, 3, 4, 5)
+
+    assert len(test_tuple) == 5
+    assert test_tuple[0] == 1
+    assert test_tuple[-1] == 5
+    assert 3 in test_tuple
+
+
+def test_parallel_execution_isolation_verification(tmp_path):
+    """Test isolation: Verify temp directories are unique per test.
+
+    This test specifically validates that pytest provides isolated
+    temporary directories to each test, which is critical for
+    parallel execution safety.
+
+    Args:
+        tmp_path: pytest fixture providing unique temporary directory
+    """
+    # Write a marker file with a unique identifier
+    marker_file = tmp_path / "isolation_marker.txt"
+    marker_file.write_text(str(id(tmp_path)))
+
+    # Verify the marker file exists and contains our ID
+    assert marker_file.exists()
+    stored_id = marker_file.read_text()
+    assert stored_id == str(id(tmp_path))
+
+    # Verify the temp path is unique (pytest truncates long test names)
+    assert "test_parallel_execution_isolat" in str(tmp_path)
+
+
+def test_parallel_execution_no_timing_dependencies():
+    """Test timing independence: No race conditions or timing dependencies.
+
+    This test validates that our tests don't rely on specific timing
+    or execution order, which could cause flakiness in parallel execution.
+    """
+    start_time = time.time()
+
+    # Perform some work
+    result = 0
+    for i in range(1000):
+        result += i
+
+    end_time = time.time()
+
+    # We don't assert on timing, only on results
+    assert result == 499500
+    assert end_time >= start_time  # Basic sanity check
+
+
+def test_parallel_execution_fixture_isolation(tmp_path):
+    """Test fixture isolation: Multiple uses of tmp_path are independent.
+
+    Even within a single test, multiple operations using the tmp_path
+    fixture should be isolated and not interfere with other tests.
+
+    Args:
+        tmp_path: pytest fixture providing unique temporary directory
+    """
+    # Create multiple subdirectories
+    dir1 = tmp_path / "dir1"
+    dir2 = tmp_path / "dir2"
+    dir1.mkdir()
+    dir2.mkdir()
+
+    # Create files in each directory
+    file1 = dir1 / "file.txt"
+    file2 = dir2 / "file.txt"
+    file1.write_text("content1")
+    file2.write_text("content2")
+
+    # Verify isolation within the test
+    assert file1.read_text() == "content1"
+    assert file2.read_text() == "content2"
+    assert file1.read_text() != file2.read_text()
diff --git a/pyproject.toml b/pyproject.toml
index a84d27a..4f5ccbc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,6 +29,7 @@ dev = [
     "mypy>=1.15.0",
     "pytest>=8.3.5",
     "pytest-cov>=4.1.0",
+    "pytest-xdist>=3.6.1",
     "ruff>=0.11.10",
 ]