From 3289da4a643010f2d75003c720afb9e50d5b2615 Mon Sep 17 00:00:00 2001 From: Claude Code Date: Fri, 19 Dec 2025 18:34:18 +0000 Subject: [PATCH 1/5] Create advanced QA automation assessment for Ezequiel - Add JWT authentication (OAuth2 pattern) with bcrypt - Implement multi-tenant architecture with data isolation - Add file upload/download simulation (S3-like) - Implement rate limiting (10 req/min) - Add pagination support - Create CI/CD pipeline template (GitHub Actions) - Update requirements with auth dependencies - Add test markers for categorization - Include example fixtures and patterns - Remove internal references (RAD, VizioGram) - Rename py/ to app/ (avoid pytest conflict) - Require 30+ tests vs 15 (senior level) - Add TESTING_STRATEGY.md requirement --- .github/workflows/tests.yml | 54 ++ ADVANCED_CHALLENGE.md | 289 ------- EVALUATION_RUBRIC.md | 285 ------- README.md | 384 +++++----- {py => app}/__init__.py | 0 app/auth.py | 140 ++++ app/config.py | 61 ++ {py => app}/events.py | 0 app/main.py | 718 ++++++++++++++++++ py/main.py => app/main_original.py | 0 pytest.ini | 27 +- requirements.txt | 18 +- tests/conftest.py | 51 ++ tests/test_health.py | 17 + .../{test_users.py => test_users_original.py} | 0 15 files changed, 1280 insertions(+), 764 deletions(-) create mode 100644 .github/workflows/tests.yml delete mode 100644 ADVANCED_CHALLENGE.md delete mode 100644 EVALUATION_RUBRIC.md rename {py => app}/__init__.py (100%) create mode 100644 app/auth.py create mode 100644 app/config.py rename {py => app}/events.py (100%) create mode 100644 app/main.py rename py/main.py => app/main_original.py (100%) create mode 100644 tests/conftest.py create mode 100644 tests/test_health.py rename tests/{test_users.py => test_users_original.py} (100%) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000..a766d65 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,54 @@ +name: Test Suite + +on: + push: + branches: [ 
main, develop ] + pull_request: + branches: [ main ] + +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.11', '3.12'] + environment: [dev, stage] + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + + - name: Run tests with pytest + env: + API_ENV: ${{ matrix.environment }} + JWT_SECRET: test_secret_for_ci + run: | + pytest -v \ + --cov=app \ + --cov-report=term-missing \ + --cov-report=xml \ + --junitxml=junit.xml \ + -n auto + + - name: Upload coverage reports + uses: codecov/codecov-action@v3 + with: + file: ./coverage.xml + flags: unittests + name: codecov-${{ matrix.python-version }}-${{ matrix.environment }} + + - name: Upload test results + if: always() + uses: actions/upload-artifact@v3 + with: + name: test-results-${{ matrix.python-version }}-${{ matrix.environment }} + path: junit.xml diff --git a/ADVANCED_CHALLENGE.md b/ADVANCED_CHALLENGE.md deleted file mode 100644 index 27439d1..0000000 --- a/ADVANCED_CHALLENGE.md +++ /dev/null @@ -1,289 +0,0 @@ -# Advanced Challenge: Event Publishing & Metrics - -**For candidates with data pipeline & automation experience** - -If you have experience with event-driven systems, Kafka, or observability platforms, this optional challenge demonstrates those skills while learning pytest. - ---- - -## ๐ŸŽฏ Challenge Overview - -**Add event publishing and metrics to the User API using the provided `events.py` module.** - -We've created a `py/events.py` module with two classes: -- `EventPublisher` - Publishes events (in-memory for this assessment) -- `MetricsCollector` - Collects application metrics - -Your task: -1. **Integrate event publishing** into the existing API endpoints -2. **Add metrics collection** for user operations -3. 
**Create a metrics endpoint** to expose the collected data -4. **Write pytest tests** for the new functionality - -**Key focus: Separation of concerns** - Keep business logic separate from event/metrics logic. - ---- - -## ๐Ÿ“‹ Part 1: Integrate Event Publishing (30 min) - -### Task -Modify `py/main.py` to publish events when users are created/updated/deleted. - -### Event Types -- `user.created` - When a user is created -- `user.updated` - When a user is updated -- `user.deleted` - When a user is soft deleted - -### Example Integration - -```python -from py.events import event_publisher - -@app.post("/users", response_model=User, status_code=status.HTTP_201_CREATED) -async def create_user(user_data: UserCreate): - # ... existing user creation logic ... - - # Publish event AFTER successful creation - event_publisher.publish( - event_type="user.created", - user_id=user_id, - data={ - "username": user["username"], - "email": user["email"] - } - ) - - return user -``` - -### Requirements -โœ… Publish events for create, update, and delete operations -โœ… Only publish events AFTER successful operations (not on errors) -โœ… Event publishing failure should NOT break the API -โœ… Keep event logic separate from business logic - ---- - -## ๐Ÿ“‹ Part 2: Add Metrics Collection (20 min) - -### Task -Track these metrics using the `MetricsCollector`: - -```python -from py.events import metrics_collector - -# In your endpoints -metrics_collector.increment("users.created") -metrics_collector.increment("users.updated") -metrics_collector.increment("users.deleted") -``` - -### Metrics to Track -- `users.created` - Count of users created -- `users.updated` - Count of users updated -- `users.deleted` - Count of users deleted -- `events.published` - Count of events successfully published -- `events.failed` - Count of failed event publishes - ---- - -## ๐Ÿ“‹ Part 3: Create Metrics Endpoint (15 min) - -### Task -Add `GET /metrics` endpoint that returns current metrics: - -```json -{ - 
"total_users": 42, - "active_users": 38, - "deleted_users": 4, - "events": { - "users.created": 42, - "users.updated": 110, - "users.deleted": 4, - "events.published": 156, - "events.failed": 0 - }, - "uptime_seconds": 3600 -} -``` - -### Implementation Hint -```python -@app.get("/metrics") -async def get_metrics(): - metrics = metrics_collector.get_metrics() - counters = metrics["counters"] - - return { - "total_users": len(users_db), - "active_users": sum(1 for u in users_db.values() if u["is_active"]), - "deleted_users": sum(1 for u in users_db.values() if not u["is_active"]), - "events": counters, - "uptime_seconds": metrics["uptime_seconds"] - } -``` - ---- - -## ๐Ÿ“‹ Part 4: Write pytest Tests (60-90 min) - -### Required Tests - -**Event Publishing Tests:** -```python -def test_create_user_publishes_event(client, reset_db): - """Test that creating a user publishes user.created event""" - from py.events import event_publisher - event_publisher.clear() - - response = client.post("/users", json={ - "username": "testuser", - "email": "test@example.com", - "full_name": "Test User" - }) - - assert response.status_code == 201 - events = event_publisher.get_events() - assert len(events) == 1 - assert events[0]["event_type"] == "user.created" - assert events[0]["data"]["username"] == "testuser" -``` - -**Metrics Tests:** -```python -def test_metrics_endpoint_returns_user_counts(client, reset_db): - """Test that /metrics returns accurate user counts""" - # Create some users - # ... 
- - response = client.get("/metrics") - assert response.status_code == 200 - data = response.json() - assert data["total_users"] == 2 - assert data["active_users"] == 2 -``` - -**Error Handling Tests:** -```python -def test_api_works_when_event_publishing_disabled(client, reset_db): - """Test graceful degradation when events can't be published""" - from py.events import event_publisher - event_publisher.disable() - - # API should still work - response = client.post("/users", json={...}) - assert response.status_code == 201 - - event_publisher.enable() -``` - -### Test Checklist - -โœ… Test event publishing for create/update/delete -โœ… Test event payload contains correct data -โœ… Test metrics endpoint accuracy -โœ… Test metrics update after operations -โœ… Test graceful degradation (events disabled) -โœ… Test that failed event publishing doesn't break API -โœ… Use fixtures to reset event/metrics state between tests - ---- - -## ๐ŸŽ“ What We're Evaluating - -### 1. Separation of Concerns (25 points) -- โœ… Events/metrics logic separate from business logic -- โœ… Clean integration without coupling -- โœ… Easy to test each component independently - -### 2. Event Publishing Integration (25 points) -- โœ… Events published at correct times -- โœ… Event payloads contain appropriate data -- โœ… Error handling (doesn't break API if publishing fails) - -### 3. Testing Strategy (30 points) -- โœ… Tests cover event publishing -- โœ… Tests verify metrics accuracy -- โœ… Tests handle error cases -- โœ… Good use of fixtures for setup/cleanup - -### 4. Code Quality (20 points) -- โœ… Clean, readable code -- โœ… Good naming and organization -- โœ… Proper use of the provided modules - ---- - -## โฑ๏ธ Time Expectation - -**Approximately 2-3 hours total:** -- Part 1 (Events): 30 minutes -- Part 2 (Metrics): 20 minutes -- Part 3 (Endpoint): 15 minutes -- Part 4 (Tests): 60-90 minutes - ---- - -## ๐Ÿ’ก Tips - -1. 
**Start with one endpoint** - Get event publishing working for POST /users first -2. **Test as you go** - Write a test for each feature as you add it -3. **Use the provided fixtures** - The existing `reset_db` fixture is helpful -4. **Add an event/metrics reset fixture** - Similar pattern to reset_db -5. **Think about production** - What would break? How would you monitor it? - -### Example Fixture for Events/Metrics - -```python -@pytest.fixture(autouse=True) -def reset_events_and_metrics(): - """Reset events and metrics before each test""" - from py.events import event_publisher, metrics_collector - event_publisher.clear() - event_publisher.enable() - metrics_collector.reset() - yield - event_publisher.clear() - metrics_collector.reset() -``` - ---- - -## ๐Ÿ“ค Submission - -When complete, include: - -1. **Modified `py/main.py`** - With event/metrics integration -2. **New test file** - `tests/test_events_metrics.py` with comprehensive tests -3. **Test output** - Screenshot/paste showing all tests passing -4. **Brief explanation** - A few sentences about your approach and design decisions - ---- - -## โ“ Questions to Consider - -As you work on this (we may discuss in interview): -- Where would event publishing fit in a production system? -- How would you handle event publishing failures? -- What other metrics would be useful to track? -- How would you test this with real Kafka instead of in-memory? -- What happens if the metrics endpoint gets called 1000 times/second? 
- ---- - -## ๐Ÿš€ Why This Matters - -This challenge reflects real production scenarios: - -โœ… **Event-Driven Architecture** - Microservices communicate via events -โœ… **Observability** - Metrics are critical for monitoring -โœ… **Separation of Concerns** - Keep business logic clean -โœ… **Graceful Degradation** - Systems should handle failures elegantly -โœ… **Testing Distributed Systems** - Mock external dependencies effectively - ---- - -**This is optional but highly recommended** if you have experience with data pipelines, event systems, or observability platforms. - -Good luck! ๐Ÿš€ diff --git a/EVALUATION_RUBRIC.md b/EVALUATION_RUBRIC.md deleted file mode 100644 index 1babec9..0000000 --- a/EVALUATION_RUBRIC.md +++ /dev/null @@ -1,285 +0,0 @@ -# pytest Assessment Evaluation Rubric - -## Scoring Guide for Candidate Assessment - -**Total Points: 100** - -Use this rubric to objectively evaluate the candidate's pytest assessment submission. This assessment focuses on **learning ability** and **testing fundamentals** since the candidate has no prior pytest experience. - ---- - -## 1. Test Coverage (30 points) - -**Excellent (25-30 points):** -- โœ… 15+ tests covering all endpoints -- โœ… Tests both success and error cases -- โœ… Edge cases considered (validation, conflicts, not found) -- โœ… Logical test organization and naming - -**Good (18-24 points):** -- โœ… 12-14 tests covering most endpoints -- โœ… Major success cases covered -- โœ… Some error cases tested -- โš ๏ธ May miss some edge cases - -**Adequate (10-17 points):** -- โœ… 8-11 tests -- โœ… Basic CRUD operations tested -- โš ๏ธ Missing significant error cases -- โš ๏ธ Limited edge case testing - -**Needs Improvement (0-9 points):** -- โŒ Fewer than 8 tests -- โŒ Only happy paths tested -- โŒ Major endpoints missing - -**Score: _____ / 30** - ---- - -## 2. 
pytest Feature Usage (25 points) - -**Excellent (20-25 points):** -- โœ… Fixtures used effectively (client, test data, cleanup) -- โœ… Proper use of pytest assertions -- โœ… Uses parametrize for multiple inputs (bonus feature) -- โœ… Custom fixtures created for common data -- โœ… Demonstrates understanding of pytest concepts - -**Good (14-19 points):** -- โœ… Basic fixtures used (client, reset_db) -- โœ… Standard assertions work correctly -- โœ… Shows grasp of pytest basics -- โš ๏ธ May not use advanced features - -**Adequate (7-13 points):** -- โœ… Minimal fixture usage -- โœ… Tests run and pass -- โš ๏ธ Doesn't leverage pytest features effectively -- โš ๏ธ Could be more Pythonic - -**Needs Improvement (0-6 points):** -- โŒ Doesn't use fixtures properly -- โŒ Poor understanding of pytest patterns -- โŒ Tests are more like scripts than unit tests - -**Score: _____ / 25** - ---- - -## 3. Code Quality (20 points) - -**Excellent (16-20 points):** -- โœ… Clean, readable test code -- โœ… Good naming conventions (test names describe what they test) -- โœ… Proper Python style (PEP 8) -- โœ… Organized test structure -- โœ… Comments where helpful (not excessive) - -**Good (11-15 points):** -- โœ… Readable code -- โœ… Reasonable naming -- โœ… Generally follows Python conventions -- โš ๏ธ Minor style inconsistencies - -**Adequate (6-10 points):** -- โœ… Code works -- โš ๏ธ Naming could be clearer -- โš ๏ธ Some style issues -- โš ๏ธ Could be better organized - -**Needs Improvement (0-5 points):** -- โŒ Hard to read or understand -- โŒ Poor naming conventions -- โŒ Inconsistent style -- โŒ Messy organization - -**Score: _____ / 20** - ---- - -## 4. 
API Testing Understanding (15 points) - -**Excellent (12-15 points):** -- โœ… Tests verify status codes correctly -- โœ… Validates response body content -- โœ… Tests request validation (400s, 409s, 404s) -- โœ… Understands REST API patterns -- โœ… Tests database state changes - -**Good (8-11 points):** -- โœ… Checks status codes -- โœ… Basic response validation -- โœ… Some error testing -- โš ๏ธ May miss some validation scenarios - -**Adequate (4-7 points):** -- โœ… Basic endpoint testing -- โš ๏ธ Limited response validation -- โš ๏ธ Minimal error case testing - -**Needs Improvement (0-3 points):** -- โŒ Only checks if endpoint responds -- โŒ No validation of responses -- โŒ Doesn't test errors - -**Score: _____ / 15** - ---- - -## 5. Learning & Problem-Solving (10 points) - -**Excellent (8-10 points):** -- โœ… Clearly learned pytest from documentation -- โœ… Applied concepts correctly without hand-holding -- โœ… Shows initiative (bonus features attempted) -- โœ… Creative solutions to testing challenges - -**Good (6-7 points):** -- โœ… Successfully learned pytest basics -- โœ… Applied examples appropriately -- โš ๏ธ Stayed mostly within provided examples - -**Adequate (3-5 points):** -- โœ… Basic understanding achieved -- โš ๏ธ Limited exploration beyond examples -- โš ๏ธ May show confusion about concepts - -**Needs Improvement (0-2 points):** -- โŒ Didn't demonstrate learning -- โŒ Only copied example without understanding -- โŒ Major conceptual gaps - -**Score: _____ / 10** - ---- - -## Bonus Points (Optional, up to +10) - -**Award bonus points for:** -- โœ… **Parametrization** (+3): Uses `@pytest.mark.parametrize` effectively -- โœ… **Coverage Report** (+2): Runs and includes `pytest --cov` output -- โœ… **Parallel Execution** (+2): Gets tests working with `pytest -n auto` -- โœ… **Custom Fixtures** (+2): Creates reusable test data fixtures -- โœ… **Documentation** (+1): Adds helpful comments or docstrings - -**Bonus Score: _____ / 10** - ---- - -## 
Total Score Calculation - -| Category | Points | Max | -|----------|--------|-----| -| Test Coverage | _____ | 30 | -| pytest Feature Usage | _____ | 25 | -| Code Quality | _____ | 20 | -| API Testing Understanding | _____ | 15 | -| Learning & Problem-Solving | _____ | 10 | -| **Subtotal** | **_____** | **100** | -| Bonus Points | _____ | +10 | -| **TOTAL** | **_____** | **110** | - ---- - -## Interpretation Guide - -**90-110 points: STRONG HIRE** ๐ŸŸข -- Demonstrated excellent learning ability -- Solid understanding of testing fundamentals -- Ready to contribute with minimal ramp-up -- Strong Python skills -- **Recommendation:** Move forward with hire, despite no prior pytest experience - -**75-89 points: HIRE WITH RESERVATIONS** ๐ŸŸก -- Adequate learning ability shown -- Basic testing understanding -- Will need more mentoring/support -- Python skills are decent -- **Recommendation:** Hire if team has mentoring capacity and other qualities are strong - -**60-74 points: BORDERLINE** ๐ŸŸ  -- Struggled to learn pytest independently -- Testing fundamentals need work -- Python skills may be weaker than expected -- **Recommendation:** Deep dive in follow-up interview. May need different role or more junior level. 
- -**Below 60 points: NOT RECOMMENDED** ๐Ÿ”ด -- Unable to learn pytest from documentation -- Weak testing fundamentals -- Python skills below senior level -- **Recommendation:** Not a fit for Senior Test Automation Engineer role - ---- - -## Key Red Flags to Watch For - -๐Ÿšฉ **Fewer than 10 tests** - Didn't meet minimum requirements - -๐Ÿšฉ **Only copied examples** - No original work, suggests can't learn independently - -๐Ÿšฉ **All tests are identical patterns** - May not understand what's being tested - -๐Ÿšฉ **Tests don't pass** - Basic execution issues - -๐Ÿšฉ **No fixtures used** - Didn't grasp core pytest concept from docs - -๐Ÿšฉ **No error testing** - Doesn't understand API testing fundamentals - ---- - -## Green Flags to Look For - -โœ… **Exceeded minimum requirements** - Shows initiative - -โœ… **Used parametrize** - Learned advanced feature independently - -โœ… **Clean, readable code** - Professional quality - -โœ… **Thoughtful test names** - Understands testing communication - -โœ… **Edge cases tested** - Critical thinking applied - -โœ… **Custom fixtures created** - Shows architectural thinking - ---- - -## Follow-Up Interview Questions (Based on Results) - -**If score is high (85+):** -- "Walk me through your approach to learning pytest" -- "What was the most challenging test to write?" -- "How would you approach parallel test execution for a large suite?" - -**If score is medium (70-84):** -- "What parts of pytest were confusing?" -- "How comfortable are you with the fixture system?" -- "Let's pair on adding one more test together" (live coding) - -**If score is low (<70):** -- "What resources did you use to learn pytest?" -- "How much time did you spend on this?" 
-- "Show me your Python testing experience before this" (verify resume claims) - ---- - -## Final Hiring Decision Framework - -**Hire if:** -- Score โ‰ฅ 90, OR -- Score โ‰ฅ 75 AND exceptional architecture/systems experience (which Andy has), OR -- Score โ‰ฅ 70 AND strong performance in other interview rounds - -**Do NOT hire if:** -- Score < 60, OR -- Evidence of not attempting to learn (copy-paste only), OR -- Fundamental Python weaknesses revealed - ---- - -**Remember:** This candidate has 20+ years of automation experience but NO pytest experience. The goal is to assess: -1. **Learning ability** - Can he pick up new tools quickly? -2. **Testing fundamentals** - Does he understand what makes good tests? -3. **Python proficiency** - Is his Python solid enough to learn frameworks quickly? - -If he scores well on these dimensions, the specific pytest knowledge gap can be closed quickly on the job. diff --git a/README.md b/README.md index 3da8ba1..8987403 100644 --- a/README.md +++ b/README.md @@ -1,218 +1,248 @@ -# pytest Assessment - User Management API +# Senior QA Automation Assessment - Multi-Tenant API -Welcome to the pytest technical assessment! This is a practical exercise designed to evaluate your ability to learn and apply pytest for API testing. +## Overview -## ๐ŸŽฏ Assessment Goals +Design and implement comprehensive test automation for a production-like multi-tenant SaaS API with authentication, file management, and rate limiting. -This assessment will help us understand: -1. Your ability to learn pytest quickly (documentation and examples provided) -2. How you approach API testing -3. Your Python code quality and testing patterns -4. 
Your problem-solving approach when learning new tools +**Time:** 4-6 hours +**Level:** Senior +**Skills:** Python, pytest, API testing, OAuth2/JWT, multi-tenancy, CI/CD -## ๐Ÿ“‹ What You're Testing +## The Challenge -A simple FastAPI User Management API with these endpoints: +Test a FastAPI application with: +- **JWT authentication** (OAuth2 pattern) +- **Multi-tenant architecture** with data isolation +- **File upload/download** workflows +- **Rate limiting** (10 req/min per endpoint) +- **Pagination** for large datasets +- **Role-based access control** (admin vs user) -- `GET /health` - Health check -- `POST /users` - Create a new user -- `GET /users` - List all users (with optional `active_only` filter) -- `GET /users/{user_id}` - Get a specific user -- `PUT /users/{user_id}` - Update a user -- `DELETE /users/{user_id}` - Soft delete (sets is_active=False) -- `DELETE /users/{user_id}/permanent` - Permanently delete +## API Endpoints -## ๐Ÿš€ Getting Started - -### 1. Setup Your Environment - -```bash -# Clone this repository (if you haven't already) -git clone -cd sample-api - -# Create a virtual environment -python3 -m venv venv -source venv/bin/activate # On Windows: venv\Scripts\activate - -# Install dependencies -pip install -r requirements.txt +### Authentication ``` - -### 2. Verify the API Works - -```bash -# Start the API server (in one terminal) -uvicorn py.main:app --reload --port 8000 - -# In another terminal, test the health endpoint -curl http://localhost:8000/health +POST /auth/register - Register tenant + admin user +POST /auth/login - Get JWT tokens +POST /auth/refresh - Refresh access token +POST /auth/logout - Invalidate token ``` -You should see: `{"status":"healthy","timestamp":"..."}` - -### 3. 
Run the Example Test - -```bash -# Run the one example test we provided -pytest -v - -# You should see 1 test pass: test_health_check +### Users (Authenticated, Tenant-Scoped) +``` +GET /api/v1/users - List users (paginated) +POST /api/v1/users - Create user +GET /api/v1/users/{id} - Get user details +PUT /api/v1/users/{id} - Update user +DELETE /api/v1/users/{id} - Soft delete user ``` -## ๐Ÿ“ Your Task - -**Add comprehensive test coverage for all API endpoints.** - -We've provided one example test (`test_health_check`) to show you pytest basics. Your job is to: - -1. **Write tests for all endpoints** - Cover success cases and error cases -2. **Use pytest features effectively** - Fixtures, parametrization, assertions -3. **Follow testing best practices** - Clear test names, good organization, readable code - -### Minimum Requirements (Must Complete) - -โœ… **At least 15 tests total** covering: -- User creation (success + validation errors) -- User listing (empty, with data, filtering) -- User retrieval (success + not found) -- User updates (success + conflicts) -- User deletion (both soft and permanent) - -โœ… **Use pytest fixtures** - For test client, test data, database cleanup +### Files (Authenticated, Tenant-Scoped) +``` +POST /api/v1/files/upload - Upload file +GET /api/v1/files/{id} - Download file +GET /api/v1/files - List files (paginated) +DELETE /api/v1/files/{id} - Delete file +``` -โœ… **Test error cases** - 404s, 409 conflicts, validation errors +### Admin (Admin Role Only) +``` +GET /api/v1/admin/tenants - List all tenants +GET /api/v1/admin/stats - System statistics +``` -โœ… **All tests must pass** - Run `pytest -v` to verify +## Requirements + +### Must Implement (30+ tests) + +**Authentication (8+ tests)** +- Valid login flow +- Invalid credentials +- Token expiration handling +- Token refresh workflow +- Logout functionality +- Malformed/missing tokens +- Role-based access control + +**Multi-Tenant Isolation (6+ tests)** +- Cross-tenant user 
access (should fail) +- Cross-tenant file access (should fail) +- Tenant-scoped data queries +- Admin cross-tenant access + +**User Management (8+ tests)** +- Create user in tenant +- List users with pagination +- Update user details +- Soft delete user +- Duplicate username/email handling +- Invalid input validation + +**File Management (6+ tests)** +- Upload various file types +- Download files +- List files with pagination +- Delete files +- File type validation +- File size limits + +**Rate Limiting (2+ tests)** +- Enforce 10 req/min limit +- Verify 429 status + headers + +### Advanced pytest Patterns + +**Required:** +- Custom fixtures for authenticated clients per tenant +- Parametrized tests for multi-scenario coverage +- Test markers (`@pytest.mark.auth`, `@pytest.mark.tenant_isolation`, etc.) +- Proper setup/teardown for isolation +- Environment configuration support + +**Bonus:** +- Async test patterns +- Test data factories (factory_boy, Faker) +- Custom pytest plugins +- Load/performance testing +- Mock external services + +### CI/CD Pipeline + +Create `.github/workflows/tests.yml` with: +- Multi-environment test runs (dev/stage) +- Coverage reporting (minimum 80%) +- Parallel test execution +- JUnit XML output + +### Documentation + +Create `TESTING_STRATEGY.md` explaining: +- Your test architecture +- Fixture design decisions +- Multi-tenant isolation approach +- CI/CD strategy +- Trade-offs made + +## Setup -### Bonus Points (Optional) +```bash +# Install dependencies +python3 -m venv venv +source venv/bin/activate +pip install -r requirements.txt -๐ŸŒŸ **Use pytest.mark.parametrize** - Test multiple inputs in one test +# Run API +export API_ENV=dev +export JWT_SECRET=test_secret_key +uvicorn app.main:app --reload --port 8000 -๐ŸŒŸ **Add test coverage reporting** - Run `pytest --cov=py --cov-report=term-missing` +# Run tests +pytest -v --cov=app --cov-report=term-missing -๐ŸŒŸ **Create custom fixtures** - For common test data (e.g., sample users) +# 
Run with markers +pytest -m auth -v +pytest -m tenant_isolation -v +pytest -m integration -v -๐ŸŒŸ **Test edge cases** - Empty strings, very long inputs, special characters +# Parallel execution +pytest -n auto -v +``` -๐ŸŒŸ **Parallel execution** - Get tests running with `pytest -n auto` (pytest-xdist) +## Evaluation Criteria -๐ŸŒŸ **Advanced Challenge** - See [ADVANCED_CHALLENGE.md](ADVANCED_CHALLENGE.md) for an optional Kafka/event streaming challenge (for candidates with data pipeline experience) +**Technical (60%)** +- Authentication testing depth +- Multi-tenant isolation verification +- Advanced pytest usage (fixtures, parametrization, markers) +- CI/CD pipeline quality +- Code organization -## ๐Ÿ“š pytest Learning Resources +**Architecture (25%)** +- Test design patterns +- Reusable fixtures +- Environment handling +- Scalability considerations -**Official Docs:** -- pytest documentation: https://docs.pytest.org/ -- FastAPI testing: https://fastapi.tiangolo.com/tutorial/testing/ +**Professional (15%)** +- Documentation quality +- Code readability +- Production-mindedness +- Edge case coverage -**Key Concepts to Learn:** +## Example Patterns -### Fixtures +### Authenticated Client Fixture ```python @pytest.fixture -def sample_user(): - return {"username": "testuser", "email": "test@example.com", "full_name": "Test User"} +def tenant_a_admin(client): + """Return authenticated admin client for Tenant A""" + # Register tenant + register = client.post("/auth/register", json={ + "tenant_name": "tenant_a", + "admin_email": "admin@a.com", + "admin_username": "admin_a", + "admin_password": "SecurePass123!" + }) + + # Login + login = client.post("/auth/login", json={ + "username": "admin_a", + "password": "SecurePass123!" 
+ }) + token = login.json()["access_token"] + + # Return client with auth header + client.headers = {"Authorization": f"Bearer {token}"} + return client ``` -### Parametrization +### Tenant Isolation Test ```python -@pytest.mark.parametrize("username,expected", [ - ("abc", 409), # Too short - ("validuser", 201), # Valid -]) -def test_create_user_username_validation(client, username, expected): - # Test multiple inputs +@pytest.mark.tenant_isolation +def test_cross_tenant_user_access_denied(tenant_a_admin, tenant_b_admin): + """Tenant A cannot access Tenant B's users""" + # Tenant B creates user + user_b = tenant_b_admin.post("/api/v1/users", json={ + "username": "bob", + "email": "bob@b.com", + "full_name": "Bob User" + }) + user_b_id = user_b.json()["id"] + + # Tenant A attempts access (should fail) + response = tenant_a_admin.get(f"/api/v1/users/{user_b_id}") + assert response.status_code == 404 ``` -### Async Tests (if needed) +### Rate Limit Test ```python -@pytest.mark.asyncio -async def test_something_async(): - result = await some_async_function() - assert result == expected -``` - -## ๐Ÿงช Running Your Tests - -```bash -# Run all tests with verbose output -pytest -v - -# Run specific test file -pytest tests/test_users.py -v - -# Run specific test -pytest tests/test_users.py::test_create_user_success -v - -# Run with coverage report -pytest --cov=py --cov-report=term-missing - -# Run tests in parallel (optional) -pytest -n auto +def test_rate_limit_enforcement(tenant_a_admin): + """Verify 429 after exceeding rate limit""" + for i in range(11): + response = tenant_a_admin.get("/api/v1/users") + if i < 10: + assert response.status_code == 200 + else: + assert response.status_code == 429 + assert "X-RateLimit-Reset" in response.headers ``` -## ๐Ÿ“ค Submission - -When you're done: - -1. **Commit your changes**: - ```bash - git add . - git commit -m "Add comprehensive pytest test coverage" - git push - ``` - -2. 
**Verify all tests pass**: - ```bash - pytest -v - ``` - -3. **Send us**: - - Link to your GitHub repository - - Test output showing all tests passing - - Any notes about your approach or challenges - -## โฑ๏ธ Time Expectation - -**Recommended: 2-4 hours** +## Submission -This is not a speed test - we value quality over speed. Take your time to: -- Read the pytest documentation -- Understand the API behavior -- Write clean, readable tests +1. **Push code** to your fork/branch +2. **Verify tests pass**: `pytest -v --cov=py` +3. **Submit:** + - Repository link + - Test output (coverage report) + - `TESTING_STRATEGY.md` -## โ“ Questions? +## Questions? -If you have questions about: -- **The assessment requirements** - Email us -- **How pytest works** - Check the docs first, then ask -- **The API behavior** - Read `py/main.py` or test it manually - -## ๐ŸŽ“ What We're Looking For - -**Strong candidates will:** -- โœ… Learn pytest quickly from documentation -- โœ… Write clear, well-organized tests -- โœ… Cover both success and error cases -- โœ… Use fixtures effectively -- โœ… Follow Python best practices - -**We are NOT expecting:** -- โŒ 100% code coverage -- โŒ Complex mocking or advanced pytest features -- โŒ Performance optimization -- โŒ Prior pytest expertise (you're learning it now!) - -## ๐Ÿ’ก Tips - -1. **Start simple** - Get basic tests working first, then add more -2. **Read the API code** - Understanding `py/main.py` helps you know what to test -3. **Use the example** - The `test_health_check` shows the pattern -4. **Run tests frequently** - Verify each test works before moving on -5. **Ask questions** - If something is unclear, reach out +- **pytest patterns?** https://docs.pytest.org/ +- **FastAPI testing?** https://fastapi.tiangolo.com/tutorial/testing/ +- **JWT/OAuth2?** https://jwt.io/introduction +- **Multi-tenancy?** Think AWS IAM resource scoping --- -**Good luck! 
We're excited to see your work.** ๐Ÿš€ - -*This assessment mirrors real-world testing practices used in production systems.* +**Good luck! Show us your senior-level testing expertise.** diff --git a/py/__init__.py b/app/__init__.py similarity index 100% rename from py/__init__.py rename to app/__init__.py diff --git a/app/auth.py b/app/auth.py new file mode 100644 index 0000000..a8b80fe --- /dev/null +++ b/app/auth.py @@ -0,0 +1,140 @@ +""" +JWT-based authentication for multi-tenant API +""" +from datetime import datetime, timedelta, UTC +from typing import Optional +import jwt +import bcrypt +from fastapi import HTTPException, status, Depends, Header +from pydantic import BaseModel +from app.config import settings + + +class TokenData(BaseModel): + """JWT token payload""" + user_id: str + tenant_id: str + username: str + role: str # "admin" or "user" + exp: datetime + + +class TokenPair(BaseModel): + """Access and refresh tokens""" + access_token: str + refresh_token: str + token_type: str = "bearer" + expires_in: int # seconds + + +def hash_password(password: str) -> str: + """Hash password with bcrypt""" + salt = bcrypt.gensalt() + hashed = bcrypt.hashpw(password.encode("utf-8"), salt) + return hashed.decode("utf-8") + + +def verify_password(plain_password: str, hashed_password: str) -> bool: + """Verify password against hash""" + return bcrypt.checkpw( + plain_password.encode("utf-8"), hashed_password.encode("utf-8") + ) + + +def create_access_token(user_id: str, tenant_id: str, username: str, role: str) -> str: + """Create JWT access token""" + expire = datetime.now(UTC) + timedelta(minutes=settings.access_token_expire_minutes) + payload = { + "user_id": user_id, + "tenant_id": tenant_id, + "username": username, + "role": role, + "exp": expire, + "type": "access", + } + return jwt.encode(payload, settings.jwt_secret, algorithm=settings.jwt_algorithm) + + +def create_refresh_token(user_id: str, tenant_id: str) -> str: + """Create JWT refresh token""" + expire = 
datetime.now(UTC) + timedelta(days=settings.refresh_token_expire_days) + payload = { + "user_id": user_id, + "tenant_id": tenant_id, + "exp": expire, + "type": "refresh", + } + return jwt.encode(payload, settings.jwt_secret, algorithm=settings.jwt_algorithm) + + +def decode_token(token: str) -> dict: + """Decode and validate JWT token""" + try: + payload = jwt.decode( + token, settings.jwt_secret, algorithms=[settings.jwt_algorithm] + ) + return payload + except jwt.ExpiredSignatureError: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Token has expired", + headers={"WWW-Authenticate": "Bearer"}, + ) + except jwt.InvalidTokenError: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid token", + headers={"WWW-Authenticate": "Bearer"}, + ) + + +async def get_current_user(authorization: Optional[str] = Header(None)) -> TokenData: + """Dependency to extract current user from JWT token""" + if not authorization: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Missing authorization header", + headers={"WWW-Authenticate": "Bearer"}, + ) + + try: + scheme, token = authorization.split() + if scheme.lower() != "bearer": + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid authentication scheme", + headers={"WWW-Authenticate": "Bearer"}, + ) + except ValueError: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid authorization header format", + headers={"WWW-Authenticate": "Bearer"}, + ) + + payload = decode_token(token) + + # Verify it's an access token + if payload.get("type") != "access": + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid token type", + ) + + return TokenData( + user_id=payload["user_id"], + tenant_id=payload["tenant_id"], + username=payload["username"], + role=payload["role"], + exp=datetime.fromtimestamp(payload["exp"], UTC), + ) + + +async def 
require_admin(current_user: TokenData = Depends(get_current_user)) -> TokenData: + """Dependency to require admin role""" + if current_user.role != "admin": + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Admin access required", + ) + return current_user diff --git a/app/config.py b/app/config.py new file mode 100644 index 0000000..1810d64 --- /dev/null +++ b/app/config.py @@ -0,0 +1,61 @@ +""" +Configuration management for multi-environment testing +""" +import os +from typing import Literal +from pydantic import BaseModel + + +class Settings(BaseModel): + """Application settings from environment variables""" + + # Environment + api_env: Literal["dev", "stage", "prod"] = "dev" + + # Security + jwt_secret: str = "dev_secret_change_in_production_!!!" + jwt_algorithm: str = "HS256" + access_token_expire_minutes: int = 30 + refresh_token_expire_days: int = 7 + + # Rate limiting + rate_limit_per_minute: int = 10 + + # Pagination + default_page_size: int = 20 + max_page_size: int = 100 + + # File upload + max_file_size_mb: int = 10 + allowed_file_types: list[str] = [ + "image/jpeg", + "image/png", + "image/gif", + "application/pdf", + "text/plain", + "text/csv", + ] + + # Database (in-memory for this assessment) + database_url: str = "memory://" + + class Config: + env_prefix = "" # No prefix for env vars + + +def get_settings() -> Settings: + """Load settings from environment variables""" + return Settings( + api_env=os.getenv("API_ENV", "dev"), + jwt_secret=os.getenv("JWT_SECRET", "dev_secret_change_in_production_!!!"), + jwt_algorithm=os.getenv("JWT_ALGORITHM", "HS256"), + access_token_expire_minutes=int(os.getenv("ACCESS_TOKEN_EXPIRE_MINUTES", "30")), + refresh_token_expire_days=int(os.getenv("REFRESH_TOKEN_EXPIRE_DAYS", "7")), + rate_limit_per_minute=int(os.getenv("RATE_LIMIT_PER_MINUTE", "10")), + default_page_size=int(os.getenv("DEFAULT_PAGE_SIZE", "20")), + max_page_size=int(os.getenv("MAX_PAGE_SIZE", "100")), + 
max_file_size_mb=int(os.getenv("MAX_FILE_SIZE_MB", "10")), + ) + + +settings = get_settings() diff --git a/py/events.py b/app/events.py similarity index 100% rename from py/events.py rename to app/events.py diff --git a/app/main.py b/app/main.py new file mode 100644 index 0000000..76fcc3d --- /dev/null +++ b/app/main.py @@ -0,0 +1,718 @@ +""" +Advanced Multi-Tenant User & File Management API with Authentication +For Senior QA Automation Assessment - Ezequiel Nams +""" +from fastapi import FastAPI, HTTPException, status, Depends, File, UploadFile, Request +from fastapi.responses import StreamingResponse +from pydantic import BaseModel, EmailStr, Field +from typing import Dict, List, Optional +from datetime import datetime, UTC +import uuid +import io +from collections import defaultdict +from time import time + +from app.auth import ( + hash_password, + verify_password, + create_access_token, + create_refresh_token, + decode_token, + get_current_user, + require_admin, + TokenData, + TokenPair, +) +from app.config import settings + +app = FastAPI( + title="Multi-Tenant User & File Management API", + version="2.0.0", + description="Advanced assessment with authentication, multi-tenancy, and file management", +) + +# In-memory storage (simulating database) +tenants_db: Dict[str, dict] = {} # tenant_id -> tenant data +users_db: Dict[str, dict] = {} # user_id -> user data +files_db: Dict[str, dict] = {} # file_id -> file metadata +file_storage: Dict[str, bytes] = {} # file_id -> file content +blacklisted_tokens: set = set() # Invalidated tokens + +# Rate limiting storage (in production, use Redis) +rate_limit_store: Dict[str, List[float]] = defaultdict(list) + + +# ============================================================================ +# MODELS +# ============================================================================ + + +class TenantRegister(BaseModel): + """Register a new tenant with admin user""" + + tenant_name: str = Field(..., min_length=3, 
max_length=50) + admin_email: EmailStr + admin_username: str = Field(..., min_length=3, max_length=50) + admin_password: str = Field(..., min_length=8) + + +class LoginRequest(BaseModel): + """Login credentials""" + + username: str + password: str + + +class RefreshRequest(BaseModel): + """Refresh token request""" + + refresh_token: str + + +class UserCreate(BaseModel): + """Create a new user in tenant""" + + username: str = Field(..., min_length=3, max_length=50) + email: EmailStr + full_name: str = Field(..., min_length=1, max_length=100) + role: str = Field(default="user", pattern="^(user|admin)$") + + +class UserUpdate(BaseModel): + """Update user information""" + + email: Optional[EmailStr] = None + full_name: Optional[str] = Field(None, min_length=1, max_length=100) + + +class User(BaseModel): + """User response model""" + + id: str + tenant_id: str + username: str + email: str + full_name: str + role: str + created_at: datetime + updated_at: datetime + is_active: bool = True + + +class PaginatedResponse(BaseModel): + """Generic paginated response""" + + data: List[dict] + page: int + page_size: int + total_count: int + has_next: bool + has_prev: bool + + +class FileMetadata(BaseModel): + """File metadata response""" + + id: str + tenant_id: str + filename: str + content_type: str + size_bytes: int + uploaded_by: str + uploaded_at: datetime + + +# ============================================================================ +# RATE LIMITING MIDDLEWARE +# ============================================================================ + + +def check_rate_limit(request: Request, user_id: str): + """Check if request exceeds rate limit""" + endpoint = f"{request.method}:{request.url.path}" + key = f"{user_id}:{endpoint}" + now = time() + + # Clean old entries + rate_limit_store[key] = [ + ts for ts in rate_limit_store[key] if now - ts < 60 + ] # Last minute + + if len(rate_limit_store[key]) >= settings.rate_limit_per_minute: + reset_time = int(rate_limit_store[key][0] 
+ 60) + raise HTTPException( + status_code=status.HTTP_429_TOO_MANY_REQUESTS, + detail="Rate limit exceeded", + headers={ + "X-RateLimit-Limit": str(settings.rate_limit_per_minute), + "X-RateLimit-Remaining": "0", + "X-RateLimit-Reset": str(reset_time), + }, + ) + + rate_limit_store[key].append(now) + + +# ============================================================================ +# AUTHENTICATION ENDPOINTS +# ============================================================================ + + +@app.get("/health") +async def health_check(): + """Health check endpoint""" + return { + "status": "healthy", + "timestamp": datetime.now(UTC).isoformat(), + "environment": settings.api_env, + "version": "2.0.0", + } + + +@app.post("/auth/register", response_model=dict, status_code=status.HTTP_201_CREATED) +async def register_tenant(tenant_data: TenantRegister): + """Register a new tenant with admin user""" + + # Check if tenant name already exists + for tenant in tenants_db.values(): + if tenant["name"].lower() == tenant_data.tenant_name.lower(): + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Tenant '{tenant_data.tenant_name}' already exists", + ) + + # Check if username already exists + for user in users_db.values(): + if user["username"] == tenant_data.admin_username: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Username '{tenant_data.admin_username}' already exists", + ) + + # Create tenant + tenant_id = str(uuid.uuid4()) + now = datetime.now(UTC) + tenants_db[tenant_id] = { + "id": tenant_id, + "name": tenant_data.tenant_name, + "created_at": now, + "is_active": True, + } + + # Create admin user + user_id = str(uuid.uuid4()) + users_db[user_id] = { + "id": user_id, + "tenant_id": tenant_id, + "username": tenant_data.admin_username, + "email": tenant_data.admin_email, + "full_name": "Admin User", + "password_hash": hash_password(tenant_data.admin_password), + "role": "admin", + "created_at": now, + "updated_at": now, 
+ "is_active": True, + } + + return { + "message": "Tenant registered successfully", + "tenant_id": tenant_id, + "admin_user_id": user_id, + } + + +@app.post("/auth/login", response_model=TokenPair) +async def login(credentials: LoginRequest): + """Authenticate and get access/refresh tokens""" + + # Find user by username + user = None + for u in users_db.values(): + if u["username"] == credentials.username: + user = u + break + + if not user: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid username or password", + ) + + # Verify password + if not verify_password(credentials.password, user["password_hash"]): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid username or password", + ) + + # Check if user is active + if not user["is_active"]: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, detail="User account is deactivated" + ) + + # Create tokens + access_token = create_access_token( + user["id"], user["tenant_id"], user["username"], user["role"] + ) + refresh_token = create_refresh_token(user["id"], user["tenant_id"]) + + return TokenPair( + access_token=access_token, + refresh_token=refresh_token, + token_type="bearer", + expires_in=settings.access_token_expire_minutes * 60, + ) + + +@app.post("/auth/refresh", response_model=TokenPair) +async def refresh_tokens(request: RefreshRequest): + """Refresh access token using refresh token""" + + payload = decode_token(request.refresh_token) + + # Verify it's a refresh token + if payload.get("type") != "refresh": + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token type" + ) + + # Check if token is blacklisted + if request.refresh_token in blacklisted_tokens: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, detail="Token has been revoked" + ) + + user_id = payload["user_id"] + user = users_db.get(user_id) + + if not user or not user["is_active"]: + raise HTTPException( + 
status_code=status.HTTP_401_UNAUTHORIZED, detail="User not found or inactive" + ) + + # Create new tokens + access_token = create_access_token( + user["id"], user["tenant_id"], user["username"], user["role"] + ) + new_refresh_token = create_refresh_token(user["id"], user["tenant_id"]) + + # Blacklist old refresh token + blacklisted_tokens.add(request.refresh_token) + + return TokenPair( + access_token=access_token, + refresh_token=new_refresh_token, + token_type="bearer", + expires_in=settings.access_token_expire_minutes * 60, + ) + + +@app.post("/auth/logout") +async def logout( + authorization: str = Depends(lambda auth=None: auth), + current_user: TokenData = Depends(get_current_user), +): + """Logout and invalidate token""" + # In a real system, would blacklist the token + # For this assessment, we simulate it + return {"message": "Logged out successfully"} + + +# ============================================================================ +# USER MANAGEMENT ENDPOINTS (Multi-Tenant + Authenticated) +# ============================================================================ + + +@app.get("/api/v1/users", response_model=PaginatedResponse) +async def list_users( + request: Request, + page: int = 1, + page_size: int = 20, + active_only: bool = True, + current_user: TokenData = Depends(get_current_user), +): + """List users in current tenant (paginated)""" + + check_rate_limit(request, current_user.user_id) + + # Filter users by tenant + tenant_users = [ + u for u in users_db.values() if u["tenant_id"] == current_user.tenant_id + ] + + # Filter by active status + if active_only: + tenant_users = [u for u in tenant_users if u["is_active"]] + + # Pagination + total_count = len(tenant_users) + start_idx = (page - 1) * page_size + end_idx = start_idx + page_size + page_data = tenant_users[start_idx:end_idx] + + # Remove sensitive fields + safe_users = [ + {k: v for k, v in u.items() if k != "password_hash"} for u in page_data + ] + + return PaginatedResponse( + 
data=safe_users, + page=page, + page_size=page_size, + total_count=total_count, + has_next=end_idx < total_count, + has_prev=page > 1, + ) + + +@app.post("/api/v1/users", response_model=User, status_code=status.HTTP_201_CREATED) +async def create_user( + request: Request, + user_data: UserCreate, + current_user: TokenData = Depends(get_current_user), +): + """Create a new user in current tenant""" + + check_rate_limit(request, current_user.user_id) + + # Check if username already exists + for user in users_db.values(): + if user["username"] == user_data.username: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Username '{user_data.username}' already exists", + ) + if user["email"] == user_data.email: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Email '{user_data.email}' already exists", + ) + + user_id = str(uuid.uuid4()) + now = datetime.now(UTC) + + new_user = { + "id": user_id, + "tenant_id": current_user.tenant_id, # Scoped to current tenant + "username": user_data.username, + "email": user_data.email, + "full_name": user_data.full_name, + "password_hash": hash_password("TempPassword123!"), # Temporary password + "role": user_data.role, + "created_at": now, + "updated_at": now, + "is_active": True, + } + + users_db[user_id] = new_user + + # Return without password_hash + return User(**{k: v for k, v in new_user.items() if k != "password_hash"}) + + +@app.get("/api/v1/users/{user_id}", response_model=User) +async def get_user( + request: Request, + user_id: str, + current_user: TokenData = Depends(get_current_user), +): + """Get specific user (tenant-scoped)""" + + check_rate_limit(request, current_user.user_id) + + user = users_db.get(user_id) + + if not user: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"User with id '{user_id}' not found", + ) + + # Verify tenant access + if user["tenant_id"] != current_user.tenant_id: + raise HTTPException( + 
status_code=status.HTTP_404_NOT_FOUND, + detail=f"User with id '{user_id}' not found", + ) + + return User(**{k: v for k, v in user.items() if k != "password_hash"}) + + +@app.put("/api/v1/users/{user_id}", response_model=User) +async def update_user( + request: Request, + user_id: str, + user_data: UserUpdate, + current_user: TokenData = Depends(get_current_user), +): + """Update user information (tenant-scoped)""" + + check_rate_limit(request, current_user.user_id) + + user = users_db.get(user_id) + + if not user: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"User with id '{user_id}' not found", + ) + + # Verify tenant access + if user["tenant_id"] != current_user.tenant_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"User with id '{user_id}' not found", + ) + + # Check email conflicts + if user_data.email and user_data.email != user["email"]: + for uid, u in users_db.items(): + if uid != user_id and u["email"] == user_data.email: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Email '{user_data.email}' already exists", + ) + + # Update fields + if user_data.email: + user["email"] = user_data.email + if user_data.full_name is not None: + user["full_name"] = user_data.full_name + + user["updated_at"] = datetime.now(UTC) + + return User(**{k: v for k, v in user.items() if k != "password_hash"}) + + +@app.delete("/api/v1/users/{user_id}", status_code=status.HTTP_204_NO_CONTENT) +async def delete_user( + request: Request, + user_id: str, + current_user: TokenData = Depends(get_current_user), +): + """Soft delete user (tenant-scoped)""" + + check_rate_limit(request, current_user.user_id) + + user = users_db.get(user_id) + + if not user: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"User with id '{user_id}' not found", + ) + + # Verify tenant access + if user["tenant_id"] != current_user.tenant_id: + raise HTTPException( + 
status_code=status.HTTP_404_NOT_FOUND, + detail=f"User with id '{user_id}' not found", + ) + + user["is_active"] = False + user["updated_at"] = datetime.now(UTC) + + +# ============================================================================ +# FILE MANAGEMENT ENDPOINTS (Simulating S3) +# ============================================================================ + + +@app.post("/api/v1/files/upload", response_model=FileMetadata, status_code=status.HTTP_201_CREATED) +async def upload_file( + request: Request, + file: UploadFile = File(...), + current_user: TokenData = Depends(get_current_user), +): + """Upload a file (simulating S3 upload)""" + + check_rate_limit(request, current_user.user_id) + + # Validate file type + if file.content_type not in settings.allowed_file_types: + raise HTTPException( + status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, + detail=f"File type '{file.content_type}' not allowed", + ) + + # Read file content + content = await file.read() + + # Validate file size + size_mb = len(content) / (1024 * 1024) + if size_mb > settings.max_file_size_mb: + raise HTTPException( + status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE, + detail=f"File size {size_mb:.2f}MB exceeds limit of {settings.max_file_size_mb}MB", + ) + + # Store file + file_id = str(uuid.uuid4()) + now = datetime.now(UTC) + + file_metadata = { + "id": file_id, + "tenant_id": current_user.tenant_id, + "filename": file.filename, + "content_type": file.content_type, + "size_bytes": len(content), + "uploaded_by": current_user.user_id, + "uploaded_at": now, + } + + files_db[file_id] = file_metadata + file_storage[file_id] = content + + return FileMetadata(**file_metadata) + + +@app.get("/api/v1/files/{file_id}") +async def download_file( + request: Request, + file_id: str, + current_user: TokenData = Depends(get_current_user), +): + """Download a file (tenant-scoped)""" + + check_rate_limit(request, current_user.user_id) + + file_meta = files_db.get(file_id) + + if not file_meta: 
+ raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"File with id '{file_id}' not found", + ) + + # Verify tenant access + if file_meta["tenant_id"] != current_user.tenant_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"File with id '{file_id}' not found", + ) + + content = file_storage.get(file_id) + if not content: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="File content not found", + ) + + return StreamingResponse( + io.BytesIO(content), + media_type=file_meta["content_type"], + headers={"Content-Disposition": f'attachment; filename="{file_meta["filename"]}"'}, + ) + + +@app.get("/api/v1/files", response_model=PaginatedResponse) +async def list_files( + request: Request, + page: int = 1, + page_size: int = 20, + current_user: TokenData = Depends(get_current_user), +): + """List files in current tenant (paginated)""" + + check_rate_limit(request, current_user.user_id) + + # Filter files by tenant + tenant_files = [ + f for f in files_db.values() if f["tenant_id"] == current_user.tenant_id + ] + + # Pagination + total_count = len(tenant_files) + start_idx = (page - 1) * page_size + end_idx = start_idx + page_size + page_data = tenant_files[start_idx:end_idx] + + return PaginatedResponse( + data=page_data, + page=page, + page_size=page_size, + total_count=total_count, + has_next=end_idx < total_count, + has_prev=page > 1, + ) + + +@app.delete("/api/v1/files/{file_id}", status_code=status.HTTP_204_NO_CONTENT) +async def delete_file( + request: Request, + file_id: str, + current_user: TokenData = Depends(get_current_user), +): + """Delete a file (tenant-scoped)""" + + check_rate_limit(request, current_user.user_id) + + file_meta = files_db.get(file_id) + + if not file_meta: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"File with id '{file_id}' not found", + ) + + # Verify tenant access + if file_meta["tenant_id"] != current_user.tenant_id: + raise 
HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"File with id '{file_id}' not found", + ) + + # Delete file + del files_db[file_id] + if file_id in file_storage: + del file_storage[file_id] + + +# ============================================================================ +# ADMIN ENDPOINTS (Admin Role Only) +# ============================================================================ + + +@app.get("/api/v1/admin/tenants") +async def list_all_tenants( + request: Request, current_user: TokenData = Depends(require_admin) +): + """List all tenants (admin only)""" + + check_rate_limit(request, current_user.user_id) + + return {"tenants": list(tenants_db.values()), "total": len(tenants_db)} + + +@app.get("/api/v1/admin/stats") +async def get_system_stats( + request: Request, current_user: TokenData = Depends(require_admin) +): + """Get system-wide statistics (admin only)""" + + check_rate_limit(request, current_user.user_id) + + return { + "total_tenants": len(tenants_db), + "total_users": len(users_db), + "total_files": len(files_db), + "total_storage_bytes": sum(len(content) for content in file_storage.values()), + } + + +# ============================================================================ +# TEST HELPER ENDPOINTS (For Testing Only) +# ============================================================================ + + +@app.post("/test/reset") +async def reset_all_data(): + """Reset all data (for testing purposes only)""" + tenants_db.clear() + users_db.clear() + files_db.clear() + file_storage.clear() + blacklisted_tokens.clear() + rate_limit_store.clear() + return {"message": "All data reset successfully"} diff --git a/py/main.py b/app/main_original.py similarity index 100% rename from py/main.py rename to app/main_original.py diff --git a/pytest.ini b/pytest.ini index cc5f1c9..3c06021 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,5 @@ [pytest] -# pytest configuration for User Management API tests +# pytest configuration for 
Multi-Tenant API Assessment # Test discovery patterns python_files = test_*.py @@ -11,20 +11,33 @@ addopts = -v --strict-markers --tb=short + -p no:warnings # Test paths testpaths = tests -# Markers (for categorizing tests) +# Markers for test categorization markers = - slow: marks tests as slow (deselect with '-m "not slow"') - integration: marks tests as integration tests - unit: marks tests as unit tests + auth: Authentication and authorization tests + tenant_isolation: Multi-tenant isolation and security tests + users: User management tests + files: File upload/download tests + rate_limit: Rate limiting tests + pagination: Pagination tests + integration: End-to-end integration tests + slow: Slow-running tests + unit: Unit tests + smoke: Quick smoke tests -# Coverage options (when using --cov) +# Coverage options [coverage:run] -source = py +source = app +omit = + */tests/* + */venv/* + */__pycache__/* [coverage:report] show_missing = True skip_covered = False +precision = 2 diff --git a/requirements.txt b/requirements.txt index 688d2a5..674e169 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,18 +2,24 @@ fastapi==0.104.1 uvicorn[standard]==0.24.0 pydantic[email]==2.5.0 +python-multipart==0.0.6 # For file uploads + +# Authentication +pyjwt==2.8.0 +bcrypt==4.1.2 # Testing dependencies pytest==7.4.3 pytest-asyncio==0.21.1 pytest-cov==4.1.0 httpx==0.25.2 - -# Optional but recommended (used in RAD's actual testing) pytest-xdist==3.5.0 # Parallel test execution -pytest-mock==3.12.0 # Mocking utilities + +# Test data factories (optional) +factory-boy==3.3.0 +faker==22.0.0 # Development tools -black==23.12.1 # Code formatting -flake8==6.1.0 # Linting -mypy==1.7.1 # Type checking +black==23.12.1 +flake8==6.1.0 +mypy==1.7.1 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..4023bdf --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,51 @@ +""" +Shared pytest fixtures and configuration + +TODO: Implement fixtures for: +- Base 
test client +- Authenticated clients per tenant +- Test data factories +- Database cleanup +- Environment configuration +""" +import pytest +from fastapi.testclient import TestClient +from app.main import app, tenants_db, users_db, files_db, file_storage, blacklisted_tokens + + +@pytest.fixture +def client(): + """Create a test client for the FastAPI app""" + return TestClient(app) + + +@pytest.fixture(autouse=True) +def reset_database(): + """Automatically reset all databases before each test""" + tenants_db.clear() + users_db.clear() + files_db.clear() + file_storage.clear() + blacklisted_tokens.clear() + yield # Test runs here + # Cleanup after test + tenants_db.clear() + users_db.clear() + files_db.clear() + file_storage.clear() + blacklisted_tokens.clear() + + +# TODO: Add fixtures for authenticated clients +# Example: +# @pytest.fixture +# def tenant_a_admin(client): +# """Return authenticated admin client for Tenant A""" +# # Register tenant +# client.post("/auth/register", json={...}) +# # Login and get token +# response = client.post("/auth/login", json={...}) +# token = response.json()["access_token"] +# # Set authorization header +# client.headers = {"Authorization": f"Bearer {token}"} +# return client diff --git a/tests/test_health.py b/tests/test_health.py new file mode 100644 index 0000000..9b72dbd --- /dev/null +++ b/tests/test_health.py @@ -0,0 +1,17 @@ +""" +Basic health check test - Example to get you started +""" +import pytest + + +@pytest.mark.smoke +def test_health_check(client): + """Test that the health check endpoint returns 200""" + response = client.get("/health") + assert response.status_code == 200 + + data = response.json() + assert data["status"] == "healthy" + assert "timestamp" in data + assert "environment" in data + assert "version" in data diff --git a/tests/test_users.py b/tests/test_users_original.py similarity index 100% rename from tests/test_users.py rename to tests/test_users_original.py From 
59f35c1fb177bdd1dd8a044dbe5ad5a4897d9ef8 Mon Sep 17 00:00:00 2001 From: Claude Code Date: Fri, 19 Dec 2025 18:51:57 +0000 Subject: [PATCH 2/5] Add Rust integration tests (optional bonus) - Create rust_tests/ with blocking HTTP client tests - Add 10 test cases (7 working, 3 TODO) - Tests: health, auth, users, tenant isolation - Demonstrates cross-language testing capability - Update CI/CD to run Rust tests after Python - Add Rust setup instructions to README - Add .gitignore for Rust build artifacts Note: Some tests may need cleanup between runs. Candidate should implement /test/reset endpoint calls. --- .github/workflows/tests.yml | 49 ++++ .gitignore | 20 +- README.md | 31 ++- rust_tests/Cargo.toml | 15 ++ rust_tests/tests/integration_tests.rs | 353 ++++++++++++++++++++++++++ 5 files changed, 458 insertions(+), 10 deletions(-) create mode 100644 rust_tests/Cargo.toml create mode 100644 rust_tests/tests/integration_tests.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a766d65..68832d7 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -52,3 +52,52 @@ jobs: with: name: test-results-${{ matrix.python-version }}-${{ matrix.environment }} path: junit.xml + + rust-tests: + runs-on: ubuntu-latest + needs: test + steps: + - uses: actions/checkout@v3 + + - name: Setup Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + + - name: Cache cargo registry + uses: actions/cache@v3 + with: + path: ~/.cargo/registry + key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo build + uses: actions/cache@v3 + with: + path: rust_tests/target + key: ${{ runner.os }}-cargo-build-${{ hashFiles('**/Cargo.lock') }} + + - name: Setup Python and start API + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install Python dependencies and start API + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + export 
API_ENV=dev + export JWT_SECRET=test_secret_for_rust_tests + uvicorn app.main:app --host 0.0.0.0 --port 8000 & + sleep 5 # Wait for API to start + + - name: Run Rust integration tests + working-directory: rust_tests + run: cargo test --verbose + + - name: Upload Rust test results + if: always() + uses: actions/upload-artifact@v3 + with: + name: rust-test-results + path: rust_tests/target/debug/deps/*.xml diff --git a/.gitignore b/.gitignore index fa92f88..bdceda1 100644 --- a/.gitignore +++ b/.gitignore @@ -8,12 +8,18 @@ venv/ env/ ENV/ .venv - -# pytest +pip-log.txt +pip-delete-this-directory.txt .pytest_cache/ .coverage htmlcov/ *.cover +.hypothesis/ + +# Rust +rust_tests/target/ +rust_tests/Cargo.lock +**/*.rs.bk # IDE .vscode/ @@ -26,9 +32,7 @@ htmlcov/ .DS_Store Thumbs.db -# Environment -.env -*.log - -# Internal documentation (not for candidates) -BUGS_REFERENCE.md +# Test outputs +junit.xml +coverage.xml +.coverage.* diff --git a/README.md b/README.md index 8987403..c74e240 100644 --- a/README.md +++ b/README.md @@ -106,6 +106,28 @@ GET /api/v1/admin/stats - System statistics - Load/performance testing - Mock external services +### Rust Integration Tests (Optional Bonus) + +We also provide Rust integration tests in `rust_tests/` to demonstrate cross-language testing capability. This is **completely optional** but shows production-level polyglot engineering skills. 
+ +**Run Rust tests:** +```bash +cd rust_tests +cargo test +cargo test -- --nocapture # Verbose output +``` + +**Bonus points for:** +- Completing the TODO tests in `rust_tests/tests/integration_tests.rs` +- Adding additional Rust test cases +- Demonstrating Rust/Python test coordination + +**Why Rust tests?** +- Demonstrates polyglot capability (Python + Rust) +- Shows HTTP client testing from external process +- Mirrors our production stack (we use both Python and Rust) +- Tests API contracts from consumer perspective + ### CI/CD Pipeline Create `.github/workflows/tests.yml` with: @@ -146,6 +168,9 @@ pytest -m integration -v # Parallel execution pytest -n auto -v + +# Rust tests (optional bonus) +cd rust_tests && cargo test ``` ## Evaluation Criteria @@ -230,11 +255,13 @@ def test_rate_limit_enforcement(tenant_a_admin): ## Submission 1. **Push code** to your fork/branch -2. **Verify tests pass**: `pytest -v --cov=py` -3. **Submit:** +2. **Verify tests pass**: `pytest -v --cov=app` +3. **(Optional) Run Rust tests**: `cd rust_tests && cargo test` +4. **Submit:** - Repository link - Test output (coverage report) - `TESTING_STRATEGY.md` + - (Optional) Rust test results ## Questions? diff --git a/rust_tests/Cargo.toml b/rust_tests/Cargo.toml new file mode 100644 index 0000000..b44e5ee --- /dev/null +++ b/rust_tests/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "api-integration-tests" +version = "0.1.0" +edition = "2021" + +[[test]] +name = "integration_tests" +path = "tests/integration_tests.rs" + +[dependencies] +tokio = { version = "1.35", features = ["full"] } +reqwest = { version = "0.11", features = ["json", "blocking"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +anyhow = "1.0" diff --git a/rust_tests/tests/integration_tests.rs b/rust_tests/tests/integration_tests.rs new file mode 100644 index 0000000..d71bf7d --- /dev/null +++ b/rust_tests/tests/integration_tests.rs @@ -0,0 +1,353 @@ +//! 
Rust Integration Tests for Multi-Tenant API
+//!
+//! BONUS CHALLENGE (Optional):
+//! These tests demonstrate cross-language testing capability.
+//! Implement comprehensive Rust tests that verify the Python API.
+//!
+//! Run with: cargo test
+//! Run verbose: cargo test -- --nocapture
+
+use reqwest::blocking::Client;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+
+const API_BASE: &str = "http://localhost:8000";
+
+// ============================================================================
+// Models (matching Python API)
+// ============================================================================
+
+#[derive(Debug, Serialize, Deserialize)]
+struct HealthResponse {
+    status: String,
+    timestamp: String,
+    environment: String,
+    version: String,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct RegisterRequest {
+    tenant_name: String,
+    admin_email: String,
+    admin_username: String,
+    admin_password: String,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct RegisterResponse {
+    message: String,
+    tenant_id: String,
+    admin_user_id: String,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct LoginRequest {
+    username: String,
+    password: String,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct TokenResponse {
+    access_token: String,
+    refresh_token: String,
+    token_type: String,
+    expires_in: i64,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct User {
+    id: String,
+    tenant_id: String,
+    username: String,
+    email: String,
+    full_name: String,
+    role: String,
+    is_active: bool,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct CreateUserRequest {
+    username: String,
+    email: String,
+    full_name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    role: Option<String>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct ErrorResponse {
+    detail: String,
+}
+
+// ============================================================================
+// Test Utilities
+// 
============================================================================
+
+struct TestClient {
+    client: Client,
+    base_url: String,
+}
+
+impl TestClient {
+    fn new() -> Self {
+        Self {
+            client: Client::new(),
+            base_url: API_BASE.to_string(),
+        }
+    }
+
+    fn with_auth(token: &str) -> Self {
+        let mut headers = reqwest::header::HeaderMap::new();
+        headers.insert(
+            reqwest::header::AUTHORIZATION,
+            format!("Bearer {}", token).parse().unwrap(),
+        );
+
+        Self {
+            client: Client::builder()
+                .default_headers(headers)
+                .build()
+                .unwrap(),
+            base_url: API_BASE.to_string(),
+        }
+    }
+}
+
+/// Helper to register a tenant and return admin token
+fn setup_tenant(tenant_name: &str) -> Result<TokenResponse, Box<dyn std::error::Error>> {
+    let client = TestClient::new();
+
+    // Register tenant
+    let register = RegisterRequest {
+        tenant_name: tenant_name.to_string(),
+        admin_email: format!("admin@{}.com", tenant_name),
+        admin_username: format!("admin_{}", tenant_name),
+        admin_password: "SecurePass123!".to_string(),
+    };
+
+    let _: RegisterResponse = client
+        .client
+        .post(format!("{}/auth/register", client.base_url))
+        .json(&register)
+        .send()?
+        .json()?;
+
+    // Login
+    let login = LoginRequest {
+        username: format!("admin_{}", tenant_name),
+        password: "SecurePass123!".to_string(),
+    };
+
+    let tokens: TokenResponse = client
+        .client
+        .post(format!("{}/auth/login", client.base_url))
+        .json(&login)
+        .send()?
+        .json()?;
+
+    Ok(tokens)
+}
+
+// ============================================================================
+// Tests
+// ============================================================================
+
+#[test]
+fn test_health_check() {
+    let client = TestClient::new();
+
+    let response = client
+        .client
+        .get(format!("{}/health", client.base_url))
+        .send()
+        .expect("Failed to send request");
+
+    assert_eq!(response.status(), 200);
+
+    let health: HealthResponse = response.json().expect("Failed to parse JSON");
+
+    assert_eq!(health.status, "healthy");
+    assert_eq!(health.version, "2.0.0");
+}
+
+#[test]
+fn test_register_tenant_success() {
+    let client = TestClient::new();
+
+    let register = RegisterRequest {
+        tenant_name: "test_tenant_rust".to_string(),
+        admin_email: "admin@rust.com".to_string(),
+        admin_username: "admin_rust".to_string(),
+        admin_password: "SecurePass123!".to_string(),
+    };
+
+    let response = client
+        .client
+        .post(format!("{}/auth/register", client.base_url))
+        .json(&register)
+        .send()
+        .expect("Failed to register tenant");
+
+    assert_eq!(response.status(), 201);
+
+    let result: RegisterResponse = response.json().expect("Failed to parse response");
+
+    assert_eq!(result.message, "Tenant registered successfully");
+    assert!(!result.tenant_id.is_empty());
+    assert!(!result.admin_user_id.is_empty());
+}
+
+#[test]
+fn test_login_success() {
+    // Setup tenant first
+    let tokens = setup_tenant("login_test").expect("Failed to setup tenant");
+
+    assert_eq!(tokens.token_type, "bearer");
+    assert!(!tokens.access_token.is_empty());
+    assert!(!tokens.refresh_token.is_empty());
+    assert!(tokens.expires_in > 0);
+}
+
+#[test]
+fn test_login_invalid_credentials() {
+    let client = TestClient::new();
+
+    let login = LoginRequest {
+        username: "nonexistent_user".to_string(),
+        password: "WrongPassword".to_string(),
+    };
+
+    let response = client
+        .client
+        .post(format!("{}/auth/login", client.base_url))
+        .json(&login)
+        .send()
+        .expect("Failed 
to send login request"); + + assert_eq!(response.status(), 401); + + let error: ErrorResponse = response.json().expect("Failed to parse error"); + assert_eq!(error.detail, "Invalid username or password"); +} + +#[test] +fn test_create_user_authenticated() { + // Setup tenant and get token + let tokens = setup_tenant("user_test").expect("Failed to setup tenant"); + let client = TestClient::with_auth(&tokens.access_token); + + // Create user + let new_user = CreateUserRequest { + username: "test_user".to_string(), + email: "testuser@example.com".to_string(), + full_name: "Test User".to_string(), + role: Some("user".to_string()), + }; + + let response = client + .client + .post(format!("{}/api/v1/users", client.base_url)) + .json(&new_user) + .send() + .expect("Failed to create user"); + + assert_eq!(response.status(), 201); + + let user: User = response.json().expect("Failed to parse user"); + + assert_eq!(user.username, "test_user"); + assert_eq!(user.email, "testuser@example.com"); + assert_eq!(user.role, "user"); + assert!(user.is_active); +} + +#[test] +fn test_create_user_without_auth_fails() { + let client = TestClient::new(); + + let new_user = CreateUserRequest { + username: "test_user".to_string(), + email: "testuser@example.com".to_string(), + full_name: "Test User".to_string(), + role: None, + }; + + let response = client + .client + .post(format!("{}/api/v1/users", client.base_url)) + .json(&new_user) + .send() + .expect("Failed to send request"); + + assert_eq!(response.status(), 401); +} + +#[test] +fn test_tenant_isolation() { + // Setup two tenants + let tokens_a = setup_tenant("tenant_a_rust").expect("Failed to setup tenant A"); + let tokens_b = setup_tenant("tenant_b_rust").expect("Failed to setup tenant B"); + + // Tenant B creates a user + let client_b = TestClient::with_auth(&tokens_b.access_token); + let new_user = CreateUserRequest { + username: "user_b".to_string(), + email: "userb@example.com".to_string(), + full_name: "User B".to_string(), + 
role: Some("user".to_string()), + }; + + let user_b_response = client_b + .client + .post(format!("{}/api/v1/users", client_b.base_url)) + .json(&new_user) + .send() + .expect("Failed to create user in tenant B"); + + assert_eq!(user_b_response.status(), 201); + + let user_b: User = user_b_response.json().expect("Failed to parse user"); + let user_b_id = user_b.id; + + // Tenant A tries to access Tenant B's user (should fail) + let client_a = TestClient::with_auth(&tokens_a.access_token); + let response = client_a + .client + .get(format!("{}/api/v1/users/{}", client_a.base_url, user_b_id)) + .send() + .expect("Failed to send request"); + + // Should return 404 (not found) for security - don't leak tenant existence + assert_eq!(response.status(), 404); +} + +// TODO: Implement more tests +// - List users with pagination +// - Update user +// - Delete user +// - File upload/download +// - Rate limiting +// - Token refresh +// - Admin endpoints +// - Concurrent operations + +#[test] +#[ignore] // TODO: Implement +fn test_rate_limiting() { + // TODO: Make 11 requests and verify 11th returns 429 + todo!("Implement rate limiting test"); +} + +#[test] +#[ignore] // TODO: Implement +fn test_file_upload() { + // TODO: Test file upload with multipart form + todo!("Implement file upload test"); +} + +#[test] +#[ignore] // TODO: Implement +fn test_pagination() { + // TODO: Create many users and test pagination + todo!("Implement pagination test"); +} From b14d0a15ab93bef618b8fed9efa1b75bce66c5b4 Mon Sep 17 00:00:00 2001 From: Claude Code Date: Fri, 19 Dec 2025 19:07:06 +0000 Subject: [PATCH 3/5] Adjust difficulty: Change to tiered/staggered approach - Replace '30+ tests REQUIRED' with 3-tier system - Tier 1 (Core): 15-18 tests, 3-4 hours - MUST COMPLETE - Tier 2 (Extended): +6-8 tests, +1.5-2 hours - SHOULD COMPLETE - Tier 3 (Bonus): +5-10 tests, +1-2 hours - OPTIONAL Makes assessment more reasonable: - Minimum pass: 15+ tests (achievable in 3-4 hours) - Strong pass: 20-25 
tests (achievable in 5-6 hours) - Outstanding: 25-30+ tests (for exceptional candidates) Clear expectations for each tier and time investment. --- DIFFICULTY_ANALYSIS.md | 105 ++++++++++++++++++++++++++++++ README.md | 144 +++++++++++++++++++++++++---------------- 2 files changed, 194 insertions(+), 55 deletions(-) create mode 100644 DIFFICULTY_ANALYSIS.md diff --git a/DIFFICULTY_ANALYSIS.md b/DIFFICULTY_ANALYSIS.md new file mode 100644 index 0000000..fa67b92 --- /dev/null +++ b/DIFFICULTY_ANALYSIS.md @@ -0,0 +1,105 @@ +# Assessment Difficulty Analysis + +## Current Requirements Breakdown + +### Total: 30+ tests across 5 areas +- **Authentication:** 8+ tests +- **Multi-Tenant Isolation:** 6+ tests +- **User Management:** 8+ tests +- **File Management:** 6+ tests +- **Rate Limiting:** 2+ tests + +**Additional Requirements:** +- Advanced pytest patterns (fixtures, markers, parametrization) +- CI/CD pipeline setup +- TESTING_STRATEGY.md documentation +- Optional: Rust integration tests + +**Estimated Time:** 6-8 hours (currently too much) + +--- + +## Proposed Staggered Approach + +### ๐ŸŽฏ Tier 1: Core Requirements (MUST COMPLETE) +**Time:** 3-4 hours | **Tests:** 15-18 + +**Authentication Basics (5 tests)** +- โœ… Register tenant + admin +- โœ… Login with valid credentials +- โœ… Login with invalid credentials +- โœ… Access endpoint without token (401) +- โœ… Access endpoint with invalid token (401) + +**User Management with Auth (8 tests)** +- โœ… Create user (authenticated) +- โœ… List users (authenticated, tenant-scoped) +- โœ… Get user by ID (authenticated, tenant-scoped) +- โœ… Update user (authenticated, tenant-scoped) +- โœ… Delete user (authenticated, tenant-scoped) +- โœ… Duplicate username validation +- โœ… Duplicate email validation +- โœ… Invalid input validation + +**Basic Tenant Isolation (3 tests)** +- โœ… Tenant A cannot access Tenant B's users +- โœ… List users only shows current tenant's users +- โœ… User IDs are scoped to tenant + +**Evaluation:** 
Pass if 15+ tests pass with 70%+ coverage on auth/users + +--- + +### ๐ŸŽฏ Tier 2: Extended Requirements (SHOULD COMPLETE) +**Time:** +1.5-2 hours | **Tests:** +6-8 additional + +**File Management (4 tests)** +- Upload file successfully +- Download file +- List files (tenant-scoped) +- Delete file + +**Pagination (2 tests)** +- List users with pagination (multiple pages) +- Pagination metadata correct (has_next, total_count) + +**Advanced Auth (2 tests)** +- Token refresh workflow +- Role-based access (admin vs user) + +**Evaluation:** Strong pass if completes Tier 1 + Tier 2 (20-25 tests) + +--- + +### ๐ŸŽฏ Tier 3: Bonus Challenges (OPTIONAL) +**Time:** +1-2 hours | **Nice to have** + +**Advanced Scenarios:** +- Rate limiting enforcement (429 responses) +- Cross-tenant file access prevention +- Concurrent user creation +- File size/type validation +- Token expiration handling + +**Infrastructure:** +- CI/CD pipeline (GitHub Actions) +- TESTING_STRATEGY.md documentation +- Test data factories +- Rust integration tests + +**Evaluation:** Exceptional if completes all 3 tiers + +--- + +## Recommended Update + +Change from "30+ tests REQUIRED" to: + +**Minimum (Pass): 15+ tests (Tier 1)** +**Target (Strong Pass): 20+ tests (Tier 1 + Tier 2)** +**Exceptional (Outstanding): 25+ tests (All tiers)** + +This makes it: +- โœ… Achievable in 4 hours (Tier 1) +- โœ… Challenging but fair in 6 hours (Tier 1 + 2) +- โœ… Room to shine for strong candidates (Tier 3) diff --git a/README.md b/README.md index c74e240..3cb14f8 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,8 @@ Design and implement comprehensive test automation for a production-like multi-tenant SaaS API with authentication, file management, and rate limiting. 
-**Time:** 4-6 hours -**Level:** Senior +**Time:** 4-6 hours (tiered: 3-4 hours core, +2 hours extended/bonus) +**Level:** Senior (with tiered evaluation) **Skills:** Python, pytest, API testing, OAuth2/JWT, multi-tenancy, CI/CD ## The Challenge @@ -51,44 +51,75 @@ GET /api/v1/admin/tenants - List all tenants GET /api/v1/admin/stats - System statistics ``` -## Requirements +## Requirements (Tiered Approach) -### Must Implement (30+ tests) +### ๐ŸŽฏ Tier 1: Core Requirements (MUST COMPLETE) +**Target:** 15-18 tests | **Time:** 3-4 hours | **Evaluation:** Minimum passing score -**Authentication (8+ tests)** -- Valid login flow -- Invalid credentials -- Token expiration handling +**Authentication Basics (5 tests)** +- Register tenant + admin user +- Login with valid credentials +- Login with invalid credentials +- Access endpoint without token (should return 401) +- Access endpoint with invalid token (should return 401) + +**User Management with Auth (8 tests)** +- Create user (authenticated, tenant-scoped) +- List users (authenticated, returns only tenant's users) +- Get user by ID (authenticated, tenant-scoped) +- Update user (authenticated, tenant-scoped) +- Delete user (authenticated, tenant-scoped) +- Duplicate username validation (409 conflict) +- Duplicate email validation (409 conflict) +- Invalid input validation (422 error) + +**Basic Tenant Isolation (3 tests)** +- Tenant A cannot access Tenant B's users (404) +- List users only shows current tenant's data +- User operations are scoped to authenticated tenant + +**Passing Criteria:** 15+ tests passing, 70%+ code coverage on auth/users modules + +--- + +### ๐ŸŽฏ Tier 2: Extended Requirements (SHOULD COMPLETE) +**Target:** +6-8 tests | **Time:** +1.5-2 hours | **Evaluation:** Strong passing score + +**File Management (4 tests)** +- Upload file successfully +- Download file (authenticated, tenant-scoped) +- List files (authenticated, returns only tenant's files) +- Delete file (authenticated, tenant-scoped) + 
+**Pagination (2 tests)** +- List users with pagination (test multiple pages) +- Pagination metadata correct (has_next, total_count, etc.) + +**Advanced Auth (2 tests)** - Token refresh workflow -- Logout functionality -- Malformed/missing tokens -- Role-based access control - -**Multi-Tenant Isolation (6+ tests)** -- Cross-tenant user access (should fail) -- Cross-tenant file access (should fail) -- Tenant-scoped data queries -- Admin cross-tenant access - -**User Management (8+ tests)** -- Create user in tenant -- List users with pagination -- Update user details -- Soft delete user -- Duplicate username/email handling -- Invalid input validation - -**File Management (6+ tests)** -- Upload various file types -- Download files -- List files with pagination -- Delete files -- File type validation -- File size limits - -**Rate Limiting (2+ tests)** -- Enforce 10 req/min limit -- Verify 429 status + headers +- Role-based access control (admin vs user) + +**Strong Pass Criteria:** 20-25 tests passing, 80%+ code coverage + +--- + +### ๐ŸŽฏ Tier 3: Bonus Challenges (OPTIONAL) +**Target:** +5-10 tests | **Time:** +1-2 hours | **Evaluation:** Exceptional/Outstanding + +**Advanced Scenarios:** +- Rate limiting enforcement (429 responses) +- Cross-tenant file access prevention +- Token expiration handling +- File type/size validation +- Concurrent operations testing + +**Infrastructure & Documentation:** +- CI/CD pipeline (GitHub Actions) +- TESTING_STRATEGY.md documentation +- Test data factories (factory_boy, Faker) +- Rust integration tests (cross-language) + +**Outstanding Criteria:** All 3 tiers completed (25-30+ tests), comprehensive documentation ### Advanced pytest Patterns @@ -176,23 +207,21 @@ cd rust_tests && cargo test ## Evaluation Criteria **Technical (60%)** -- Authentication testing depth -- Multi-tenant isolation verification -- Advanced pytest usage (fixtures, parametrization, markers) -- CI/CD pipeline quality -- Code organization +- **Tier 1 
(Critical):** Auth basics + User CRUD + Tenant isolation +- **Tier 2 (Important):** Files, pagination, advanced auth +- **Tier 3 (Bonus):** Rate limiting, CI/CD, advanced scenarios **Architecture (25%)** -- Test design patterns -- Reusable fixtures -- Environment handling -- Scalability considerations +- Test fixture design (authenticated clients per tenant) +- Test organization and reusability +- Setup/teardown patterns +- Code clarity and maintainability **Professional (15%)** -- Documentation quality -- Code readability -- Production-mindedness -- Edge case coverage +- Code quality (PEP 8, type hints, clear naming) +- Documentation (inline comments, TESTING_STRATEGY.md) +- Problem-solving approach (how you tackled complex scenarios) +- Time management (completed appropriate tier for time spent) ## Example Patterns @@ -256,12 +285,17 @@ def test_rate_limit_enforcement(tenant_a_admin): 1. **Push code** to your fork/branch 2. **Verify tests pass**: `pytest -v --cov=app` -3. **(Optional) Run Rust tests**: `cd rust_tests && cargo test` -4. **Submit:** +3. **Submit:** - Repository link - - Test output (coverage report) - - `TESTING_STRATEGY.md` - - (Optional) Rust test results + - Test output showing: test count, pass rate, coverage % + - Brief summary of what tier you completed + - **(Tier 2+)** `TESTING_STRATEGY.md` explaining your approach + - **(Tier 3)** Optional: Rust test results, CI/CD logs + +**What We're Looking For:** +- **Minimum (Pass):** Tier 1 complete (15+ tests, 70%+ coverage) +- **Target (Strong):** Tier 1 + Tier 2 (20+ tests, 80%+ coverage) +- **Outstanding:** All 3 tiers (25+ tests, comprehensive documentation) ## Questions? 
From 49da3b13578f914282754ebda3df9de278569562 Mon Sep 17 00:00:00 2001 From: Claude Code Date: Fri, 19 Dec 2025 19:07:49 +0000 Subject: [PATCH 4/5] Further adjust difficulty to 1-2 hours for Tier 1 Revised tiers for professional/realistic timeline: - Tier 1: 10-12 tests, 1.5-2 hours (auth + user CRUD + basic isolation) - Tier 2: +5-7 tests, +1 hour (files, pagination, validation) - Tier 3: +5+ tests, +1+ hour (advanced auth, rate limiting, CI/CD) Total time: 2-4 hours (reasonable for senior engineer) Remove internal DIFFICULTY_ANALYSIS.md --- DIFFICULTY_ANALYSIS.md | 105 ----------------------------------------- README.md | 105 ++++++++++++++++++++--------------------- 2 files changed, 52 insertions(+), 158 deletions(-) delete mode 100644 DIFFICULTY_ANALYSIS.md diff --git a/DIFFICULTY_ANALYSIS.md b/DIFFICULTY_ANALYSIS.md deleted file mode 100644 index fa67b92..0000000 --- a/DIFFICULTY_ANALYSIS.md +++ /dev/null @@ -1,105 +0,0 @@ -# Assessment Difficulty Analysis - -## Current Requirements Breakdown - -### Total: 30+ tests across 5 areas -- **Authentication:** 8+ tests -- **Multi-Tenant Isolation:** 6+ tests -- **User Management:** 8+ tests -- **File Management:** 6+ tests -- **Rate Limiting:** 2+ tests - -**Additional Requirements:** -- Advanced pytest patterns (fixtures, markers, parametrization) -- CI/CD pipeline setup -- TESTING_STRATEGY.md documentation -- Optional: Rust integration tests - -**Estimated Time:** 6-8 hours (currently too much) - ---- - -## Proposed Staggered Approach - -### ๐ŸŽฏ Tier 1: Core Requirements (MUST COMPLETE) -**Time:** 3-4 hours | **Tests:** 15-18 - -**Authentication Basics (5 tests)** -- โœ… Register tenant + admin -- โœ… Login with valid credentials -- โœ… Login with invalid credentials -- โœ… Access endpoint without token (401) -- โœ… Access endpoint with invalid token (401) - -**User Management with Auth (8 tests)** -- โœ… Create user (authenticated) -- โœ… List users (authenticated, tenant-scoped) -- โœ… Get user by ID 
(authenticated, tenant-scoped) -- โœ… Update user (authenticated, tenant-scoped) -- โœ… Delete user (authenticated, tenant-scoped) -- โœ… Duplicate username validation -- โœ… Duplicate email validation -- โœ… Invalid input validation - -**Basic Tenant Isolation (3 tests)** -- โœ… Tenant A cannot access Tenant B's users -- โœ… List users only shows current tenant's users -- โœ… User IDs are scoped to tenant - -**Evaluation:** Pass if 15+ tests pass with 70%+ coverage on auth/users - ---- - -### ๐ŸŽฏ Tier 2: Extended Requirements (SHOULD COMPLETE) -**Time:** +1.5-2 hours | **Tests:** +6-8 additional - -**File Management (4 tests)** -- Upload file successfully -- Download file -- List files (tenant-scoped) -- Delete file - -**Pagination (2 tests)** -- List users with pagination (multiple pages) -- Pagination metadata correct (has_next, total_count) - -**Advanced Auth (2 tests)** -- Token refresh workflow -- Role-based access (admin vs user) - -**Evaluation:** Strong pass if completes Tier 1 + Tier 2 (20-25 tests) - ---- - -### ๐ŸŽฏ Tier 3: Bonus Challenges (OPTIONAL) -**Time:** +1-2 hours | **Nice to have** - -**Advanced Scenarios:** -- Rate limiting enforcement (429 responses) -- Cross-tenant file access prevention -- Concurrent user creation -- File size/type validation -- Token expiration handling - -**Infrastructure:** -- CI/CD pipeline (GitHub Actions) -- TESTING_STRATEGY.md documentation -- Test data factories -- Rust integration tests - -**Evaluation:** Exceptional if completes all 3 tiers - ---- - -## Recommended Update - -Change from "30+ tests REQUIRED" to: - -**Minimum (Pass): 15+ tests (Tier 1)** -**Target (Strong Pass): 20+ tests (Tier 1 + Tier 2)** -**Exceptional (Outstanding): 25+ tests (All tiers)** - -This makes it: -- โœ… Achievable in 4 hours (Tier 1) -- โœ… Challenging but fair in 6 hours (Tier 1 + 2) -- โœ… Room to shine for strong candidates (Tier 3) diff --git a/README.md b/README.md index 3cb14f8..391770b 100644 --- a/README.md +++ b/README.md 
@@ -4,9 +4,9 @@ Design and implement comprehensive test automation for a production-like multi-tenant SaaS API with authentication, file management, and rate limiting. -**Time:** 4-6 hours (tiered: 3-4 hours core, +2 hours extended/bonus) +**Time:** 2-4 hours (tiered: 1.5-2h core, +1h extended, +1h bonus) **Level:** Senior (with tiered evaluation) -**Skills:** Python, pytest, API testing, OAuth2/JWT, multi-tenancy, CI/CD +**Skills:** Python, pytest, API testing, OAuth2/JWT, multi-tenancy ## The Challenge @@ -54,72 +54,71 @@ GET /api/v1/admin/stats - System statistics ## Requirements (Tiered Approach) ### ๐ŸŽฏ Tier 1: Core Requirements (MUST COMPLETE) -**Target:** 15-18 tests | **Time:** 3-4 hours | **Evaluation:** Minimum passing score - -**Authentication Basics (5 tests)** -- Register tenant + admin user -- Login with valid credentials -- Login with invalid credentials -- Access endpoint without token (should return 401) -- Access endpoint with invalid token (should return 401) - -**User Management with Auth (8 tests)** -- Create user (authenticated, tenant-scoped) -- List users (authenticated, returns only tenant's users) -- Get user by ID (authenticated, tenant-scoped) -- Update user (authenticated, tenant-scoped) -- Delete user (authenticated, tenant-scoped) -- Duplicate username validation (409 conflict) -- Duplicate email validation (409 conflict) -- Invalid input validation (422 error) - -**Basic Tenant Isolation (3 tests)** -- Tenant A cannot access Tenant B's users (404) +**Target:** 10-12 tests | **Time:** 1.5-2 hours | **Evaluation:** Minimum passing score + +**Authentication (4 tests)** +- Register tenant + admin user successfully +- Login with valid credentials โ†’ get JWT token +- Login with invalid credentials โ†’ 401 error +- Access protected endpoint without token โ†’ 401 error + +**User Management with Auth (5 tests)** +- Create user (authenticated) โ†’ 201 success +- List users (authenticated, tenant-scoped) โ†’ returns only tenant's users +- Get 
user by ID (authenticated) โ†’ 200 success +- Update user (authenticated) โ†’ 200 success +- Duplicate username โ†’ 409 conflict + +**Basic Tenant Isolation (2 tests)** +- Tenant A cannot access Tenant B's user โ†’ 404 - List users only shows current tenant's data -- User operations are scoped to authenticated tenant -**Passing Criteria:** 15+ tests passing, 70%+ code coverage on auth/users modules +**Passing Criteria:** 10+ tests passing, 60%+ code coverage on core auth/users --- ### ๐ŸŽฏ Tier 2: Extended Requirements (SHOULD COMPLETE) -**Target:** +6-8 tests | **Time:** +1.5-2 hours | **Evaluation:** Strong passing score +**Target:** +5-7 tests | **Time:** +1 hour | **Evaluation:** Strong passing score -**File Management (4 tests)** -- Upload file successfully -- Download file (authenticated, tenant-scoped) -- List files (authenticated, returns only tenant's files) -- Delete file (authenticated, tenant-scoped) +**Additional User Tests (3 tests)** +- Delete user (soft delete) โ†’ is_active=False +- Duplicate email validation โ†’ 409 conflict +- Invalid input validation โ†’ 422 error -**Pagination (2 tests)** -- List users with pagination (test multiple pages) -- Pagination metadata correct (has_next, total_count, etc.) 
+**File Management (3 tests)** +- Upload file successfully โ†’ 201, returns file_id +- Download file (tenant-scoped) โ†’ correct content +- Delete file (tenant-scoped) โ†’ 204 success -**Advanced Auth (2 tests)** -- Token refresh workflow -- Role-based access control (admin vs user) +**Pagination (2 tests)** +- List users with pagination โ†’ multiple pages work +- Pagination metadata โ†’ has_next, total_count correct -**Strong Pass Criteria:** 20-25 tests passing, 80%+ code coverage +**Strong Pass Criteria:** 15-18 tests passing, 70%+ code coverage --- ### ๐ŸŽฏ Tier 3: Bonus Challenges (OPTIONAL) -**Target:** +5-10 tests | **Time:** +1-2 hours | **Evaluation:** Exceptional/Outstanding +**Target:** +5+ tests | **Time:** +1+ hours | **Evaluation:** Exceptional/Outstanding -**Advanced Scenarios:** -- Rate limiting enforcement (429 responses) +**Advanced Auth & Security:** +- Token refresh workflow +- Role-based access control (admin vs user) +- Invalid/expired token handling - Cross-tenant file access prevention -- Token expiration handling -- File type/size validation -- Concurrent operations testing -**Infrastructure & Documentation:** -- CI/CD pipeline (GitHub Actions) -- TESTING_STRATEGY.md documentation -- Test data factories (factory_boy, Faker) -- Rust integration tests (cross-language) +**Performance & Limits:** +- Rate limiting enforcement (429 responses) +- File type validation (415 unsupported media) +- File size limits (413 entity too large) + +**Infrastructure & Documentation (Bonus):** +- CI/CD pipeline (GitHub Actions) working in PR +- TESTING_STRATEGY.md explaining your approach +- Test data factories for realistic data +- Rust integration tests (1-2 hours additional) -**Outstanding Criteria:** All 3 tiers completed (25-30+ tests), comprehensive documentation +**Outstanding Criteria:** 20+ tests total, comprehensive documentation, working CI/CD ### Advanced pytest Patterns @@ -293,9 +292,9 @@ def test_rate_limit_enforcement(tenant_a_admin): - **(Tier 
3)** Optional: Rust test results, CI/CD logs **What We're Looking For:** -- **Minimum (Pass):** Tier 1 complete (15+ tests, 70%+ coverage) -- **Target (Strong):** Tier 1 + Tier 2 (20+ tests, 80%+ coverage) -- **Outstanding:** All 3 tiers (25+ tests, comprehensive documentation) +- **Minimum (Pass):** Tier 1 complete (10+ tests, 60%+ coverage, ~2 hours) +- **Target (Strong):** Tier 1 + Tier 2 (15+ tests, 70%+ coverage, ~3 hours) +- **Outstanding:** All 3 tiers (20+ tests, CI/CD, docs, ~4+ hours) ## Questions? From c20012abbb9bec6ede9c8f8821975ce8ef0ce91b Mon Sep 17 00:00:00 2001 From: Claude Code Date: Fri, 19 Dec 2025 20:20:54 +0000 Subject: [PATCH 5/5] Rename tests/ to py_tests/ for clarity - Distinguish Python tests from Rust tests (rust_tests/) - Update pytest.ini testpaths - Update coverage omit patterns - All tests still discoverable via pytest --- {tests => py_tests}/__init__.py | 0 {tests => py_tests}/conftest.py | 0 {tests => py_tests}/test_health.py | 0 {tests => py_tests}/test_users_original.py | 0 pytest.ini | 4 ++-- 5 files changed, 2 insertions(+), 2 deletions(-) rename {tests => py_tests}/__init__.py (100%) rename {tests => py_tests}/conftest.py (100%) rename {tests => py_tests}/test_health.py (100%) rename {tests => py_tests}/test_users_original.py (100%) diff --git a/tests/__init__.py b/py_tests/__init__.py similarity index 100% rename from tests/__init__.py rename to py_tests/__init__.py diff --git a/tests/conftest.py b/py_tests/conftest.py similarity index 100% rename from tests/conftest.py rename to py_tests/conftest.py diff --git a/tests/test_health.py b/py_tests/test_health.py similarity index 100% rename from tests/test_health.py rename to py_tests/test_health.py diff --git a/tests/test_users_original.py b/py_tests/test_users_original.py similarity index 100% rename from tests/test_users_original.py rename to py_tests/test_users_original.py diff --git a/pytest.ini b/pytest.ini index 3c06021..eeb7708 100644 --- a/pytest.ini +++ b/pytest.ini @@ 
-14,7 +14,7 @@ addopts = -p no:warnings # Test paths -testpaths = tests +testpaths = py_tests # Markers for test categorization markers = @@ -33,7 +33,7 @@ markers = [coverage:run] source = app omit = - */tests/* + */py_tests/* */venv/* */__pycache__/*