diff --git a/README.md b/README.md index eae4c37a..7b8f8c21 100644 --- a/README.md +++ b/README.md @@ -289,14 +289,14 @@ will be used to upload all results into the same test run. #### Labels Management -The TestRail CLI provides comprehensive label management capabilities for **Projects** using the `labels` command. Labels help categorize and organize your test management assets efficiently, making it easier to filter and manage test cases, runs, and projects. +The TestRail CLI provides comprehensive label management capabilities using the `labels` command. Labels help categorize and organize your test management assets efficiently, making it easier to filter and manage test cases, runs, and projects. -The `labels` command supports full CRUD (Create, Read, Update, Delete) operations: -- **Add** new labels to projects -- **List** existing labels with pagination support -- **Get** detailed information about specific labels -- **Update** existing label titles -- **Delete** single or multiple labels in batch +The TestRail CLI supports three types of label management: +- **Project Labels**: Manage labels at the project level +- **Test Case Labels**: Apply labels to specific test cases for better organization and filtering +- **Test Labels**: Apply labels to specific tests (instances of test cases within test runs) for execution management + +All types of labels support comprehensive operations with validation and error handling. Project labels support full CRUD operations, while test case and test labels focus on assignment and retrieval operations. 
##### Reference ```shell @@ -310,13 +310,26 @@ Options: Commands: add Add a new label in TestRail + cases Manage labels for test cases delete Delete labels from TestRail get Get a specific label by ID list List all labels in the project + tests Manage labels for tests update Update an existing label in TestRail ``` -##### Adding Labels +#### Project Labels + +Project labels are managed using the main `labels` command and provide project-wide label management capabilities. These labels can be created, updated, deleted, and listed at the project level. + +**Project Labels Support:** +- **Add** new labels to projects +- **List** existing labels with pagination support +- **Get** detailed information about specific labels +- **Update** existing label titles +- **Delete** single or multiple labels in batch + +###### Adding Labels Create new labels for your project with a descriptive title (maximum 20 characters). ```shell @@ -336,7 +349,7 @@ $ trcli -h https://yourinstance.testrail.io --username --passwor labels add --title "Regression" ``` -##### Listing Labels +###### Listing Labels View all labels in your project with optional pagination support. ```shell @@ -367,7 +380,7 @@ Found 5 labels: ID: 127, Title: 'Performance' ``` -##### Getting Label Details +###### Getting Label Details Retrieve detailed information about a specific label by its ID. ```shell @@ -387,7 +400,7 @@ Label details: Created on: 1234567890 ``` -##### Updating Labels +###### Updating Labels Modify the title of existing labels (maximum 20 characters). ```shell @@ -403,7 +416,7 @@ Updating label with ID 123... Successfully updated label: ID=123, Title='High-Priority' ``` -##### Deleting Labels +###### Deleting Labels Remove single or multiple labels from your project. ```shell @@ -426,7 +439,7 @@ Deleting labels with IDs: 123,124... Successfully deleted 2 label(s) ``` -##### Common Use Cases +###### Common Use Cases **1. 
Release Management** ```shell @@ -481,7 +494,7 @@ $ trcli -h https://yourinstance.testrail.io --username --passwor labels delete --ids "100,101,102,103,104" ``` -##### Command Options Reference +###### Command Options Reference **Add Command:** ```shell @@ -525,7 +538,7 @@ Options: --help Show this message and exit. ``` -##### Error Handling and Validation +###### Error Handling and Validation The labels command includes comprehensive validation: @@ -549,6 +562,332 @@ $ trcli labels delete --ids "abc,def" Error: Invalid label IDs format ``` +#### Test Case Labels + +In addition to project-level labels, the TestRail CLI also supports **test case label management** through the `labels cases` command. This functionality allows you to assign labels to specific test cases and filter test cases by their labels, providing powerful organization and filtering capabilities for your test suite. + +###### Test Case Label Features +- **Add labels to test cases**: Apply existing or new labels to one or multiple test cases +- **List test cases by labels**: Find test cases that have specific labels applied +- **Automatic label creation**: Labels are created automatically if they don't exist when adding to cases +- **Maximum label validation**: Enforces TestRail's limit of 10 labels per test case +- **Flexible filtering**: Search by label ID or title + +###### Reference +```shell +$ trcli labels cases --help +Usage: trcli labels cases [OPTIONS] COMMAND [ARGS]... + + Manage labels for test cases + +Options: + --help Show this message and exit. + +Commands: + add Add a label to test cases + list List test cases filtered by label ID or title +``` + +###### Adding Labels to Test Cases +Apply labels to one or multiple test cases. If the label doesn't exist, it will be created automatically. 
+ +```shell +# Add a label to a single test case +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases add --case-ids 123 --title "Regression" + +# Add a label to multiple test cases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases add --case-ids "123,124,125" --title "Critical" + +# Add a release label to test cases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases add --case-ids "100,101,102" --title "Sprint-42" +``` + +###### Listing Test Cases by Labels +Find test cases that have specific labels applied, either by label ID or title. + +```shell +# List test cases by label title +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases list --title "Regression" + +# List test cases by label ID +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases list --ids 123 + +# List test cases by multiple label IDs +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases list --ids "123,124,125" +``` + +**Output example:** +``` +Retrieving test cases with label title 'Regression'... +Found 3 matching test case(s): + + Case ID: 123, Title: 'Login functionality test' [Labels: ID:5,Title:'Regression'; ID:7,Title:'Critical'] + Case ID: 124, Title: 'Password validation test' [Labels: ID:5,Title:'Regression'] + Case ID: 125, Title: 'User registration test' [Labels: ID:5,Title:'Regression'; ID:8,Title:'UI'] +``` + +**No matches example:** +``` +Retrieving test cases with label title 'Non-Existent'... +Found 0 matching test case(s): + No test cases found with label title 'Non-Existent'. 
+``` + +###### Command Options Reference + +**Add Cases Command:** +```shell +$ trcli labels cases add --help +Options: + --case-ids Comma-separated list of test case IDs [required] + --title Title of the label to add (max 20 characters) [required] + --help Show this message and exit. +``` + +**List Cases Command:** +```shell +$ trcli labels cases list --help +Options: + --ids Comma-separated list of label IDs to filter by + --title Label title to filter by (max 20 characters) + --help Show this message and exit. +``` + +###### Validation Rules + +**Test Case Label Management includes these validations:** + +- **Label Title**: Maximum 20 characters (same as project labels) +- **Case IDs**: Must be valid integers in comma-separated format +- **Maximum Labels**: Each test case can have maximum 10 labels +- **Filter Requirements**: Either `--ids` or `--title` must be provided for list command +- **Label Creation**: Labels are automatically created if they don't exist when adding to cases +- **Duplicate Prevention**: Adding an existing label to a case is handled gracefully + +#### Test Labels + +The TestRail CLI also supports **test label management** through the `labels tests` command. This functionality allows you to assign labels to specific tests (instances of test cases within test runs), providing powerful organization and filtering capabilities for your test execution. 
+ +###### Test Label Features +- **Add labels to tests**: Apply existing or new labels to one or multiple tests +- **CSV file support**: Bulk assign labels using CSV files containing test IDs +- **List tests by labels**: Find tests that have specific labels applied +- **Get test labels**: Retrieve all labels assigned to specific tests +- **Automatic label creation**: Labels are created automatically if they don't exist when adding to tests +- **Maximum label validation**: Enforces TestRail's limit of 10 labels per test +- **Flexible filtering**: Search by label ID for efficient test management + +###### Reference +```shell +$ trcli labels tests --help +Usage: trcli labels tests [OPTIONS] COMMAND [ARGS]... + + Manage labels for tests + +Options: + --help Show this message and exit. + +Commands: + add Add a label to tests + list List tests filtered by label ID + get Get the labels of tests using test IDs +``` + +###### Adding Labels to Tests +Apply labels to one or multiple tests. If the label doesn't exist, it will be created automatically. + +```shell +# Add a label to a single test +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests add --test-ids 123 --title "Regression" + +# Add a label to multiple tests +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests add --test-ids "123,124,125" --title "Critical" + +# Add a label to tests using CSV file +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests add --test-id-file test_ids.csv --title "Sprint-42" +``` + +**CSV File Format:** +The CSV file should contain test IDs, one per row or comma-separated. Headers are automatically detected and skipped. 
+```csv +test_id +123 +124 +125 +``` + +Or simple format: +```csv +123,124,125 +``` + +###### Listing Tests by Labels +Find tests that have specific labels applied by label ID from specific test runs. + +```shell +# List tests by label ID from a specific run +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests list --run-id 456 --ids 123 + +# List tests by multiple label IDs from multiple runs +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests list --run-id "456,457" --ids "123,124,125" +``` + +**Output example:** +``` +Retrieving tests from run IDs: 456 with label IDs: 123... +Found 2 matching test(s): + + Test ID: 1001, Title: 'Login functionality test', Status: 1 [Labels: ID:123,Title:'Regression'; ID:124,Title:'Critical'] + Test ID: 1002, Title: 'Password validation test', Status: 2 [Labels: ID:123,Title:'Regression'] +``` + +###### Getting Test Labels +Retrieve all labels assigned to specific tests. + +```shell +# Get labels for a single test +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests get --test-ids 123 + +# Get labels for multiple tests +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests get --test-ids "123,124,125" +``` + +**Output example:** +``` +Retrieving labels for 2 test(s)... 
+Test label information: + + Test ID: 123 + Title: 'Login functionality test' + Status: 1 + Labels (2): + - ID: 5, Title: 'Regression' + - ID: 7, Title: 'Critical' + + Test ID: 124 + Title: 'Password validation test' + Status: 2 + Labels: No labels assigned +``` + +###### Command Options Reference + +**Add Tests Command:** +```shell +$ trcli labels tests add --help +Options: + --test-ids Comma-separated list of test IDs (e.g., 1,2,3) + --test-id-file CSV file containing test IDs + --title Title of the label to add (max 20 characters) [required] + --help Show this message and exit. +``` + +**List Tests Command:** +```shell +$ trcli labels tests list --help +Options: + --run-id Comma-separated list of run IDs to filter tests from [required] + --ids Comma-separated list of label IDs to filter by [required] + --help Show this message and exit. +``` + +**Get Tests Command:** +```shell +$ trcli labels tests get --help +Options: + --test-ids Comma-separated list of test IDs (e.g., 1,2,3) [required] + --help Show this message and exit. +``` + +###### Validation Rules + +**Test Label Management includes these validations:** + +- **Label Title**: Maximum 20 characters (same as project and case labels) +- **Test IDs**: Must be valid integers in comma-separated format +- **Maximum Labels**: Each test can have maximum 10 labels +- **Input Requirements**: Either `--test-ids` or `--test-id-file` must be provided for add command +- **Label Creation**: Labels are automatically created if they don't exist when adding to tests +- **Duplicate Prevention**: Adding an existing label to a test is handled gracefully +- **CSV File Validation**: Invalid entries in CSV files are ignored with warnings + +###### Common Use Cases + +**1. 
Test Execution Categorization** +```shell +# Label tests by execution type +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "API Tests" \ + labels tests add --test-ids "1001,1002,1003" --title "Smoke" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "API Tests" \ + labels tests add --test-ids "1004,1005" --title "Integration" +``` + +**2. Release Management** +```shell +# Label tests for specific releases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Mobile App" \ + labels tests add --test-ids "2001,2002,2003" --title "Release-2.0" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Mobile App" \ + labels tests add --test-id-file hotfix_tests.csv --title "Hotfix-2.1.3" +``` + +**3. Priority and Risk Assessment** +```shell +# Label tests by priority +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "E-Commerce" \ + labels tests add --test-ids "3001,3002" --title "P0-Critical" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "E-Commerce" \ + labels tests add --test-ids "3003,3004,3005" --title "P1-High" +``` + +**4. 
Test Analysis and Reporting** +```shell +# Find all regression tests from run 101 +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Web App" \ + labels tests list --run-id 101 --ids 5 + +# Get detailed label information for failed tests +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Web App" \ + labels tests get --test-ids "4001,4002,4003" +``` + ### Reference ```shell $ trcli add_run --help diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index ba76dc3a..a3bb6675 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -1,5 +1,5 @@ import pytest -from unittest.mock import Mock, patch, MagicMock +from unittest.mock import Mock, patch, MagicMock, call from pathlib import Path import json from serde.json import from_json @@ -341,4 +341,736 @@ def test_delete_labels_forbidden(self, labels_handler): success, error = labels_handler.delete_labels(label_ids=[1]) assert success is False - assert error == "No access to the project" \ No newline at end of file + assert error == "No access to the project" + + +class TestApiRequestHandlerLabelsCases: + """Test cases for test case label operations""" + + def setup_method(self): + """Set up test fixtures""" + # Create proper objects like the existing fixture + api_client = APIClient(host_name="http://test.com") + environment = Environment() + environment.project = "Test Project" + environment.batch_size = 10 + + # Create a minimal TestRailSuite for testing + from trcli.data_classes.dataclass_testrail import TestRailSuite + test_suite = TestRailSuite(name="Test Suite") + + self.labels_handler = ApiRequestHandler(environment, api_client, test_suite, verify=False) + + def test_add_labels_to_cases_success(self): + """Test successful addition of labels to test cases""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + 
patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler, 'add_label') as mock_add_label, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ + patch.object(self.labels_handler.client, 'send_post') as mock_send_post: + + # Mock __get_all_cases response (cases exist) + mock_get_cases.return_value = ([ + {"id": 1, "title": "Case 1", "suite_id": 1}, + {"id": 2, "title": "Case 2", "suite_id": 1} + ], "") + + # Mock get_labels response (label doesn't exist) + mock_get_labels.return_value = ({"labels": []}, "") + + # Mock add_label response (create new label) + mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "") + + # Mock get_case responses + mock_send_get.side_effect = [ + MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}), # Case 1 + MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}) # Case 2 + ] + + # Mock update_cases batch response (for multiple cases) + mock_send_post.return_value = MagicMock(status_code=200) + + # Test the method + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1, 2], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 2 + assert len(results['failed_cases']) == 0 + assert len(results['max_labels_reached']) == 0 + assert len(results['case_not_found']) == 0 + + # Verify API calls - should be called twice: once for multi-suite detection, once for case validation + assert mock_get_cases.call_count == 2 + mock_get_cases.assert_has_calls([ + call(1, None), # Multi-suite detection + call(1, None) # Case validation + ]) + mock_get_labels.assert_called_once_with(1) + mock_add_label.assert_called_once_with(1, "test-label") + assert mock_send_get.call_count == 2 + # Should call update_cases/{suite_id} once for batch update + 
mock_send_post.assert_called_once_with("update_cases/1", payload={ + 'case_ids': [1, 2], + 'labels': [5] + }) + + def test_add_labels_to_cases_single_case(self): + """Test adding labels to a single test case using update_case endpoint""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler, 'add_label') as mock_add_label, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ + patch.object(self.labels_handler.client, 'send_post') as mock_send_post: + + # Mock __get_all_cases response (case exists) + mock_get_cases.return_value = ([ + {"id": 1, "title": "Case 1"} + ], "") + + # Mock get_labels response (label doesn't exist) + mock_get_labels.return_value = ({"labels": []}, "") + + # Mock add_label response (create new label) + mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "") + + # Mock get_case response + mock_send_get.return_value = MagicMock( + status_code=200, + response_text={"labels": [], "suite_id": 1, "title": "Case 1"} + ) + + # Mock update_case response (for single case) + mock_send_post.return_value = MagicMock(status_code=200) + + # Test the method with single case + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 1 + assert len(results['failed_cases']) == 0 + assert len(results['max_labels_reached']) == 0 + assert len(results['case_not_found']) == 0 + + # Verify API calls + assert mock_get_cases.call_count == 2 + mock_get_labels.assert_called_once_with(1) + mock_add_label.assert_called_once_with(1, "test-label") + assert mock_send_get.call_count == 1 + # Should call update_case/{case_id} once for single case + 
mock_send_post.assert_called_once_with("update_case/1", payload={'labels': [5]}) + + def test_add_labels_to_cases_existing_label(self): + """Test adding labels when label already exists""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler, 'add_label') as mock_add_label, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ + patch.object(self.labels_handler.client, 'send_post') as mock_send_post: + + # Mock __get_all_cases response (case exists) + mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") + + # Mock get_labels response (label exists) + mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") + + # Mock get_case response + mock_send_get.return_value = MagicMock(status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"}) + + # Mock add_label_to_case response + mock_send_post.return_value = MagicMock(status_code=200) + + # Test the method + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 1 + assert len(results['case_not_found']) == 0 + + # Verify add_label was not called (label already exists) + mock_add_label.assert_not_called() + + def test_add_labels_to_cases_max_labels_reached(self): + """Test handling of maximum labels limit (10)""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get: + + # Mock __get_all_cases response (case exists) + mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") + + # Mock get_labels response + 
mock_get_labels.return_value = ({"labels": [{"id": 15, "title": "test-label"}]}, "") + + # Mock get_case response with 10 existing labels (different from test-label) + existing_labels = [{"id": i, "title": f"label-{i}"} for i in range(1, 11)] + mock_send_get.return_value = MagicMock( + status_code=200, + response_text={"labels": existing_labels} + ) + + # Test the method + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 0 + assert len(results['failed_cases']) == 0 + assert len(results['max_labels_reached']) == 1 + assert len(results['case_not_found']) == 0 + assert results['max_labels_reached'][0] == 1 + + def test_add_labels_to_cases_label_already_on_case(self): + """Test handling when label already exists on case""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get: + + # Mock __get_all_cases response (case exists) + mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") + + # Mock get_labels response + mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") + + # Mock get_case response with the label already present + mock_send_get.return_value = MagicMock( + status_code=200, + response_text={"labels": [{"id": 5, "title": "test-label"}]} + ) + + # Test the method + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 1 + assert len(results['case_not_found']) == 0 + assert "already exists" in results['successful_cases'][0]['message'] + + def 
test_add_labels_to_cases_case_not_found(self): + """Test handling when case IDs don't exist""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: + + # Mock __get_all_cases response (no cases exist) + mock_get_cases.return_value = ([], "") + + # Test the method with case IDs that don't exist + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[999, 1000, 1001], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results - all cases should be in case_not_found + assert len(results['case_not_found']) == 3 + assert 999 in results['case_not_found'] + assert 1000 in results['case_not_found'] + assert 1001 in results['case_not_found'] + + # Verify that no other processing happened since no valid cases + assert len(results['successful_cases']) == 0 + assert len(results['failed_cases']) == 0 + assert len(results['max_labels_reached']) == 0 + + def test_get_cases_by_label_with_label_ids(self): + """Test getting cases by label IDs""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: + + # Mock cases response + mock_cases = [ + {"id": 1, "title": "Test Case 1", "labels": [{"id": 5, "title": "label1"}]}, + {"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "label2"}]}, + {"id": 3, "title": "Test Case 3", "labels": [{"id": 5, "title": "label1"}]} + ] + mock_get_cases.return_value = (mock_cases, "") + + # Test the method + matching_cases, error_message = self.labels_handler.get_cases_by_label( + project_id=1, + suite_id=None, + label_ids=[5] + ) + + # Verify no error + assert error_message == "" + + # Verify results (should return cases 1 and 3) + assert len(matching_cases) == 2 + assert matching_cases[0]['id'] == 1 + assert matching_cases[1]['id'] == 3 + + def test_get_cases_by_label_with_title(self): + """Test getting cases by label title""" + with patch.object(self.labels_handler, 
'_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels: + + # Mock labels response + mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") + + # Mock cases response + mock_cases = [ + {"id": 1, "title": "Test Case 1", "labels": [{"id": 5, "title": "test-label"}]}, + {"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "other-label"}]} + ] + mock_get_cases.return_value = (mock_cases, "") + + # Test the method + matching_cases, error_message = self.labels_handler.get_cases_by_label( + project_id=1, + suite_id=None, + label_title="test-label" + ) + + # Verify no error + assert error_message == "" + + # Verify results (should return case 1) + assert len(matching_cases) == 1 + assert matching_cases[0]['id'] == 1 + + def test_get_cases_by_label_title_not_found(self): + """Test getting cases by non-existent label title""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels: + + # Mock labels response (no matching label) + mock_get_labels.return_value = ({"labels": []}, "") + + # Mock get_all_cases to return empty (not called due to early return) + mock_get_cases.return_value = ([], "") + + # Test the method + matching_cases, error_message = self.labels_handler.get_cases_by_label( + project_id=1, + suite_id=None, + label_title="non-existent-label" + ) + + # Verify error + assert error_message == "" + assert matching_cases == [] + + def test_get_cases_by_label_no_matching_cases(self): + """Test getting cases when no cases have the specified label""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: + + # Mock cases response (no cases with target label) + mock_cases = [ + {"id": 1, "title": "Test Case 1", "labels": [{"id": 6, "title": "other-label"}]}, + {"id": 2, "title": "Test Case 2", "labels": []} + 
] + mock_get_cases.return_value = (mock_cases, "") + + # Test the method + matching_cases, error_message = self.labels_handler.get_cases_by_label( + project_id=1, + suite_id=None, + label_ids=[5] + ) + + # Verify no error but no results + assert error_message == "" + assert len(matching_cases) == 0 + + +class TestApiRequestHandlerTestLabels: + """Test class for test label management API methods""" + + def test_add_labels_to_tests_success_single(self, labels_handler): + """Test successful label addition to a single test""" + # Mock test validation + mock_test_response = APIClientResult( + status_code=200, + response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, + error_message=None + ) + + # Mock run validation + mock_run_response = APIClientResult( + status_code=200, + response_text={"id": 1, "project_id": 1}, + error_message=None + ) + + # Mock existing labels + mock_labels_response = APIClientResult( + status_code=200, + response_text={"labels": []}, + error_message=None + ) + + # Mock label creation + mock_add_label_response = APIClientResult( + status_code=200, + response_text={"id": 5, "title": "Test Label"}, + error_message=None + ) + + # Mock test update + mock_update_response = APIClientResult( + status_code=200, + response_text={"id": 1, "labels": [{"id": 5, "title": "Test Label"}]}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get, \ + patch.object(labels_handler.client, 'send_post') as mock_post: + + # Setup get responses for validation and label retrieval + mock_get.side_effect = [ + mock_test_response, # get_test/{test_id} + mock_run_response, # get_run/{run_id} + mock_labels_response, # get_labels + mock_test_response, # get_test/{test_id} again for labels check + ] + + # Setup post responses for label creation and test update + mock_post.side_effect = [ + mock_add_label_response, # add_label + mock_update_response # update_test + ] + + result, error = labels_handler.add_labels_to_tests( + 
test_ids=[1], + titles="Test Label", + project_id=1 + ) + + assert error == "" + assert len(result['successful_tests']) == 1 + assert len(result['failed_tests']) == 0 + assert len(result['test_not_found']) == 0 + assert len(result['max_labels_reached']) == 0 + + def test_add_labels_to_tests_test_not_found(self, labels_handler): + """Test handling of non-existent test IDs""" + # Mock test not found + mock_test_response = APIClientResult( + status_code=404, + response_text=None, + error_message="Test not found" + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response): + result, error = labels_handler.add_labels_to_tests( + test_ids=[999], + titles="Test Label", + project_id=1 + ) + + assert error == "" + assert len(result['test_not_found']) == 1 + assert 999 in result['test_not_found'] + + def test_add_labels_to_tests_max_labels_reached(self, labels_handler): + """Test handling of tests that already have maximum labels""" + # Create 10 existing labels + existing_labels = [{"id": i, "title": f"Label {i}"} for i in range(1, 11)] + + # Mock test with max labels + mock_test_response = APIClientResult( + status_code=200, + response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": existing_labels}, + error_message=None + ) + + # Mock run validation + mock_run_response = APIClientResult( + status_code=200, + response_text={"id": 1, "project_id": 1}, + error_message=None + ) + + # Mock existing labels + mock_labels_response = APIClientResult( + status_code=200, + response_text={"labels": []}, + error_message=None + ) + + # Mock label creation + mock_add_label_response = APIClientResult( + status_code=200, + response_text={"id": 11, "title": "New Label"}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get, \ + patch.object(labels_handler.client, 'send_post') as mock_post: + + mock_get.side_effect = [ + mock_test_response, # get_test/{test_id} + mock_run_response, # get_run/{run_id} + 
mock_labels_response, # get_labels + mock_test_response, # get_test/{test_id} again for labels check + ] + + mock_post.return_value = mock_add_label_response + + result, error = labels_handler.add_labels_to_tests( + test_ids=[1], + titles="New Label", + project_id=1 + ) + + assert error == "" + assert len(result['max_labels_reached']) == 1 + assert 1 in result['max_labels_reached'] + + def test_get_tests_by_label_success(self, labels_handler): + """Test successful retrieval of tests by label""" + # Mock runs response + mock_runs_response = APIClientResult( + status_code=200, + response_text={"runs": [{"id": 1}, {"id": 2}]}, + error_message=None + ) + + # Mock tests responses for each run + mock_tests_response_run1 = APIClientResult( + status_code=200, + response_text={"tests": [ + {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}, + {"id": 2, "title": "Test 2", "labels": []} + ]}, + error_message=None + ) + + mock_tests_response_run2 = APIClientResult( + status_code=200, + response_text={"tests": [ + {"id": 3, "title": "Test 3", "labels": [{"id": 5, "title": "Test Label"}]} + ]}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get: + mock_get.side_effect = [ + mock_runs_response, # get_runs/{project_id} + mock_tests_response_run1, # get_tests/{run_id} for run 1 + mock_tests_response_run2 # get_tests/{run_id} for run 2 + ] + + result, error = labels_handler.get_tests_by_label( + project_id=1, + label_ids=[5] + ) + + assert error == "" + assert len(result) == 2 + assert result[0]['id'] == 1 + assert result[1]['id'] == 3 + + def test_get_tests_by_label_with_run_ids(self, labels_handler): + """Test retrieval of tests by label filtered by specific run IDs""" + # Mock run responses for specific run IDs + mock_run_response_1 = APIClientResult( + status_code=200, + response_text={"id": 1, "name": "Test Run 1"}, + error_message=None + ) + + mock_run_response_2 = APIClientResult( + status_code=200, + 
response_text={"id": 2, "name": "Test Run 2"}, + error_message=None + ) + + # Mock tests responses for each run + mock_tests_response_run1 = APIClientResult( + status_code=200, + response_text={"tests": [ + {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]} + ]}, + error_message=None + ) + + mock_tests_response_run2 = APIClientResult( + status_code=200, + response_text={"tests": [ + {"id": 2, "title": "Test 2", "labels": [{"id": 5, "title": "Test Label"}]} + ]}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get: + mock_get.side_effect = [ + mock_run_response_1, # get_run/1 + mock_run_response_2, # get_run/2 + mock_tests_response_run1, # get_tests/1 + mock_tests_response_run2 # get_tests/2 + ] + + result, error = labels_handler.get_tests_by_label( + project_id=1, + label_ids=[5], + run_ids=[1, 2] + ) + + assert error == "" + assert len(result) == 2 + assert result[0]['id'] == 1 + assert result[1]['id'] == 2 + + def test_get_test_labels_success(self, labels_handler): + """Test successful retrieval of test labels""" + # Mock test responses + mock_test_response1 = APIClientResult( + status_code=200, + response_text={ + "id": 1, + "title": "Test 1", + "status_id": 1, + "labels": [{"id": 5, "title": "Test Label"}] + }, + error_message=None + ) + + mock_test_response2 = APIClientResult( + status_code=200, + response_text={ + "id": 2, + "title": "Test 2", + "status_id": 2, + "labels": [] + }, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get: + mock_get.side_effect = [mock_test_response1, mock_test_response2] + + result, error = labels_handler.get_test_labels([1, 2]) + + assert error == "" + assert len(result) == 2 + + # Check first test + assert result[0]['test_id'] == 1 + assert result[0]['title'] == "Test 1" + assert result[0]['status_id'] == 1 + assert len(result[0]['labels']) == 1 + assert result[0]['labels'][0]['title'] == "Test Label" + assert 
result[0]['error'] is None + + # Check second test + assert result[1]['test_id'] == 2 + assert result[1]['title'] == "Test 2" + assert result[1]['status_id'] == 2 + assert len(result[1]['labels']) == 0 + assert result[1]['error'] is None + + def test_get_test_labels_test_not_found(self, labels_handler): + """Test handling of non-existent test IDs in get_test_labels""" + # Mock test not found + mock_test_response = APIClientResult( + status_code=404, + response_text=None, + error_message="Test not found" + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response): + result, error = labels_handler.get_test_labels([999]) + + assert error == "" + assert len(result) == 1 + assert result[0]['test_id'] == 999 + assert result[0]['error'] == "Test 999 not found or inaccessible" + assert result[0]['labels'] == [] + + def test_add_labels_to_tests_batch_update(self, labels_handler): + """Test batch update of multiple tests""" + # Mock test validation for multiple tests + mock_test_response1 = APIClientResult( + status_code=200, + response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, + error_message=None + ) + + mock_test_response2 = APIClientResult( + status_code=200, + response_text={"id": 2, "title": "Test 2", "run_id": 1, "labels": []}, + error_message=None + ) + + # Mock run validation + mock_run_response = APIClientResult( + status_code=200, + response_text={"id": 1, "project_id": 1}, + error_message=None + ) + + # Mock existing labels + mock_labels_response = APIClientResult( + status_code=200, + response_text={"labels": [{"id": 5, "title": "Test Label"}]}, + error_message=None + ) + + # Mock batch update + mock_batch_response = APIClientResult( + status_code=200, + response_text={"updated": 2}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get, \ + patch.object(labels_handler.client, 'send_post') as mock_post: + + # Setup get responses + mock_get.side_effect = [ + 
mock_test_response1, # get_test/1 + mock_run_response, # get_run/1 + mock_test_response2, # get_test/2 + mock_run_response, # get_run/1 + mock_labels_response, # get_labels + mock_test_response1, # get_test/1 for labels check + mock_test_response2, # get_test/2 for labels check + ] + + # Setup batch update response + mock_post.return_value = mock_batch_response + + result, error = labels_handler.add_labels_to_tests( + test_ids=[1, 2], + titles="Test Label", + project_id=1 + ) + + assert error == "" + assert len(result['successful_tests']) == 2 \ No newline at end of file diff --git a/tests/test_cmd_labels.py b/tests/test_cmd_labels.py index 18a1a0e2..679440fa 100644 --- a/tests/test_cmd_labels.py +++ b/tests/test_cmd_labels.py @@ -345,4 +345,681 @@ def test_print_config(self): "\n> TestRail instance: https://test.testrail.com (user: test@example.com)" "\n> Project: Test Project" ) - mock_log.assert_called_once_with(expected_message) \ No newline at end of file + mock_log.assert_called_once_with(expected_message) + + +class TestLabelsCasesCommands: + """Test cases for test case label CLI commands""" + + def setup_method(self): + """Set up test fixtures""" + self.runner = CliRunner() + self.environment = Environment() + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_success(self, mock_project_client): + """Test successful addition of labels to test cases""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.suite.suite_id = None + mock_client_instance.api_request_handler.add_labels_to_cases.return_value = ( + { + 'successful_cases': [ + {'case_id': 1, 'message': "Successfully added label 'test-label' to case 1"}, + {'case_id': 2, 'message': "Successfully added label 'test-label' to case 2"} + ], + 'failed_cases': [], + 'max_labels_reached': [], + 'case_not_found': [] + }, + "" + ) + + with 
patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', '1,2', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_labels_to_cases.assert_called_once_with( + case_ids=[1, 2], + title='test-label', + project_id=1, + suite_id=None + ) + + # Verify success messages were logged + mock_log.assert_any_call("Successfully processed 2 case(s):") + mock_log.assert_any_call(" Case 1: Successfully added label 'test-label' to case 1") + mock_log.assert_any_call(" Case 2: Successfully added label 'test-label' to case 2") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_with_max_labels_reached(self, mock_project_client): + """Test addition of labels with some cases reaching maximum labels""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.add_labels_to_cases.return_value = ( + { + 'successful_cases': [ + {'case_id': 1, 'message': "Successfully added label 'test-label' to case 1"} + ], + 'failed_cases': [], + 'max_labels_reached': [2, 3], + 'case_not_found': [] + }, + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', '1,2,3', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 0 + + # Verify warning messages were logged + mock_log.assert_any_call("Warning: 2 case(s) already have maximum labels (10):") + mock_log.assert_any_call(" Case 2: Maximum labels reached") + 
mock_log.assert_any_call(" Case 3: Maximum labels reached") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_title_too_long(self, mock_project_client): + """Test title length validation""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', '1', '--title', 'this-title-is-way-too-long-for-testrail'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Label title must be 20 characters or less.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_invalid_case_ids(self, mock_project_client): + """Test invalid case IDs format""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', 'invalid,ids', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Invalid case IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_case_not_found(self, mock_project_client): + """Test handling of non-existent case IDs""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.add_labels_to_cases.return_value = ( + { + 'successful_cases': [ + {'case_id': 1, 'message': "Successfully added label 'test-label' to case 1"} + ], + 'failed_cases': [], + 'max_labels_reached': [], + 'case_not_found': [999, 1000] + }, + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', '1,999,1000', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 1 + + # Verify error messages were logged + mock_elog.assert_any_call("Error: 2 test case(s) not found:") + mock_elog.assert_any_call(" Case ID 999 does not exist in the project") + mock_elog.assert_any_call(" Case ID 1000 does not exist in the project") + + # Verify success messages were still logged + mock_log.assert_any_call("Successfully processed 1 case(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_cases_by_label_ids_success(self, mock_project_client): + """Test successful listing of cases by label IDs""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.suite = None + mock_client_instance.api_request_handler.get_cases_by_label.return_value = ( + [ + { + 'id': 1, + 'title': 'Test Case 1', + 'labels': [{'id': 5, 'title': 'test-label'}] + }, + { + 'id': 2, 
+ 'title': 'Test Case 2', + 'labels': [{'id': 5, 'title': 'test-label'}, {'id': 6, 'title': 'other-label'}] + } + ], + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['list', '--ids', '5'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_cases_by_label.assert_called_once_with( + project_id=1, + suite_id=None, + label_ids=[5], + label_title=None + ) + + # Verify cases were logged + mock_log.assert_any_call("Found 2 matching test case(s):") + mock_log.assert_any_call(" Case ID: 1, Title: 'Test Case 1' [Labels: ID:5,Title:'test-label']") + mock_log.assert_any_call(" Case ID: 2, Title: 'Test Case 2' [Labels: ID:5,Title:'test-label'; ID:6,Title:'other-label']") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_cases_by_label_title_success(self, mock_project_client): + """Test successful listing of cases by label title""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.suite = None + mock_client_instance.api_request_handler.get_cases_by_label.return_value = ( + [ + { + 'id': 1, + 'title': 'Test Case 1', + 'labels': [{'id': 5, 'title': 'test-label'}] + } + ], + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['list', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_cases_by_label.assert_called_once_with( + project_id=1, + suite_id=None, + label_ids=None, + label_title='test-label' + ) + + 
@mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_cases_no_filter_provided(self, mock_project_client): + """Test error when neither ids nor title is provided""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['list'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Either --ids or --title must be provided.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_cases_no_matching_cases(self, mock_project_client): + """Test listing when no cases match the label""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.suite = None + mock_client_instance.api_request_handler.get_cases_by_label.return_value = ([], "") + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['list', '--title', 'non-existent'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_log.assert_any_call("Found 0 matching test case(s):") + mock_log.assert_any_call(" No test cases found with label title 'non-existent'.") +class TestCmdLabelsTests: + """Test class for test labels command functionality""" + + def setup_method(self): + """Set up test environment""" + self.runner = CliRunner() + self.environment = Environment(cmd="labels") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + + 
@mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_success(self, mock_project_client): + """Test successful label addition to tests""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [{'test_id': 1, 'message': 'Success'}], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + }, + "" + ) + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( + test_ids=[1], titles=['Test Label'], project_id=1 + ) + mock_log.assert_any_call("Successfully processed 1 test(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_with_csv_file(self, mock_project_client): + """Test label addition to tests using CSV file""" + # Mock the project client + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [{'test_id': 1, 'message': 'Success'}, {'test_id': 2, 'message': 'Success'}], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + }, + "" + ) + + # Create a temporary CSV file + with 
self.runner.isolated_filesystem(): + with open('test_ids.csv', 'w') as f: + f.write('test_id,description\n1,Test One\n2,Test Two\n') + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-id-file', 'test_ids.csv', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( + test_ids=[1, 2], titles=['Test Label'], project_id=1 + ) + mock_log.assert_any_call("Loaded 2 test ID(s) from file 'test_ids.csv'") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_validation_error(self, mock_project_client): + """Test validation error when neither test-ids nor file provided""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: Either --test-ids or --test-id-file must be provided.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_title_too_long(self, mock_project_client): + """Test validation error for title too long - should fail when all labels are invalid""" + long_title = "a" * 21 # 21 characters, exceeds limit + + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', long_title], + obj=self.environment + ) + + assert 
result.exit_code == 1 + # Should show warning for invalid label, then error for no valid labels + mock_elog.assert_any_call(f"Warning: Label title '{long_title}' exceeds 20 character limit and will be skipped.") + mock_elog.assert_any_call("Error: No valid label titles provided after filtering.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_tests_by_label_success(self, mock_project_client): + """Test successful listing of tests by label""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.get_tests_by_label.return_value = ( + [ + {'id': 1, 'title': 'Test 1', 'status_id': 1, 'labels': [{'id': 5, 'title': 'Test Label'}]}, + {'id': 2, 'title': 'Test 2', 'status_id': 2, 'labels': [{'id': 5, 'title': 'Test Label'}]} + ], + "" + ) + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['list', '--run-id', '1', '--ids', '5'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_tests_by_label.assert_called_once_with( + project_id=1, label_ids=[5], run_ids=[1] + ) + mock_log.assert_any_call("Found 2 matching test(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_get_test_labels_success(self, mock_project_client): + """Test successful retrieval of test labels""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + 
mock_client_instance.api_request_handler.get_test_labels.return_value = ( + [ + { + 'test_id': 1, + 'title': 'Test 1', + 'status_id': 1, + 'labels': [{'id': 5, 'title': 'Test Label'}], + 'error': None + } + ], + "" + ) + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['get', '--test-ids', '1'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_test_labels.assert_called_once_with([1]) + mock_log.assert_any_call("Test label information:") + mock_log.assert_any_call(" Test ID: 1") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_tests_invalid_ids(self, mock_project_client): + """Test invalid label IDs format in list command""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['list', '--run-id', '1', '--ids', 'invalid,ids'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3).") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_tests_invalid_run_ids(self, mock_project_client): + """Test invalid run IDs format in list command""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['list', '--run-id', 'invalid,run', '--ids', '5'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: Invalid run IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_csv_file_not_found(self, mock_project_client): + """Test error when CSV file is not found""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-id-file', 'nonexistent.csv', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: File 'nonexistent.csv' not found.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_with_warnings(self, mock_project_client): + """Test label addition with warnings for not found tests and max labels""" + # Mock the project client + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [{'test_id': 1, 'message': 'Success'}], + 'failed_tests': [], + 'max_labels_reached': [2], + 'test_not_found': [999] + }, + "" + ) + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1,2,999', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_log.assert_any_call("Warning: 1 test(s) not found or not accessible:") + mock_log.assert_any_call(" Test ID 999 does not exist or is not accessible") + mock_log.assert_any_call("Warning: 1 test(s) already have maximum labels (10):") + 
mock_log.assert_any_call(" Test 2: Maximum labels reached") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_multiple_labels_to_tests_success(self, mock_project_client): + """Test successful addition of multiple labels to tests""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [ + {'test_id': 1, 'message': 'Successfully added 2 labels (label1, label2) to test 1'}, + {'test_id': 2, 'message': 'Successfully added 2 labels (label1, label2) to test 2'} + ], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + }, + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1,2', '--title', 'label1,label2'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( + test_ids=[1, 2], titles=['label1', 'label2'], project_id=1 + ) + mock_log.assert_any_call("Successfully processed 2 test(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_tests_mixed_valid_invalid(self, mock_project_client): + """Test mixed valid/invalid labels - should process valid ones and warn about invalid ones""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [ + {'test_id': 1, 'message': "Successfully 
added label 'valid-label' to test 1"} + ], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + }, + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', 'valid-label,this-title-is-way-too-long-for-testrail'], + obj=self.environment + ) + + # Should succeed with valid label + assert result.exit_code == 0 + + # Should warn about invalid label + mock_elog.assert_any_call("Warning: Label title 'this-title-is-way-too-long-for-testrail' exceeds 20 character limit and will be skipped.") + + # Should process the valid label + mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( + test_ids=[1], titles=['valid-label'], project_id=1 + ) + + # Should show success for valid label + mock_log.assert_any_call("Successfully processed 1 test(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_tests_all_invalid_titles(self, mock_project_client): + """Test when all labels are invalid - should fail""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', 'this-title-is-way-too-long,another-title-that-is-also-too-long'], + obj=self.environment + ) + + # Should fail when all labels are invalid + assert result.exit_code == 1 + + # Should show warnings for all invalid labels + mock_elog.assert_any_call("Warning: Label title 'this-title-is-way-too-long' exceeds 20 character limit and will be skipped.") + mock_elog.assert_any_call("Warning: Label title 'another-title-that-is-also-too-long' 
exceeds 20 character limit and will be skipped.") + mock_elog.assert_any_call("Error: No valid label titles provided after filtering.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_tests_max_labels_validation(self, mock_project_client): + """Test early validation for more than 10 labels""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + # Create a title string with 11 labels + long_title_list = ','.join([f'label{i}' for i in range(1, 12)]) + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', long_title_list], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Cannot add more than 10 labels at once. You provided 11 valid labels.") + + \ No newline at end of file diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index 9881239c..756feae7 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -67,6 +67,6 @@ ' - parse_robot: Robot Framework XML Files\n' ' - parse_openapi: OpenAPI YML Files\n' ' - add_run: Create a new test run\n' - ' - labels: Manage labels (add, update, delete, list)\n') + ' - labels: Manage labels (projects, cases, and tests)\n') trcli_help_description = "TestRail CLI" diff --git a/tests_e2e/reports_junit/generic_ids_name.xml b/tests_e2e/reports_junit/generic_ids_name.xml index 0e9f57d9..610de13c 100644 --- a/tests_e2e/reports_junit/generic_ids_name.xml +++ b/tests_e2e/reports_junit/generic_ids_name.xml @@ -4,8 +4,8 @@ - - + + failed due to... 
@@ -14,7 +14,7 @@ - + diff --git a/tests_e2e/reports_junit/generic_ids_property.xml b/tests_e2e/reports_junit/generic_ids_property.xml index 979c959e..a0a947ac 100644 --- a/tests_e2e/reports_junit/generic_ids_property.xml +++ b/tests_e2e/reports_junit/generic_ids_property.xml @@ -6,13 +6,13 @@ - + failed due to... - + @@ -21,7 +21,7 @@ - + diff --git a/tests_e2e/sample_csv/test_ids.csv b/tests_e2e/sample_csv/test_ids.csv new file mode 100644 index 00000000..68c889ef --- /dev/null +++ b/tests_e2e/sample_csv/test_ids.csv @@ -0,0 +1,3 @@ +test_id +266149 +266151 diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index ebf33113..1ff10ac0 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -4,6 +4,11 @@ import pytest +def _has_testrail_credentials(): + """Check if TestRail credentials are available in environment variables""" + return bool(os.environ.get("TR_CLI_USERNAME") and os.environ.get("TR_CLI_PASSWORD")) + + def _run_cmd(multiline_cmd: str): lines_list = [] for line in multiline_cmd.splitlines(): @@ -64,8 +69,8 @@ class TestsEndToEnd: # TestRail 101 instance has the required configuration for this test run TR_INSTANCE = "https://testrail101.testrail.io/" # Uncomment and enter your credentials below in order to execute the tests locally - # os.environ.setdefault("TR_CLI_USERNAME", "") - # os.environ.setdefault("TR_CLI_PASSWORD", "") + #os.environ.setdefault("TR_CLI_USERNAME", "") + #os.environ.setdefault("TR_CLI_PASSWORD", "") @pytest.fixture(autouse=True, scope="module") def install_trcli(self): @@ -930,4 +935,543 @@ def test_labels_edge_cases(self): "Successfully deleted 1 label(s)" ] ) + + + def test_labels_cases_full_workflow(self): + """Test complete workflow of test case label operations""" + import random + import string + + # Generate random suffix to avoid label conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + case_label_title = f"e2e-case-{random_suffix}" + + # First, 
create a test label + add_label_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "{case_label_title}" + """) + _assert_contains( + add_label_output, + [ + f"Adding label '{case_label_title}'...", + "Successfully added label:" + ] + ) + + # Extract label ID for later use + import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) + assert label_id_match, "Could not extract label ID from output" + label_id = label_id_match.group(1) + + try: + # Use known test case IDs that should exist in the test project + test_case_ids = ["24964", "24965"] # Multiple test cases for batch testing + + # Add labels to test cases + add_cases_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases add \\ + --case-ids "{','.join(test_case_ids)}" \\ + --title "{case_label_title}" + """) + _assert_contains( + add_cases_output, + [ + f"Adding label '{case_label_title}' to {len(test_case_ids)} test case(s)...", + "Successfully processed" + ] + ) + + # List test cases by label title + list_by_title_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --title "{case_label_title}" + """) + _assert_contains( + list_by_title_output, + [ + f"Retrieving test cases with label title '{case_label_title}'...", + "matching test case(s):" + ] + ) + + # List test cases by label ID + list_by_id_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --ids "{label_id}" + """) + _assert_contains( + list_by_id_output, + [ + f"Retrieving test cases with label IDs: {label_id}...", + "matching test case(s):" + ] + ) + + finally: + # Cleanup - delete the test label + delete_output = _run_cmd(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA 
- (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + --ids {label_id} + """) + _assert_contains( + delete_output, + [ + f"Deleting labels with IDs: {label_id}...", + "Successfully deleted 1 label(s)" + ] + ) + + def test_labels_cases_validation_errors(self): + """Test validation errors for test case label commands""" + # Test title too long for add cases + long_title_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases add \\ + --case-ids "1" \\ + --title "this-title-is-way-too-long-for-testrail" + """) + assert return_code != 0 + _assert_contains( + long_title_output, + ["Error: Label title must be 20 characters or less."] + ) + + # Test invalid case IDs format + invalid_ids_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases add \\ + --case-ids "invalid,ids" \\ + --title "test" + """) + assert return_code != 0 + _assert_contains( + invalid_ids_output, + ["Error: Invalid case IDs format. 
Use comma-separated integers (e.g., 1,2,3)."] + ) + + # Test missing filter for list cases + no_filter_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list + """) + assert return_code != 0 + _assert_contains( + no_filter_output, + ["Error: Either --ids or --title must be provided."] + ) + + # Test title too long for list cases + long_title_list_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --title "this-title-is-way-too-long-for-testrail" + """) + assert return_code != 0 + _assert_contains( + long_title_list_output, + ["Error: Label title must be 20 characters or less."] + ) + + def test_labels_cases_help_commands(self): + """Test help output for test case label commands""" + # Test main cases help + cases_help_output = _run_cmd("trcli labels cases --help") + _assert_contains( + cases_help_output, + [ + "Usage: trcli labels cases [OPTIONS] COMMAND [ARGS]...", + "Manage labels for test cases", + "add Add a label to test cases", + "list List test cases filtered by label ID or title" + ] + ) + + # Test cases add help + cases_add_help_output = _run_cmd("trcli labels cases add --help") + _assert_contains( + cases_add_help_output, + [ + "Usage: trcli labels cases add [OPTIONS]", + "Add a label to test cases", + "--case-ids", + "--title" + ] + ) + + # Test cases list help + cases_list_help_output = _run_cmd("trcli labels cases list --help") + _assert_contains( + cases_list_help_output, + [ + "Usage: trcli labels cases list [OPTIONS]", + "List test cases filtered by label ID or title", + "--ids", + "--title" + ] + ) + + def test_labels_cases_no_matching_cases(self): + """Test behavior when no test cases match the specified label""" + # Test with non-existent label title + no_match_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ 
+ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --title "non-existent-label" + """) + _assert_contains( + no_match_output, + [ + "Retrieving test cases with label title 'non-existent-label'...", + "Found 0 matching test case(s):", + "No test cases found with label title 'non-existent-label'." + ] + ) + + # Test with non-existent label ID + no_match_id_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --ids "99999" + """) + _assert_contains( + no_match_id_output, + [ + "Retrieving test cases with label IDs: 99999...", + "Found 0 matching test case(s):", + "No test cases found with the specified label IDs." + ] + ) + + def test_labels_cases_single_case_workflow(self): + """Test single case label operations using update_case endpoint""" + import random + import string + + # Generate random suffix to avoid label conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + single_case_label_title = f"e2e-single-{random_suffix}" + + # First, create a test label + add_label_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "{single_case_label_title}" + """) + _assert_contains( + add_label_output, + [ + f"Adding label '{single_case_label_title}'...", + "Successfully added label:" + ] + ) + + # Extract label ID for later use + import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) + assert label_id_match, "Could not extract label ID from output" + label_id = label_id_match.group(1) + + try: + # Use single test case ID for testing update_case endpoint + single_case_id = "24964" + + # Add label to single test case + add_single_case_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases add \\ + --case-ids "{single_case_id}" \\ + --title 
"{single_case_label_title}" + """) + _assert_contains( + add_single_case_output, + [ + f"Adding label '{single_case_label_title}' to 1 test case(s)...", + "Successfully processed 1 case(s):", + f"Successfully added label '{single_case_label_title}' to case {single_case_id}" + ] + ) + + # Verify the label was added by listing cases with this label + list_cases_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --title "{single_case_label_title}" + """) + _assert_contains( + list_cases_output, + [ + f"Retrieving test cases with label title '{single_case_label_title}'...", + "Found 1 matching test case(s):", + f"Case ID: {single_case_id}" + ] + ) + + finally: + # Clean up: delete the test label + _run_cmd(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + --ids {label_id} + """) + + def test_labels_tests_full_workflow(self): + """Test complete workflow of test label operations""" + import random + import string + + # Generate random suffix to avoid label conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + test_label_title = f"e2e-test-{random_suffix}" + + # First, create a test label + add_label_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "{test_label_title}" + """) + _assert_contains( + add_label_output, + [ + f"Adding label '{test_label_title}'...", + "Successfully added label:" + ] + ) + + # Extract label ID for cleanup + import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) + assert label_id_match, "Could not extract label ID from output" + label_id = label_id_match.group(1) + + try: + # Use known test IDs that should exist in the test project + test_ids = ["266149", "266151"] # Real test IDs for functional testing + + # Test 1: Add 
labels to tests using --test-ids + add_tests_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests add \\ + --test-ids "{','.join(test_ids)}" \\ + --title "{test_label_title}" + """) + + _assert_contains( + add_tests_output, + [ + f"Adding label '{test_label_title}' to {len(test_ids)} test(s)..." + ] + ) + + # Test 2: Add labels to tests using CSV file + import os + csv_file_path = os.path.join(os.path.dirname(__file__), "sample_csv", "test_ids.csv") + + add_tests_csv_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests add \\ + --test-id-file "{csv_file_path}" \\ + --title "{test_label_title}" + """) + + _assert_contains( + add_tests_csv_output, + [ + "Loaded 2 test ID(s) from file", + f"Adding label '{test_label_title}' to 2 test(s)..." + ] + ) + + # Test 3: List tests by label ID from a specific run + # Use a realistic run ID - for E2E testing we'll use run ID 1 as a common test run + list_tests_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests list \\ + --run-id "1" \\ + --ids "{label_id}" + """) + _assert_contains( + list_tests_output, + [ + f"Retrieving tests from run IDs: 1 with label IDs: {label_id}...", + "matching test(s):" + ] + ) + + # Test 4: Get test labels for specific tests + get_test_labels_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests get \\ + --test-ids "{','.join(test_ids)}" + """) + _assert_contains( + get_test_labels_output, + [ + f"Retrieving labels for {len(test_ids)} test(s)...", + "Test label information:" + ] + ) + + finally: + # Cleanup - delete the test label + delete_output = _run_cmd(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + 
--ids {label_id} + """) + + def test_labels_tests_validation_errors(self): + """Test validation errors for test label commands""" + import random + import string + + # Generate random suffix to avoid label conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + # Test title too long (21 characters exceeds 20 character limit) + long_title = f"this-is-a-very-long-title-{random_suffix}" # This will be > 20 chars + title_error_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests add \\ + --test-ids "266149" \\ + --title "{long_title}" + """) + assert return_code != 0 + _assert_contains( + title_error_output, + ["Error: Label title must be 20 characters or less."] + ) + + # Test missing test-ids and file + valid_title = f"test-{random_suffix}"[:20] # Ensure valid length + missing_ids_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests add \\ + --title "{valid_title}" + """) + assert return_code != 0 + _assert_contains( + missing_ids_output, + ["Error: Either --test-ids or --test-id-file must be provided."] + ) + + # Test invalid label IDs format in list command + invalid_ids_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests list \\ + --run-id "1" \\ + --ids "invalid,ids" + """) + assert return_code != 0 + _assert_contains( + invalid_ids_output, + ["Error: Invalid label IDs format. 
Use comma-separated integers (e.g., 1,2,3)."] + ) + + def test_labels_tests_help_commands(self): + """Test help output for test label commands""" + + # Test main tests help + tests_help_output = _run_cmd("trcli labels tests --help") + _assert_contains( + tests_help_output, + [ + "Usage: trcli labels tests [OPTIONS] COMMAND [ARGS]...", + "Manage labels for tests", + "Commands:", + "add", + "list", + "get" + ] + ) + + # Test tests add help + tests_add_help_output = _run_cmd("trcli labels tests add --help") + _assert_contains( + tests_add_help_output, + [ + "Usage: trcli labels tests add [OPTIONS]", + "Add a label to tests", + "--test-ids", + "--test-id-file", + "--title" + ] + ) + + # Test tests list help + tests_list_help_output = _run_cmd("trcli labels tests list --help") + _assert_contains( + tests_list_help_output, + [ + "Usage: trcli labels tests list [OPTIONS]", + "List tests filtered by label ID from specific runs", + "--run-id", + "--ids" + ] + ) + + # Test tests get help + tests_get_help_output = _run_cmd("trcli labels tests get --help") + _assert_contains( + tests_get_help_output, + [ + "Usage: trcli labels tests get [OPTIONS]", + "Get the labels of tests using test IDs", + "--test-id" + ] + ) \ No newline at end of file diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index ea11a13d..aa7a1723 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -691,7 +691,10 @@ def __get_all_cases(self, project_id=None, suite_id=None) -> Tuple[List[dict], s """ Get all cases from all pages """ - return self.__get_all_entities('cases', f"get_cases/{project_id}&suite_id={suite_id}") + if suite_id is None: + return self.__get_all_entities('cases', f"get_cases/{project_id}") + else: + return self.__get_all_entities('cases', f"get_cases/{project_id}&suite_id={suite_id}") def __get_all_sections(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ @@ -815,3 +818,529 @@ def delete_labels(self, 
label_ids: List[int]) -> Tuple[bool, str]: response = self.client.send_post("delete_labels", payload=None, files=files) success = response.status_code == 200 return success, response.error_message + + def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, suite_id: int = None) -> Tuple[dict, str]: + """ + Add a label to multiple test cases + + :param case_ids: List of test case IDs + :param title: Label title (max 20 characters) + :param project_id: Project ID for validation + :param suite_id: Suite ID (optional) + :returns: Tuple with response data and error string + """ + # Initialize results structure + results = { + 'successful_cases': [], + 'failed_cases': [], + 'max_labels_reached': [], + 'case_not_found': [] + } + + # Check if project is multi-suite by getting all cases without suite_id + all_cases_no_suite, error_message = self.__get_all_cases(project_id, None) + if error_message: + return results, error_message + + # Check if project has multiple suites + suite_ids = set() + for case in all_cases_no_suite: + if 'suite_id' in case and case['suite_id']: + suite_ids.add(case['suite_id']) + + # If project has multiple suites and no suite_id provided, require it + if len(suite_ids) > 1 and suite_id is None: + return results, "This project is multisuite, suite id is required" + + # Get all cases to validate that the provided case IDs exist + all_cases, error_message = self.__get_all_cases(project_id, suite_id) + if error_message: + return results, error_message + + # Create a set of existing case IDs for quick lookup + existing_case_ids = {case['id'] for case in all_cases} + + # Validate case IDs and separate valid from invalid ones + invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] + valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] + + # Record invalid case IDs + for case_id in invalid_case_ids: + results['case_not_found'].append(case_id) + + # If no valid case 
IDs, return early + if not valid_case_ids: + return results, "" + + # Check if label exists or create it + existing_labels, error_message = self.get_labels(project_id) + if error_message: + return results, error_message + + # Find existing label with the same title + label_id = None + for label in existing_labels.get('labels', []): + if label.get('title') == title: + label_id = label.get('id') + break + + # Create label if it doesn't exist + if label_id is None: + label_data, error_message = self.add_label(project_id, title) + if error_message: + return results, error_message + label_info = label_data.get('label', label_data) + label_id = label_info.get('id') + + # Collect case data and validate constraints + cases_to_update = [] + for case_id in valid_case_ids: + # Get current case to check existing labels + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + results['failed_cases'].append({ + 'case_id': case_id, + 'error': f"Could not retrieve case {case_id}: {case_response.error_message}" + }) + continue + + case_data = case_response.response_text + current_labels = case_data.get('labels', []) + + # Check if label already exists on this case + if any(label.get('id') == label_id for label in current_labels): + results['successful_cases'].append({ + 'case_id': case_id, + 'message': f"Label '{title}' already exists on case {case_id}" + }) + continue + + # Check maximum labels limit (10) + if len(current_labels) >= 10: + results['max_labels_reached'].append(case_id) + continue + + # Prepare case for update + existing_label_ids = [label.get('id') for label in current_labels if label.get('id')] + updated_label_ids = existing_label_ids + [label_id] + cases_to_update.append({ + 'case_id': case_id, + 'labels': updated_label_ids + }) + + # Update cases using appropriate endpoint + if len(cases_to_update) == 1: + # Single case: use update_case/{case_id} + case_info = cases_to_update[0] + case_update_data = {'labels': 
case_info['labels']} + + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) + + if update_response.status_code == 200: + results['successful_cases'].append({ + 'case_id': case_info['case_id'], + 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" + }) + else: + results['failed_cases'].append({ + 'case_id': case_info['case_id'], + 'error': update_response.error_message + }) + elif len(cases_to_update) > 1: + # Multiple cases: use update_cases/{suite_id} + # Need to determine suite_id from the cases + case_suite_id = suite_id + if not case_suite_id: + # Get suite_id from the first case if not provided + first_case = all_cases[0] if all_cases else None + case_suite_id = first_case.get('suite_id') if first_case else None + + if not case_suite_id: + # Fall back to individual updates if no suite_id available + for case_info in cases_to_update: + case_update_data = {'labels': case_info['labels']} + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) + + if update_response.status_code == 200: + results['successful_cases'].append({ + 'case_id': case_info['case_id'], + 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" + }) + else: + results['failed_cases'].append({ + 'case_id': case_info['case_id'], + 'error': update_response.error_message + }) + else: + # Batch update using update_cases/{suite_id} + batch_update_data = { + 'case_ids': [case_info['case_id'] for case_info in cases_to_update], + 'labels': cases_to_update[0]['labels'] # Assuming same labels for all cases + } + + batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) + + if batch_response.status_code == 200: + for case_info in cases_to_update: + results['successful_cases'].append({ + 'case_id': case_info['case_id'], + 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" + }) + 
else: + # If batch update fails, fall back to individual updates + for case_info in cases_to_update: + case_update_data = {'labels': case_info['labels']} + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) + + if update_response.status_code == 200: + results['successful_cases'].append({ + 'case_id': case_info['case_id'], + 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" + }) + else: + results['failed_cases'].append({ + 'case_id': case_info['case_id'], + 'error': update_response.error_message + }) + + return results, "" + + def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None) -> Tuple[List[dict], str]: + """ + Get test cases filtered by label ID or title + + :param project_id: Project ID + :param suite_id: Suite ID (optional) + :param label_ids: List of label IDs to filter by + :param label_title: Label title to filter by + :returns: Tuple with list of matching cases and error string + """ + # Get all cases first + all_cases, error_message = self.__get_all_cases(project_id, suite_id) + if error_message: + return [], error_message + + # If filtering by title, first get the label ID + target_label_ids = label_ids or [] + if label_title and not target_label_ids: + labels_data, error_message = self.get_labels(project_id) + if error_message: + return [], error_message + + for label in labels_data.get('labels', []): + if label.get('title') == label_title: + target_label_ids.append(label.get('id')) + + if not target_label_ids: + return [], "" # No label found is a valid case with 0 results + + # Filter cases that have any of the target labels + matching_cases = [] + for case in all_cases: + case_labels = case.get('labels', []) + case_label_ids = [label.get('id') for label in case_labels] + + # Check if any of the target label IDs are present in this case + if any(label_id in case_label_ids for label_id in 
target_label_ids): + matching_cases.append(case) + + return matching_cases, "" + + def add_labels_to_tests(self, test_ids: List[int], titles: Union[str, List[str]], project_id: int) -> Tuple[dict, str]: + """ + Add labels to multiple tests + + :param test_ids: List of test IDs + :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) + :param project_id: Project ID for validation + :returns: Tuple with response data and error string + """ + # Initialize results structure + results = { + 'successful_tests': [], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + } + + # Normalize titles to a list + if isinstance(titles, str): + title_list = [titles] + else: + title_list = titles + + # At this point, title_list should already be validated by the CLI + # Just ensure we have clean titles + title_list = [title.strip() for title in title_list if title.strip()] + + if not title_list: + return {}, "No valid labels provided" + + # Validate test IDs by getting run information for each test + valid_test_ids = [] + for test_id in test_ids: + # Get test information to validate it exists + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results['test_not_found'].append(test_id) + continue + + test_data = test_response.response_text + # Validate that the test belongs to the correct project + run_id = test_data.get('run_id') + if run_id: + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.status_code == 200: + run_data = run_response.response_text + if run_data.get('project_id') == project_id: + valid_test_ids.append(test_id) + else: + results['test_not_found'].append(test_id) + else: + results['test_not_found'].append(test_id) + else: + results['test_not_found'].append(test_id) + + # If no valid test IDs, return early + if not valid_test_ids: + return results, "" + + # Check if labels exist or create them + existing_labels, error_message 
= self.get_labels(project_id) + if error_message: + return results, error_message + + # Process each title to get/create label IDs + label_ids = [] + label_id_to_title = {} # Map label IDs to their titles + for title in title_list: + # Find existing label with the same title + label_id = None + for label in existing_labels.get('labels', []): + if label.get('title') == title: + label_id = label.get('id') + break + + # Create label if it doesn't exist + if label_id is None: + label_data, error_message = self.add_label(project_id, title) + if error_message: + return results, error_message + label_info = label_data.get('label', label_data) + label_id = label_info.get('id') + + if label_id: + label_ids.append(label_id) + label_id_to_title[label_id] = title + + # Collect test data and validate constraints + tests_to_update = [] + for test_id in valid_test_ids: + # Get current test to check existing labels + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results['failed_tests'].append({ + 'test_id': test_id, + 'error': f"Could not retrieve test {test_id}: {test_response.error_message}" + }) + continue + + test_data = test_response.response_text + current_labels = test_data.get('labels', []) + current_label_ids = [label.get('id') for label in current_labels if label.get('id')] + + new_label_ids = [] + already_exists_titles = [] + + for label_id in label_ids: + if label_id not in current_label_ids: + new_label_ids.append(label_id) + else: + if label_id in label_id_to_title: + already_exists_titles.append(label_id_to_title[label_id]) + + if not new_label_ids: + results['successful_tests'].append({ + 'test_id': test_id, + 'message': f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}" + }) + continue + + # Check maximum labels limit (10) + if len(current_label_ids) + len(new_label_ids) > 10: + results['max_labels_reached'].append(test_id) + continue + + # Prepare test for update + 
updated_label_ids = current_label_ids + new_label_ids + + new_label_titles = [] + for label_id in new_label_ids: + if label_id in label_id_to_title: + new_label_titles.append(label_id_to_title[label_id]) + + tests_to_update.append({ + 'test_id': test_id, + 'labels': updated_label_ids, + 'new_labels': new_label_ids, + 'new_label_titles': new_label_titles + }) + + # Update tests using appropriate endpoint + if len(tests_to_update) == 1: + # Single test: use update_test/{test_id} + test_info = tests_to_update[0] + test_update_data = {'labels': test_info['labels']} + + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + + if update_response.status_code == 200: + new_label_titles = test_info.get('new_label_titles', []) + new_label_count = len(new_label_titles) + + if new_label_count == 1: + message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" + elif new_label_count > 1: + message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" + else: + message = f"No new labels added to test {test_info['test_id']}" + + results['successful_tests'].append({ + 'test_id': test_info['test_id'], + 'message': message + }) + else: + results['failed_tests'].append({ + 'test_id': test_info['test_id'], + 'error': update_response.error_message + }) + else: + # Multiple tests: use individual updates to ensure each test gets its specific labels + for test_info in tests_to_update: + test_update_data = {'labels': test_info['labels']} + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + + if update_response.status_code == 200: + new_label_titles = test_info.get('new_label_titles', []) + new_label_count = len(new_label_titles) + + if new_label_count == 1: + message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" + elif new_label_count > 1: + message = 
f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" + else: + message = f"No new labels added to test {test_info['test_id']}" + + results['successful_tests'].append({ + 'test_id': test_info['test_id'], + 'message': message + }) + else: + results['failed_tests'].append({ + 'test_id': test_info['test_id'], + 'error': update_response.error_message + }) + + return results, "" + + def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None) -> Tuple[List[dict], str]: + """ + Get tests filtered by label ID or title from specific runs + + :param project_id: Project ID + :param label_ids: List of label IDs to filter by + :param label_title: Label title to filter by + :param run_ids: List of run IDs to filter tests from (optional, defaults to all runs) + :returns: Tuple with list of matching tests and error string + """ + # If filtering by title, first get the label ID + target_label_ids = label_ids or [] + if label_title and not target_label_ids: + labels_data, error_message = self.get_labels(project_id) + if error_message: + return [], error_message + + for label in labels_data.get('labels', []): + if label.get('title') == label_title: + target_label_ids.append(label.get('id')) + + if not target_label_ids: + return [], "" # No label found is a valid case with 0 results + + # Get runs for the project (either all runs or specific run IDs) + if run_ids: + # Use specific run IDs - validate they exist by getting run details + runs = [] + for run_id in run_ids: + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.status_code == 200: + runs.append(run_response.response_text) + else: + return [], f"Run ID {run_id} not found or inaccessible" + else: + # Get all runs for the project + runs_response = self.client.send_get(f"get_runs/{project_id}") + if runs_response.status_code != 200: + return [], runs_response.error_message + + 
runs_data = runs_response.response_text + runs = runs_data.get('runs', []) if isinstance(runs_data, dict) else runs_data + + # Collect all tests from all runs + matching_tests = [] + for run in runs: + run_id = run.get('id') + if not run_id: + continue + + # Get tests for this run + tests_response = self.client.send_get(f"get_tests/{run_id}") + if tests_response.status_code != 200: + continue # Skip this run if we can't get tests + + tests_data = tests_response.response_text + tests = tests_data.get('tests', []) if isinstance(tests_data, dict) else tests_data + + # Filter tests that have any of the target labels + for test in tests: + test_labels = test.get('labels', []) + test_label_ids = [label.get('id') for label in test_labels] + + # Check if any of the target label IDs are present in this test + if any(label_id in test_label_ids for label_id in target_label_ids): + matching_tests.append(test) + + return matching_tests, "" + + def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: + """ + Get labels for specific tests + + :param test_ids: List of test IDs to get labels for + :returns: Tuple with list of test label information and error string + """ + results = [] + + for test_id in test_ids: + # Get test information + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results.append({ + 'test_id': test_id, + 'error': f"Test {test_id} not found or inaccessible", + 'labels': [] + }) + continue + + test_data = test_response.response_text + test_labels = test_data.get('labels', []) + + results.append({ + 'test_id': test_id, + 'title': test_data.get('title', 'Unknown'), + 'status_id': test_data.get('status_id'), + 'labels': test_labels, + 'error': None + }) + + return results, "" diff --git a/trcli/commands/cmd_labels.py b/trcli/commands/cmd_labels.py index 81a1a9c0..7e535153 100644 --- a/trcli/commands/cmd_labels.py +++ b/trcli/commands/cmd_labels.py @@ -222,4 +222,461 @@ def get(environment: 
Environment, context: click.Context, label_id: int, *args, environment.log(f" Created on: {label_info.get('created_on', 'N/A')}") else: environment.elog(f"Unexpected response format: {label_data}") - exit(1) \ No newline at end of file + exit(1) + + +@cli.group() +@click.pass_context +@pass_environment +def cases(environment: Environment, context: click.Context, *args, **kwargs): + """Manage labels for test cases""" + pass + + +@cases.command(name='add') +@click.option("--case-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") +@click.option("--title", required=True, metavar="", help="Title of the label to add (max 20 characters).") +@click.pass_context +@pass_environment +def add_to_cases(environment: Environment, context: click.Context, case_ids: str, title: str, *args, **kwargs): + """Add a label to test cases""" + environment.check_for_required_parameters() + print_config(environment, "Add Cases") + + if len(title) > 20: + environment.elog("Error: Label title must be 20 characters or less.") + exit(1) + + try: + case_id_list = [int(id.strip()) for id in case_ids.split(",")] + except ValueError: + environment.elog("Error: Invalid case IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Adding label '{title}' to {len(case_id_list)} test case(s)...") + + results, error_message = project_client.api_request_handler.add_labels_to_cases( + case_ids=case_id_list, + title=title, + project_id=project_client.project.project_id, + suite_id=environment.suite_id + ) + + if error_message: + environment.elog(f"Failed to add labels to cases: {error_message}") + exit(1) + else: + # Report results + successful_cases = results.get('successful_cases', []) + failed_cases = results.get('failed_cases', []) + max_labels_reached = results.get('max_labels_reached', []) + case_not_found = results.get('case_not_found', []) + + if case_not_found: + environment.elog(f"Error: {len(case_not_found)} test case(s) not found:") + for case_id in case_not_found: + environment.elog(f" Case ID {case_id} does not exist in the project") + + if successful_cases: + environment.log(f"Successfully processed {len(successful_cases)} case(s):") + for case_result in successful_cases: + environment.log(f" Case {case_result['case_id']}: {case_result['message']}") + + if max_labels_reached: + environment.log(f"Warning: {len(max_labels_reached)} case(s) already have maximum labels (10):") + for case_id in max_labels_reached: + environment.log(f" Case {case_id}: Maximum labels reached") + + if failed_cases: + environment.log(f"Failed to process {len(failed_cases)} case(s):") + for case_result in failed_cases: + environment.log(f" Case {case_result['case_id']}: {case_result['error']}") + + # Exit with error if there were invalid case IDs + if case_not_found: + exit(1) + + +@cases.command(name='list') +@click.option("--ids", metavar="", help="Comma-separated list of label IDs to filter by (e.g., 1,2,3).") +@click.option("--title", 
metavar="", help="Label title to filter by (max 20 characters).") +@click.pass_context +@pass_environment +def list_cases(environment: Environment, context: click.Context, ids: str, title: str, *args, **kwargs): + """List test cases filtered by label ID or title""" + environment.check_for_required_parameters() + + # Validate that either ids or title is provided + if not ids and not title: + environment.elog("Error: Either --ids or --title must be provided.") + exit(1) + + if title and len(title) > 20: + environment.elog("Error: Label title must be 20 characters or less.") + exit(1) + + print_config(environment, "List Cases by Label") + + label_ids = None + if ids: + try: + label_ids = [int(id.strip()) for id in ids.split(",")] + except ValueError: + environment.elog("Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + if title: + environment.log(f"Retrieving test cases with label title '{title}'...") + else: + environment.log(f"Retrieving test cases with label IDs: {', '.join(map(str, label_ids))}...") + + matching_cases, error_message = project_client.api_request_handler.get_cases_by_label( + project_id=project_client.project.project_id, + suite_id=environment.suite_id, + label_ids=label_ids, + label_title=title + ) + + if error_message: + environment.elog(f"Failed to retrieve cases: {error_message}") + exit(1) + else: + environment.log(f"Found {len(matching_cases)} matching test case(s):") + environment.log("") + + if matching_cases: + for case in matching_cases: + case_labels = case.get('labels', []) + label_info = [] + for label in case_labels: + label_info.append(f"ID:{label.get('id')},Title:'{label.get('title')}'") + + labels_str = f" [Labels: {'; '.join(label_info)}]" if label_info else " [No labels]" + environment.log(f" Case ID: 
{case['id']}, Title: '{case['title']}'{labels_str}") + else: + if title: + environment.log(f" No test cases found with label title '{title}'.") + else: + environment.log(f" No test cases found with the specified label IDs.") + + +@cli.group() +@click.pass_context +@pass_environment +def tests(environment: Environment, context: click.Context, *args, **kwargs): + """Manage labels for tests""" + pass + + +@tests.command(name='add') +@click.option("--test-ids", metavar="", help="Comma-separated list of test IDs (e.g., 1,2,3).") +@click.option("--test-id-file", metavar="", help="CSV file containing test IDs.") +@click.option("--title", required=True, metavar="", help="Label title(s) to add (max 20 characters each). Use comma separation for multiple labels (e.g., 'label1,label2').") +@click.pass_context +@pass_environment +def add_to_tests(environment: Environment, context: click.Context, test_ids: str, test_id_file: str, title: str, *args, **kwargs): + """Add label(s) to tests""" + environment.check_for_required_parameters() + print_config(environment, "Add Tests") + + # Parse comma-separated titles + title_list = [t.strip() for t in title.split(",") if t.strip()] + + # Filter valid and invalid labels + valid_titles = [] + invalid_titles = [] + + for t in title_list: + if len(t) > 20: + invalid_titles.append(t) + else: + valid_titles.append(t) + + # Show warnings for invalid labels but continue with valid ones + if invalid_titles: + for invalid_title in invalid_titles: + environment.elog(f"Warning: Label title '{invalid_title}' exceeds 20 character limit and will be skipped.") + + # Check if we have any valid labels left + if not valid_titles: + environment.elog("Error: No valid label titles provided after filtering.") + exit(1) + + # Validate maximum number of valid labels (TestRail limit is 10 labels per test) + if len(valid_titles) > 10: + environment.elog(f"Error: Cannot add more than 10 labels at once. 
You provided {len(valid_titles)} valid labels.") + exit(1) + + # Use only valid titles for processing + title_list = valid_titles + + # Validate that either test_ids or test_id_file is provided + if not test_ids and not test_id_file: + environment.elog("Error: Either --test-ids or --test-id-file must be provided.") + exit(1) + + if test_ids and test_id_file: + environment.elog("Error: Cannot use both --test-ids and --test-id-file. Choose one.") + exit(1) + + test_id_list = [] + + # Parse test IDs from command line + if test_ids: + try: + test_id_list = [int(id.strip()) for id in test_ids.split(",")] + except ValueError: + environment.elog("Error: Invalid test IDs format. Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + # Parse test IDs from CSV file + if test_id_file: + import csv + import os + + if not os.path.exists(test_id_file): + environment.elog(f"Error: File '{test_id_file}' not found.") + exit(1) + + try: + with open(test_id_file, 'r', newline='', encoding='utf-8') as csvfile: + # Try to detect delimiter + sample = csvfile.read(1024) + csvfile.seek(0) + sniffer = csv.Sniffer() + + single_column_mode = False + try: + delimiter = sniffer.sniff(sample).delimiter + except csv.Error: + # If delimiter detection fails, check for common delimiters + if ',' in sample: + delimiter = ',' + elif ';' in sample: + delimiter = ';' + elif '\t' in sample: + delimiter = '\t' + else: + # Single column file - use line-by-line reading + single_column_mode = True + + if single_column_mode: + # Handle single column files (no delimiters) + csvfile.seek(0) + lines = csvfile.readlines() + for line in lines: + line = line.strip() + if line and not line.lower().startswith('test'): # Skip likely headers + try: + test_id_list.append(int(line)) + except ValueError: + environment.log(f"Warning: Ignoring invalid test ID '{line}' in file") + else: + # Handle CSV files with delimiters + reader = csv.reader(csvfile, delimiter=delimiter) + + # Skip header if it exists (check if 
first row contains non-numeric values) + first_row = next(reader, None) + if first_row: + # Check if first row looks like a header + try: + # If we can convert all values to int, it's likely data, not header + [int(val.strip()) for val in first_row if val.strip()] + # Reset to beginning and don't skip + csvfile.seek(0) + reader = csv.reader(csvfile, delimiter=delimiter) + except ValueError: + # First row contains non-numeric data, likely header, so we skip it + pass + + for row in reader: + for cell in row: + cell_value = cell.strip() + if cell_value: # Skip empty cells + try: + test_id_list.append(int(cell_value)) + except ValueError: + environment.log(f"Warning: Ignoring invalid test ID '{cell_value}' in file") + + except Exception as e: + environment.elog(f"Error reading CSV file: {e}") + exit(1) + + if not test_id_list: + environment.elog("Error: No valid test IDs found in the CSV file.") + exit(1) + + environment.log(f"Loaded {len(test_id_list)} test ID(s) from file '{test_id_file}'") + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + # Log message adjusted for single/multiple labels + if len(title_list) == 1: + environment.log(f"Adding label '{title_list[0]}' to {len(test_id_list)} test(s)...") + else: + environment.log(f"Adding {len(title_list)} labels ({', '.join(title_list)}) to {len(test_id_list)} test(s)...") + + results, error_message = project_client.api_request_handler.add_labels_to_tests( + test_ids=test_id_list, + titles=title_list, + project_id=project_client.project.project_id + ) + + # Handle validation errors (warnings, not fatal) + if error_message: + environment.elog(f"Warning: {error_message}") + + # Process results + # Report results + successful_tests = results.get('successful_tests', []) + failed_tests = results.get('failed_tests', []) + max_labels_reached = results.get('max_labels_reached', []) + 
test_not_found = results.get('test_not_found', []) + + if test_not_found: + environment.log(f"Warning: {len(test_not_found)} test(s) not found or not accessible:") + for test_id in test_not_found: + environment.log(f" Test ID {test_id} does not exist or is not accessible") + + if successful_tests: + environment.log(f"Successfully processed {len(successful_tests)} test(s):") + for test_result in successful_tests: + environment.log(f" Test {test_result['test_id']}: {test_result['message']}") + + if max_labels_reached: + environment.log(f"Warning: {len(max_labels_reached)} test(s) already have maximum labels (10):") + for test_id in max_labels_reached: + environment.log(f" Test {test_id}: Maximum labels reached") + + if failed_tests: + environment.log(f"Failed to process {len(failed_tests)} test(s):") + for test_result in failed_tests: + environment.log(f" Test {test_result['test_id']}: {test_result['error']}") + + +@tests.command(name='list') +@click.option("--run-id", required=True, metavar="", help="Comma-separated list of run IDs to filter tests from (e.g., 1,2,3).") +@click.option("--ids", required=True, metavar="", help="Comma-separated list of label IDs to filter by (e.g., 1,2,3).") +@click.pass_context +@pass_environment +def list_tests(environment: Environment, context: click.Context, run_id: str, ids: str, *args, **kwargs): + """List tests filtered by label ID from specific runs""" + environment.check_for_required_parameters() + print_config(environment, "List Tests by Label") + + try: + run_ids = [int(id.strip()) for id in run_id.split(",")] + except ValueError: + environment.elog("Error: Invalid run IDs format. Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + try: + label_ids = [int(id.strip()) for id in ids.split(",")] + except ValueError: + environment.elog("Error: Invalid label IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Retrieving tests from run IDs: {', '.join(map(str, run_ids))} with label IDs: {', '.join(map(str, label_ids))}...") + + matching_tests, error_message = project_client.api_request_handler.get_tests_by_label( + project_id=project_client.project.project_id, + label_ids=label_ids, + run_ids=run_ids + ) + + if error_message: + environment.elog(f"Failed to retrieve tests: {error_message}") + exit(1) + else: + environment.log(f"Found {len(matching_tests)} matching test(s):") + environment.log("") + + if matching_tests: + for test in matching_tests: + test_labels = test.get('labels', []) + label_info = [] + for label in test_labels: + label_info.append(f"ID:{label.get('id')},Title:'{label.get('title')}'") + + labels_str = f" [Labels: {'; '.join(label_info)}]" if label_info else " [No labels]" + status_name = test.get('status_id', 'Unknown') + environment.log(f" Test ID: {test['id']}, Title: '{test.get('title', 'Unknown')}', Status: {status_name}{labels_str}") + else: + environment.log(f" No tests found with the specified label IDs.") + + +@tests.command(name='get') +@click.option("--test-ids", required=True, metavar="", help="Comma-separated list of test IDs (e.g., 1,2,3).") +@click.pass_context +@pass_environment +def get_test_labels(environment: Environment, context: click.Context, test_ids: str, *args, **kwargs): + """Get the labels of tests using test IDs""" + environment.check_for_required_parameters() + print_config(environment, "Get Test Labels") + + try: + test_id_list = [int(id.strip()) for id in test_ids.split(",")] + except ValueError: + environment.elog("Error: Invalid test IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Retrieving labels for {len(test_id_list)} test(s)...") + + test_labels, error_message = project_client.api_request_handler.get_test_labels(test_id_list) + + if error_message: + environment.elog(f"Failed to retrieve test labels: {error_message}") + exit(1) + else: + environment.log(f"Test label information:") + environment.log("") + + for test_info in test_labels: + test_id = test_info['test_id'] + + if test_info.get('error'): + environment.log(f" Test ID: {test_id} - Error: {test_info['error']}") + else: + test_labels = test_info.get('labels', []) + title = test_info.get('title', 'Unknown') + status_id = test_info.get('status_id', 'Unknown') + + environment.log(f" Test ID: {test_id}") + environment.log(f" Title: '{title}'") + environment.log(f" Status: {status_id}") + + if test_labels: + environment.log(f" Labels ({len(test_labels)}):") + for label in test_labels: + environment.log(f" - ID: {label.get('id')}, Title: '{label.get('title')}'") + else: + environment.log(f" Labels: No labels assigned") + environment.log("") \ No newline at end of file diff --git a/trcli/constants.py b/trcli/constants.py index dbe346ed..34cf1f71 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -93,7 +93,7 @@ - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new test run - - labels: Manage labels (add, update, delete, list)""" + - labels: Manage labels (projects, cases, and tests)""" MISSING_COMMAND_SLOGAN = """Usage: trcli [OPTIONS] COMMAND [ARGS]...\nTry 'trcli --help' for help. \nError: Missing command."""