trcli
Advanced tools
| """ | ||
| Unit tests for update_existing_case_references with case fields support. | ||
| Tests the fix for the bug where custom case fields were not being updated. | ||
| """ | ||
| import pytest | ||
| from unittest.mock import MagicMock, patch | ||
| from trcli.api.api_request_handler import ApiRequestHandler | ||
| from trcli.api.api_client import APIClientResult | ||
| from trcli.cli import Environment | ||
| from trcli.data_classes.dataclass_testrail import TestRailSuite | ||
class TestUpdateExistingCaseReferencesWithFields:
    """Test class for update_existing_case_references with custom fields.

    Covers the fix for the bug where custom case fields were not being
    updated alongside JUnit references, plus filtering of internal fields
    and the append/replace reference strategies.
    """

    @pytest.fixture
    def handler(self):
        """Create an ApiRequestHandler instance backed by a mocked API client."""
        environment = Environment()
        environment.host = "https://test.testrail.com"
        environment.username = "test@example.com"
        environment.password = "password"
        mock_client = MagicMock()
        suite = TestRailSuite(name="Test Suite")
        handler = ApiRequestHandler(environment=environment, api_client=mock_client, suites_data=suite, verify=False)
        return handler

    def test_update_case_with_refs_and_custom_fields(self, handler):
        """Test updating case with both references and custom fields"""
        # Mock get_case response
        mock_get_case_response = APIClientResult(
            status_code=200, response_text={"id": 1, "title": "Test Case 1", "refs": "REQ-1"}, error_message=None
        )
        # Mock update_case response
        mock_update_response = APIClientResult(
            status_code=200,
            response_text={
                "id": 1,
                "refs": "REQ-1,REQ-2",
                "custom_preconds": "Updated precondition",
                "custom_automation_type": 2,
            },
            error_message=None,
        )
        case_fields = {"custom_preconds": "Updated precondition", "custom_automation_type": 2}
        # NOTE: mock-call assertions live inside the patch context — the
        # patched attributes (and their recorded calls) are reverted on exit.
        with patch.object(handler.client, "send_get", return_value=mock_get_case_response), patch.object(
            handler.client, "send_post", return_value=mock_update_response
        ):
            success, error, added_refs, skipped_refs, updated_fields = handler.update_existing_case_references(
                case_id=1, junit_refs="REQ-2", case_fields=case_fields, strategy="append"
            )
            assert success is True
            assert error is None
            assert added_refs == ["REQ-2"]
            assert skipped_refs == []
            assert set(updated_fields) == {"custom_preconds", "custom_automation_type"}
            # Verify the API call included both refs and custom fields
            handler.client.send_post.assert_called_once()
            call_args = handler.client.send_post.call_args
            assert call_args[0][0] == "update_case/1"
            update_data = call_args[0][1]
            assert update_data["refs"] == "REQ-1,REQ-2"
            assert update_data["custom_preconds"] == "Updated precondition"
            assert update_data["custom_automation_type"] == 2

    def test_update_case_with_only_custom_fields(self, handler):
        """Test updating case with only custom fields (no refs)"""
        mock_update_response = APIClientResult(
            status_code=200, response_text={"id": 1, "custom_automation_ids": "AUTO-123"}, error_message=None
        )
        case_fields = {"custom_automation_ids": "AUTO-123", "template_id": 1}
        with patch.object(handler.client, "send_post", return_value=mock_update_response):
            success, error, added_refs, skipped_refs, updated_fields = handler.update_existing_case_references(
                case_id=1, junit_refs="", case_fields=case_fields, strategy="append"  # No refs
            )
            assert success is True
            assert error is None
            assert added_refs == []
            assert skipped_refs == []
            assert set(updated_fields) == {"custom_automation_ids", "template_id"}
            # Verify the API call included only custom fields
            handler.client.send_post.assert_called_once()
            call_args = handler.client.send_post.call_args
            update_data = call_args[0][1]
            assert "refs" not in update_data  # No refs in update
            assert update_data["custom_automation_ids"] == "AUTO-123"
            assert update_data["template_id"] == 1

    def test_update_case_with_only_refs_no_fields(self, handler):
        """Test updating case with only refs (backwards compatibility)"""
        mock_get_case_response = APIClientResult(
            status_code=200, response_text={"id": 1, "title": "Test Case 1", "refs": ""}, error_message=None
        )
        mock_update_response = APIClientResult(
            status_code=200, response_text={"id": 1, "refs": "REQ-1"}, error_message=None
        )
        with patch.object(handler.client, "send_get", return_value=mock_get_case_response), patch.object(
            handler.client, "send_post", return_value=mock_update_response
        ):
            success, error, added_refs, skipped_refs, updated_fields = handler.update_existing_case_references(
                case_id=1, junit_refs="REQ-1", case_fields=None, strategy="append"  # No custom fields
            )
            assert success is True
            assert error is None
            assert added_refs == ["REQ-1"]
            assert skipped_refs == []
            assert updated_fields == []
            # Verify the API call included only refs
            handler.client.send_post.assert_called_once()
            call_args = handler.client.send_post.call_args
            update_data = call_args[0][1]
            assert update_data == {"refs": "REQ-1"}

    def test_update_case_filters_internal_fields(self, handler):
        """Test that internal fields are filtered out from updates"""
        mock_update_response = APIClientResult(status_code=200, response_text={"id": 1}, error_message=None)
        case_fields = {
            "custom_preconds": "Test",
            "case_id": 999,  # Should be filtered
            "section_id": 888,  # Should be filtered
            "result": {"status": "passed"},  # Should be filtered
            "custom_automation_type": 1,
        }
        with patch.object(handler.client, "send_post", return_value=mock_update_response):
            success, error, added_refs, skipped_refs, updated_fields = handler.update_existing_case_references(
                case_id=1, junit_refs="", case_fields=case_fields, strategy="append"
            )
            assert success is True
            # Verify internal fields were filtered out
            assert set(updated_fields) == {"custom_preconds", "custom_automation_type"}
            # Verify the API call excluded internal fields
            call_args = handler.client.send_post.call_args
            update_data = call_args[0][1]
            assert "case_id" not in update_data
            assert "section_id" not in update_data
            assert "result" not in update_data
            assert update_data["custom_preconds"] == "Test"
            assert update_data["custom_automation_type"] == 1

    def test_update_case_no_changes(self, handler):
        """Test when there are no refs and no custom fields to update"""
        success, error, added_refs, skipped_refs, updated_fields = handler.update_existing_case_references(
            case_id=1, junit_refs="", case_fields=None, strategy="append"
        )
        assert success is True
        assert error is None
        assert added_refs == []
        assert skipped_refs == []
        assert updated_fields == []
        # Verify no API call was made
        handler.client.send_post.assert_not_called()

    def test_update_case_refs_append_with_fields(self, handler):
        """Test append strategy for refs with custom fields"""
        mock_get_case_response = APIClientResult(
            status_code=200, response_text={"id": 1, "title": "Test Case 1", "refs": "REQ-1,REQ-2"}, error_message=None
        )
        mock_update_response = APIClientResult(
            status_code=200, response_text={"id": 1, "refs": "REQ-1,REQ-2,REQ-3"}, error_message=None
        )
        case_fields = {"custom_preconds": "New precondition"}
        with patch.object(handler.client, "send_get", return_value=mock_get_case_response), patch.object(
            handler.client, "send_post", return_value=mock_update_response
        ):
            success, error, added_refs, skipped_refs, updated_fields = handler.update_existing_case_references(
                case_id=1, junit_refs="REQ-2,REQ-3", case_fields=case_fields, strategy="append"  # REQ-2 already exists
            )
            assert success is True
            assert added_refs == ["REQ-3"]
            assert skipped_refs == ["REQ-2"]
            assert updated_fields == ["custom_preconds"]
            # Verify refs were appended and field was added
            call_args = handler.client.send_post.call_args
            update_data = call_args[0][1]
            assert update_data["refs"] == "REQ-1,REQ-2,REQ-3"
            assert update_data["custom_preconds"] == "New precondition"

    def test_update_case_refs_replace_with_fields(self, handler):
        """Test replace strategy for refs with custom fields"""
        mock_get_case_response = APIClientResult(
            status_code=200, response_text={"id": 1, "title": "Test Case 1", "refs": "REQ-1,REQ-2"}, error_message=None
        )
        mock_update_response = APIClientResult(
            status_code=200, response_text={"id": 1, "refs": "REQ-3,REQ-4"}, error_message=None
        )
        case_fields = {"custom_automation_type": 2}
        with patch.object(handler.client, "send_get", return_value=mock_get_case_response), patch.object(
            handler.client, "send_post", return_value=mock_update_response
        ):
            success, error, added_refs, skipped_refs, updated_fields = handler.update_existing_case_references(
                case_id=1, junit_refs="REQ-3,REQ-4", case_fields=case_fields, strategy="replace"
            )
            assert success is True
            assert added_refs == ["REQ-3", "REQ-4"]
            assert skipped_refs == []
            assert updated_fields == ["custom_automation_type"]
            # Verify refs were replaced and field was added
            call_args = handler.client.send_post.call_args
            update_data = call_args[0][1]
            assert update_data["refs"] == "REQ-3,REQ-4"
            assert update_data["custom_automation_type"] == 2

    def test_update_case_no_new_refs_but_has_fields(self, handler):
        """Test when all refs are duplicates but custom fields need updating"""
        mock_get_case_response = APIClientResult(
            status_code=200, response_text={"id": 1, "title": "Test Case 1", "refs": "REQ-1,REQ-2"}, error_message=None
        )
        mock_update_response = APIClientResult(status_code=200, response_text={"id": 1}, error_message=None)
        case_fields = {"custom_preconds": "Updated"}
        with patch.object(handler.client, "send_get", return_value=mock_get_case_response), patch.object(
            handler.client, "send_post", return_value=mock_update_response
        ):
            success, error, added_refs, skipped_refs, updated_fields = handler.update_existing_case_references(
                case_id=1, junit_refs="REQ-1,REQ-2", case_fields=case_fields, strategy="append"  # All duplicates
            )
            assert success is True
            assert added_refs == []
            assert skipped_refs == ["REQ-1", "REQ-2"]
            assert updated_fields == ["custom_preconds"]
            # Verify update was still made for custom fields
            handler.client.send_post.assert_called_once()
            call_args = handler.client.send_post.call_args
            update_data = call_args[0][1]
            assert update_data["refs"] == "REQ-1,REQ-2"
            assert update_data["custom_preconds"] == "Updated"
+1
-1
| Metadata-Version: 2.4 | ||
| Name: trcli | ||
| Version: 1.12.5 | ||
| Version: 1.12.6 | ||
| License-File: LICENSE.md | ||
@@ -5,0 +5,0 @@ Requires-Dist: click<8.2.2,>=8.1.0 |
+5
-5
@@ -36,3 +36,3 @@  | ||
| ``` | ||
| TestRail CLI v1.12.5 | ||
| TestRail CLI v1.12.6 | ||
| Copyright 2025 Gurock Software GmbH - www.gurock.com | ||
@@ -51,3 +51,3 @@ Supported and loaded modules: | ||
| $ trcli --help | ||
| TestRail CLI v1.12.5 | ||
| TestRail CLI v1.12.6 | ||
| Copyright 2025 Gurock Software GmbH - www.gurock.com | ||
@@ -153,3 +153,3 @@ Usage: trcli [OPTIONS] COMMAND [ARGS]... | ||
| existing case field values, whether to append or | ||
| replace (default: append). | ||
| replace (Note: only applies to references; default: append). | ||
| --help Show this message and exit. | ||
@@ -1102,3 +1102,3 @@ ``` | ||
| $ trcli add_run --help | ||
| TestRail CLI v1.12.5 | ||
| TestRail CLI v1.12.6 | ||
| Copyright 2025 Gurock Software GmbH - www.gurock.com | ||
@@ -1227,3 +1227,3 @@ Usage: trcli add_run [OPTIONS] | ||
| $ trcli parse_openapi --help | ||
| TestRail CLI v1.12.5 | ||
| TestRail CLI v1.12.6 | ||
| Copyright 2025 Gurock Software GmbH - www.gurock.com | ||
@@ -1230,0 +1230,0 @@ Usage: trcli parse_openapi [OPTIONS] |
| Metadata-Version: 2.4 | ||
| Name: trcli | ||
| Version: 1.12.5 | ||
| Version: 1.12.6 | ||
| License-File: LICENSE.md | ||
@@ -5,0 +5,0 @@ Requires-Dist: click<8.2.2,>=8.1.0 |
@@ -8,2 +8,3 @@ LICENSE.md | ||
| tests/test_api_request_handler.py | ||
| tests/test_api_request_handler_case_fields_update.py | ||
| tests/test_api_request_handler_case_matcher.py | ||
@@ -10,0 +11,0 @@ tests/test_api_request_handler_labels.py |
@@ -1,1 +0,1 @@ | ||
| __version__ = "1.12.5" | ||
| __version__ = "1.12.6" |
+114
-101
@@ -21,3 +21,3 @@ import time | ||
| self.last_run_id = None | ||
| if hasattr(self.environment, 'special_parser') and self.environment.special_parser == "saucectl": | ||
| if hasattr(self.environment, "special_parser") and self.environment.special_parser == "saucectl": | ||
| self.run_name += f" ({suite.name})" | ||
@@ -37,3 +37,3 @@ | ||
| try: | ||
| assign_value = getattr(self.environment, 'assign_failed_to', None) | ||
| assign_value = getattr(self.environment, "assign_failed_to", None) | ||
| if assign_value is not None and str(assign_value).strip(): | ||
@@ -61,5 +61,3 @@ self._validate_and_store_user_ids() | ||
| if self.environment.auto_creation_response: | ||
| added_sections, result_code = self.add_missing_sections( | ||
| self.project.project_id | ||
| ) | ||
| added_sections, result_code = self.add_missing_sections(self.project.project_id) | ||
| if result_code == -1: | ||
@@ -91,3 +89,3 @@ revert_logs = self.rollback_changes( | ||
| return | ||
| # remove empty, unused sections created earlier, based on the sections actually used by the new test cases | ||
@@ -100,5 +98,11 @@ # - iterate on added_sections and remove those that are not used by the new test cases | ||
| else: | ||
| empty_sections = [section for section in added_sections if section['section_id'] not in [case['section_id'] for case in added_test_cases]] | ||
| empty_sections = [ | ||
| section | ||
| for section in added_sections | ||
| if section["section_id"] not in [case["section_id"] for case in added_test_cases] | ||
| ] | ||
| if len(empty_sections) > 0: | ||
| self.environment.log("Removing unnecessary empty sections that may have been created earlier. ", new_line=False) | ||
| self.environment.log( | ||
| "Removing unnecessary empty sections that may have been created earlier. ", new_line=False | ||
| ) | ||
| _, error = self.api_request_handler.delete_sections(empty_sections) | ||
@@ -111,11 +115,23 @@ if error: | ||
| # Update existing cases with JUnit references if enabled | ||
| # Update existing cases with JUnit references and custom fields if enabled | ||
| case_update_results = None | ||
| case_update_failed = [] | ||
| if hasattr(self.environment, 'update_existing_cases') and self.environment.update_existing_cases == "yes": | ||
| self.environment.log("Updating existing cases with JUnit references...") | ||
| if hasattr(self.environment, "update_existing_cases") and self.environment.update_existing_cases == "yes": | ||
| self.environment.log("Updating existing cases with references and custom fields...") | ||
| case_update_results, case_update_failed = self.update_existing_cases_with_junit_refs(added_test_cases) | ||
| if case_update_results.get("updated_cases"): | ||
| self.environment.log(f"Updated {len(case_update_results['updated_cases'])} existing case(s) with references.") | ||
| updated_count = len(case_update_results["updated_cases"]) | ||
| # Count how many had refs vs fields updated | ||
| refs_updated = sum(1 for case in case_update_results["updated_cases"] if case.get("added_refs")) | ||
| fields_updated = sum(1 for case in case_update_results["updated_cases"] if case.get("updated_fields")) | ||
| msg_parts = [] | ||
| if refs_updated: | ||
| msg_parts.append(f"{refs_updated} with references") | ||
| if fields_updated: | ||
| msg_parts.append(f"{fields_updated} with custom fields") | ||
| detail = f" ({', '.join(msg_parts)})" if msg_parts else "" | ||
| self.environment.log(f"Updated {updated_count} existing case(s){detail}.") | ||
| if case_update_results.get("failed_cases"): | ||
@@ -163,6 +179,6 @@ self.environment.elog(f"Failed to update {len(case_update_results['failed_cases'])} case(s).") | ||
| self.environment.log(f"Submitted {results_amount} test results in {stop - start:.1f} secs.") | ||
| # Exit with error if there were invalid users (after processing valid ones) | ||
| try: | ||
| has_invalid = getattr(self.environment, '_has_invalid_users', False) | ||
| has_invalid = getattr(self.environment, "_has_invalid_users", False) | ||
| if has_invalid is True: # Explicitly check for True to avoid mock object issues | ||
@@ -173,3 +189,3 @@ exit(1) | ||
| pass | ||
| # Note: Error exit for case update failures is handled in cmd_parse_junit.py after reporting | ||
@@ -184,3 +200,3 @@ | ||
| try: | ||
| assign_value = getattr(self.environment, 'assign_failed_to', None) | ||
| assign_value = getattr(self.environment, "assign_failed_to", None) | ||
| if assign_value is None or not str(assign_value).strip(): | ||
@@ -190,4 +206,4 @@ return | ||
| return | ||
| # Check for empty or whitespace-only values | ||
| # Check for empty or whitespace-only values | ||
| assign_str = str(assign_value) | ||
@@ -197,12 +213,12 @@ if not assign_str.strip(): | ||
| exit(1) | ||
| emails = [email.strip() for email in assign_str.split(',') if email.strip()] | ||
| emails = [email.strip() for email in assign_str.split(",") if email.strip()] | ||
| if not emails: | ||
| self.environment.elog("Error: --assign option requires at least one user email") | ||
| exit(1) | ||
| valid_user_ids = [] | ||
| invalid_users = [] | ||
| for email in emails: | ||
@@ -218,3 +234,3 @@ user_id, error_msg = self.api_request_handler.get_user_by_email(email) | ||
| valid_user_ids.append(user_id) | ||
| # Handle invalid users | ||
@@ -224,10 +240,10 @@ if invalid_users: | ||
| self.environment.elog(f"Error: User not found: {invalid_user}") | ||
| # Store valid user IDs for processing, but mark that we should exit with error later | ||
| self.environment._has_invalid_users = True | ||
| # If ALL users are invalid, exit immediately | ||
| if not valid_user_ids: | ||
| exit(1) | ||
| # Store valid user IDs for later use | ||
@@ -238,11 +254,11 @@ self.environment._validated_user_ids = valid_user_ids | ||
| """ | ||
| Update existing test cases with references from JUnit properties. | ||
| Update existing test cases with references and custom fields from JUnit properties. | ||
| Excludes newly created cases to avoid unnecessary API calls. | ||
| :param added_test_cases: List of cases that were just created (to be excluded) | ||
| :returns: Tuple of (update_results, failed_cases) | ||
| """ | ||
| if not hasattr(self.environment, 'update_existing_cases') or self.environment.update_existing_cases != "yes": | ||
| if not hasattr(self.environment, "update_existing_cases") or self.environment.update_existing_cases != "yes": | ||
| return {}, [] # Feature not enabled | ||
| # Create a set of newly created case IDs to exclude | ||
@@ -252,44 +268,53 @@ newly_created_case_ids = set() | ||
| # Ensure all case IDs are integers for consistent comparison | ||
| newly_created_case_ids = {int(case.get('case_id')) for case in added_test_cases if case.get('case_id')} | ||
| update_results = { | ||
| "updated_cases": [], | ||
| "skipped_cases": [], | ||
| "failed_cases": [] | ||
| } | ||
| newly_created_case_ids = {int(case.get("case_id")) for case in added_test_cases if case.get("case_id")} | ||
| update_results = {"updated_cases": [], "skipped_cases": [], "failed_cases": []} | ||
| failed_cases = [] | ||
| strategy = getattr(self.environment, 'update_strategy', 'append') | ||
| strategy = getattr(self.environment, "update_strategy", "append") | ||
| # Process all test cases in all sections | ||
| for section in self.api_request_handler.suites_data_from_provider.testsections: | ||
| for test_case in section.testcases: | ||
| # Only process cases that have a case_id (existing cases) and JUnit refs | ||
| # Get refs and case fields for this test case | ||
| junit_refs = getattr(test_case, "_junit_case_refs", None) | ||
| case_fields = getattr(test_case, "case_fields", {}) | ||
| # Only process cases that have a case_id (existing cases) and either JUnit refs or case fields | ||
| # AND exclude newly created cases | ||
| if (test_case.case_id and | ||
| hasattr(test_case, '_junit_case_refs') and test_case._junit_case_refs and | ||
| int(test_case.case_id) not in newly_created_case_ids): | ||
| if ( | ||
| test_case.case_id | ||
| and (junit_refs or case_fields) | ||
| and int(test_case.case_id) not in newly_created_case_ids | ||
| ): | ||
| try: | ||
| success, error_msg, added_refs, skipped_refs = self.api_request_handler.update_existing_case_references( | ||
| test_case.case_id, test_case._junit_case_refs, strategy | ||
| success, error_msg, added_refs, skipped_refs, updated_fields = ( | ||
| self.api_request_handler.update_existing_case_references( | ||
| test_case.case_id, junit_refs or "", case_fields, strategy | ||
| ) | ||
| ) | ||
| if success: | ||
| if added_refs: | ||
| # Only count as "updated" if references were actually added | ||
| update_results["updated_cases"].append({ | ||
| "case_id": test_case.case_id, | ||
| "case_title": test_case.title, | ||
| "added_refs": added_refs, | ||
| "skipped_refs": skipped_refs | ||
| }) | ||
| if added_refs or updated_fields: | ||
| # Count as "updated" if references were added or fields were updated | ||
| update_results["updated_cases"].append( | ||
| { | ||
| "case_id": test_case.case_id, | ||
| "case_title": test_case.title, | ||
| "added_refs": added_refs, | ||
| "skipped_refs": skipped_refs, | ||
| "updated_fields": updated_fields, | ||
| } | ||
| ) | ||
| else: | ||
| # If no refs were added (all were duplicates or no valid refs), count as skipped | ||
| reason = "All references already present" if skipped_refs else "No valid references to process" | ||
| update_results["skipped_cases"].append({ | ||
| "case_id": test_case.case_id, | ||
| "case_title": test_case.title, | ||
| "reason": reason, | ||
| "skipped_refs": skipped_refs | ||
| }) | ||
| # If nothing was updated (all refs were duplicates and no fields), count as skipped | ||
| reason = "All references already present" if skipped_refs else "No changes to apply" | ||
| update_results["skipped_cases"].append( | ||
| { | ||
| "case_id": test_case.case_id, | ||
| "case_title": test_case.title, | ||
| "reason": reason, | ||
| "skipped_refs": skipped_refs, | ||
| } | ||
| ) | ||
| else: | ||
@@ -299,3 +324,3 @@ error_info = { | ||
| "case_title": test_case.title, | ||
| "error": error_msg | ||
| "error": error_msg, | ||
| } | ||
@@ -305,23 +330,23 @@ update_results["failed_cases"].append(error_info) | ||
| self.environment.elog(f"Failed to update case C{test_case.case_id}: {error_msg}") | ||
| except Exception as e: | ||
| error_info = { | ||
| error_info = {"case_id": test_case.case_id, "case_title": test_case.title, "error": str(e)} | ||
| update_results["failed_cases"].append(error_info) | ||
| failed_cases.append(error_info) | ||
| self.environment.elog(f"Exception updating case C{test_case.case_id}: {str(e)}") | ||
| elif ( | ||
| test_case.case_id | ||
| and (junit_refs or case_fields) | ||
| and int(test_case.case_id) in newly_created_case_ids | ||
| ): | ||
| # Skip newly created cases - they already have their fields set during creation | ||
| update_results["skipped_cases"].append( | ||
| { | ||
| "case_id": test_case.case_id, | ||
| "case_title": test_case.title, | ||
| "error": str(e) | ||
| "reason": "Newly created case - fields already set during creation", | ||
| } | ||
| update_results["failed_cases"].append(error_info) | ||
| failed_cases.append(error_info) | ||
| self.environment.elog(f"Exception updating case C{test_case.case_id}: {str(e)}") | ||
| elif (test_case.case_id and | ||
| hasattr(test_case, '_junit_case_refs') and test_case._junit_case_refs and | ||
| int(test_case.case_id) in newly_created_case_ids): | ||
| # Skip newly created cases - they already have their references set | ||
| update_results["skipped_cases"].append({ | ||
| "case_id": test_case.case_id, | ||
| "case_title": test_case.title, | ||
| "reason": "Newly created case - references already set during creation" | ||
| }) | ||
| ) | ||
| return update_results, failed_cases | ||
@@ -348,5 +373,3 @@ | ||
| return added_sections, result_code | ||
| prompt_message = PROMPT_MESSAGES["create_missing_sections"].format( | ||
| project_name=self.environment.project | ||
| ) | ||
| prompt_message = PROMPT_MESSAGES["create_missing_sections"].format(project_name=self.environment.project) | ||
| adding_message = "Adding missing sections to the suite." | ||
@@ -378,5 +401,3 @@ fault_message = FAULT_MAPPING["no_user_agreement"].format(type="sections") | ||
| """ | ||
| prompt_message = PROMPT_MESSAGES["create_missing_test_cases"].format( | ||
| project_name=self.environment.project | ||
| ) | ||
| prompt_message = PROMPT_MESSAGES["create_missing_test_cases"].format(project_name=self.environment.project) | ||
| adding_message = "Adding missing test cases to the suite." | ||
@@ -414,9 +435,5 @@ fault_message = FAULT_MAPPING["no_user_agreement"].format(type="test cases") | ||
| if len(added_test_cases) > 0: | ||
| _, error = self.api_request_handler.delete_cases( | ||
| suite_id, added_test_cases | ||
| ) | ||
| _, error = self.api_request_handler.delete_cases(suite_id, added_test_cases) | ||
| if error: | ||
| returned_log.append( | ||
| RevertMessages.test_cases_not_deleted.format(error=error) | ||
| ) | ||
| returned_log.append(RevertMessages.test_cases_not_deleted.format(error=error)) | ||
| else: | ||
@@ -427,5 +444,3 @@ returned_log.append(RevertMessages.test_cases_deleted) | ||
| if error: | ||
| returned_log.append( | ||
| RevertMessages.section_not_deleted.format(error=error) | ||
| ) | ||
| returned_log.append(RevertMessages.section_not_deleted.format(error=error)) | ||
| else: | ||
@@ -436,7 +451,5 @@ returned_log.append(RevertMessages.section_deleted) | ||
| if error: | ||
| returned_log.append( | ||
| RevertMessages.suite_not_deleted.format(error=error) | ||
| ) | ||
| returned_log.append(RevertMessages.suite_not_deleted.format(error=error)) | ||
| else: | ||
| returned_log.append(RevertMessages.suite_deleted) | ||
| return returned_log |
@@ -22,9 +22,10 @@ from xml.etree.ElementTree import ParseError | ||
| type=click.Choice(["junit", "saucectl"], case_sensitive=False), | ||
| help="Optional special parser option for specialized JUnit reports." | ||
| help="Optional special parser option for specialized JUnit reports.", | ||
| ) | ||
| @click.option( | ||
| "-a", "--assign", | ||
| "-a", | ||
| "--assign", | ||
| "assign_failed_to", | ||
| metavar="", | ||
| help="Comma-separated list of user emails to assign failed test results to." | ||
| help="Comma-separated list of user emails to assign failed test results to.", | ||
| ) | ||
@@ -34,10 +35,6 @@ @click.option( | ||
| metavar="", | ||
| help="Comma-separated list of reference IDs to append to the test run (up to 250 characters total)." | ||
| help="Comma-separated list of reference IDs to append to the test run (up to 250 characters total).", | ||
| ) | ||
| @click.option("--json-output", is_flag=True, help="Output reference operation results in JSON format.") | ||
| @click.option( | ||
| "--json-output", | ||
| is_flag=True, | ||
| help="Output reference operation results in JSON format." | ||
| ) | ||
| @click.option( | ||
| "--update-existing-cases", | ||
@@ -47,3 +44,3 @@ type=click.Choice(["yes", "no"], case_sensitive=False), | ||
| metavar="", | ||
| help="Update existing TestRail cases with values from JUnit properties (default: no)." | ||
| help="Update existing TestRail cases with values from JUnit properties (default: no).", | ||
| ) | ||
@@ -55,3 +52,3 @@ @click.option( | ||
| metavar="", | ||
| help="Strategy for combining incoming values with existing case field values, whether to append or replace (default: append)." | ||
| help="Strategy for combining incoming values with existing case field values, whether to append or replace (Note: only applies to references; default: append).", | ||
| ) | ||
@@ -65,3 +62,3 @@ @click.pass_context | ||
| environment.check_for_required_parameters() | ||
| if environment.test_run_ref is not None: | ||
@@ -72,3 +69,3 @@ validation_error = _validate_test_run_ref(environment.test_run_ref) | ||
| exit(1) | ||
| settings.ALLOW_ELAPSED_MS = environment.allow_ms | ||
@@ -84,16 +81,16 @@ print_config(environment) | ||
| if run_id is None and hasattr(result_uploader, 'last_run_id'): | ||
| if run_id is None and hasattr(result_uploader, "last_run_id"): | ||
| run_id = result_uploader.last_run_id | ||
| # Collect case update results | ||
| if hasattr(result_uploader, 'case_update_results'): | ||
| if hasattr(result_uploader, "case_update_results"): | ||
| case_update_results = result_uploader.case_update_results | ||
| if environment.test_run_ref and run_id: | ||
| _handle_test_run_references(environment, run_id) | ||
| # Handle case update reporting if enabled | ||
| if environment.update_existing_cases == "yes" and case_update_results is not None: | ||
| _handle_case_update_reporting(environment, case_update_results) | ||
| # Exit with error if there were case update failures (after reporting) | ||
@@ -126,10 +123,10 @@ if case_update_results.get("failed_cases"): | ||
| return "Error: --test-run-ref cannot be empty or whitespace-only" | ||
| refs = [ref.strip() for ref in test_run_ref.split(',') if ref.strip()] | ||
| refs = [ref.strip() for ref in test_run_ref.split(",") if ref.strip()] | ||
| if not refs: | ||
| return "Error: --test-run-ref contains no valid references (malformed input)" | ||
| if len(test_run_ref) > 250: | ||
| return f"Error: --test-run-ref exceeds 250 character limit ({len(test_run_ref)} characters)" | ||
| return None | ||
@@ -146,10 +143,7 @@ | ||
| refs = [ref.strip() for ref in environment.test_run_ref.split(',') if ref.strip()] | ||
| project_client = ProjectBasedClient( | ||
| environment=environment, | ||
| suite=TestRailSuite(name="temp", suite_id=1) | ||
| ) | ||
| refs = [ref.strip() for ref in environment.test_run_ref.split(",") if ref.strip()] | ||
| project_client = ProjectBasedClient(environment=environment, suite=TestRailSuite(name="temp", suite_id=1)) | ||
| project_client.resolve_project() | ||
| environment.log(f"Appending references to test run {run_id}...") | ||
@@ -159,17 +153,12 @@ run_data, added_refs, skipped_refs, error_message = project_client.api_request_handler.append_run_references( | ||
| ) | ||
| if error_message: | ||
| environment.elog(f"Error: Failed to append references: {error_message}") | ||
| exit(1) | ||
| final_refs = run_data.get("refs", "") if run_data else "" | ||
| if environment.json_output: | ||
| # JSON output | ||
| result = { | ||
| "run_id": run_id, | ||
| "added": added_refs, | ||
| "skipped": skipped_refs, | ||
| "total_references": final_refs | ||
| } | ||
| result = {"run_id": run_id, "added": added_refs, "skipped": skipped_refs, "total_references": final_refs} | ||
| print(json.dumps(result, indent=2)) | ||
@@ -181,3 +170,5 @@ else: | ||
| environment.log(f" Newly added: {len(added_refs)} ({', '.join(added_refs) if added_refs else 'none'})") | ||
| environment.log(f" Skipped (duplicates): {len(skipped_refs)} ({', '.join(skipped_refs) if skipped_refs else 'none'})") | ||
| environment.log( | ||
| f" Skipped (duplicates): {len(skipped_refs)} ({', '.join(skipped_refs) if skipped_refs else 'none'})" | ||
| ) | ||
| if final_refs: | ||
@@ -192,7 +183,7 @@ environment.log(f" All references: {final_refs}") | ||
| import json | ||
| # Handle None input gracefully | ||
| if case_update_results is None: | ||
| return | ||
| if environment.json_output: | ||
@@ -204,3 +195,3 @@ # JSON output for case updates | ||
| "skipped_cases": len(case_update_results.get("skipped_cases", [])), | ||
| "failed_cases": len(case_update_results.get("failed_cases", [])) | ||
| "failed_cases": len(case_update_results.get("failed_cases", [])), | ||
| }, | ||
@@ -210,4 +201,4 @@ "details": { | ||
| "skipped_cases": case_update_results.get("skipped_cases", []), | ||
| "failed_cases": case_update_results.get("failed_cases", []) | ||
| } | ||
| "failed_cases": case_update_results.get("failed_cases", []), | ||
| }, | ||
| } | ||
@@ -220,3 +211,3 @@ print(json.dumps(result, indent=2)) | ||
| failed_cases = case_update_results.get("failed_cases", []) | ||
| if updated_cases or skipped_cases or failed_cases: | ||
@@ -227,3 +218,3 @@ environment.log("Case Reference Updates Summary:") | ||
| environment.log(f" Failed cases: {len(failed_cases)}") | ||
| if updated_cases: | ||
@@ -236,3 +227,3 @@ environment.log(" Updated case details:") | ||
| environment.log(f" C{case_id}: added {len(added)} refs, skipped {len(skipped)} duplicates") | ||
| if skipped_cases: | ||
@@ -244,3 +235,3 @@ environment.log(" Skipped case details:") | ||
| environment.log(f" C{case_id}: {reason}") | ||
| if failed_cases: | ||
@@ -247,0 +238,0 @@ environment.log(" Failed case details:") |
@@ -104,3 +104,4 @@ import trcli | ||
| - add_run: Create a new test run | ||
| - labels: Manage labels (projects, cases, and tests)""" | ||
| - labels: Manage labels (projects, cases, and tests) | ||
| - references: Manage references""" | ||
@@ -107,0 +108,0 @@ MISSING_COMMAND_SLOGAN = """Usage: trcli [OPTIONS] COMMAND [ARGS]...\nTry 'trcli --help' for help. |
Sorry, the diff of this file is too big to display
Alert delta unavailable
Currently unable to show alert delta for PyPI packages.
682985
2.19%61
1.67%12311
2.03%