Latest Threat Research:SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains.Details
Socket
Book a DemoInstallSign in
Socket

trcli

Package Overview
Dependencies
Maintainers
1
Versions
45
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

trcli - npm Package Compare versions

Comparing version
1.12.1
to
1.12.2
+273
tests/test_junit_parse_reference.py
"""
Unit tests for new features coverage - focused on critical missing areas.
Tests for --test-run-ref, case updates, and reference management functionality.
"""
import pytest
from unittest.mock import Mock, patch
import json
from trcli.commands.cmd_parse_junit import _validate_test_run_ref, _handle_case_update_reporting
class TestCmdParseJunitValidation:
    """Validation helpers from cmd_parse_junit.py: --test-run-ref checks and
    case-update result reporting."""

    def test_validate_test_run_ref_valid_input(self):
        """Well-formed reference lists (single, multiple, padded) validate cleanly."""
        for candidate in (
            "REF-123",
            "REF-123,REF-456,REF-789",
            "REF-123, REF-456 , REF-789",
        ):
            assert _validate_test_run_ref(candidate) is None

    def test_validate_test_run_ref_invalid_input(self):
        """Empty, comma-only, and over-long inputs each yield a descriptive error."""
        assert "cannot be empty" in _validate_test_run_ref("")
        assert "cannot be empty" in _validate_test_run_ref("   ")
        assert "malformed input" in _validate_test_run_ref(",,,")
        # 50 refs of the form "REF-NNN" joined by commas is ~400 chars, past the limit
        oversized = ",".join(f"REF-{i:03d}" for i in range(50))
        assert "250 character limit" in _validate_test_run_ref(oversized)

    def test_handle_case_update_reporting_console(self):
        """Console mode logs a summary line plus per-category counts."""
        env = Mock()
        env.json_output = False
        results = {
            "updated_cases": [
                {"case_id": 123, "case_title": "Test Case 1", "added_refs": ["REF-1"], "skipped_refs": []}
            ],
            "skipped_cases": [
                {"case_id": 456, "case_title": "Test Case 2", "reason": "All references already present", "skipped_refs": ["REF-2"]}
            ],
            "failed_cases": [
                {"case_id": 789, "case_title": "Test Case 3", "error": "API error"}
            ],
        }
        _handle_case_update_reporting(env, results)
        # All reporting should have gone through env.log; join the calls and
        # check each expected fragment appears somewhere in the output.
        assert env.log.called
        logged = " ".join(call[0][0] for call in env.log.call_args_list)
        for fragment in (
            "Case Reference Updates Summary:",
            "Updated cases: 1",
            "Skipped cases: 1",
            "Failed cases: 1",
        ):
            assert fragment in logged

    @patch('builtins.print')
    def test_handle_case_update_reporting_json(self, mock_print):
        """JSON mode prints a parseable document with summary and details."""
        env = Mock()
        env.json_output = True
        results = {
            "updated_cases": [{"case_id": 123, "added_refs": ["REF-1"]}],
            "skipped_cases": [],
            "failed_cases": [],
        }
        _handle_case_update_reporting(env, results)
        assert mock_print.called
        payload = json.loads(mock_print.call_args[0][0])
        assert "summary" in payload
        assert "details" in payload
        assert payload["summary"]["updated_cases"] == 1

    def test_handle_case_update_reporting_none_input(self):
        """A None results dict is ignored: the helper returns early without error."""
        env = Mock()
        assert _handle_case_update_reporting(env, None) is None
class TestReferenceDeduplication:
    """Behavioural checks for the reference deduplication and parsing logic."""

    def test_reference_deduplication_logic(self):
        """Order-preserving dedup (mirrors the logic in api_request_handler.py)."""
        raw = ["REF-1", "REF-1", "REF-2", "REF-2", "REF-1", "REF-3"]
        # Apply deduplication logic (same as in api_request_handler.py)
        seen = set()
        deduped = []
        for item in raw:
            if item in seen:
                continue
            seen.add(item)
            deduped.append(item)
        # First occurrence wins; order of first appearance is preserved.
        assert deduped == ["REF-1", "REF-2", "REF-3"]
        assert len(deduped) == 3

    def test_reference_string_parsing(self):
        """Comma-separated strings split into trimmed, non-empty tokens."""
        fixtures = [
            ("REF-1,REF-2,REF-3", ["REF-1", "REF-2", "REF-3"]),
            ("REF-1, REF-2 , REF-3", ["REF-1", "REF-2", "REF-3"]),
            ("REF-1,,REF-2", ["REF-1", "REF-2"]),
            (" REF-1 , REF-2 ", ["REF-1", "REF-2"]),
        ]
        for raw_value, expected_tokens in fixtures:
            # Apply parsing logic (same as in api_request_handler.py)
            tokens = [piece.strip() for piece in raw_value.split(',') if piece.strip()]
            assert tokens == expected_tokens

    def test_character_limit_validation(self):
        """Fixture generators land on the right side of the 250/2000 char limits."""
        # 30 short refs stay under the 250-char run-reference limit...
        under_limit = ",".join(f"REF-{i:02d}" for i in range(30))
        assert len(under_limit) < 250
        # ...while 50 longer ones exceed it.
        over_limit = ",".join(f"REF-{i:03d}" for i in range(50))
        assert len(over_limit) > 250
        # 100 verbose refs exceed the 2000-char case-reference limit.
        case_refs = ",".join(f"VERY-LONG-REFERENCE-NAME-{i:03d}" for i in range(100))
        assert len(case_refs) > 2000
class TestJunitReferenceExtraction:
    """Test coverage for JUnit reference extraction logic."""

    def test_testrail_case_field_parsing(self):
        """Stripping the ``refs:`` prefix yields the raw reference payload.

        Mirrors the parsing logic in junit_xml.py; note that spaces after
        the prefix are preserved by the slice.
        """
        test_cases = [
            ("refs:REF-1", "REF-1"),
            ("refs:REF-1,REF-2", "REF-1,REF-2"),
            ("refs:REF-1,REF-2,REF-3", "REF-1,REF-2,REF-3"),
            ("refs: REF-1 , REF-2 ", " REF-1 , REF-2 "),  # Spaces preserved
        ]
        for testrail_field, expected_refs in test_cases:
            # Every fixture carries the prefix, so every case is asserted.
            assert testrail_field.startswith("refs:")
            assert testrail_field[5:] == expected_refs  # Remove "refs:" prefix

    def test_case_refs_validation(self):
        """A refs payload is valid only when non-empty after stripping.

        Bug fix: the original asserted only inside an ``if startswith("refs:")``
        guard, so the un-prefixed fixtures ("", " ") were silently skipped and
        their expectations never checked. Inputs without the ``refs:`` prefix
        are now explicitly treated as invalid, matching junit_xml.py which
        ignores such fields entirely.
        """
        test_cases = [
            ("", False),
            (" ", False),
            ("refs:", False),
            ("refs: ", False),
            ("refs:REF-1", True),
            ("refs: REF-1 ", True),
        ]
        for case_refs, should_be_valid in test_cases:
            if case_refs.startswith("refs:"):
                # Apply validation logic (same as in junit_xml.py)
                refs_content = case_refs[5:]
                is_valid = bool(refs_content and refs_content.strip())
            else:
                # No "refs:" prefix at all -> nothing to extract, never valid.
                is_valid = False
            assert is_valid == should_be_valid
class TestCaseUpdateWorkflow:
    """Test coverage for case update workflow logic"""

    def test_case_categorization_logic(self):
        """Cases are routed to update/skip buckets by origin and refs presence."""
        fixtures = [
            {"case_id": 123, "has_junit_refs": True},   # pre-existing with refs
            {"case_id": 456, "has_junit_refs": True},   # freshly created
            {"case_id": 789, "has_junit_refs": False},  # nothing to apply
        ]
        fresh_ids = {456}
        to_update, to_skip = [], []
        for entry in fixtures:
            cid = entry["case_id"]
            # Newly created cases are skipped first; then cases lacking refs.
            if cid in fresh_ids:
                to_skip.append({"case_id": cid, "reason": "Newly created case"})
            elif not entry["has_junit_refs"]:
                to_skip.append({"case_id": cid, "reason": "No JUnit refs"})
            else:
                to_update.append(entry)
        # Only the pre-existing case with refs is updatable.
        assert len(to_update) == 1
        assert to_update[0]["case_id"] == 123
        assert len(to_skip) == 2
        assert any(s["case_id"] == 456 and "Newly created" in s["reason"] for s in to_skip)
        assert any(s["case_id"] == 789 and "No JUnit refs" in s["reason"] for s in to_skip)

    def test_update_result_categorization(self):
        """API outcomes split into updated / skipped / failed result buckets."""
        api_responses = [
            (True, "Success", ["REF-1"], []),   # refs actually added
            (True, "Success", [], ["REF-2"]),   # everything was a duplicate
            (False, "API Error", [], []),       # call failed outright
        ]
        updated, skipped, failed = [], [], []
        for offset, (ok, message, added, dupes) in enumerate(api_responses):
            cid = 100 + offset
            if not ok:
                failed.append({"case_id": cid, "error": message})
            elif not added:
                # Success but nothing added means all refs were duplicates.
                skipped.append({
                    "case_id": cid,
                    "reason": "All references already present",
                    "skipped_refs": dupes,
                })
            else:
                updated.append({
                    "case_id": cid,
                    "added_refs": added,
                    "skipped_refs": dupes,
                })
        assert len(updated) == 1
        assert updated[0]["case_id"] == 100
        assert updated[0]["added_refs"] == ["REF-1"]
        assert len(skipped) == 1
        assert skipped[0]["case_id"] == 101
        assert "All references already present" in skipped[0]["reason"]
        assert len(failed) == 1
        assert failed[0]["case_id"] == 102
        assert failed[0]["error"] == "API Error"
+1
-1
Metadata-Version: 2.4
Name: trcli
Version: 1.12.1
Version: 1.12.2
License-File: LICENSE.md

@@ -5,0 +5,0 @@ Requires-Dist: click<8.2.2,>=8.1.0

@@ -36,3 +36,3 @@ ![Tests](https://github.com/gurock/trcli/actions/workflows/python-app.yml/badge.svg)

```
TestRail CLI v1.12.1
TestRail CLI v1.12.2
Copyright 2025 Gurock Software GmbH - www.gurock.com

@@ -91,2 +91,3 @@ Supported and loaded modules:

parse_robot Parse Robot Framework report and upload results to TestRail
references Manage references in TestRail
```

@@ -141,4 +142,12 @@

reports.
-a, --assign Comma-separated list of user emails to assign failed test
results to.
-a, --assign Comma-separated list of user emails to assign failed
test results to.
--test-run-ref Comma-separated list of reference IDs to append to the
test run (up to 250 characters total).
--json-output Output reference operation results in JSON format.
--update-existing-cases Update existing TestRail cases with values from
JUnit properties (default: no).
--update-strategy Strategy for combining incoming values with
existing case field values, whether to append or
replace (default: append).
--help Show this message and exit.

@@ -145,0 +154,0 @@ ```

@@ -177,4 +177,4 @@ import pytest

# Try to add more refs that would exceed 2000 chars
long_refs = ["REQ-" + "X" * 500 for _ in range(5)]
# Try to add more refs that would exceed 2000 chars (using unique refs to account for deduplication)
long_refs = [f"REQ-{i}-" + "X" * 500 for i in range(5)]

@@ -189,2 +189,39 @@ success, error = references_handler.add_case_references(

def test_add_case_references_deduplication(self, references_handler):
    """Duplicate references passed to add_case_references are collapsed.

    The handler fetches the case's existing refs via send_get, merges the
    incoming list into them, and the final update_case payload contains
    each reference exactly once, in first-seen order.
    """
    # Mock get_case response with existing refs
    mock_get_case_response = APIClientResult(
        status_code=200,
        response_text={
            "id": 1,
            "title": "Test Case 1",
            "refs": "REQ-1"
        },
        error_message=None
    )
    # Mock update_case response
    mock_update_response = APIClientResult(
        status_code=200,
        response_text={"id": 1, "refs": "REQ-1,REQ-2,REQ-3"},
        error_message=None
    )
    with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response), \
            patch.object(references_handler.client, 'send_post', return_value=mock_update_response):
        success, error = references_handler.add_case_references(
            case_id=1,
            references=["REQ-2", "REQ-2", "REQ-3", "REQ-2"]  # Duplicates should be removed
        )
        assert success is True
        assert error == ""
        # Verify the API call has deduplicated references
        references_handler.client.send_post.assert_called_once_with(
            "update_case/1",
            {'refs': 'REQ-1,REQ-2,REQ-3'}  # Duplicates removed, order preserved
        )
def test_update_case_references_success(self, references_handler):

@@ -217,4 +254,4 @@ """Test successful update of references on a test case"""

"""Test character limit validation for update"""
# Try to update with refs that exceed 2000 chars
long_refs = ["REQ-" + "X" * 500 for _ in range(5)]
# Try to update with refs that exceed 2000 chars (using unique refs to account for deduplication)
long_refs = [f"REQ-{i}-" + "X" * 500 for i in range(5)]

@@ -229,2 +266,27 @@ success, error = references_handler.update_case_references(

def test_update_case_references_deduplication(self, references_handler):
    """Duplicate references passed to update_case_references are collapsed.

    Only send_post is mocked here — this path does not fetch the existing
    case first; the deduplicated input list is sent as the new refs value.
    """
    # Mock update_case response
    mock_update_response = APIClientResult(
        status_code=200,
        response_text={"id": 1, "refs": "REQ-1,REQ-2"},
        error_message=None
    )
    with patch.object(references_handler.client, 'send_post', return_value=mock_update_response):
        success, error = references_handler.update_case_references(
            case_id=1,
            references=["REQ-1", "REQ-1", "REQ-2", "REQ-1"]  # Duplicates should be removed
        )
        assert success is True
        assert error == ""
        # Verify the API call has deduplicated references
        references_handler.client.send_post.assert_called_once_with(
            "update_case/1",
            {'refs': 'REQ-1,REQ-2'}  # Duplicates removed, order preserved
        )
def test_update_case_references_api_failure(self, references_handler):

@@ -231,0 +293,0 @@ """Test API failure during update"""

@@ -172,3 +172,3 @@ import json

) -> TestRailSuite:
"""helper method to delete junit_result_unparsed field,
"""helper method to delete junit_result_unparsed field and temporary junit_case_refs attribute,
which asdict() method of dataclass can't handle"""

@@ -178,2 +178,5 @@ for section in test_rail_suite.testsections:

case.result.junit_result_unparsed = []
# Remove temporary junit_case_refs attribute if it exists
if hasattr(case, '_junit_case_refs'):
delattr(case, '_junit_case_refs')
return test_rail_suite

@@ -56,3 +56,3 @@ import json

file_reader = RobotParser(env)
read_junit = file_reader.parse_file()[0]
read_junit = self.__clear_unparsable_junit_elements(file_reader.parse_file()[0])
parsing_result_json = asdict(read_junit)

@@ -64,2 +64,14 @@ file_json = open(expected_path)

def __clear_unparsable_junit_elements(
    self, test_rail_suite: TestRailSuite
) -> TestRailSuite:
    """Helper method to delete the temporary ``_junit_case_refs`` attribute,
    which the dataclass ``asdict()`` method can't handle.

    The attribute is set ad hoc by the parser on case objects and is not a
    declared dataclass field, so it must be stripped before serialization.
    Mutates the suite in place and returns it for chaining.
    """
    for section in test_rail_suite.testsections:
        for case in section.testcases:
            # Remove temporary junit_case_refs attribute if it exists
            if hasattr(case, '_junit_case_refs'):
                delattr(case, '_junit_case_refs')
    return test_rail_suite
@pytest.mark.parse_robot

@@ -66,0 +78,0 @@ def test_robot_xml_parser_file_not_found(self):

Metadata-Version: 2.4
Name: trcli
Version: 1.12.1
Version: 1.12.2
License-File: LICENSE.md

@@ -5,0 +5,0 @@ Requires-Dist: click<8.2.2,>=8.1.0

@@ -15,2 +15,3 @@ LICENSE.md

tests/test_dataclass_creation.py
tests/test_junit_parse_reference.py
tests/test_junit_parser.py

@@ -17,0 +18,0 @@ tests/test_load_data_from_config.py

@@ -1,1 +0,1 @@

__version__ = "1.12.1"
__version__ = "1.12.2"
import time
from beartype.typing import Tuple, Callable, List
from beartype.typing import Tuple, Callable, List, Dict

@@ -20,2 +20,3 @@ from trcli.api.project_based_client import ProjectBasedClient

self.skip_run = skip_run
self.last_run_id = None
if hasattr(self.environment, 'special_parser') and self.environment.special_parser == "saucectl":

@@ -106,4 +107,19 @@ self.run_name += f" ({suite.name})"

# Update existing cases with JUnit references if enabled
case_update_results = None
case_update_failed = []
if hasattr(self.environment, 'update_existing_cases') and self.environment.update_existing_cases == "yes":
self.environment.log("Updating existing cases with JUnit references...")
case_update_results, case_update_failed = self.update_existing_cases_with_junit_refs(added_test_cases)
if case_update_results.get("updated_cases"):
self.environment.log(f"Updated {len(case_update_results['updated_cases'])} existing case(s) with references.")
if case_update_results.get("failed_cases"):
self.environment.elog(f"Failed to update {len(case_update_results['failed_cases'])} case(s).")
# Create/update test run
run_id, error_message = self.create_or_update_test_run()
self.last_run_id = run_id
# Store case update results for later reporting
self.case_update_results = case_update_results
if error_message:

@@ -152,2 +168,4 @@ revert_logs = self.rollback_changes(

pass
# Note: Error exit for case update failures is handled in cmd_parse_junit.py after reporting

@@ -208,2 +226,90 @@ def _validate_and_store_user_ids(self):

def update_existing_cases_with_junit_refs(self, added_test_cases: List[Dict] = None) -> Tuple[Dict, List]:
    """
    Update existing test cases with references from JUnit properties.
    Excludes newly created cases to avoid unnecessary API calls.

    Walks every test case in the provider's suite data and, for cases that
    already exist in TestRail (have a case_id) and carry parsed JUnit refs
    (the temporary ``_junit_case_refs`` attribute), calls
    ``api_request_handler.update_existing_case_references`` with the
    configured update strategy.

    :param added_test_cases: List of cases that were just created (to be excluded)
    :returns: Tuple of (update_results, failed_cases); update_results has
        "updated_cases", "skipped_cases" and "failed_cases" lists, and
        failed_cases repeats the failure entries for the caller's exit logic
    """
    # Guard: do nothing unless the option is present and set to "yes".
    if not hasattr(self.environment, 'update_existing_cases') or self.environment.update_existing_cases != "yes":
        return {}, []  # Feature not enabled
    # Create a set of newly created case IDs to exclude
    newly_created_case_ids = set()
    if added_test_cases:
        # Ensure all case IDs are integers for consistent comparison
        newly_created_case_ids = {int(case.get('case_id')) for case in added_test_cases if case.get('case_id')}
    update_results = {
        "updated_cases": [],
        "skipped_cases": [],
        "failed_cases": []
    }
    failed_cases = []
    # Defaults to "append" when the option is absent on the environment.
    strategy = getattr(self.environment, 'update_strategy', 'append')
    # Process all test cases in all sections
    for section in self.api_request_handler.suites_data_from_provider.testsections:
        for test_case in section.testcases:
            # Only process cases that have a case_id (existing cases) and JUnit refs
            # AND exclude newly created cases
            if (test_case.case_id and
                    hasattr(test_case, '_junit_case_refs') and test_case._junit_case_refs and
                    int(test_case.case_id) not in newly_created_case_ids):
                try:
                    success, error_msg, added_refs, skipped_refs = self.api_request_handler.update_existing_case_references(
                        test_case.case_id, test_case._junit_case_refs, strategy
                    )
                    if success:
                        if added_refs:
                            # Only count as "updated" if references were actually added
                            update_results["updated_cases"].append({
                                "case_id": test_case.case_id,
                                "case_title": test_case.title,
                                "added_refs": added_refs,
                                "skipped_refs": skipped_refs
                            })
                        else:
                            # If no refs were added (all were duplicates or no valid refs), count as skipped
                            reason = "All references already present" if skipped_refs else "No valid references to process"
                            update_results["skipped_cases"].append({
                                "case_id": test_case.case_id,
                                "case_title": test_case.title,
                                "reason": reason,
                                "skipped_refs": skipped_refs
                            })
                    else:
                        error_info = {
                            "case_id": test_case.case_id,
                            "case_title": test_case.title,
                            "error": error_msg
                        }
                        update_results["failed_cases"].append(error_info)
                        failed_cases.append(error_info)
                        self.environment.elog(f"Failed to update case C{test_case.case_id}: {error_msg}")
                except Exception as e:
                    # Defensive: one bad case must not abort the whole sweep.
                    error_info = {
                        "case_id": test_case.case_id,
                        "case_title": test_case.title,
                        "error": str(e)
                    }
                    update_results["failed_cases"].append(error_info)
                    failed_cases.append(error_info)
                    self.environment.elog(f"Exception updating case C{test_case.case_id}: {str(e)}")
            elif (test_case.case_id and
                    hasattr(test_case, '_junit_case_refs') and test_case._junit_case_refs and
                    int(test_case.case_id) in newly_created_case_ids):
                # Skip newly created cases - they already have their references set
                update_results["skipped_cases"].append({
                    "case_id": test_case.case_id,
                    "case_title": test_case.title,
                    "reason": "Newly created case - references already set during creation"
                })
    return update_results, failed_cases

@@ -210,0 +316,0 @@ def add_missing_sections(self, project_id: int) -> Tuple[List, int]:

@@ -72,2 +72,6 @@ import os

self.run_refs = None
self.test_run_ref = None
self.json_output = None
self.update_existing_cases = None
self.update_strategy = None
self.proxy = None

@@ -74,0 +78,0 @@ self.assign_failed_to = None # Add proxy related attributes

@@ -30,2 +30,26 @@ from xml.etree.ElementTree import ParseError

)
@click.option(
"--test-run-ref",
metavar="",
help="Comma-separated list of reference IDs to append to the test run (up to 250 characters total)."
)
@click.option(
"--json-output",
is_flag=True,
help="Output reference operation results in JSON format."
)
@click.option(
"--update-existing-cases",
type=click.Choice(["yes", "no"], case_sensitive=False),
default="no",
metavar="",
help="Update existing TestRail cases with values from JUnit properties (default: no)."
)
@click.option(
"--update-strategy",
type=click.Choice(["append", "replace"], case_sensitive=False),
default="append",
metavar="",
help="Strategy for combining incoming values with existing case field values, whether to append or replace (default: append)."
)
@click.pass_context

@@ -38,2 +62,9 @@ @pass_environment

environment.check_for_required_parameters()
if environment.test_run_ref is not None:
validation_error = _validate_test_run_ref(environment.test_run_ref)
if validation_error:
environment.elog(validation_error)
exit(1)
settings.ALLOW_ELAPSED_MS = environment.allow_ms

@@ -43,5 +74,25 @@ print_config(environment)

parsed_suites = JunitParser(environment).parse_file()
run_id = None
case_update_results = {}
for suite in parsed_suites:
result_uploader = ResultsUploader(environment=environment, suite=suite)
result_uploader.upload_results()
if run_id is None and hasattr(result_uploader, 'last_run_id'):
run_id = result_uploader.last_run_id
# Collect case update results
if hasattr(result_uploader, 'case_update_results'):
case_update_results = result_uploader.case_update_results
if environment.test_run_ref and run_id:
_handle_test_run_references(environment, run_id)
# Handle case update reporting if enabled
if environment.update_existing_cases == "yes" and case_update_results is not None:
_handle_case_update_reporting(environment, case_update_results)
# Exit with error if there were case update failures (after reporting)
if case_update_results.get("failed_cases"):
exit(1)
except FileNotFoundError:

@@ -62,1 +113,125 @@ environment.elog(FAULT_MAPPING["missing_file"])

exit(1)
def _validate_test_run_ref(test_run_ref: str) -> str:
"""
Validate the test-run-ref input.
Returns error message if invalid, None if valid.
"""
if not test_run_ref or not test_run_ref.strip():
return "Error: --test-run-ref cannot be empty or whitespace-only"
refs = [ref.strip() for ref in test_run_ref.split(',') if ref.strip()]
if not refs:
return "Error: --test-run-ref contains no valid references (malformed input)"
if len(test_run_ref) > 250:
return f"Error: --test-run-ref exceeds 250 character limit ({len(test_run_ref)} characters)"
return None
def _handle_test_run_references(environment: Environment, run_id: int):
    """
    Append the --test-run-ref references to the given test run and report
    the outcome, in JSON or console form depending on --json-output.

    Exits the process with status 1 when the API reports an error.

    :param environment: CLI environment holding option values and log/elog sinks
    :param run_id: ID of the run the references are appended to
    """
    # Imported lazily, inside the function, rather than at module top level.
    from trcli.api.project_based_client import ProjectBasedClient
    from trcli.data_classes.dataclass_testrail import TestRailSuite
    import json
    # Split the comma-separated option value, dropping blank fragments.
    refs = [ref.strip() for ref in environment.test_run_ref.split(',') if ref.strip()]
    # A placeholder suite is used here; only project resolution is needed
    # before talking to the run-references endpoint.
    project_client = ProjectBasedClient(
        environment=environment,
        suite=TestRailSuite(name="temp", suite_id=1)
    )
    project_client.resolve_project()
    environment.log(f"Appending references to test run {run_id}...")
    run_data, added_refs, skipped_refs, error_message = project_client.api_request_handler.append_run_references(
        run_id, refs
    )
    if error_message:
        environment.elog(f"Error: Failed to append references: {error_message}")
        exit(1)
    # The API returns the run's full refs string; empty when none were set.
    final_refs = run_data.get("refs", "") if run_data else ""
    if environment.json_output:
        # JSON output
        result = {
            "run_id": run_id,
            "added": added_refs,
            "skipped": skipped_refs,
            "total_references": final_refs
        }
        print(json.dumps(result, indent=2))
    else:
        # Console output: counts plus the reference lists themselves.
        environment.log(f"References appended successfully:")
        environment.log(f" Run ID: {run_id}")
        environment.log(f" Total references: {len(final_refs.split(',')) if final_refs else 0}")
        environment.log(f" Newly added: {len(added_refs)} ({', '.join(added_refs) if added_refs else 'none'})")
        environment.log(f" Skipped (duplicates): {len(skipped_refs)} ({', '.join(skipped_refs) if skipped_refs else 'none'})")
        if final_refs:
            environment.log(f" All references: {final_refs}")
def _handle_case_update_reporting(environment: Environment, case_update_results: dict):
    """
    Report the results of updating existing cases with JUnit references.

    Emits either a JSON document (--json-output) or a human-readable console
    summary via environment.log. A None results dict is a no-op.

    :param environment: CLI environment holding option values and the log sink
    :param case_update_results: dict with "updated_cases", "skipped_cases"
        and "failed_cases" lists, or None when no updates were attempted
    """
    import json
    # Handle None input gracefully
    if case_update_results is None:
        return
    if environment.json_output:
        # JSON output for case updates
        result = {
            "summary": {
                "updated_cases": len(case_update_results.get("updated_cases", [])),
                "skipped_cases": len(case_update_results.get("skipped_cases", [])),
                "failed_cases": len(case_update_results.get("failed_cases", []))
            },
            "details": {
                "updated_cases": case_update_results.get("updated_cases", []),
                "skipped_cases": case_update_results.get("skipped_cases", []),
                "failed_cases": case_update_results.get("failed_cases", [])
            }
        }
        print(json.dumps(result, indent=2))
    else:
        # Console output for case updates
        updated_cases = case_update_results.get("updated_cases", [])
        skipped_cases = case_update_results.get("skipped_cases", [])
        failed_cases = case_update_results.get("failed_cases", [])
        # Only print the summary when there is anything at all to report.
        if updated_cases or skipped_cases or failed_cases:
            environment.log("Case Reference Updates Summary:")
            environment.log(f" Updated cases: {len(updated_cases)}")
            environment.log(f" Skipped cases: {len(skipped_cases)}")
            environment.log(f" Failed cases: {len(failed_cases)}")
            if updated_cases:
                environment.log(" Updated case details:")
                for case_info in updated_cases:
                    case_id = case_info["case_id"]
                    added = case_info.get("added_refs", [])
                    skipped = case_info.get("skipped_refs", [])
                    environment.log(f" C{case_id}: added {len(added)} refs, skipped {len(skipped)} duplicates")
            if skipped_cases:
                environment.log(" Skipped case details:")
                for case_info in skipped_cases:
                    case_id = case_info["case_id"]
                    reason = case_info.get("reason", "Unknown reason")
                    environment.log(f" C{case_id}: {reason}")
            if failed_cases:
                environment.log(" Failed case details:")
                for case_info in failed_cases:
                    case_id = case_info["case_id"]
                    error = case_info.get("error", "Unknown error")
                    environment.log(f" C{case_id}: {error}")

@@ -148,2 +148,3 @@ import glob

case_fields = []
case_refs = None
sauce_session = None

@@ -170,7 +171,12 @@

text = prop._elem.text.strip() if prop._elem.text else None
case_fields.append(text or value)
field_value = text or value
case_fields.append(field_value)
# Extract refs for case updates
if field_value and field_value.startswith("refs:"):
case_refs = field_value[5:].strip() # Remove "refs:" prefix
elif name.startswith("testrail_sauce_session"):
sauce_session = value
return result_steps, attachments, result_fields, comments, case_fields, sauce_session
return result_steps, attachments, result_fields, comments, case_fields, case_refs, sauce_session

@@ -200,3 +206,3 @@ def _resolve_case_fields(self, result_fields, case_fields):

case_id, case_name = self._extract_case_id_and_name(case)
result_steps, attachments, result_fields, comments, case_fields, sauce_session = self._parse_case_properties(
result_steps, attachments, result_fields, comments, case_fields, case_refs, sauce_session = self._parse_case_properties(
case)

@@ -225,10 +231,23 @@ result_fields_dict, case_fields_dict = self._resolve_case_fields(result_fields, case_fields)

test_cases.append(TestRailCase(
title=TestRailCaseFieldsOptimizer.extract_last_words(case_name,
TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH),
case_id=case_id,
result=result,
custom_automation_id=automation_id,
case_fields=case_fields_dict
))
# Create TestRailCase kwargs
case_kwargs = {
"title": TestRailCaseFieldsOptimizer.extract_last_words(case_name,
TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH),
"case_id": case_id,
"result": result,
"custom_automation_id": automation_id,
"case_fields": case_fields_dict,
}
# Only set refs field if case_refs has actual content
if case_refs and case_refs.strip():
case_kwargs["refs"] = case_refs
test_case = TestRailCase(**case_kwargs)
# Store JUnit references as a temporary attribute for case updates (not serialized)
if case_refs and case_refs.strip():
test_case._junit_case_refs = case_refs
test_cases.append(test_case)

@@ -235,0 +254,0 @@ return test_cases

Sorry, the diff of this file is too big to display