Latest Threat Research: SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains. Details
Socket
Book a DemoSign in
Socket

trcli

Package Overview
Dependencies
Maintainers
1
Versions
46
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

trcli - pypi Package Compare versions

Comparing version
1.12.0
to
1.12.1
+101
tests/test_matchers_parser.py
import pytest
from trcli.data_classes.data_parsers import MatchersParser
class TestMatchersParser:
    """Unit tests for ``MatchersParser.parse_name_with_id``.

    Exercises the historical ``C<id>`` naming patterns (space, underscore
    and bracket separators) together with the JUnit 5 parenthesized method
    names such as ``test_name_C123()``.
    """

    @pytest.mark.parametrize(
        "raw, want_id, want_name",
        [
            # Long-standing patterns: space-, underscore- and bracket-separated IDs.
            ("C123 my test case", 123, "my test case"),
            ("my test case C123", 123, "my test case"),
            ("C123_my_test_case", 123, "my_test_case"),
            ("my_test_case_C123", 123, "my_test_case"),
            ("module_1_C123_my_test_case", 123, "module_1_my_test_case"),
            ("[C123] my test case", 123, "my test case"),
            ("my test case [C123]", 123, "my test case"),
            ("module 1 [C123] my test case", 123, "module 1 my test case"),
            # JUnit 5: trailing "()" directly after the ID.
            ("test_name_C120013()", 120013, "test_name"),
            ("testMethod_C123()", 123, "testMethod"),
            ("my_test_C456()", 456, "my_test"),
            ("C789_test_name()", 789, "test_name()"),
            ("C100 test_name()", 100, "test_name()"),
            # JUnit 5: "()" carrying parameters after the ID.
            ("test_name_C120013(TestParam)", 120013, "test_name"),
            ("test_C456(param1, param2)", 456, "test"),
            ("complexTest_C999(String param, int value)", 999, "complexTest"),
            # Parenthesis edge cases where "()" belongs to the name, not the ID.
            ("myTest_C789()", 789, "myTest"),
            ("C200_method()", 200, "method()"),
            ("[C300] test_case()", 300, "test_case()"),
            ("test [C400] method()", 400, "test method()"),
            # Inputs with no extractable ID must come back unchanged.
            ("test_name_C()", None, "test_name_C()"),
            ("test_name_123()", None, "test_name_123()"),
            ("test_name", None, "test_name"),
            ("C_test_name", None, "C_test_name"),
            ("test_Cabc_name", None, "test_Cabc_name"),
            # Lowercase "c" prefixes are accepted as well.
            ("c123_test_name", 123, "test_name"),
            ("test_name_c456", 456, "test_name"),
            ("[c789] test_name", 789, "test_name"),
        ],
    )
    def test_parse_name_with_id_patterns(self, raw, want_id, want_name):
        """Every supported naming pattern yields the expected (id, name) pair."""
        got_id, got_name = MatchersParser.parse_name_with_id(raw)
        assert got_id == want_id, f"Expected ID {want_id}, got {got_id} for input '{raw}'"
        assert got_name == want_name, f"Expected name '{want_name}', got '{got_name}' for input '{raw}'"

    def test_parse_name_with_id_junit5_specific(self):
        """The exact JUnit 5 examples from the original bug report parse correctly."""
        reported_cases = (
            ("test_name_C120013()", 120013, "test_name"),   # newly supported
            ("test_name_C120013", 120013, "test_name"),     # pre-existing behavior
            ("C120013_test_name()", 120013, "test_name()"), # leading-ID variant
        )
        for raw, want_id, want_name in reported_cases:
            got_id, got_name = MatchersParser.parse_name_with_id(raw)
            assert got_id == want_id, f"JUnit 5 case failed: {raw}"
            assert got_name == want_name, f"JUnit 5 name failed: {raw}"

    def test_parse_name_with_id_regression(self):
        """All patterns documented in the method docstring still parse (regression guard)."""
        documented_patterns = (
            ("C123 my test case", 123, "my test case"),
            ("my test case C123", 123, "my test case"),
            ("C123_my_test_case", 123, "my_test_case"),
            ("my_test_case_C123", 123, "my_test_case"),
            ("module_1_C123_my_test_case", 123, "module_1_my_test_case"),
            ("[C123] my test case", 123, "my test case"),
            ("my test case [C123]", 123, "my test case"),
            ("module 1 [C123] my test case", 123, "module 1 my test case"),
        )
        for raw, want_id, want_name in documented_patterns:
            got_id, got_name = MatchersParser.parse_name_with_id(raw)
            assert got_id == want_id, f"Regression failed for: {raw}"
            assert got_name == want_name, f"Regression name failed for: {raw}"

    def test_parse_name_with_id_empty_and_none(self):
        """Blank inputs produce no ID and are returned verbatim."""
        for blank in ("", " "):
            got_id, got_name = MatchersParser.parse_name_with_id(blank)
            assert got_id is None
            assert got_name == blank
+1
-1
Metadata-Version: 2.4
Name: trcli
Version: 1.12.0
Version: 1.12.1
License-File: LICENSE.md

@@ -5,0 +5,0 @@ Requires-Dist: click<8.2.2,>=8.1.0

@@ -36,3 +36,3 @@ ![Tests](https://github.com/gurock/trcli/actions/workflows/python-app.yml/badge.svg)

```
TestRail CLI v1.12.0
TestRail CLI v1.12.1
Copyright 2025 Gurock Software GmbH - www.gurock.com

@@ -51,3 +51,3 @@ Supported and loaded modules:

$ trcli --help
TestRail CLI v1.12.0
TestRail CLI v1.12.1
Copyright 2025 Gurock Software GmbH - www.gurock.com

@@ -141,2 +141,4 @@ Usage: trcli [OPTIONS] COMMAND [ARGS]...

reports.
-a, --assign Comma-separated list of user emails to assign failed test
results to.
--help Show this message and exit.

@@ -272,2 +274,46 @@ ```

```/api/v2/get_statuses```
### Auto-Assigning Failed Tests
The `--assign` (or `-a`) option allows you to automatically assign failed test results to specific TestRail users. This feature is particularly useful in CI/CD environments where you want to automatically assign failures to responsible team members for investigation.
#### Usage
```shell
# Assign failed tests to a single user
$ trcli parse_junit -f results.xml --assign user@example.com \
--host https://yourinstance.testrail.io --username <your_username> --password <your_password> \
--project "Your Project"
# Assign failed tests to multiple users (round-robin distribution)
$ trcli parse_junit -f results.xml --assign "user1@example.com,user2@example.com,user3@example.com" \
--host https://yourinstance.testrail.io --username <your_username> --password <your_password> \
--project "Your Project"
# Short form using -a
$ trcli parse_junit -f results.xml -a user@example.com \
--host https://yourinstance.testrail.io --username <your_username> --password <your_password> \
--project "Your Project"
```
#### Example Output
```shell
Parser Results Execution Parameters
> Report file: results.xml
> Config file: /path/to/config.yml
> TestRail instance: https://yourinstance.testrail.io (user: your@email.com)
> Project: Your Project
> Run title: Automated Test Run
> Update run: No
> Add to milestone: No
> Auto-assign failures: Yes (user1@example.com,user2@example.com)
> Auto-create entities: True
Creating test run. Done.
Adding results: 100%|████████████| 25/25 [00:02<00:00, 12.5results/s]
Assigning failed results: 3/3, Done.
Submitted 25 test results in 2.1 secs.
```
### Exploring other features

@@ -1046,3 +1092,3 @@

$ trcli add_run --help
TestRail CLI v1.12.0
TestRail CLI v1.12.1
Copyright 2025 Gurock Software GmbH - www.gurock.com

@@ -1171,3 +1217,3 @@ Usage: trcli add_run [OPTIONS]

$ trcli parse_openapi --help
TestRail CLI v1.12.0
TestRail CLI v1.12.1
Copyright 2025 Gurock Software GmbH - www.gurock.com

@@ -1174,0 +1220,0 @@ Usage: trcli parse_openapi [OPTIONS]

@@ -39,2 +39,5 @@ import pytest

environment.case_matcher = MatchersParser.AUTO
environment.assign_failed_to = None
environment._has_invalid_users = False
environment._validated_user_ids = []

@@ -41,0 +44,0 @@ junit_file_parser = mocker.patch.object(JunitParser, "parse_file")

Metadata-Version: 2.4
Name: trcli
Version: 1.12.0
Version: 1.12.1
License-File: LICENSE.md

@@ -5,0 +5,0 @@ Requires-Dist: click<8.2.2,>=8.1.0

@@ -17,2 +17,3 @@ LICENSE.md

tests/test_load_data_from_config.py
tests/test_matchers_parser.py
tests/test_project_based_client.py

@@ -19,0 +20,0 @@ tests/test_response_verify.py

@@ -1,1 +0,1 @@

__version__ = "1.12.0"
__version__ = "1.12.1"

@@ -20,3 +20,3 @@ import time

self.skip_run = skip_run
if self.environment.special_parser == "saucectl":
if hasattr(self.environment, 'special_parser') and self.environment.special_parser == "saucectl":
self.run_name += f" ({suite.name})"

@@ -34,2 +34,11 @@

# Validate user emails early if --assign is specified
try:
assign_value = getattr(self.environment, 'assign_failed_to', None)
if assign_value is not None and str(assign_value).strip():
self._validate_and_store_user_ids()
except (AttributeError, TypeError):
# Skip validation if there are any issues with the assign_failed_to attribute
pass
self.resolve_project()

@@ -122,2 +131,3 @@ suite_id, suite_added = self.resolve_suite()

exit(1)
if self.environment.close_run:

@@ -134,3 +144,67 @@ self.environment.log("Closing test run. ", new_line=False)

self.environment.log(f"Submitted {results_amount} test results in {stop - start:.1f} secs.")
# Exit with error if there were invalid users (after processing valid ones)
try:
has_invalid = getattr(self.environment, '_has_invalid_users', False)
if has_invalid is True: # Explicitly check for True to avoid mock object issues
exit(1)
except (AttributeError, TypeError):
# Skip exit if there are any issues with the attribute
pass
def _validate_and_store_user_ids(self):
    """Resolve the ``--assign`` e-mail list into TestRail user IDs.

    Unknown addresses are reported via ``elog`` but processing continues as
    long as at least one address resolves; ``environment._has_invalid_users``
    is set so the caller can exit non-zero afterwards. Exits immediately
    (``exit(1)``) when no address resolves at all, or when a lookup fails
    for any reason other than a missing user.

    Stores the resolved IDs on ``environment._validated_user_ids``.
    """
    # Tolerate mock/partial environments: bail out quietly when the
    # attribute is missing, unset, or blank.
    try:
        raw_value = getattr(self.environment, 'assign_failed_to', None)
        if raw_value is None or not str(raw_value).strip():
            return
    except (AttributeError, TypeError):
        return
    text = str(raw_value)
    # Defensive re-check — the guard above already returned on blank input.
    if not text.strip():
        self.environment.elog("Error: --assign option requires at least one user email")
        exit(1)
    addresses = [chunk.strip() for chunk in text.split(',') if chunk.strip()]
    if not addresses:
        self.environment.elog("Error: --assign option requires at least one user email")
        exit(1)
    resolved_ids = []
    unknown_addresses = []
    for address in addresses:
        uid, err = self.api_request_handler.get_user_by_email(address)
        if uid is not None:
            resolved_ids.append(uid)
            continue
        unknown_addresses.append(address)
        # Anything other than a missing-user response is treated as a
        # fatal API problem and aborts right away.
        if "User not found" not in err:
            self.environment.elog(f"Error: {err}")
            exit(1)
    if unknown_addresses:
        for address in unknown_addresses:
            self.environment.elog(f"Error: User not found: {address}")
        # Flag the partial failure so the run can exit(1) after the valid
        # assignments have been processed.
        self.environment._has_invalid_users = True
        # Every address failed to resolve -> nothing to assign, abort now.
        if not resolved_ids:
            exit(1)
    # Keep the usable IDs for round-robin assignment later.
    self.environment._validated_user_ids = resolved_ids
def add_missing_sections(self, project_id: int) -> Tuple[List, int]:

@@ -137,0 +211,0 @@ """

@@ -72,3 +72,4 @@ import os

self.run_refs = None
self.proxy = None # Add proxy related attributes
self.proxy = None
self.assign_failed_to = None # Add proxy related attributes
self.noproxy = None

@@ -75,0 +76,0 @@ self.proxy_user = None

@@ -24,2 +24,8 @@ from xml.etree.ElementTree import ParseError

)
@click.option(
"-a", "--assign",
"assign_failed_to",
metavar="",
help="Comma-separated list of user emails to assign failed test results to."
)
@click.pass_context

@@ -26,0 +32,0 @@ @pass_environment

@@ -10,2 +10,3 @@ import functools

def print_config(env: Environment):
assign_info = f"Yes ({env.assign_failed_to})" if hasattr(env, 'assign_failed_to') and env.assign_failed_to and env.assign_failed_to.strip() else "No"
env.log(f"Parser Results Execution Parameters"

@@ -19,2 +20,3 @@ f"\n> Report file: {env.file}"

f"\n> Add to milestone: {env.milestone_id if env.milestone_id else 'No'}"
f"\n> Auto-assign failures: {assign_info}"
f"\n> Auto-create entities: {env.auto_creation_response}")

@@ -21,0 +23,0 @@

@@ -22,2 +22,3 @@ import re, ast

- "module 1 [C123] my test case"
- "my_test_case_C123()" (JUnit 5 support)

@@ -33,5 +34,6 @@ :param case_name: Name of the test case

id_part = part[1:]
if id_part.isnumeric():
id_part_clean = re.sub(r'\(.*\)$', '', id_part)
if id_part_clean.isnumeric():
parts_copy.pop(idx)
return int(id_part), char.join(parts_copy)
return int(id_part_clean), char.join(parts_copy)

@@ -38,0 +40,0 @@ results = re.findall(r"\[(.*?)\]", case_name)

@@ -122,3 +122,3 @@ from beartype.typing import List, Dict, Optional

def add_results_for_cases(self, bulk_size):
def add_results_for_cases(self, bulk_size, user_ids=None):
"""Return bodies for adding results for cases. Returns bodies for results that already have case ID."""

@@ -128,2 +128,5 @@ testcases = [sections.testcases for sections in self.suites_input.testsections]

bodies = []
user_index = 0
assigned_count = 0
total_failed_count = 0

@@ -134,4 +137,19 @@ for sublist in testcases:

case.result.add_global_result_fields(self.result_fields)
# Count failed tests
if case.result.status_id == 5: # status_id 5 = Failed
total_failed_count += 1
# Assign failed tests to users in round-robin fashion if user_ids provided
if user_ids:
case.result.assignedto_id = user_ids[user_index % len(user_ids)]
user_index += 1
assigned_count += 1
bodies.append(case.result.to_dict())
# Store counts for logging (we'll access this from the api_request_handler)
self._assigned_count = assigned_count if user_ids else 0
self._total_failed_count = total_failed_count
result_bulks = ApiDataProvider.divide_list_into_bulks(

@@ -138,0 +156,0 @@ bodies,

Sorry, the diff of this file is too big to display