You're Invited: Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26. RSVP
Socket
Book a DemoSign in
Socket

trcli

Package Overview
Dependencies
Maintainers
1
Versions
47
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

trcli - pypi Package Compare versions

Comparing version
1.12.4
to
1.12.5
+555
tests/test_api_request_handler_case_matcher.py
"""
Unit tests for NAME matcher optimization that skips fetching all cases.
Tests the performance optimization introduced to avoid downloading 165k+ cases
when using NAME or PROPERTY matcher, which only need to validate specific case IDs.
"""
import pytest
from unittest.mock import patch, MagicMock, call
from pathlib import Path
import json
from serde.json import from_json
from tests.helpers.api_client_helpers import TEST_RAIL_URL, create_url
from trcli.cli import Environment
from trcli.api.api_request_handler import ApiRequestHandler
from trcli.api.api_client import APIClient, APIClientResult
from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailSection, TestRailCase, TestRailResult
from trcli.data_classes.data_parsers import MatchersParser
@pytest.fixture
def environment():
    """Provide a minimal Environment configured for these tests."""
    test_env = Environment()
    test_env.project = "Test Project"
    test_env.batch_size = 10
    return test_env
@pytest.fixture
def api_client():
    """Provide an APIClient pointed at the test TestRail host."""
    client = APIClient(host_name=TEST_RAIL_URL)
    return client
def create_test_suite_with_case_ids(num_cases=10):
    """Build a TestRailSuite whose `num_cases` cases all carry sequential case IDs (1..num_cases)."""
    cases = [
        TestRailCase(
            case_id=idx,
            title=f"Test case {idx}",
            section_id=1,
            result=TestRailResult(case_id=idx, comment=f"Test result {idx}", elapsed="1s", status_id=1),
        )
        for idx in range(1, num_cases + 1)
    ]
    test_section = TestRailSection(name="Test Section", section_id=1, suite_id=1, testcases=cases)
    return TestRailSuite(name="Test Suite", suite_id=1, testsections=[test_section])
def create_test_suite_with_missing_case_ids(total_cases=10, missing_count=3):
    """Build a TestRailSuite of `total_cases` cases where the first `missing_count` lack a case ID."""
    cases = []
    for idx in range(1, total_cases + 1):
        # Cases 1..missing_count get no ID (None); the rest use their index as the ID.
        current_id = idx if idx > missing_count else None
        cases.append(
            TestRailCase(
                case_id=current_id,
                title=f"Test case {idx}",
                section_id=1,
                result=TestRailResult(case_id=current_id, comment=f"Test result {idx}", elapsed="1s", status_id=1),
            )
        )
    test_section = TestRailSection(name="Test Section", section_id=1, suite_id=1, testcases=cases)
    return TestRailSuite(name="Test Suite", suite_id=1, testsections=[test_section])
class TestNameMatcherOptimization:
    """Test suite for NAME matcher performance optimizations.

    These tests patch ApiRequestHandler's private helpers via their
    name-mangled attributes (`_ApiRequestHandler__get_all_cases`,
    `_ApiRequestHandler__validate_case_ids_exist`) to observe which
    strategy `check_missing_test_cases_ids` selects.
    """

    @pytest.mark.api_handler
    def test_name_matcher_skips_bulk_case_fetch(self, environment, api_client, mocker):
        """
        Test that NAME matcher does NOT fetch all cases from TestRail.
        This is the key optimization - we should skip the expensive get_all_cases call.
        """
        # Setup: NAME matcher with 100 test cases, all carrying case IDs
        environment.case_matcher = MatchersParser.NAME
        test_suite = create_test_suite_with_case_ids(num_cases=100)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Mock the get_all_cases method to track if it's called
        mock_get_all_cases = mocker.patch.object(
            api_request_handler, "_ApiRequestHandler__get_all_cases", return_value=([], None)
        )
        # Mock validation to return all IDs as valid (skip actual validation)
        mocker.patch.object(
            api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 101))
        )
        # Execute
        project_id = 1
        missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id)
        # Assert: get_all_cases should NOT have been called for NAME matcher
        mock_get_all_cases.assert_not_called()
        assert not missing_ids, "Should not have missing IDs"
        assert error == "", "Should not have errors"

    @pytest.mark.api_handler
    def test_auto_matcher_still_fetches_all_cases(self, environment, api_client, mocker):
        """
        Test that AUTO matcher STILL fetches all cases (required for automation ID lookup).
        This ensures we didn't break the AUTO matcher functionality.
        """
        # Setup: AUTO matcher
        environment.case_matcher = MatchersParser.AUTO
        test_suite = create_test_suite_with_case_ids(num_cases=10)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Mock get_all_cases to return some cases with automation IDs
        mock_cases = [
            {"id": i, "custom_automation_id": f"test{i}", "title": f"Test {i}", "section_id": 1} for i in range(1, 11)
        ]
        mock_get_all_cases = mocker.patch.object(
            api_request_handler, "_ApiRequestHandler__get_all_cases", return_value=(mock_cases, None)
        )
        # AUTO matcher pushes matched data back through the data provider; stub it out
        mocker.patch.object(api_request_handler.data_provider, "update_data")
        # Execute
        project_id = 1
        api_request_handler.check_missing_test_cases_ids(project_id)
        # Assert: get_all_cases SHOULD be called for AUTO matcher (project_id=1, suite_id=1)
        mock_get_all_cases.assert_called_once_with(project_id, 1)

    @pytest.mark.api_handler
    def test_name_matcher_skips_validation_for_large_batches(self, environment, api_client, mocker):
        """
        Test that validation is SKIPPED when:
        - Using NAME matcher
        - All tests have case IDs (no missing)
        - More than 1000 case IDs (large batch)
        """
        # Setup: NAME matcher with 2000 test cases (> 1000 threshold)
        environment.case_matcher = MatchersParser.NAME
        test_suite = create_test_suite_with_case_ids(num_cases=2000)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Mock validation method to track if it's called
        mock_validate = mocker.patch.object(
            api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 2001))
        )
        mock_log = mocker.patch.object(environment, "log")
        # Execute
        project_id = 1
        missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id)
        # Assert: Validation should be SKIPPED for large batches
        mock_validate.assert_not_called()
        # Should log that validation was skipped
        # (NOTE: the loop variable shadows unittest.mock.call imported at module level)
        skip_log_calls = [call for call in mock_log.call_args_list if "Skipping validation" in str(call)]
        assert len(skip_log_calls) > 0, "Should log that validation was skipped"
        assert not missing_ids, "Should not have missing IDs"
        assert error == "", "Should not have errors"

    @pytest.mark.api_handler
    def test_name_matcher_validates_small_batches(self, environment, api_client, mocker):
        """
        Test that validation RUNS when:
        - Using NAME matcher
        - Less than 1000 case IDs (small batch)
        """
        # Setup: NAME matcher with 500 test cases (< 1000 threshold)
        environment.case_matcher = MatchersParser.NAME
        test_suite = create_test_suite_with_case_ids(num_cases=500)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Mock validation method to track if it's called
        mock_validate = mocker.patch.object(
            api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 501))
        )
        # Execute
        project_id = 1
        missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id)
        # Assert: Validation SHOULD run for small batches
        mock_validate.assert_called_once()
        assert not missing_ids, "Should not have missing IDs"
        assert error == "", "Should not have errors"

    @pytest.mark.api_handler
    def test_name_matcher_fetches_all_cases_for_large_report_with_missing_ids(self, environment, api_client, mocker):
        """
        Test that for large reports with missing IDs, we FETCH ALL CASES instead of individual validation.
        This is the new optimized behavior:
        - Using NAME matcher
        - Large report (>=1000 total cases)
        - Some tests are missing case IDs
        Strategy: Fetch all cases once (e.g., 660 calls for 165k cases) is more efficient than
        individual validation (e.g., 1500 calls for 1500 cases in report).
        """
        # Setup: 1500 total cases, 3 missing IDs (total >= 1000 threshold)
        environment.case_matcher = MatchersParser.NAME
        test_suite = create_test_suite_with_missing_case_ids(total_cases=1500, missing_count=3)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Mock get_all_cases to return all case IDs 4-1500 (cases 1-3 don't exist, matching missing IDs)
        mock_get_all_cases = mocker.patch.object(
            api_request_handler,
            "_ApiRequestHandler__get_all_cases",
            return_value=([{"id": i} for i in range(4, 1501)], None),
        )
        # Mock individual validation - should NOT be called for large reports
        mock_validate = mocker.patch.object(
            api_request_handler,
            "_ApiRequestHandler__validate_case_ids_exist",
            return_value=set(range(4, 1501)),
        )
        mock_log = mocker.patch.object(environment, "log")
        # Execute
        project_id = 1
        missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id)
        # Assert: Should FETCH ALL CASES for large reports with missing IDs
        mock_get_all_cases.assert_called_once_with(project_id, 1)
        # Should NOT use individual validation
        mock_validate.assert_not_called()
        # Should log that it's using fetch-all strategy
        fetch_log_calls = [call for call in mock_log.call_args_list if "Fetching all cases" in str(call)]
        assert len(fetch_log_calls) > 0, "Should log that fetch-all strategy is being used"
        # Should log that missing cases were found
        missing_log_calls = [call for call in mock_log.call_args_list if "without case ID" in str(call)]
        assert len(missing_log_calls) > 0, "Should log missing case IDs"
        assert missing_ids, "Should have missing IDs"
        assert error == "", "Should not have errors"

    @pytest.mark.api_handler
    def test_name_matcher_validates_individually_for_small_report_with_missing_ids(
        self, environment, api_client, mocker
    ):
        """
        Test that for small reports with missing IDs, we use INDIVIDUAL validation.
        - Using NAME matcher
        - Small report (<1000 total cases)
        - Some tests are missing case IDs
        Strategy: Individual validation (e.g., 500 calls) is more efficient than
        fetch all (e.g., 660 calls for 165k cases).
        """
        # Setup: 500 total cases, 10 missing IDs (total < 1000 threshold)
        environment.case_matcher = MatchersParser.NAME
        test_suite = create_test_suite_with_missing_case_ids(total_cases=500, missing_count=10)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Mock individual validation
        mock_validate = mocker.patch.object(
            api_request_handler,
            "_ApiRequestHandler__validate_case_ids_exist",
            return_value=set(range(11, 501)),  # Exclude the 10 missing (1-10)
        )
        # Mock get_all_cases - should NOT be called for small reports
        mock_get_all_cases = mocker.patch.object(
            api_request_handler,
            "_ApiRequestHandler__get_all_cases",
            return_value=([], None),
        )
        mock_log = mocker.patch.object(environment, "log")
        # Execute
        project_id = 1
        missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id)
        # Assert: Should use INDIVIDUAL validation for small reports
        mock_validate.assert_called_once()
        # Should NOT fetch all cases
        mock_get_all_cases.assert_not_called()
        # Should log that missing cases were found
        missing_log_calls = [call for call in mock_log.call_args_list if "without case ID" in str(call)]
        assert len(missing_log_calls) > 0, "Should log missing case IDs"
        assert missing_ids, "Should have missing IDs"
        assert error == "", "Should not have errors"

    @pytest.mark.api_handler
    def test_name_matcher_detects_nonexistent_case_ids(self, environment, api_client, mocker):
        """
        Test that NAME matcher correctly detects case IDs that don't exist in TestRail.
        """
        # Setup: Test suite with case IDs 1-10
        environment.case_matcher = MatchersParser.NAME
        test_suite = create_test_suite_with_case_ids(num_cases=10)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Mock validation: Only IDs 1-5 exist, 6-10 don't exist
        mock_validate = mocker.patch.object(
            api_request_handler,
            "_ApiRequestHandler__validate_case_ids_exist",
            return_value=set(range(1, 6)),  # Only 1-5 exist
        )
        # elog is the environment's error-logging channel
        mock_elog = mocker.patch.object(environment, "elog")
        # Execute
        project_id = 1
        missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id)
        # Assert: Should detect nonexistent IDs
        mock_validate.assert_called_once()
        mock_elog.assert_called_once()
        # Check error message contains nonexistent IDs
        error_call = mock_elog.call_args[0][0]
        assert "Nonexistent case IDs" in error_call
        assert "6" in error_call or "7" in error_call  # At least some of the missing IDs
        # missing_ids tracks tests lacking IDs in the report, not nonexistent IDs
        assert not missing_ids, "missing_ids refers to tests without IDs in report"
        assert error != "", "Should have error about nonexistent IDs"
class TestValidateCaseIdsExist:
    """Test the __validate_case_ids_exist helper method.

    The helper is private, so tests invoke it through its name-mangled
    attribute `_ApiRequestHandler__validate_case_ids_exist`. HTTP traffic
    is stubbed with the `requests_mock` fixture.
    """

    @pytest.mark.api_handler
    def test_validate_empty_list(self, environment, api_client):
        """Test that empty list returns empty set"""
        test_suite = create_test_suite_with_case_ids(num_cases=1)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[])
        assert result == set(), "Empty list should return empty set"

    @pytest.mark.api_handler
    def test_validate_small_batch_sequential(self, environment, api_client, requests_mock):
        """
        Test validation of small batch (<=50 cases) uses sequential validation.
        """
        test_suite = create_test_suite_with_case_ids(num_cases=1)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Mock get_case responses for IDs 1-10
        for i in range(1, 11):
            requests_mock.get(create_url(f"get_case/{i}"), json={"id": i, "suite_id": 1, "title": f"Case {i}"})
        # Add one non-existent case (returns 404)
        requests_mock.get(create_url("get_case/999"), status_code=404, json={"error": "Not found"})
        result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(
            suite_id=1, case_ids=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]
        )
        # Should return 1-10 (11 total requested, 1 doesn't exist)
        assert result == set(range(1, 11)), "Should validate existing cases"
        assert 999 not in result, "Non-existent case should not be in result"

    @pytest.mark.api_handler
    def test_validate_large_batch_concurrent(self, environment, api_client, requests_mock):
        """
        Test validation of large batch (>50 cases) uses concurrent validation.
        """
        test_suite = create_test_suite_with_case_ids(num_cases=1)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Mock 100 case responses
        for i in range(1, 101):
            requests_mock.get(create_url(f"get_case/{i}"), json={"id": i, "suite_id": 1, "title": f"Case {i}"})
        result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(
            suite_id=1, case_ids=list(range(1, 101))
        )
        # Should validate all 100 cases concurrently
        assert result == set(range(1, 101)), "Should validate all cases"
        assert len(result) == 100

    @pytest.mark.api_handler
    def test_validate_filters_wrong_suite(self, environment, api_client, requests_mock):
        """
        Test that validation filters out cases belonging to different suite.
        """
        test_suite = create_test_suite_with_case_ids(num_cases=1)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Case 1 belongs to suite 1 (correct)
        requests_mock.get(create_url("get_case/1"), json={"id": 1, "suite_id": 1, "title": "Case 1"})
        # Case 2 belongs to suite 2 (wrong suite)
        requests_mock.get(create_url("get_case/2"), json={"id": 2, "suite_id": 2, "title": "Case 2"})
        # Case 3 belongs to suite 1 (correct)
        requests_mock.get(create_url("get_case/3"), json={"id": 3, "suite_id": 1, "title": "Case 3"})
        result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[1, 2, 3])
        # Should only return cases from suite 1
        assert result == {1, 3}, "Should filter out case from wrong suite"
        assert 2 not in result, "Case from wrong suite should be excluded"

    @pytest.mark.api_handler
    def test_validate_handles_api_errors(self, environment, api_client, requests_mock):
        """
        Test that validation gracefully handles API errors (404, 500, etc).
        """
        test_suite = create_test_suite_with_case_ids(num_cases=1)
        api_request_handler = ApiRequestHandler(environment, api_client, test_suite)
        # Case 1: Success
        requests_mock.get(create_url("get_case/1"), json={"id": 1, "suite_id": 1, "title": "Case 1"})
        # Case 2: 404 Not Found
        requests_mock.get(create_url("get_case/2"), status_code=404, json={"error": "Not found"})
        # Case 3: 500 Server Error
        requests_mock.get(create_url("get_case/3"), status_code=500, json={"error": "Internal error"})
        # Case 4: Success
        requests_mock.get(create_url("get_case/4"), json={"id": 4, "suite_id": 1, "title": "Case 4"})
        result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[1, 2, 3, 4])
        # Should return only successful cases
        assert result == {1, 4}, "Should only return successfully validated cases"
class TestPerformanceComparison:
    """Tests demonstrating the performance improvement.

    These are documentation-style tests: they assert the strategy chosen
    (which mocked helpers were/weren't called) and print a human-readable
    comparison summary (visible with `pytest -s`).
    """

    @pytest.mark.api_handler
    def test_performance_auto_vs_name_matcher(self, environment, api_client, mocker):
        """
        Demonstrate that NAME matcher makes fewer API calls than AUTO matcher.
        This is a documentation test showing the optimization benefit.
        Scenario: Large report with all case IDs present (best case for NAME matcher)
        """
        # Test AUTO matcher (always fetches all cases)
        environment.case_matcher = MatchersParser.AUTO
        test_suite_auto = create_test_suite_with_case_ids(num_cases=2000)
        api_request_handler_auto = ApiRequestHandler(environment, api_client, test_suite_auto)
        mock_get_all_cases_auto = mocker.patch.object(
            api_request_handler_auto,
            "_ApiRequestHandler__get_all_cases",
            return_value=([{"id": i, "custom_automation_id": f"test{i}"} for i in range(1, 2001)], None),
        )
        mocker.patch.object(api_request_handler_auto.data_provider, "update_data")
        api_request_handler_auto.check_missing_test_cases_ids(project_id=1)
        # AUTO matcher should call get_all_cases
        assert mock_get_all_cases_auto.call_count == 1, "AUTO matcher fetches all cases"
        # Test NAME matcher with all IDs present (best case - skips validation).
        # A fresh Environment is built here rather than reusing the fixture,
        # because the fixture instance was already configured for AUTO above.
        env_name = Environment()
        env_name.project = "Test Project"
        env_name.batch_size = 10
        env_name.case_matcher = MatchersParser.NAME
        test_suite_name = create_test_suite_with_case_ids(num_cases=2000)
        api_request_handler_name = ApiRequestHandler(env_name, api_client, test_suite_name)
        mock_get_all_cases_name = mocker.patch.object(
            api_request_handler_name, "_ApiRequestHandler__get_all_cases", return_value=([], None)
        )
        mock_validate_name = mocker.patch.object(
            api_request_handler_name, "_ApiRequestHandler__validate_case_ids_exist", return_value=set()
        )
        mocker.patch.object(env_name, "log")
        api_request_handler_name.check_missing_test_cases_ids(project_id=1)
        # NAME matcher should NOT call get_all_cases when all IDs present and report >= 1000
        mock_get_all_cases_name.assert_not_called()
        # Should also not call individual validation
        mock_validate_name.assert_not_called()
        print("\n" + "=" * 60)
        print("PERFORMANCE COMPARISON")
        print("=" * 60)
        print(f"AUTO matcher: {mock_get_all_cases_auto.call_count} get_all_cases calls")
        print(f"NAME matcher: {mock_get_all_cases_name.call_count} get_all_cases calls")
        print(f"Improvement: {mock_get_all_cases_auto.call_count - mock_get_all_cases_name.call_count} fewer calls")
        print("=" * 60)

    @pytest.mark.api_handler
    def test_performance_name_matcher_with_missing_ids(self, environment, api_client, mocker):
        """
        Demonstrate smart strategy selection for NAME matcher with large reports containing missing IDs.
        Scenario: 5000 cases in report, 100 missing IDs
        - Individual validation: 5000 API calls
        - Fetch all + validate locally: ~660 API calls (for 165k cases in TestRail)
        Strategy: Fetch all is more efficient
        """
        # Fresh Environment configured directly for the NAME matcher
        env = Environment()
        env.project = "Test Project"
        env.batch_size = 10
        env.case_matcher = MatchersParser.NAME
        # 5000 cases, 100 missing IDs
        test_suite = create_test_suite_with_missing_case_ids(total_cases=5000, missing_count=100)
        api_request_handler = ApiRequestHandler(env, api_client, test_suite)
        # Mock get_all_cases to simulate fetching 165k cases
        mock_get_all_cases = mocker.patch.object(
            api_request_handler,
            "_ApiRequestHandler__get_all_cases",
            return_value=([{"id": i} for i in range(101, 5001)], None),  # Cases 101-5000 exist
        )
        # Mock individual validation - should NOT be called
        mock_validate = mocker.patch.object(
            api_request_handler,
            "_ApiRequestHandler__validate_case_ids_exist",
            return_value=set(range(101, 5001)),
        )
        mocker.patch.object(env, "log")
        api_request_handler.check_missing_test_cases_ids(project_id=1)
        # Should use fetch-all strategy (more efficient for large reports)
        mock_get_all_cases.assert_called_once()
        mock_validate.assert_not_called()
        print("\n" + "=" * 60)
        print("LARGE REPORT WITH MISSING IDS")
        print("=" * 60)
        print(f"Report size: 5000 cases, 100 missing IDs")
        print(f"Strategy chosen: Fetch all cases")
        print(f"API calls: 1 fetch (simulates ~660 paginated calls)")
        print(f"Alternative: 4900 individual validation calls")
        print(f"Efficiency: ~7.4x fewer calls")
        print("=" * 60)
if __name__ == "__main__":
    # Allow running this module directly: verbose output, stdout capture disabled.
    pytest.main([__file__, "-v", "-s"])
+1
-1
Metadata-Version: 2.4
Name: trcli
Version: 1.12.4
Version: 1.12.5
License-File: LICENSE.md

@@ -5,0 +5,0 @@ Requires-Dist: click<8.2.2,>=8.1.0

@@ -36,3 +36,3 @@ ![Tests](https://github.com/gurock/trcli/actions/workflows/python-app.yml/badge.svg)

```
TestRail CLI v1.12.4
TestRail CLI v1.12.5
Copyright 2025 Gurock Software GmbH - www.gurock.com

@@ -51,3 +51,3 @@ Supported and loaded modules:

$ trcli --help
TestRail CLI v1.12.4
TestRail CLI v1.12.5
Copyright 2025 Gurock Software GmbH - www.gurock.com

@@ -84,2 +84,4 @@ Usage: trcli [OPTIONS] COMMAND [ARGS]...

(e.g., localhost,127.0.0.1).
--parallel-pagination Enable parallel pagination for faster case fetching
(experimental).
--help Show this message and exit.

@@ -1100,3 +1102,3 @@

$ trcli add_run --help
TestRail CLI v1.12.4
TestRail CLI v1.12.5
Copyright 2025 Gurock Software GmbH - www.gurock.com

@@ -1225,3 +1227,3 @@ Usage: trcli add_run [OPTIONS]

$ trcli parse_openapi --help
TestRail CLI v1.12.4
TestRail CLI v1.12.5
Copyright 2025 Gurock Software GmbH - www.gurock.com

@@ -1349,3 +1351,47 @@ Usage: trcli parse_openapi [OPTIONS]

### Parallel Pagination (Experimental)
The TestRail CLI includes an experimental `--parallel-pagination` option that significantly improves performance when fetching large numbers of test cases from TestRail. This feature uses parallel fetching to retrieve multiple pages of results concurrently, rather than fetching them sequentially.
#### When to Use Parallel Pagination
Use `--parallel-pagination` when:
- Working with projects that have thousands of test cases
- Fetching test cases takes a long time during operations
- You need faster case matching and validation during result uploads
#### How It Works
When enabled, parallel pagination:
1. Fetches the first page to determine total pages available
2. Uses a thread pool (default: 10 workers set by `MAX_WORKERS_PARALLEL_PAGINATION` in `trcli/settings.py`) to fetch remaining pages concurrently
3. Automatically handles batching to avoid overwhelming the server
4. Combines all results efficiently for processing
#### Usage
Enable parallel pagination by adding the `--parallel-pagination` flag to any command:
```shell
# Enable parallel pagination for faster case fetching during result upload
$ trcli parse_junit -f results.xml --parallel-pagination \
--host https://yourinstance.testrail.io --username <your_username> --password <your_password> \
--project "Your Project"
# Example with parse_robot
$ trcli parse_robot -f output.xml --parallel-pagination \
--host https://yourinstance.testrail.io --username <your_username> --password <your_password> \
--project "Your Project"
```
You can also enable this feature globally by setting `ENABLE_PARALLEL_PAGINATION = True` in `trcli/settings.py`. The CLI flag takes precedence over the settings file.
#### Performance Considerations
- This feature is most beneficial when dealing with large test case repositories (1000+ cases)
- The default worker count is set to 10, which provides a good balance between speed and server load
- For smaller projects with few test cases, the performance improvement may be negligible
- This is an experimental feature - please report any issues you encounter
Contributing

@@ -1352,0 +1398,0 @@ ------------

@@ -24,3 +24,2 @@ import pytest

# Load test data

@@ -31,3 +30,3 @@ json_path = Path(__file__).parent / "test_data/json/api_request_handler.json"

test_input = from_json(TestRailSuite, json_string)
api_request = ApiRequestHandler(environment, api_client, test_input, verify=False)

@@ -44,51 +43,38 @@ return api_request

mock_response = APIClientResult(
status_code=200,
response_text={"id": 1, "title": "Test Label"},
error_message=None
status_code=200, response_text={"id": 1, "title": "Test Label"}, error_message=None
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
result, error = labels_handler.add_label(project_id=1, title="Test Label")
assert error is None
assert result["id"] == 1
assert result["title"] == "Test Label"
# Verify the API call was made with correct parameters
labels_handler.client.send_post.assert_called_once_with(
"add_label/1",
payload=None,
files={'title': (None, "Test Label")}
)
labels_handler.client.send_post.assert_called_once_with("add_label/1", payload={"title": "Test Label"})
def test_add_label_api_error(self, labels_handler):
"""Test label addition with API error"""
mock_response = APIClientResult(
status_code=400,
response_text=None,
error_message="Label title already exists"
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
mock_response = APIClientResult(status_code=400, response_text=None, error_message="Label title already exists")
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
result, error = labels_handler.add_label(project_id=1, title="Duplicate Label")
assert error == "Label title already exists"
assert result is None
def test_add_label_multipart_format(self, labels_handler):
"""Test label addition uses multipart/form-data format"""
def test_add_label_json_format(self, labels_handler):
"""Test label addition uses JSON format"""
mock_response = APIClientResult(
status_code=200,
response_text={"id": 1, "title": "Test Label"},
error_message=None
status_code=200, response_text={"id": 1, "title": "Test Label"}, error_message=None
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
result, error = labels_handler.add_label(project_id=1, title="Test Label")
assert error is None
# Verify multipart/form-data format is used
# Verify JSON format is used
call_args = labels_handler.client.send_post.call_args
assert call_args[1]['payload'] is None
assert call_args[1]['files'] == {'title': (None, "Test Label")}
assert call_args[1]["payload"] == {"title": "Test Label"}

@@ -98,21 +84,15 @@ def test_update_label_success(self, labels_handler):

mock_response = APIClientResult(
status_code=200,
response_text={"id": 1, "title": "Updated Label"},
error_message=None
status_code=200, response_text={"id": 1, "title": "Updated Label"}, error_message=None
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
result, error = labels_handler.update_label(
label_id=1, project_id=1, title="Updated Label"
)
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
result, error = labels_handler.update_label(label_id=1, project_id=1, title="Updated Label")
assert error is None
assert result["id"] == 1
assert result["title"] == "Updated Label"
# Verify the API call was made with correct parameters
labels_handler.client.send_post.assert_called_once_with(
"update_label/1",
payload=None,
files={'project_id': (None, '1'), 'title': (None, "Updated Label")}
"update_label/1", payload={"project_id": 1, "title": "Updated Label"}
)

@@ -122,13 +102,7 @@

"""Test label update with API error"""
mock_response = APIClientResult(
status_code=403,
response_text=None,
error_message="No access to the project"
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
result, error = labels_handler.update_label(
label_id=1, project_id=1, title="Updated Label"
)
mock_response = APIClientResult(status_code=403, response_text=None, error_message="No access to the project")
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
result, error = labels_handler.update_label(label_id=1, project_id=1, title="Updated Label")
assert error == "No access to the project"

@@ -141,14 +115,9 @@ assert result is None

status_code=200,
response_text={
"id": 1,
"title": "Test Label",
"created_by": "2",
"created_on": "1234567890"
},
error_message=None
response_text={"id": 1, "title": "Test Label", "created_by": "2", "created_on": "1234567890"},
error_message=None,
)
with patch.object(labels_handler.client, 'send_get', return_value=mock_response):
with patch.object(labels_handler.client, "send_get", return_value=mock_response):
result, error = labels_handler.get_label(label_id=1)
assert error is None

@@ -158,3 +127,3 @@ assert result["id"] == 1

assert result["created_by"] == "2"
# Verify the API call was made with correct parameters

@@ -165,11 +134,7 @@ labels_handler.client.send_get.assert_called_once_with("get_label/1")

"""Test single label retrieval when label not found"""
mock_response = APIClientResult(
status_code=400,
response_text=None,
error_message="Label not found"
)
with patch.object(labels_handler.client, 'send_get', return_value=mock_response):
mock_response = APIClientResult(status_code=400, response_text=None, error_message="Label not found")
with patch.object(labels_handler.client, "send_get", return_value=mock_response):
result, error = labels_handler.get_label(label_id=999)
assert error == "Label not found"

@@ -189,11 +154,11 @@ assert result is None

{"id": 1, "title": "Label 1", "created_by": "2", "created_on": "1234567890"},
{"id": 2, "title": "Label 2", "created_by": "3", "created_on": "1234567891"}
]
{"id": 2, "title": "Label 2", "created_by": "3", "created_on": "1234567891"},
],
},
error_message=None
error_message=None,
)
with patch.object(labels_handler.client, 'send_get', return_value=mock_response):
with patch.object(labels_handler.client, "send_get", return_value=mock_response):
result, error = labels_handler.get_labels(project_id=1)
assert error is None

@@ -204,3 +169,3 @@ assert result["size"] == 2

assert result["labels"][1]["id"] == 2
# Verify the API call was made with correct parameters

@@ -213,15 +178,9 @@ labels_handler.client.send_get.assert_called_once_with("get_labels/1")

status_code=200,
response_text={
"offset": 10,
"limit": 5,
"size": 0,
"_links": {"next": None, "prev": None},
"labels": []
},
error_message=None
response_text={"offset": 10, "limit": 5, "size": 0, "_links": {"next": None, "prev": None}, "labels": []},
error_message=None,
)
with patch.object(labels_handler.client, 'send_get', return_value=mock_response):
with patch.object(labels_handler.client, "send_get", return_value=mock_response):
result, error = labels_handler.get_labels(project_id=1, offset=10, limit=5)
assert error is None

@@ -231,3 +190,3 @@ assert result["offset"] == 10

assert len(result["labels"]) == 0
# Verify the API call was made with pagination parameters

@@ -240,14 +199,9 @@ labels_handler.client.send_get.assert_called_once_with("get_labels/1&offset=10&limit=5")

status_code=200,
response_text={
"offset": 0,
"limit": 250,
"size": 1,
"labels": [{"id": 1, "title": "Label 1"}]
},
error_message=None
response_text={"offset": 0, "limit": 250, "size": 1, "labels": [{"id": 1, "title": "Label 1"}]},
error_message=None,
)
with patch.object(labels_handler.client, 'send_get', return_value=mock_response):
with patch.object(labels_handler.client, "send_get", return_value=mock_response):
result, error = labels_handler.get_labels(project_id=1, offset=0, limit=250)
assert error is None

@@ -259,11 +213,7 @@ # Should call without pagination parameters since they're defaults

"""Test labels listing with API error"""
mock_response = APIClientResult(
status_code=403,
response_text=None,
error_message="No access to the project"
)
with patch.object(labels_handler.client, 'send_get', return_value=mock_response):
mock_response = APIClientResult(status_code=403, response_text=None, error_message="No access to the project")
with patch.object(labels_handler.client, "send_get", return_value=mock_response):
result, error = labels_handler.get_labels(project_id=1)
assert error == "No access to the project"

@@ -274,70 +224,46 @@ assert result is None

"""Test successful label deletion"""
mock_response = APIClientResult(
status_code=200,
response_text="Success",
error_message=None
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
mock_response = APIClientResult(status_code=200, response_text="Success", error_message=None)
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
success, error = labels_handler.delete_labels(label_ids=[1, 2, 3])
assert success is True
assert error is None
# Verify the API call was made with correct parameters
labels_handler.client.send_post.assert_called_once_with(
"delete_labels",
payload=None,
files={"label_ids": (None, "[1, 2, 3]")}
)
labels_handler.client.send_post.assert_called_once_with("delete_labels", payload={"label_ids": [1, 2, 3]})
def test_delete_label_single_id(self, labels_handler):
"""Test single label deletion"""
mock_response = APIClientResult(
status_code=200,
response_text="Success",
error_message=None
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
mock_response = APIClientResult(status_code=200, response_text="Success", error_message=None)
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
success, error = labels_handler.delete_label(label_id=1)
assert success is True
assert error is None
labels_handler.client.send_post.assert_called_once_with(
"delete_label/1"
)
labels_handler.client.send_post.assert_called_once_with("delete_label/1")
def test_delete_labels_batch(self, labels_handler):
"""Test batch label deletion with multiple IDs"""
mock_response = APIClientResult(
status_code=200,
response_text="Success",
error_message=None
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
mock_response = APIClientResult(status_code=200, response_text="Success", error_message=None)
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
success, error = labels_handler.delete_labels(label_ids=[1, 2, 3])
assert success is True
assert error is None
labels_handler.client.send_post.assert_called_once_with(
"delete_labels",
payload=None,
files={"label_ids": (None, "[1, 2, 3]")}
)
labels_handler.client.send_post.assert_called_once_with("delete_labels", payload={"label_ids": [1, 2, 3]})
def test_delete_labels_api_error(self, labels_handler):
"""Test label deletion with API error"""
mock_response = APIClientResult(
status_code=400,
response_text=None,
error_message="One or more labels not found"
status_code=400, response_text=None, error_message="One or more labels not found"
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
success, error = labels_handler.delete_labels(label_ids=[999, 1000])
assert success is False

@@ -348,13 +274,9 @@ assert error == "One or more labels not found"

"""Test label deletion with forbidden access"""
mock_response = APIClientResult(
status_code=403,
response_text=None,
error_message="No access to the project"
)
with patch.object(labels_handler.client, 'send_post', return_value=mock_response):
mock_response = APIClientResult(status_code=403, response_text=None, error_message="No access to the project")
with patch.object(labels_handler.client, "send_post", return_value=mock_response):
success, error = labels_handler.delete_labels(label_ids=[1])
assert success is False
assert error == "No access to the project"
assert error == "No access to the project"

@@ -364,3 +286,3 @@

"""Test cases for test case label operations"""
def setup_method(self):

@@ -373,60 +295,58 @@ """Set up test fixtures"""

environment.batch_size = 10
# Create a minimal TestRailSuite for testing
from trcli.data_classes.dataclass_testrail import TestRailSuite
test_suite = TestRailSuite(name="Test Suite")
self.labels_handler = ApiRequestHandler(environment, api_client, test_suite, verify=False)
def test_add_labels_to_cases_success(self):
"""Test successful addition of labels to test cases"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \
patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \
patch.object(self.labels_handler, 'add_label') as mock_add_label, \
patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \
patch.object(self.labels_handler.client, 'send_post') as mock_send_post:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object(
self.labels_handler, "get_labels"
) as mock_get_labels, patch.object(self.labels_handler, "add_label") as mock_add_label, patch.object(
self.labels_handler.client, "send_get"
) as mock_send_get, patch.object(
self.labels_handler.client, "send_post"
) as mock_send_post:
# Mock __get_all_cases response (cases exist)
mock_get_cases.return_value = ([
{"id": 1, "title": "Case 1", "suite_id": 1},
{"id": 2, "title": "Case 2", "suite_id": 1}
], "")
mock_get_cases.return_value = (
[{"id": 1, "title": "Case 1", "suite_id": 1}, {"id": 2, "title": "Case 2", "suite_id": 1}],
"",
)
# Mock get_labels response (label doesn't exist)
mock_get_labels.return_value = ({"labels": []}, "")
# Mock add_label response (create new label)
mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "")
# Mock get_case responses
mock_send_get.side_effect = [
MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}), # Case 1
MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}) # Case 2
MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}), # Case 2
]
# Mock update_cases batch response (for multiple cases)
mock_send_post.return_value = MagicMock(status_code=200)
# Test the method
results, error_message = self.labels_handler.add_labels_to_cases(
case_ids=[1, 2],
title="test-label",
project_id=1
case_ids=[1, 2], title="test-label", project_id=1
)
# Verify no error
assert error_message == ""
# Verify results
assert len(results['successful_cases']) == 2
assert len(results['failed_cases']) == 0
assert len(results['max_labels_reached']) == 0
assert len(results['case_not_found']) == 0
assert len(results["successful_cases"]) == 2
assert len(results["failed_cases"]) == 0
assert len(results["max_labels_reached"]) == 0
assert len(results["case_not_found"]) == 0
# Verify API calls - should be called twice: once for multi-suite detection, once for case validation
assert mock_get_cases.call_count == 2
mock_get_cases.assert_has_calls([
call(1, None), # Multi-suite detection
call(1, None) # Case validation
])
mock_get_cases.assert_has_calls([call(1, None), call(1, None)]) # Multi-suite detection # Case validation
mock_get_labels.assert_called_once_with(1)

@@ -436,51 +356,45 @@ mock_add_label.assert_called_once_with(1, "test-label")

# Should call update_cases/{suite_id} once for batch update
mock_send_post.assert_called_once_with("update_cases/1", payload={
'case_ids': [1, 2],
'labels': [5]
})
mock_send_post.assert_called_once_with("update_cases/1", payload={"case_ids": [1, 2], "labels": [5]})
def test_add_labels_to_cases_single_case(self):
"""Test adding labels to a single test case using update_case endpoint"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \
patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \
patch.object(self.labels_handler, 'add_label') as mock_add_label, \
patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \
patch.object(self.labels_handler.client, 'send_post') as mock_send_post:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object(
self.labels_handler, "get_labels"
) as mock_get_labels, patch.object(self.labels_handler, "add_label") as mock_add_label, patch.object(
self.labels_handler.client, "send_get"
) as mock_send_get, patch.object(
self.labels_handler.client, "send_post"
) as mock_send_post:
# Mock __get_all_cases response (case exists)
mock_get_cases.return_value = ([
{"id": 1, "title": "Case 1"}
], "")
mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "")
# Mock get_labels response (label doesn't exist)
mock_get_labels.return_value = ({"labels": []}, "")
# Mock add_label response (create new label)
mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "")
# Mock get_case response
mock_send_get.return_value = MagicMock(
status_code=200,
response_text={"labels": [], "suite_id": 1, "title": "Case 1"}
status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}
)
# Mock update_case response (for single case)
mock_send_post.return_value = MagicMock(status_code=200)
# Test the method with single case
results, error_message = self.labels_handler.add_labels_to_cases(
case_ids=[1],
title="test-label",
project_id=1
case_ids=[1], title="test-label", project_id=1
)
# Verify no error
assert error_message == ""
# Verify results
assert len(results['successful_cases']) == 1
assert len(results['failed_cases']) == 0
assert len(results['max_labels_reached']) == 0
assert len(results['case_not_found']) == 0
assert len(results["successful_cases"]) == 1
assert len(results["failed_cases"]) == 0
assert len(results["max_labels_reached"]) == 0
assert len(results["case_not_found"]) == 0
# Verify API calls

@@ -492,142 +406,134 @@ assert mock_get_cases.call_count == 2

# Should call update_case/{case_id} once for single case
mock_send_post.assert_called_once_with("update_case/1", payload={'labels': [5]})
mock_send_post.assert_called_once_with("update_case/1", payload={"labels": [5]})
def test_add_labels_to_cases_existing_label(self):
"""Test adding labels when label already exists"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \
patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \
patch.object(self.labels_handler, 'add_label') as mock_add_label, \
patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \
patch.object(self.labels_handler.client, 'send_post') as mock_send_post:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object(
self.labels_handler, "get_labels"
) as mock_get_labels, patch.object(self.labels_handler, "add_label") as mock_add_label, patch.object(
self.labels_handler.client, "send_get"
) as mock_send_get, patch.object(
self.labels_handler.client, "send_post"
) as mock_send_post:
# Mock __get_all_cases response (case exists)
mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "")
# Mock get_labels response (label exists)
mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "")
# Mock get_case response
mock_send_get.return_value = MagicMock(status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"})
mock_send_get.return_value = MagicMock(
status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"}
)
# Mock add_label_to_case response
mock_send_post.return_value = MagicMock(status_code=200)
# Test the method
results, error_message = self.labels_handler.add_labels_to_cases(
case_ids=[1],
title="test-label",
project_id=1
case_ids=[1], title="test-label", project_id=1
)
# Verify no error
assert error_message == ""
# Verify results
assert len(results['successful_cases']) == 1
assert len(results['case_not_found']) == 0
assert len(results["successful_cases"]) == 1
assert len(results["case_not_found"]) == 0
# Verify add_label was not called (label already exists)
mock_add_label.assert_not_called()
def test_add_labels_to_cases_max_labels_reached(self):
"""Test handling of maximum labels limit (10)"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \
patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \
patch.object(self.labels_handler.client, 'send_get') as mock_send_get:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object(
self.labels_handler, "get_labels"
) as mock_get_labels, patch.object(self.labels_handler.client, "send_get") as mock_send_get:
# Mock __get_all_cases response (case exists)
mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "")
# Mock get_labels response
mock_get_labels.return_value = ({"labels": [{"id": 15, "title": "test-label"}]}, "")
# Mock get_case response with 10 existing labels (different from test-label)
existing_labels = [{"id": i, "title": f"label-{i}"} for i in range(1, 11)]
mock_send_get.return_value = MagicMock(
status_code=200,
response_text={"labels": existing_labels}
)
mock_send_get.return_value = MagicMock(status_code=200, response_text={"labels": existing_labels})
# Test the method
results, error_message = self.labels_handler.add_labels_to_cases(
case_ids=[1],
title="test-label",
project_id=1
case_ids=[1], title="test-label", project_id=1
)
# Verify no error
assert error_message == ""
# Verify results
assert len(results['successful_cases']) == 0
assert len(results['failed_cases']) == 0
assert len(results['max_labels_reached']) == 1
assert len(results['case_not_found']) == 0
assert results['max_labels_reached'][0] == 1
assert len(results["successful_cases"]) == 0
assert len(results["failed_cases"]) == 0
assert len(results["max_labels_reached"]) == 1
assert len(results["case_not_found"]) == 0
assert results["max_labels_reached"][0] == 1
def test_add_labels_to_cases_label_already_on_case(self):
"""Test handling when label already exists on case"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \
patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \
patch.object(self.labels_handler.client, 'send_get') as mock_send_get:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object(
self.labels_handler, "get_labels"
) as mock_get_labels, patch.object(self.labels_handler.client, "send_get") as mock_send_get:
# Mock __get_all_cases response (case exists)
mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "")
# Mock get_labels response
mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "")
# Mock get_case response with the label already present
mock_send_get.return_value = MagicMock(
status_code=200,
response_text={"labels": [{"id": 5, "title": "test-label"}]}
status_code=200, response_text={"labels": [{"id": 5, "title": "test-label"}]}
)
# Test the method
results, error_message = self.labels_handler.add_labels_to_cases(
case_ids=[1],
title="test-label",
project_id=1
case_ids=[1], title="test-label", project_id=1
)
# Verify no error
assert error_message == ""
# Verify results
assert len(results['successful_cases']) == 1
assert len(results['case_not_found']) == 0
assert "already exists" in results['successful_cases'][0]['message']
assert len(results["successful_cases"]) == 1
assert len(results["case_not_found"]) == 0
assert "already exists" in results["successful_cases"][0]["message"]
def test_add_labels_to_cases_case_not_found(self):
"""Test handling when case IDs don't exist"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases:
# Mock __get_all_cases response (no cases exist)
mock_get_cases.return_value = ([], "")
# Test the method with case IDs that don't exist
results, error_message = self.labels_handler.add_labels_to_cases(
case_ids=[999, 1000, 1001],
title="test-label",
project_id=1
case_ids=[999, 1000, 1001], title="test-label", project_id=1
)
# Verify no error
assert error_message == ""
# Verify results - all cases should be in case_not_found
assert len(results['case_not_found']) == 3
assert 999 in results['case_not_found']
assert 1000 in results['case_not_found']
assert 1001 in results['case_not_found']
assert len(results["case_not_found"]) == 3
assert 999 in results["case_not_found"]
assert 1000 in results["case_not_found"]
assert 1001 in results["case_not_found"]
# Verify that no other processing happened since no valid cases
assert len(results['successful_cases']) == 0
assert len(results['failed_cases']) == 0
assert len(results['max_labels_reached']) == 0
assert len(results["successful_cases"]) == 0
assert len(results["failed_cases"]) == 0
assert len(results["max_labels_reached"]) == 0
def test_get_cases_by_label_with_label_ids(self):
"""Test getting cases by label IDs"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases:
# Mock cases response

@@ -637,90 +543,84 @@ mock_cases = [

{"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "label2"}]},
{"id": 3, "title": "Test Case 3", "labels": [{"id": 5, "title": "label1"}]}
{"id": 3, "title": "Test Case 3", "labels": [{"id": 5, "title": "label1"}]},
]
mock_get_cases.return_value = (mock_cases, "")
# Test the method
matching_cases, error_message = self.labels_handler.get_cases_by_label(
project_id=1,
suite_id=None,
label_ids=[5]
project_id=1, suite_id=None, label_ids=[5]
)
# Verify no error
assert error_message == ""
# Verify results (should return cases 1 and 3)
assert len(matching_cases) == 2
assert matching_cases[0]['id'] == 1
assert matching_cases[1]['id'] == 3
assert matching_cases[0]["id"] == 1
assert matching_cases[1]["id"] == 3
def test_get_cases_by_label_with_title(self):
"""Test getting cases by label title"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \
patch.object(self.labels_handler, 'get_labels') as mock_get_labels:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object(
self.labels_handler, "get_labels"
) as mock_get_labels:
# Mock labels response
mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "")
# Mock cases response
mock_cases = [
{"id": 1, "title": "Test Case 1", "labels": [{"id": 5, "title": "test-label"}]},
{"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "other-label"}]}
{"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "other-label"}]},
]
mock_get_cases.return_value = (mock_cases, "")
# Test the method
matching_cases, error_message = self.labels_handler.get_cases_by_label(
project_id=1,
suite_id=None,
label_title="test-label"
project_id=1, suite_id=None, label_title="test-label"
)
# Verify no error
assert error_message == ""
# Verify results (should return case 1)
assert len(matching_cases) == 1
assert matching_cases[0]['id'] == 1
assert matching_cases[0]["id"] == 1
def test_get_cases_by_label_title_not_found(self):
"""Test getting cases by non-existent label title"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \
patch.object(self.labels_handler, 'get_labels') as mock_get_labels:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object(
self.labels_handler, "get_labels"
) as mock_get_labels:
# Mock labels response (no matching label)
mock_get_labels.return_value = ({"labels": []}, "")
# Mock get_all_cases to return empty (not called due to early return)
mock_get_cases.return_value = ([], "")
# Test the method
matching_cases, error_message = self.labels_handler.get_cases_by_label(
project_id=1,
suite_id=None,
label_title="non-existent-label"
project_id=1, suite_id=None, label_title="non-existent-label"
)
# Verify error
assert error_message == ""
assert matching_cases == []
def test_get_cases_by_label_no_matching_cases(self):
"""Test getting cases when no cases have the specified label"""
with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases:
with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases:
# Mock cases response (no cases with target label)
mock_cases = [
{"id": 1, "title": "Test Case 1", "labels": [{"id": 6, "title": "other-label"}]},
{"id": 2, "title": "Test Case 2", "labels": []}
{"id": 2, "title": "Test Case 2", "labels": []},
]
mock_get_cases.return_value = (mock_cases, "")
# Test the method
matching_cases, error_message = self.labels_handler.get_cases_by_label(
project_id=1,
suite_id=None,
label_ids=[5]
project_id=1, suite_id=None, label_ids=[5]
)
# Verify no error but no results

@@ -738,63 +638,45 @@ assert error_message == ""

mock_test_response = APIClientResult(
status_code=200,
response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []},
error_message=None
status_code=200, response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, error_message=None
)
# Mock run validation
mock_run_response = APIClientResult(
status_code=200,
response_text={"id": 1, "project_id": 1},
error_message=None
status_code=200, response_text={"id": 1, "project_id": 1}, error_message=None
)
# Mock existing labels
mock_labels_response = APIClientResult(
status_code=200,
response_text={"labels": []},
error_message=None
)
mock_labels_response = APIClientResult(status_code=200, response_text={"labels": []}, error_message=None)
# Mock label creation
mock_add_label_response = APIClientResult(
status_code=200,
response_text={"id": 5, "title": "Test Label"},
error_message=None
status_code=200, response_text={"id": 5, "title": "Test Label"}, error_message=None
)
# Mock test update
mock_update_response = APIClientResult(
status_code=200,
response_text={"id": 1, "labels": [{"id": 5, "title": "Test Label"}]},
error_message=None
status_code=200, response_text={"id": 1, "labels": [{"id": 5, "title": "Test Label"}]}, error_message=None
)
with patch.object(labels_handler.client, 'send_get') as mock_get, \
patch.object(labels_handler.client, 'send_post') as mock_post:
with patch.object(labels_handler.client, "send_get") as mock_get, patch.object(
labels_handler.client, "send_post"
) as mock_post:
# Setup get responses for validation and label retrieval
mock_get.side_effect = [
mock_test_response, # get_test/{test_id}
mock_run_response, # get_run/{run_id}
mock_labels_response, # get_labels
mock_run_response, # get_run/{run_id}
mock_labels_response, # get_labels
mock_test_response, # get_test/{test_id} again for labels check
]
# Setup post responses for label creation and test update
mock_post.side_effect = [
mock_add_label_response, # add_label
mock_update_response # update_test
]
result, error = labels_handler.add_labels_to_tests(
test_ids=[1],
titles="Test Label",
project_id=1
)
mock_post.side_effect = [mock_add_label_response, mock_update_response] # add_label # update_test
result, error = labels_handler.add_labels_to_tests(test_ids=[1], titles="Test Label", project_id=1)
assert error == ""
assert len(result['successful_tests']) == 1
assert len(result['failed_tests']) == 0
assert len(result['test_not_found']) == 0
assert len(result['max_labels_reached']) == 0
assert len(result["successful_tests"]) == 1
assert len(result["failed_tests"]) == 0
assert len(result["test_not_found"]) == 0
assert len(result["max_labels_reached"]) == 0

@@ -804,18 +686,10 @@ def test_add_labels_to_tests_test_not_found(self, labels_handler):

# Mock test not found
mock_test_response = APIClientResult(
status_code=404,
response_text=None,
error_message="Test not found"
)
with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response):
result, error = labels_handler.add_labels_to_tests(
test_ids=[999],
titles="Test Label",
project_id=1
)
mock_test_response = APIClientResult(status_code=404, response_text=None, error_message="Test not found")
with patch.object(labels_handler.client, "send_get", return_value=mock_test_response):
result, error = labels_handler.add_labels_to_tests(test_ids=[999], titles="Test Label", project_id=1)
assert error == ""
assert len(result['test_not_found']) == 1
assert 999 in result['test_not_found']
assert len(result["test_not_found"]) == 1
assert 999 in result["test_not_found"]

@@ -826,3 +700,3 @@ def test_add_labels_to_tests_max_labels_reached(self, labels_handler):

existing_labels = [{"id": i, "title": f"Label {i}"} for i in range(1, 11)]
# Mock test with max labels

@@ -832,47 +706,36 @@ mock_test_response = APIClientResult(

response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": existing_labels},
error_message=None
error_message=None,
)
# Mock run validation
mock_run_response = APIClientResult(
status_code=200,
response_text={"id": 1, "project_id": 1},
error_message=None
status_code=200, response_text={"id": 1, "project_id": 1}, error_message=None
)
# Mock existing labels
mock_labels_response = APIClientResult(
status_code=200,
response_text={"labels": []},
error_message=None
)
mock_labels_response = APIClientResult(status_code=200, response_text={"labels": []}, error_message=None)
# Mock label creation
mock_add_label_response = APIClientResult(
status_code=200,
response_text={"id": 11, "title": "New Label"},
error_message=None
status_code=200, response_text={"id": 11, "title": "New Label"}, error_message=None
)
with patch.object(labels_handler.client, 'send_get') as mock_get, \
patch.object(labels_handler.client, 'send_post') as mock_post:
with patch.object(labels_handler.client, "send_get") as mock_get, patch.object(
labels_handler.client, "send_post"
) as mock_post:
mock_get.side_effect = [
mock_test_response, # get_test/{test_id}
mock_run_response, # get_run/{run_id}
mock_labels_response, # get_labels
mock_test_response, # get_test/{test_id} again for labels check
mock_test_response, # get_test/{test_id}
mock_run_response, # get_run/{run_id}
mock_labels_response, # get_labels
mock_test_response, # get_test/{test_id} again for labels check
]
mock_post.return_value = mock_add_label_response
result, error = labels_handler.add_labels_to_tests(
test_ids=[1],
titles="New Label",
project_id=1
)
result, error = labels_handler.add_labels_to_tests(test_ids=[1], titles="New Label", project_id=1)
assert error == ""
assert len(result['max_labels_reached']) == 1
assert 1 in result['max_labels_reached']
assert len(result["max_labels_reached"]) == 1
assert 1 in result["max_labels_reached"]

@@ -883,41 +746,36 @@ def test_get_tests_by_label_success(self, labels_handler):

mock_runs_response = APIClientResult(
status_code=200,
response_text={"runs": [{"id": 1}, {"id": 2}]},
error_message=None
status_code=200, response_text={"runs": [{"id": 1}, {"id": 2}]}, error_message=None
)
# Mock tests responses for each run
mock_tests_response_run1 = APIClientResult(
status_code=200,
response_text={"tests": [
{"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]},
{"id": 2, "title": "Test 2", "labels": []}
]},
error_message=None
response_text={
"tests": [
{"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]},
{"id": 2, "title": "Test 2", "labels": []},
]
},
error_message=None,
)
mock_tests_response_run2 = APIClientResult(
status_code=200,
response_text={"tests": [
{"id": 3, "title": "Test 3", "labels": [{"id": 5, "title": "Test Label"}]}
]},
error_message=None
response_text={"tests": [{"id": 3, "title": "Test 3", "labels": [{"id": 5, "title": "Test Label"}]}]},
error_message=None,
)
with patch.object(labels_handler.client, 'send_get') as mock_get:
with patch.object(labels_handler.client, "send_get") as mock_get:
mock_get.side_effect = [
mock_runs_response, # get_runs/{project_id}
mock_runs_response, # get_runs/{project_id}
mock_tests_response_run1, # get_tests/{run_id} for run 1
mock_tests_response_run2 # get_tests/{run_id} for run 2
mock_tests_response_run2, # get_tests/{run_id} for run 2
]
result, error = labels_handler.get_tests_by_label(
project_id=1,
label_ids=[5]
)
result, error = labels_handler.get_tests_by_label(project_id=1, label_ids=[5])
assert error == ""
assert len(result) == 2
assert result[0]['id'] == 1
assert result[1]['id'] == 3
assert result[0]["id"] == 1
assert result[1]["id"] == 3

@@ -928,48 +786,36 @@ def test_get_tests_by_label_with_run_ids(self, labels_handler):

mock_run_response_1 = APIClientResult(
status_code=200,
response_text={"id": 1, "name": "Test Run 1"},
error_message=None
status_code=200, response_text={"id": 1, "name": "Test Run 1"}, error_message=None
)
mock_run_response_2 = APIClientResult(
status_code=200,
response_text={"id": 2, "name": "Test Run 2"},
error_message=None
status_code=200, response_text={"id": 2, "name": "Test Run 2"}, error_message=None
)
# Mock tests responses for each run
mock_tests_response_run1 = APIClientResult(
status_code=200,
response_text={"tests": [
{"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}
]},
error_message=None
response_text={"tests": [{"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}]},
error_message=None,
)
mock_tests_response_run2 = APIClientResult(
status_code=200,
response_text={"tests": [
{"id": 2, "title": "Test 2", "labels": [{"id": 5, "title": "Test Label"}]}
]},
error_message=None
response_text={"tests": [{"id": 2, "title": "Test 2", "labels": [{"id": 5, "title": "Test Label"}]}]},
error_message=None,
)
with patch.object(labels_handler.client, 'send_get') as mock_get:
with patch.object(labels_handler.client, "send_get") as mock_get:
mock_get.side_effect = [
mock_run_response_1, # get_run/1
mock_run_response_2, # get_run/2
mock_run_response_1, # get_run/1
mock_run_response_2, # get_run/2
mock_tests_response_run1, # get_tests/1
mock_tests_response_run2 # get_tests/2
mock_tests_response_run2, # get_tests/2
]
result, error = labels_handler.get_tests_by_label(
project_id=1,
label_ids=[5],
run_ids=[1, 2]
)
result, error = labels_handler.get_tests_by_label(project_id=1, label_ids=[5], run_ids=[1, 2])
assert error == ""
assert len(result) == 2
assert result[0]['id'] == 1
assert result[1]['id'] == 2
assert result[0]["id"] == 1
assert result[1]["id"] == 2

@@ -981,44 +827,34 @@ def test_get_test_labels_success(self, labels_handler):

status_code=200,
response_text={
"id": 1,
"title": "Test 1",
"status_id": 1,
"labels": [{"id": 5, "title": "Test Label"}]
},
error_message=None
response_text={"id": 1, "title": "Test 1", "status_id": 1, "labels": [{"id": 5, "title": "Test Label"}]},
error_message=None,
)
mock_test_response2 = APIClientResult(
status_code=200,
response_text={
"id": 2,
"title": "Test 2",
"status_id": 2,
"labels": []
},
error_message=None
response_text={"id": 2, "title": "Test 2", "status_id": 2, "labels": []},
error_message=None,
)
with patch.object(labels_handler.client, 'send_get') as mock_get:
with patch.object(labels_handler.client, "send_get") as mock_get:
mock_get.side_effect = [mock_test_response1, mock_test_response2]
result, error = labels_handler.get_test_labels([1, 2])
assert error == ""
assert len(result) == 2
# Check first test
assert result[0]['test_id'] == 1
assert result[0]['title'] == "Test 1"
assert result[0]['status_id'] == 1
assert len(result[0]['labels']) == 1
assert result[0]['labels'][0]['title'] == "Test Label"
assert result[0]['error'] is None
assert result[0]["test_id"] == 1
assert result[0]["title"] == "Test 1"
assert result[0]["status_id"] == 1
assert len(result[0]["labels"]) == 1
assert result[0]["labels"][0]["title"] == "Test Label"
assert result[0]["error"] is None
# Check second test
assert result[1]['test_id'] == 2
assert result[1]['title'] == "Test 2"
assert result[1]['status_id'] == 2
assert len(result[1]['labels']) == 0
assert result[1]['error'] is None
assert result[1]["test_id"] == 2
assert result[1]["title"] == "Test 2"
assert result[1]["status_id"] == 2
assert len(result[1]["labels"]) == 0
assert result[1]["error"] is None

@@ -1028,16 +864,12 @@ def test_get_test_labels_test_not_found(self, labels_handler):

# Mock test not found
mock_test_response = APIClientResult(
status_code=404,
response_text=None,
error_message="Test not found"
)
with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response):
mock_test_response = APIClientResult(status_code=404, response_text=None, error_message="Test not found")
with patch.object(labels_handler.client, "send_get", return_value=mock_test_response):
result, error = labels_handler.get_test_labels([999])
assert error == ""
assert len(result) == 1
assert result[0]['test_id'] == 999
assert result[0]['error'] == "Test 999 not found or inaccessible"
assert result[0]['labels'] == []
assert result[0]["test_id"] == 999
assert result[0]["error"] == "Test 999 not found or inaccessible"
assert result[0]["labels"] == []

@@ -1048,58 +880,43 @@ def test_add_labels_to_tests_batch_update(self, labels_handler):

mock_test_response1 = APIClientResult(
status_code=200,
response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []},
error_message=None
status_code=200, response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, error_message=None
)
mock_test_response2 = APIClientResult(
status_code=200,
response_text={"id": 2, "title": "Test 2", "run_id": 1, "labels": []},
error_message=None
status_code=200, response_text={"id": 2, "title": "Test 2", "run_id": 1, "labels": []}, error_message=None
)
# Mock run validation
mock_run_response = APIClientResult(
status_code=200,
response_text={"id": 1, "project_id": 1},
error_message=None
status_code=200, response_text={"id": 1, "project_id": 1}, error_message=None
)
# Mock existing labels
mock_labels_response = APIClientResult(
status_code=200,
response_text={"labels": [{"id": 5, "title": "Test Label"}]},
error_message=None
status_code=200, response_text={"labels": [{"id": 5, "title": "Test Label"}]}, error_message=None
)
# Mock batch update
mock_batch_response = APIClientResult(
status_code=200,
response_text={"updated": 2},
error_message=None
)
with patch.object(labels_handler.client, 'send_get') as mock_get, \
patch.object(labels_handler.client, 'send_post') as mock_post:
mock_batch_response = APIClientResult(status_code=200, response_text={"updated": 2}, error_message=None)
with patch.object(labels_handler.client, "send_get") as mock_get, patch.object(
labels_handler.client, "send_post"
) as mock_post:
# Setup get responses
mock_get.side_effect = [
mock_test_response1, # get_test/1
mock_run_response, # get_run/1
mock_run_response, # get_run/1
mock_test_response2, # get_test/2
mock_run_response, # get_run/1
mock_labels_response, # get_labels
mock_run_response, # get_run/1
mock_labels_response, # get_labels
mock_test_response1, # get_test/1 for labels check
mock_test_response2, # get_test/2 for labels check
]
# Setup batch update response
mock_post.return_value = mock_batch_response
result, error = labels_handler.add_labels_to_tests(
test_ids=[1, 2],
titles="Test Label",
project_id=1
)
result, error = labels_handler.add_labels_to_tests(test_ids=[1, 2], titles="Test Label", project_id=1)
assert error == ""
assert len(result['successful_tests']) == 2
assert len(result["successful_tests"]) == 2

@@ -26,5 +26,3 @@ import json

if custom_json is None:
json_path = (
Path(__file__).parent / "test_data/json/api_request_handler.json"
)
json_path = Path(__file__).parent / "test_data/json/api_request_handler.json"
else:

@@ -53,5 +51,3 @@ json_path = custom_json

def api_request_handler_update_case_json(handler_maker):
json_path = (
Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json"
)
json_path = Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json"
yield handler_maker(custom_json=json_path, verify=False)

@@ -62,5 +58,3 @@

@pytest.mark.api_handler
def test_return_project(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_return_project(self, api_request_handler: ApiRequestHandler, requests_mock):
mocked_response = {

@@ -78,3 +72,3 @@ "offset": 0,

{"id": 3, "name": "DataHub", "suite_mode": 1},
]
],
}

@@ -114,5 +108,3 @@ requests_mock.get(create_url("get_projects"), json=mocked_response)

@pytest.mark.api_handler
def test_return_project_legacy_response(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_return_project_legacy_response(self, api_request_handler: ApiRequestHandler, requests_mock):
mocked_response = [

@@ -139,3 +131,5 @@ {"id": 1, "name": "DataHub", "suite_mode": 1},

requests_mock.get(create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n"+json.dumps(mocked_response))
requests_mock.get(
create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n" + json.dumps(mocked_response)
)
assert api_request_handler.get_project_data("Test Project") == ProjectData(

@@ -146,5 +140,3 @@ project_id=2, suite_mode=1, error_message=""

@pytest.mark.api_handler
def test_check_suite_exists(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_check_suite_exists(self, api_request_handler: ApiRequestHandler, requests_mock):
project_id = 3

@@ -165,3 +157,3 @@ mocked_response = [

), "Given suite id should NOT exist in mocked response."
@pytest.mark.api_handler

@@ -178,3 +170,3 @@ def test_check_suite_exists_with_pagination(self, api_request_handler: ApiRequestHandler, requests_mock):

{"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3},
]
],
}

@@ -219,12 +211,9 @@ requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response)

assert (
api_request_handler.suites_data_from_provider.suite_id
== mocked_response["id"]
api_request_handler.suites_data_from_provider.suite_id == mocked_response["id"]
), "Added suite id in DataProvider doesn't match mocked response id."
@pytest.mark.api_handler
def test_check_missing_sections_true(
self, api_request_handler: ApiRequestHandler, requests_mock, mocker
):
def test_check_missing_sections_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker):
project_id = 3
update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data')
update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data")
mocked_response = {

@@ -238,21 +227,15 @@ "_links": {"next": None, "prev": None},

}
]
],
}
requests_mock.get(
create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response
)
requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response)
missing, _ = api_request_handler.check_missing_section_ids(project_id)
update_data_mock.assert_called_with(
section_data=[{'section_id': 0, 'suite_id': 4, 'name': 'Skipped test'}]
)
update_data_mock.assert_called_with(section_data=[{"section_id": 0, "suite_id": 4, "name": "Skipped test"}])
assert missing, "There should be missing section"
@pytest.mark.api_handler
def test_check_missing_sections_false(
self, api_request_handler: ApiRequestHandler, requests_mock, mocker
):
def test_check_missing_sections_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker):
project_id = 3
update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data')
update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data")
mocked_response = {

@@ -270,9 +253,7 @@ "_links": {"next": None, "prev": None},

"name": "Passed test",
}
]
},
],
}
requests_mock.get(
create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response
)
requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response)

@@ -282,4 +263,4 @@ missing, _ = api_request_handler.check_missing_section_ids(project_id)

section_data=[
{'name': 'Skipped test', 'section_id': 1, 'suite_id': 4},
{'name': 'Passed test', 'section_id': 2, 'suite_id': 4}
{"name": "Skipped test", "section_id": 1, "suite_id": 4},
{"name": "Passed test", "section_id": 2, "suite_id": 4},
]

@@ -298,5 +279,3 @@ )

requests_mock.post(
create_url(f"add_section/{project_id}"), json=mocked_response
)
requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response)
resources_added, error = api_request_handler.add_sections(project_id)

@@ -313,10 +292,7 @@

assert (
api_request_handler.suites_data_from_provider.testsections[1].section_id
== mocked_response["id"]
api_request_handler.suites_data_from_provider.testsections[1].section_id == mocked_response["id"]
), "Added section id in DataProvider doesn't match mocked response id."
@pytest.mark.api_handler
def test_add_section_and_cases(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_add_section_and_cases(self, api_request_handler: ApiRequestHandler, requests_mock):
project_id = 3

@@ -327,3 +303,3 @@ mocked_response_for_section = {

"name": "Passed test",
"custom_automation_id": "className.testCase"
"custom_automation_id": "className.testCase",
}

@@ -336,3 +312,3 @@

"title": "testCase2",
"custom_automation_id": "className.testCase"
"custom_automation_id": "className.testCase",
}

@@ -345,9 +321,7 @@

"title": "testCase3",
"custom_automation_id": "className.testCase"
"custom_automation_id": "className.testCase",
}
requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section)
requests_mock.post(
create_url(f"add_section/{project_id}"), json=mocked_response_for_section
)
requests_mock.post(
create_url(f"add_case/{mocked_response_for_case_1['section_id']}"),

@@ -392,5 +366,3 @@ json=mocked_response_for_case_1,

resources_added, error = api_request_handler.add_run(project_id, run_name)
assert (
mocked_response["id"] == resources_added
), "Added run id doesn't match mocked response id"
assert mocked_response["id"] == resources_added, "Added run id doesn't match mocked response id"
assert error == "", "Error occurred in add_case"

@@ -416,21 +388,19 @@

]
requests_mock.post(
create_url(f"add_results_for_cases/{run_id}"), json=mocked_response
)
requests_mock.post(create_url(f"add_results_for_cases/{run_id}"), json=mocked_response)
tests_mocked_response = {
'offset': 0,
'limit': 250,
'size': 4,
'_links': {'next': None, 'prev': None},
'tests': [
"offset": 0,
"limit": 250,
"size": 4,
"_links": {"next": None, "prev": None},
"tests": [
{
'id': 4,
'case_id': 1,
'status_id': 5,
'assignedto_id': None,
'run_id': run_id,
'title': 'Fail To Login With Invalid Password'
}
]
"id": 4,
"case_id": 1,
"status_id": 5,
"assignedto_id": None,
"run_id": run_id,
"title": "Fail To Login With Invalid Password",
}
],
}

@@ -441,5 +411,3 @@ requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response)

requests_mock.post(
create_url(f"add_attachment_to_result/{result_id}"), json=attachments_mock_response
)
requests_mock.post(create_url(f"add_attachment_to_result/{result_id}"), json=attachments_mock_response)

@@ -450,4 +418,5 @@ with patch("builtins.open", mock_open()) as mock_file:

assert error == "", "Error occurred in add_results"
assert results_added == len(mocked_response), \
f"Expected {len(mocked_response)} results to be added but got {results_added} instead."
assert results_added == len(
mocked_response
), f"Expected {len(mocked_response)} results to be added but got {results_added} instead."
mock_file.assert_any_call("./path1", "rb")

@@ -471,8 +440,6 @@ mock_file.assert_any_call("./path2", "rb")

@pytest.mark.api_handler
def test_check_missing_test_cases_ids_true(
self, api_request_handler: ApiRequestHandler, requests_mock, mocker
):
def test_check_missing_test_cases_ids_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker):
project_id = 3
suite_id = api_request_handler.suites_data_from_provider.suite_id
update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data')
update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data")
mocked_response_page_1 = {

@@ -485,3 +452,3 @@ "_links": {

{"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234},
{"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}
{"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234},
],

@@ -493,5 +460,3 @@ }

)
missing_ids, error = api_request_handler.check_missing_test_cases_ids(
project_id
)
missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id)

@@ -504,11 +469,10 @@ update_data_mock.assert_called_with(

"section_id": 1234,
"title": "testCase1"
"title": "testCase1",
},
{
"case_id": 2,
"custom_automation_id":
"Skipped test.testCase2",
"custom_automation_id": "Skipped test.testCase2",
"section_id": 1234,
"title": "testCase2"
}
"title": "testCase2",
},
]

@@ -520,8 +484,6 @@ )

@pytest.mark.api_handler
def test_check_missing_test_cases_ids_false(
self, api_request_handler: ApiRequestHandler, requests_mock, mocker
):
def test_check_missing_test_cases_ids_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker):
project_id = 3
suite_id = api_request_handler.suites_data_from_provider.suite_id
update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data')
update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data")
mocked_response_page_1 = {

@@ -534,3 +496,3 @@ "_links": {

{"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234},
{"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}
{"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234},
],

@@ -552,5 +514,3 @@ }

)
missing_ids, error = api_request_handler.check_missing_test_cases_ids(
project_id
)
missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id)
update_data_mock.assert_called_with(

@@ -562,3 +522,3 @@ case_data=[

"section_id": 1234,
"title": "testCase1"
"title": "testCase1",
},

@@ -569,10 +529,5 @@ {

"section_id": 1234,
"title": "testCase2"
"title": "testCase2",
},
{
"case_id": 1,
"custom_automation_id": "Passed test.testCase3",
"section_id": 2,
"title": "testCase3"
}
{"case_id": 1, "custom_automation_id": "Passed test.testCase3", "section_id": 2, "title": "testCase3"},
]

@@ -594,26 +549,21 @@ )

assert (
resources_added[0] == mocked_response[0]["id"] and
resources_added[1] == mocked_response[1]["id"]
resources_added[0] == mocked_response[0]["id"] and resources_added[1] == mocked_response[1]["id"]
), "ID in response doesn't match mocked response"
@pytest.mark.api_handler
def test_get_suite_ids_error(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_get_suite_ids_error(self, api_request_handler: ApiRequestHandler, requests_mock):
project_id = 3
requests_mock.get(
create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout
)
requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout)
suite_ids, error = api_request_handler.get_suite_ids(project_id)
assert suite_ids == [], "Should return empty list on API error"
assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \
" Please check your settings and try again.", "Should return connection error message"
assert (
error == "Your upload to TestRail did not receive a successful response from your TestRail Instance."
" Please check your settings and try again."
), "Should return connection error message"
@pytest.mark.api_handler
def test_resolve_suite_id_using_name(
self, api_request_handler: ApiRequestHandler, requests_mock, mocker
):
def test_resolve_suite_id_using_name(self, api_request_handler: ApiRequestHandler, requests_mock, mocker):
project_id = 3

@@ -623,3 +573,3 @@ suite_name = "Suite2"

update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data')
update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data")

@@ -634,23 +584,19 @@ mocked_response = {

{"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3},
]
],
}
requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response)
suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id)
assert suite_id == 5, "Should return the correct suite ID for matching name with pagination"
assert error == "", "Should have no error message"
update_data_mock.assert_called_once_with([{"suite_id": 5, "name": "Suite2"}])
@pytest.mark.api_handler
def test_resolve_suite_id_using_name_error(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_resolve_suite_id_using_name_error(self, api_request_handler: ApiRequestHandler, requests_mock):
project_id = 3
requests_mock.get(
create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout
)
requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout)

@@ -660,14 +606,11 @@ suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id)

assert suite_id == -1, "Should return -1 on API error"
assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \
" Please check your settings and try again.", "Should return connection error message"
assert (
error == "Your upload to TestRail did not receive a successful response from your TestRail Instance."
" Please check your settings and try again."
), "Should return connection error message"
@pytest.mark.api_handler
def test_return_project_error(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_return_project_error(self, api_request_handler: ApiRequestHandler, requests_mock):
requests_mock.get(
create_url("get_projects"), exc=requests.exceptions.ConnectTimeout
)
requests_mock.get(create_url("get_projects"), exc=requests.exceptions.ConnectTimeout)
assert api_request_handler.get_project_data("Test Project") == ProjectData(

@@ -681,5 +624,3 @@ project_id=-3,

@pytest.mark.api_handler
def test_add_suite_error(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_add_suite_error(self, api_request_handler: ApiRequestHandler, requests_mock):

@@ -697,4 +638,3 @@ project_id = 3

assert (
error
== "Your upload to TestRail did not receive a successful response from your TestRail Instance."
error == "Your upload to TestRail did not receive a successful response from your TestRail Instance."
" Please check your settings and try again."

@@ -704,5 +644,3 @@ ), "Connection error is expected"

@pytest.mark.api_handler
def test_add_sections_error(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_add_sections_error(self, api_request_handler: ApiRequestHandler, requests_mock):
project_id = 3

@@ -717,4 +655,3 @@ requests_mock.post(

assert (
error
== "Your upload to TestRail did not receive a successful response from your TestRail Instance."
error == "Your upload to TestRail did not receive a successful response from your TestRail Instance."
" Please check your settings and try again."

@@ -724,10 +661,7 @@ ), "Connection error is expected"

assert (
api_request_handler.suites_data_from_provider.testsections[1].section_id
is None
api_request_handler.suites_data_from_provider.testsections[1].section_id is None
), "No resources should be added to DataProvider"
@pytest.mark.api_handler
def test_add_section_and_cases_error(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_add_section_and_cases_error(self, api_request_handler: ApiRequestHandler, requests_mock):
project_id = 3

@@ -745,3 +679,3 @@ mocked_response_for_section = {

"title": "testCase2",
"custom_automation_id": "Skipped test.testCase2"
"custom_automation_id": "Skipped test.testCase2",
}

@@ -754,9 +688,7 @@

"title": "testCase3",
"custom_automation_id": "Passed test.testCase3"
"custom_automation_id": "Passed test.testCase3",
}
requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section)
requests_mock.post(
create_url(f"add_section/{project_id}"), json=mocked_response_for_section
)
requests_mock.post(
create_url(f"add_case/{mocked_response_for_case_1['section_id']}"),

@@ -780,4 +712,3 @@ json=mocked_response_for_case_1,

assert (
error
== "Your upload to TestRail did not receive a successful response from your TestRail Instance."
error == "Your upload to TestRail did not receive a successful response from your TestRail Instance."
" Please check your settings and try again."

@@ -787,5 +718,3 @@ ), "Connection error is expected"

@pytest.mark.api_handler
def test_add_results_error(
self, api_request_handler: ApiRequestHandler, requests_mock
):
def test_add_results_error(self, api_request_handler: ApiRequestHandler, requests_mock):
run_id = 3

@@ -797,16 +726,16 @@ requests_mock.post(

tests_mocked_response = {
'offset': 0,
'limit': 250,
'size': 4,
'_links': {'next': None, 'prev': None},
'tests': [
"offset": 0,
"limit": 250,
"size": 4,
"_links": {"next": None, "prev": None},
"tests": [
{
'id': 18319,
'case_id': 6086,
'status_id': 5,
'assignedto_id': None,
'run_id': run_id,
'title': 'Fail To Login With Invalid Password'
}
]
"id": 18319,
"case_id": 6086,
"status_id": 5,
"assignedto_id": None,
"run_id": run_id,
"title": "Fail To Login With Invalid Password",
}
],
}

@@ -817,4 +746,3 @@ requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response)

assert (
error
== "Your upload to TestRail did not receive a successful response from your TestRail Instance."
error == "Your upload to TestRail did not receive a successful response from your TestRail Instance."
" Please check your settings and try again."

@@ -825,5 +753,3 @@ ), "Connection error is expected"

@pytest.mark.api_handler
def test_add_results_keyboard_interrupt(
self, api_request_handler: ApiRequestHandler, requests_mock, mocker
):
def test_add_results_keyboard_interrupt(self, api_request_handler: ApiRequestHandler, requests_mock, mocker):
run_id = 3

@@ -834,5 +760,3 @@ requests_mock.post(

)
mocker.patch(
"trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt
)
mocker.patch("trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt)
with pytest.raises(KeyboardInterrupt) as exception:

@@ -842,5 +766,3 @@ api_request_handler.add_results(run_id)

@pytest.mark.api_handler
def test_add_suite_with_verify(
self, api_request_handler_verify: ApiRequestHandler, requests_mock
):
def test_add_suite_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock):
project_id = 3

@@ -870,5 +792,3 @@ mocked_response = {

requests_mock.post(
create_url(f"add_section/{project_id}"), json=mocked_response
)
requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response)
api_request_handler_verify = handler_maker(verify=True)

@@ -881,10 +801,7 @@ resources_added, error = api_request_handler_verify.add_sections(project_id)

assert (
error
== "Data verification failed. Server added different resource than expected."
error == "Data verification failed. Server added different resource than expected."
), "There should be error in verification."
@pytest.mark.api_handler
def test_add_case_with_verify(
self, api_request_handler_verify: ApiRequestHandler, requests_mock
):
def test_add_case_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock):
mocked_response_for_case = {

@@ -896,3 +813,3 @@ "id": 3,

"estimate": "30s",
"custom_automation_id": "Skipped test.testCase2"
"custom_automation_id": "Skipped test.testCase2",
}

@@ -904,20 +821,12 @@

)
del api_request_handler_verify.suites_data_from_provider.testsections[
1
].testcases[0]
del api_request_handler_verify.suites_data_from_provider.testsections[1].testcases[0]
resources_added, error = api_request_handler_verify.add_cases()
assert error == "", "There should be no error in verification."
mocked_response_for_case["estimate"] = "60s"
api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[
1
].case_id = None
api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[1].case_id = None
resources_added, error = api_request_handler_verify.add_cases()
assert (
error == FAULT_MAPPING["data_verification_error"]
), "There should be error in verification."
assert error == FAULT_MAPPING["data_verification_error"], "There should be error in verification."
@pytest.mark.api_handler
def test_delete_section(
self, api_request_handler_verify: ApiRequestHandler, requests_mock
):
def test_delete_section(self, api_request_handler_verify: ApiRequestHandler, requests_mock):
sections_id = [{"section_id": 1}]

@@ -935,5 +844,3 @@ mocked_response_for_case = {"success": 200}

@pytest.mark.api_handler
def test_delete_suite(
self, api_request_handler_verify: ApiRequestHandler, requests_mock
):
def test_delete_suite(self, api_request_handler_verify: ApiRequestHandler, requests_mock):
suite_id = 1

@@ -951,5 +858,3 @@ mocked_response_for_case = {"success": 200}

@pytest.mark.api_handler
def test_delete_cases(
self, api_request_handler_verify: ApiRequestHandler, requests_mock
):
def test_delete_cases(self, api_request_handler_verify: ApiRequestHandler, requests_mock):
suite_id = 1

@@ -964,11 +869,7 @@ cases = [{"case_id": 1}]

resources_added, error = api_request_handler_verify.delete_cases(
suite_id, cases
)
resources_added, error = api_request_handler_verify.delete_cases(suite_id, cases)
assert error == "", "There should be no error in verification."
@pytest.mark.api_handler
def test_delete_run(
self, api_request_handler_verify: ApiRequestHandler, requests_mock
):
def test_delete_run(self, api_request_handler_verify: ApiRequestHandler, requests_mock):
run_id = 1

@@ -984,1 +885,327 @@ mocked_response_for_case = {"success": 200}

assert error == "", "There should be no error in verification."
@pytest.mark.api_handler
def test_update_run_with_include_all_false_standalone(self, api_request_handler: ApiRequestHandler, requests_mock):
    """Verify update_run on a standalone run (no plan) with include_all=false.

    The handler must fetch the run, collect the case IDs already present in
    it, and POST include_all=False plus the union of existing and report
    case IDs to update_run.
    """
    run_id = 100
    run_name = "Updated Test Run"

    # Standalone run: no plan, no configs, include_all disabled.
    existing_run = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": False,
        "plan_id": None,
        "config_ids": [],
    }
    # Two cases are already part of the run.
    tests_in_run = {
        "offset": 0,
        "limit": 250,
        "size": 2,
        "_links": {"next": None, "prev": None},
        "tests": [{"id": 1, "case_id": 1, "status_id": 1}, {"id": 2, "case_id": 2, "status_id": 1}],
    }
    updated_run = {"id": run_id, "name": run_name}

    requests_mock.get(create_url(f"get_run/{run_id}"), json=existing_run)
    requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_in_run)
    requests_mock.post(create_url(f"update_run/{run_id}"), json=updated_run)

    run_data, error = api_request_handler.update_run(run_id, run_name)

    assert error == "", "No error should occur"
    assert run_data["id"] == run_id, "Run ID should match"

    # Inspect the payload that was POSTed to update_run.
    update_request = next(
        r for r in requests_mock.request_history if "update_run" in r.url and r.method == "POST"
    )
    payload = update_request.json()
    assert payload["include_all"] == False, "include_all should be False"
    assert "case_ids" in payload, "case_ids should be present"
    # Should contain union of existing (1, 2) and report cases
    assert set(payload["case_ids"]) >= {1, 2}, "Should include existing case IDs"
@pytest.mark.api_handler
def test_update_run_with_include_all_false_plan_with_config(
    self, api_request_handler: ApiRequestHandler, requests_mock
):
    """Verify update_run on a run inside a plan entry that has configs.

    This is the regression scenario: because the run has config_ids, the
    handler routes the update through update_run_in_plan_entry, and that
    payload must still carry include_all=False along with the union of
    existing and report case IDs.
    """
    run_id = 200
    run_name = "Updated Test Run in Plan"

    # Run belongs to plan 10 and carries configs, so update_run_in_plan_entry is used.
    existing_run = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": False,
        "plan_id": 10,
        "config_ids": [5, 6],  # Has configs - will use update_run_in_plan_entry
    }
    # Three cases already exist in the run.
    tests_in_run = {
        "offset": 0,
        "limit": 250,
        "size": 3,
        "_links": {"next": None, "prev": None},
        "tests": [
            {"id": 1, "case_id": 188, "status_id": 1},
            {"id": 2, "case_id": 180, "status_id": 1},
            {"id": 3, "case_id": 191, "status_id": 1},
        ],
    }
    updated_run = {"id": run_id, "name": run_name}

    requests_mock.get(create_url(f"get_run/{run_id}"), json=existing_run)
    requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_in_run)
    requests_mock.post(create_url(f"update_run_in_plan_entry/{run_id}"), json=updated_run)

    run_data, error = api_request_handler.update_run(run_id, run_name)

    assert error == "", "No error should occur"
    assert run_data["id"] == run_id, "Run ID should match"

    # Examine what was POSTed to update_run_in_plan_entry.
    update_request = next(r for r in requests_mock.request_history if "update_run_in_plan_entry" in r.url)
    payload = update_request.json()
    # THIS IS THE CRITICAL FIX - must include include_all=False
    assert payload["include_all"] == False, "include_all must be False (fixes the bug)"
    assert "case_ids" in payload, "case_ids should be present"
    # Should contain union of existing (188, 180, 191) and report cases
    assert set(payload["case_ids"]) >= {188, 180, 191}, "Should preserve existing case IDs"
@pytest.mark.api_handler
def test_update_run_with_include_all_true_preserves_setting(
    self, api_request_handler: ApiRequestHandler, requests_mock
):
    """Verify update_run keeps include_all=true and omits case_ids entirely.

    When the run already includes all cases there is nothing to merge, so
    the update payload must carry include_all=True and no case_ids key.
    """
    run_id = 300
    run_name = "Updated Run with Include All"

    # Run is configured to include every case in the suite.
    existing_run = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": True,  # Run includes all cases
        "plan_id": None,
        "config_ids": [],
    }
    updated_run = {"id": run_id, "name": run_name, "include_all": True}

    requests_mock.get(create_url(f"get_run/{run_id}"), json=existing_run)
    requests_mock.post(create_url(f"update_run/{run_id}"), json=updated_run)

    run_data, error = api_request_handler.update_run(run_id, run_name)

    assert error == "", "No error should occur"
    assert run_data["include_all"] == True, "include_all should be preserved"

    # Check the payload that was POSTed to update_run.
    update_request = next(
        r for r in requests_mock.request_history if "update_run" in r.url and r.method == "POST"
    )
    payload = update_request.json()
    assert payload["include_all"] == True, "include_all should be True"
    assert "case_ids" not in payload, "case_ids should NOT be present when include_all=True"
@pytest.mark.api_handler
def test_update_run_handles_get_tests_error(self, api_request_handler: ApiRequestHandler, requests_mock):
    """Verify update_run fails gracefully when get_tests returns an error.

    With include_all=false the handler must read the run's existing tests;
    if that call fails (e.g. 403), update_run should return no data and a
    message pointing at the get_tests failure.
    """
    run_id = 400
    run_name = "Test Run"

    # include_all=false forces a get_tests call, which we make fail below.
    existing_run = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": False,
        "plan_id": None,
        "config_ids": [],
    }

    requests_mock.get(create_url(f"get_run/{run_id}"), json=existing_run)
    # get_tests responds with 403 Forbidden.
    requests_mock.get(create_url(f"get_tests/{run_id}"), status_code=403, json={"error": "Access denied"})

    run_data, error = api_request_handler.update_run(run_id, run_name)

    assert run_data is None, "run_data should be None on error"
    assert error is not None, "Error message should be present"
    assert "Failed to get tests in run" in error, "Error should indicate get_tests failure"
@pytest.mark.api_handler
def test_update_run_with_include_all_false_plan_without_config(
    self, api_request_handler: ApiRequestHandler, requests_mock
):
    """Verify update_run routes through update_plan_entry for a config-less plan run.

    A run that belongs to a plan but has no config_ids is updated via
    update_plan_entry/{plan_id}/{entry_id}; the payload must still carry
    include_all=False and the existing case IDs.
    """
    run_id = 500
    run_name = "Updated Test Run in Plan No Config"
    plan_id = 20
    entry_id = "abc-123"

    # Run is in a plan but has no configs, so update_plan_entry is used.
    existing_run = {
        "id": run_id,
        "name": "Original Run",
        "description": "Original description",
        "refs": "REF-1",
        "include_all": False,
        "plan_id": plan_id,
        "config_ids": [],  # No configs - will use update_plan_entry
    }
    # One case already exists in the run.
    tests_in_run = {
        "offset": 0,
        "limit": 250,
        "size": 1,
        "_links": {"next": None, "prev": None},
        "tests": [{"id": 1, "case_id": 50, "status_id": 1}],
    }
    # The plan lookup maps the run back to its entry.
    plan_payload = {
        "id": plan_id,
        "entries": [{"id": entry_id, "runs": [{"id": run_id, "entry_id": entry_id}]}],
    }
    updated_run = {"id": run_id, "name": run_name}

    requests_mock.get(create_url(f"get_run/{run_id}"), json=existing_run)
    requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_in_run)
    requests_mock.get(create_url(f"get_plan/{plan_id}"), json=plan_payload)
    requests_mock.post(create_url(f"update_plan_entry/{plan_id}/{entry_id}"), json=updated_run)

    run_data, error = api_request_handler.update_run(run_id, run_name)

    assert error == "", "No error should occur"
    assert run_data["id"] == run_id, "Run ID should match"

    # Check what was POSTed to update_plan_entry.
    update_request = next(
        r for r in requests_mock.request_history if f"update_plan_entry/{plan_id}/{entry_id}" in r.url
    )
    payload = update_request.json()
    assert payload["include_all"] == False, "include_all should be False"
    assert "case_ids" in payload, "case_ids should be present"
    assert 50 in payload["case_ids"], "Should include existing case ID"
@pytest.mark.api_handler
def test_upload_attachments_413_error(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path):
    """Test that 413 errors (file too large) are properly reported."""
    run_id = 1
    # The handler first resolves case_id -> test_id via get_tests.
    tests_payload = {
        "offset": 0,
        "limit": 250,
        "size": 1,
        "_links": {"next": None, "prev": None},
        "tests": [{"id": 1001, "case_id": 100}],
    }
    requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_payload)
    # A real file on disk so the upload path gets as far as the HTTP call.
    attachment = tmp_path / "large_attachment.jpg"
    attachment.write_text("test content")
    # Typical Apache error page returned when the body exceeds the size limit.
    entity_too_large_html = (
        '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">\n'
        "<html><head>\n<title>413 Request Entity Too Large</title>\n"
        "</head><body>\n<h1>Request Entity Too Large</h1>\n</body></html>\n"
    )
    requests_mock.post(
        create_url("add_attachment_to_result/2001"),
        status_code=413,
        text=entity_too_large_html,
    )
    report_results = [{"case_id": 100, "attachments": [str(attachment)]}]
    results = [{"id": 2001, "test_id": 1001}]
    # Must not raise despite the 413 response.
    api_request_handler.upload_attachments(report_results, results, run_id)
    # Verify the request was made (case-insensitive comparison)
    assert requests_mock.last_request.url.lower() == create_url("add_attachment_to_result/2001").lower()
@pytest.mark.api_handler
def test_upload_attachments_success(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path):
    """Test that successful attachment uploads work correctly."""
    run_id = 1
    # get_tests provides the case_id -> test_id mapping used by the handler.
    tests_payload = {
        "offset": 0,
        "limit": 250,
        "size": 1,
        "_links": {"next": None, "prev": None},
        "tests": [{"id": 1001, "case_id": 100}],
    }
    requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_payload)
    # Real file on disk for the multipart upload.
    attachment = tmp_path / "test_attachment.jpg"
    attachment.write_text("test content")
    # Happy path: the attachment endpoint accepts the upload.
    requests_mock.post(create_url("add_attachment_to_result/2001"), status_code=200, json={"attachment_id": 5001})
    report_results = [{"case_id": 100, "attachments": [str(attachment)]}]
    results = [{"id": 2001, "test_id": 1001}]
    api_request_handler.upload_attachments(report_results, results, run_id)
    # Verify the request was made (case-insensitive comparison)
    assert requests_mock.last_request.url.lower() == create_url("add_attachment_to_result/2001").lower()
@pytest.mark.api_handler
def test_upload_attachments_file_not_found(self, api_request_handler: ApiRequestHandler, requests_mock):
    """Test that missing attachment files are properly reported.

    A nonexistent attachment path must be handled gracefully: no exception
    escapes and no upload request is ever issued to TestRail.
    """
    run_id = 1
    # Mock get_tests endpoint
    mocked_tests_response = {
        "offset": 0,
        "limit": 250,
        "size": 1,
        "_links": {"next": None, "prev": None},
        "tests": [{"id": 1001, "case_id": 100}],
    }
    requests_mock.get(create_url(f"get_tests/{run_id}"), json=mocked_tests_response)
    # Prepare test data with non-existent file
    report_results = [{"case_id": 100, "attachments": ["/path/to/nonexistent/file.jpg"]}]
    results = [{"id": 2001, "test_id": 1001}]
    # Call upload_attachments - should not raise exception
    api_request_handler.upload_attachments(report_results, results, run_id)
    # Fix: the original test asserted nothing beyond "did not raise".
    # Verify no attachment upload was attempted for the missing file — any
    # POST to add_attachment_to_result would indicate the existence check
    # was skipped.
    assert not any(
        "add_attachment_to_result" in request.url for request in requests_mock.request_history
    ), "No attachment upload should be attempted for a missing file"

@@ -29,5 +29,3 @@ import pytest

api_request_handler = mocker.patch(
"trcli.api.project_based_client.ApiRequestHandler"
)
api_request_handler = mocker.patch("trcli.api.project_based_client.ApiRequestHandler")
api_request_handler.get_project_data.return_value = ProjectData(

@@ -38,3 +36,4 @@ project_id=environment.project_id, suite_mode=1, error_message=""

project_based_client = ProjectBasedClient(
environment=environment, suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id),
environment=environment,
suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id),
)

@@ -45,8 +44,4 @@ project_based_client.api_request_handler = api_request_handler

@pytest.mark.project_based_client
@pytest.mark.parametrize(
"timeout", [40, None], ids=["with_timeout", "without_timeout"]
)
def test_instantiate_api_client(
self, timeout, project_based_client_data_provider, mocker
):
@pytest.mark.parametrize("timeout", [40, None], ids=["with_timeout", "without_timeout"])
def test_instantiate_api_client(self, timeout, project_based_client_data_provider, mocker):
"""The purpose of this test is to check that APIClient was instantiated properly and credential fields

@@ -63,6 +58,4 @@ were set es expected."""

environment.timeout = timeout
timeout_expected_result = 30 if not timeout else timeout
project_based_client = ProjectBasedClient(
environment=environment, suite=junit_file_parser
)
timeout_expected_result = 60 if not timeout else timeout
project_based_client = ProjectBasedClient(environment=environment, suite=junit_file_parser)

@@ -72,12 +65,12 @@ api_client = project_based_client.instantiate_api_client()

assert (
api_client.username == environment.username
api_client.username == environment.username
), f"Expected username to be set to: {environment.username}, but got: {api_client.username} instead."
assert (
api_client.password == environment.password
api_client.password == environment.password
), f"Expected password to be set to: {environment.password}, but got: {api_client.password} instead."
assert (
api_client.api_key == environment.key
api_client.api_key == environment.key
), f"Expected api_key to be set to: {environment.key}, but got: {api_client.api_key} instead."
assert (
api_client.timeout == timeout_expected_result
api_client.timeout == timeout_expected_result
), f"Expected timeout to be set to: {timeout_expected_result}, but got: {api_client.timeout} instead."

@@ -95,6 +88,6 @@

project_based_client.resolve_project()
assert (
project_based_client.project.project_id == environment.project_id
), (f"Expected project_based_client.project to have {environment.project_id},"
f" but had {project_based_client.project.project_id}")
assert project_based_client.project.project_id == environment.project_id, (
f"Expected project_based_client.project to have {environment.project_id},"
f" but had {project_based_client.project.project_id}"
)

@@ -118,10 +111,6 @@ @pytest.mark.project_based_client

assert result_suite_id == suite_id, f"Expected suite_id: {suite_id} but got {result_suite_id} instead."
assert suite_added is False, f"Expected suite_added: {False} but got {suite_added} instead."
assert (
result_suite_id == suite_id
), f"Expected suite_id: {suite_id} but got {result_suite_id} instead."
assert (
suite_added is False
), f"Expected suite_added: {False} but got {suite_added} instead."
assert (
result_return_code == result_code
result_return_code == result_code
), f"Expected suite_id: {result_code} but got {result_return_code} instead."

@@ -136,10 +125,10 @@

def test_get_suite_id_multiple_suites_mode(
self,
user_response,
expected_suite_id,
expected_result_code,
expected_message,
suite_add_error,
project_based_client_data_provider,
mocker,
self,
user_response,
expected_suite_id,
expected_result_code,
expected_message,
suite_add_error,
project_based_client_data_provider,
mocker,
):

@@ -171,5 +160,3 @@ """The purpose of this test is to check that user will be prompted to add suite is one is missing

[{"suite_id": expected_suite_id, "name": suite_name}],
FAULT_MAPPING["error_while_adding_suite"].format(
error_message="Failed to add suite."
),
FAULT_MAPPING["error_while_adding_suite"].format(error_message="Failed to add suite."),
)

@@ -189,14 +176,10 @@ project_based_client.api_request_handler.suites_data_from_provider.suite_id = None

expected_elog_calls.append(
mocker.call(
FAULT_MAPPING["error_while_adding_suite"].format(
error_message="Failed to add suite."
)
)
mocker.call(FAULT_MAPPING["error_while_adding_suite"].format(error_message="Failed to add suite."))
)
assert (
expected_suite_id == result_suite_id
expected_suite_id == result_suite_id
), f"Expected suite_id: {expected_suite_id} but got {result_suite_id} instead."
assert (
expected_result_code == result_code
expected_result_code == result_code
), f"Expected suite_id: {expected_result_code} but got {result_code} instead."

@@ -210,5 +193,3 @@ environment.get_prompt_response_for_auto_creation.assert_called_with(

if user_response:
project_based_client.api_request_handler.add_suites.assert_called_with(
project_id=project_id
)
project_based_client.api_request_handler.add_suites.assert_called_with(project_id=project_id)
environment.log.assert_has_calls(expected_log_calls)

@@ -224,9 +205,9 @@ environment.elog.assert_has_calls(expected_elog_calls)

def test_get_suite_id_single_suite_mode(
self,
suite_ids,
error_message,
expected_suite_id,
expected_result_code,
project_based_client_data_provider,
mocker,
self,
suite_ids,
error_message,
expected_suite_id,
expected_result_code,
project_based_client_data_provider,
mocker,
):

@@ -253,6 +234,6 @@ """The purpose of this test is to check flow of get_suite_id_log_error function for single

assert (
result_suite_id == expected_suite_id
result_suite_id == expected_suite_id
), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead."
assert (
result_code == expected_result_code
result_code == expected_result_code
), f"Expected result code: {expected_result_code} but got {result_code} instead."

@@ -269,9 +250,9 @@ if error_message:

def test_get_suite_id_single_suite_mode_baselines(
self,
get_suite_ids_result,
expected_suite_id,
expected_result_code,
expected_error_message,
project_based_client_data_provider,
mocker,
self,
get_suite_ids_result,
expected_suite_id,
expected_result_code,
expected_error_message,
project_based_client_data_provider,
mocker,
):

@@ -288,5 +269,3 @@ """The purpose of this test is to check flow of get_suite_id_log_error function for single

project_based_client.api_request_handler.suites_data_from_provider.suite_id = None
project_based_client.api_request_handler.get_suite_ids.return_value = (
get_suite_ids_result
)
project_based_client.api_request_handler.get_suite_ids.return_value = get_suite_ids_result
expected_elog_calls = []

@@ -298,6 +277,6 @@ if expected_error_message:

assert (
result_suite_id == expected_suite_id
result_suite_id == expected_suite_id
), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead."
assert (
result_code == expected_result_code
result_code == expected_result_code
), f"Expected result code: {expected_result_code} but got {result_code} instead."

@@ -307,5 +286,3 @@ environment.elog.assert_has_calls(expected_elog_calls)

@pytest.mark.project_based_client
def test_get_suite_id_unknown_suite_mode(
self, project_based_client_data_provider, mocker
):
def test_get_suite_id_unknown_suite_mode(self, project_based_client_data_provider, mocker):
"""The purpose of this test is to check that get_suite_id will return -1 and print

@@ -322,14 +299,10 @@ proper message when unknown suite mode will be returned during execution."""

project_based_client.api_request_handler.suites_data_from_provider.suite_id = None
expected_elog_calls = [
mocker.call(
FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode)
)
]
expected_elog_calls = [mocker.call(FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode))]
result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode)
assert (
result_suite_id == expected_suite_id
result_suite_id == expected_suite_id
), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead."
assert (
result_code == expected_result_code
result_code == expected_result_code
), f"Expected result code: {expected_result_code} but got {result_code} instead."

@@ -354,9 +327,7 @@ environment.elog.assert_has_calls(expected_elog_calls)

assert (
result_code == expected_result_code
result_code == expected_result_code
), f"Expected to get {result_code} as result code, but got {expected_result_code} instead."
@pytest.mark.project_based_client
def test_check_suite_id_prints_error_message(
self, project_based_client_data_provider, mocker
):
def test_check_suite_id_prints_error_message(self, project_based_client_data_provider, mocker):
"""The purpose of this test is to check that proper message would be printed to the user

@@ -378,9 +349,7 @@ and program will quit when suite ID is not present in TestRail."""

result_code = project_based_client.check_suite_id(project_id=project_id)
expected_elog_calls = [
mocker.call(FAULT_MAPPING["missing_suite"].format(suite_id=suite_id))
]
expected_elog_calls = [mocker.call(FAULT_MAPPING["missing_suite"].format(suite_id=suite_id))]
environment.elog.assert_has_calls(expected_elog_calls)
assert (
result_code == expected_result_code
result_code == expected_result_code
), f"Expected to get {expected_result_code} as result code, but got {result_code} instead."

@@ -400,5 +369,3 @@

suite_id, suite_added = project_based_client.resolve_suite()
assert (
suite_id == 1
), f"Expected suite id 1 but got {suite_id} instead."
assert suite_id == 1, f"Expected suite id 1 but got {suite_id} instead."

@@ -420,8 +387,4 @@ def test_create_or_update_test_run_calls_add_run(self, project_based_client_data_provider):

project_based_client.api_request_handler.add_run.assert_called_once()
assert (
run_id == 1
), f"Expected run_id to be 1 but got {run_id} instead."
assert (
error_message == ""
), f"Expected error message to be None but got {error_message} instead."
assert run_id == 1, f"Expected run_id to be 1 but got {run_id} instead."
assert error_message == "", f"Expected error message to be None but got {error_message} instead."

@@ -443,8 +406,4 @@ def test_create_or_update_test_run_calls_update_run(self, project_based_client_data_provider):

api_request_handler.update_run.assert_called_once()
assert (
run_id == 1
), f"Expected run_id to be 1 but got {run_id} instead."
assert (
error_message == ""
), f"Expected error message to be None but got {error_message} instead."
assert run_id == 1, f"Expected run_id to be 1 but got {run_id} instead."
assert error_message == "", f"Expected error message to be None but got {error_message} instead."

@@ -460,5 +419,5 @@ def test_get_project_id(self, project_based_client_data_provider):

assert (
project_based_client._get_project_id() == environment.project_id
), (f"Expected to get {environment.project_id} from project_based_client.get_project_id but got"
f" {project_based_client._get_project_id()} instead.")
assert project_based_client._get_project_id() == environment.project_id, (
f"Expected to get {environment.project_id} from project_based_client.get_project_id but got"
f" {project_based_client._get_project_id()} instead."
)
Metadata-Version: 2.4
Name: trcli
Version: 1.12.4
Version: 1.12.5
License-File: LICENSE.md

@@ -5,0 +5,0 @@ Requires-Dist: click<8.2.2,>=8.1.0

@@ -8,2 +8,3 @@ LICENSE.md

tests/test_api_request_handler.py
tests/test_api_request_handler_case_matcher.py
tests/test_api_request_handler_labels.py

@@ -10,0 +11,0 @@ tests/test_api_request_handler_references.py

@@ -1,1 +0,1 @@

__version__ = "1.12.4"
__version__ = "1.12.5"

@@ -42,3 +42,3 @@ import json

SUFFIX_API_V2_VERSION = f"{PREFIX}{VERSION}"
RETRY_ON = [429, 500, 502]
RETRY_ON = [429, 500, 502, 503, 504] # Added 503 Service Unavailable and 504 Gateway Timeout
USER_AGENT = "TRCLI"

@@ -180,2 +180,8 @@

sleep(retry_time)
elif status_code in [500, 502, 503, 504] and i < self.retries:
backoff_time = min(2**i, 30) # Exponential backoff capped at 30 seconds
self.logging_function(
f"Server error {status_code}, retrying in {backoff_time}s (attempt {i+1}/{self.retries})..."
)
sleep(backoff_time)
try:

@@ -182,0 +188,0 @@ # workaround for buggy legacy TR server version response

@@ -80,2 +80,3 @@ import os

self.proxy_user = None
self.parallel_pagination = None

@@ -94,3 +95,3 @@ @property

@property
@property
def result_fields(self):

@@ -207,14 +208,7 @@ return self._result_fields

self.params_from_config.update(page_content)
if (
self.params_from_config.get("config") is not None
and self.default_config_file
):
if self.params_from_config.get("config") is not None and self.default_config_file:
self.default_config_file = False
self.parse_params_from_config_file(
self.params_from_config["config"]
)
self.parse_params_from_config_file(self.params_from_config["config"])
except (yaml.YAMLError, ValueError, TypeError) as e:
self.elog(
FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path)
)
self.elog(FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path))
self.elog(f"Error details:\n{e}")

@@ -286,6 +280,9 @@ if not self.default_config_file:

@click.option("-p", "--password", type=click.STRING, metavar="", help="Password.")
@click.option("-k", "--key", metavar="", help="API key used for authenticating with TestRail. This must be used in conjunction with --username. If provided, --password is not required.")
@click.option(
"-v", "--verbose", is_flag=True, help="Output all API calls and their results."
"-k",
"--key",
metavar="",
help="API key used for authenticating with TestRail. This must be used in conjunction with --username. If provided, --password is not required.",
)
@click.option("-v", "--verbose", is_flag=True, help="Output all API calls and their results.")
@click.option("--verify", is_flag=True, help="Verify the data was added correctly.")

@@ -335,18 +332,10 @@ @click.option("--insecure", is_flag=True, help="Allow insecure requests.")

)
@click.option("--proxy", metavar="", help="Proxy address and port (e.g., http://proxy.example.com:8080).")
@click.option("--proxy-user", metavar="", help="Proxy username and password in the format 'username:password'.")
@click.option(
"--proxy",
metavar="",
help="Proxy address and port (e.g., http://proxy.example.com:8080)."
"--noproxy", metavar="", help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)."
)
@click.option(
"--proxy-user",
metavar="",
help="Proxy username and password in the format 'username:password'."
"--parallel-pagination", is_flag=True, help="Enable parallel pagination for faster case fetching (experimental)."
)
@click.option(
"--noproxy",
metavar="",
help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)."
)
def cli(environment: Environment, context: click.core.Context, *args, **kwargs):

@@ -362,4 +351,4 @@ """TestRail CLI"""

exit(2)
environment.parse_config_file(context)
environment.set_parameters(context)

@@ -8,5 +8,3 @@ import trcli

PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING = dict(
missing_title="Please give your Test Run a title using the --title argument."
)
PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING = dict(missing_title="Please give your Test Run a title using the --title argument.")

@@ -62,14 +60,17 @@ ADD_RUN_FAULT_MAPPING = dict(

f" - Is Active: True",
proxy_connection_error= "Failed to connect to the proxy server. Please check the proxy settings and ensure the server is available.",
proxy_authentication_failed= "Proxy authentication failed for proxy. Please verify the username and password.",
proxy_timeout= "The connection to the proxy server timed out. Please try again later or check the proxy server's availability.",
proxy_bypass_error= "Failed to bypass the proxy for host. Please check the settings.",
proxy_invalid_configuration= "The provided proxy configuration is invalid. Please check the proxy URL and format.",
ssl_error_on_proxy= "SSL error encountered while using the HTTPS proxy. Please check the proxy's SSL certificate.",
no_proxy_match_error= "The host {host} does not match any NO_PROXY rules. Ensure the correct domains or IP addresses are specified for bypassing the proxy.",
no_suites_found= "The project {project_id} does not have any suites.",
invalid_json_response= "Received invalid response from TestRail server (HTTP {status_code}). "
proxy_connection_error="Failed to connect to the proxy server. Please check the proxy settings and ensure the server is available.",
proxy_authentication_failed="Proxy authentication failed for proxy. Please verify the username and password.",
proxy_timeout="The connection to the proxy server timed out. Please try again later or check the proxy server's availability.",
proxy_bypass_error="Failed to bypass the proxy for host. Please check the settings.",
proxy_invalid_configuration="The provided proxy configuration is invalid. Please check the proxy URL and format.",
ssl_error_on_proxy="SSL error encountered while using the HTTPS proxy. Please check the proxy's SSL certificate.",
no_proxy_match_error="The host {host} does not match any NO_PROXY rules. Ensure the correct domains or IP addresses are specified for bypassing the proxy.",
no_suites_found="The project {project_id} does not have any suites.",
invalid_json_response="Received invalid response from TestRail server (HTTP {status_code}). "
"Please verify your TestRail host URL (-h) is correct and points to a valid TestRail instance. "
"Response preview: {response_preview}",
invalid_api_response= "Invalid response from TestRail API: {error_details}"
invalid_api_response="Invalid response from TestRail API: {error_details}",
attachment_upload_failed="Failed to upload attachment '{file_path}' for case {case_id}: {error_message}",
attachment_too_large="Failed to upload attachment '{file_name}' for case {case_id}: "
"File size exceeds TestRail's maximum limit of 250 MB. Please reduce the file size or exclude this attachment.",
)

@@ -133,4 +134,5 @@

OLD_SYSTEM_NAME_AUTOMATION_ID = "custom_automation_id"
# field name mismatch on testrail side (can not reproduce in cloud version TestRail v9.1.2)
UPDATED_SYSTEM_NAME_AUTOMATION_ID = "custom_case_automation_id"
UPDATED_SYSTEM_NAME_AUTOMATION_ID = "custom_case_automation_id"
MAX_WORKERS_ADD_CASE = 10
MAX_WORKERS_ADD_RESULTS = 10
DEFAULT_API_CALL_RETRIES = 3
DEFAULT_API_CALL_TIMEOUT = 30
MAX_WORKERS_ADD_RESULTS = 20
DEFAULT_API_CALL_RETRIES = 5
DEFAULT_API_CALL_TIMEOUT = 60
DEFAULT_BATCH_SIZE = 50
ALLOW_ELAPSED_MS = False
ENABLE_PARALLEL_PAGINATION = False
MAX_WORKERS_PARALLEL_PAGINATION = 10

Sorry, the diff of this file is too big to display