Move dependencies into 'external' directory and update git submodules

This commit is contained in:
Vyn 2024-08-31 09:42:46 +02:00
parent 63bf267a22
commit cbaa1b58d8
608 changed files with 198659 additions and 199 deletions

View file

@ -0,0 +1,75 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
from typing import List, Tuple
import os
import subprocess
def configure_and_build(source_path: str, project_path: str, options: List[Tuple[str, str]]):
    """Configure a CMake build directory and build it.

    source_path: directory containing the top-level CMakeLists.txt.
    project_path: build directory to create/use.
    options: (name, value) pairs passed to CMake as -Dname=value.

    Raises subprocess.CalledProcessError if either the configure or the
    build step fails (after printing its captured output).
    """
    base_configure_cmd = ['cmake',
                          '-B{}'.format(project_path),
                          '-H{}'.format(source_path),
                          '-DCMAKE_BUILD_TYPE=Debug',
                          '-DCATCH_DEVELOPMENT_BUILD=ON']
    for option, value in options:
        base_configure_cmd.append('-D{}={}'.format(option, value))
    try:
        subprocess.run(base_configure_cmd,
                       stdout = subprocess.PIPE,
                       stderr = subprocess.STDOUT,
                       check = True)
    # fix: catch CalledProcessError instead of the SubprocessError base
    # class -- only CalledProcessError guarantees the returncode/output
    # attributes used below (e.g. TimeoutExpired has no returncode).
    except subprocess.CalledProcessError as ex:
        print("Could not configure build to '{}' from '{}'".format(project_path, source_path))
        print("Return code: {}".format(ex.returncode))
        print("output: {}".format(ex.output))
        raise
    print('Configuring {} finished'.format(project_path))

    build_cmd = ['cmake',
                 '--build', '{}'.format(project_path),
                 # For now we assume that we only need Debug config
                 '--config', 'Debug']
    try:
        subprocess.run(build_cmd,
                       stdout = subprocess.PIPE,
                       stderr = subprocess.STDOUT,
                       check = True)
    except subprocess.CalledProcessError as ex:
        print("Could not build project in '{}'".format(project_path))
        print("Return code: {}".format(ex.returncode))
        print("output: {}".format(ex.output))
        raise
    print('Building {} finished'.format(project_path))
def run_and_return_output(base_path: str, binary_name: str, other_options: List[str]) -> Tuple[str, str]:
    """Run a built binary and return its (stdout, stderr) as text.

    Raises subprocess.CalledProcessError on a non-zero exit, after
    printing the command, its args, and the captured output.
    """
    # For now we assume that Windows builds are done using MSBuild under
    # Debug configuration. This means that we need to add "Debug" folder
    # to the path when constructing it. On Linux, we don't add anything.
    config_path = "Debug" if os.name == 'nt' else ""
    full_path = os.path.join(base_path, config_path, binary_name)

    base_cmd = [full_path]
    base_cmd.extend(other_options)
    try:
        ret = subprocess.run(base_cmd,
                             stdout = subprocess.PIPE,
                             stderr = subprocess.PIPE,
                             check = True,
                             universal_newlines = True)
    # fix: CalledProcessError (not the SubprocessError base class) is the
    # exception that carries returncode/stdout/stderr.
    except subprocess.CalledProcessError as ex:
        print('Could not run "{}"'.format(base_cmd))
        print('Args: "{}"'.format(other_options))
        print('Return code: {}'.format(ex.returncode))
        print('stdout: {}'.format(ex.stdout))
        # fix: this line printed ex.stdout under a "stderr" label
        print('stderr: {}'.format(ex.stderr))
        raise

    return (ret.stdout, ret.stderr)

View file

@ -0,0 +1,22 @@
cmake_minimum_required(VERSION 3.10)

# Standalone project used to exercise catch_discover_tests / CTest
# registration; driven externally (CATCH2_PATH is passed on the command line).
project(discover-tests-test
  LANGUAGES CXX
)

add_executable(tests
  register-tests.cpp
)

# Build Catch2 itself from the path the driving script supplies.
add_subdirectory(${CATCH2_PATH} catch2-build)
target_link_libraries(tests PRIVATE Catch2::Catch2WithMain)

enable_testing()
include(Catch)

set(extra_args)
# DL_PATHS is only accepted by catch_discover_tests on CMake 3.27+.
if (CMAKE_VERSION GREATER_EQUAL 3.27)
  list(APPEND extra_args
    DL_PATHS "${CMAKE_CURRENT_LIST_DIR};${CMAKE_CURRENT_LIST_DIR}/.."
  )
endif ()
catch_discover_tests(tests ${extra_args})

View file

@ -0,0 +1,153 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
import os
import subprocess
import sys
import re
import json
# Raw string: '\d' in a plain string literal is an invalid escape sequence
# (DeprecationWarning, and a SyntaxWarning from Python 3.12 on).
cmake_version_regex = re.compile(r'cmake version (\d+)\.(\d+)\.(\d+)')
def get_cmake_version():
    """Return the installed CMake version as an (major, minor, patch) int tuple."""
    proc = subprocess.run(['cmake', '--version'],
                          capture_output = True,
                          check = True,
                          text = True)
    match = cmake_version_regex.match(proc.stdout)
    if not match:
        print('Could not find cmake version in output')
        print(f"output: '{proc.stdout}'")
        exit(4)
    return tuple(int(match.group(idx)) for idx in (1, 2, 3))
def build_project(sources_dir, output_base_path, catch2_path):
    """Configure and build the test project; return its build directory."""
    build_dir = os.path.join(output_base_path, 'ctest-registration-test')
    # Two CMake invocations: configure, then build.
    steps = [
        ['cmake',
         '-B', build_dir,
         '-S', sources_dir,
         f'-DCATCH2_PATH={catch2_path}',
         '-DCMAKE_BUILD_TYPE=Debug'],
        ['cmake',
         '--build', build_dir,
         '--config', 'Debug'],
    ]
    try:
        for step in steps:
            subprocess.run(step,
                           capture_output = True,
                           check = True,
                           text = True)
    except subprocess.CalledProcessError as err:
        print('Error when building the test project')
        print(f'cmd: {err.cmd}')
        print(f'stderr: {err.stderr}')
        print(f'stdout: {err.stdout}')
        exit(3)

    return build_dir
def get_test_names(build_path):
    """Ask the built Catch2 binary for its test names via --list-tests."""
    # For now we assume that Windows builds are done using MSBuild under
    # Debug configuration. This means that we need to add "Debug" folder
    # to the path when constructing it. On Linux, we don't add anything.
    config_path = "Debug" if os.name == 'nt' else ""
    binary_path = os.path.join(build_path, config_path, 'tests')

    listing = subprocess.run([binary_path, '--reporter', 'xml', '--list-tests'],
                             capture_output = True,
                             check = True,
                             text = True)

    import xml.etree.ElementTree as ET
    tree = ET.fromstring(listing.stdout)
    return [name_elem.text for name_elem in tree.findall('TestCase/Name')]
def get_ctest_listing(build_path):
    """Return ctest's json-v1 test listing for *build_path* as a string.

    fix: the original os.chdir() was not restored when the subprocess
    raised, leaving the caller in the wrong working directory; running
    with cwd= keeps the process CWD untouched in every case.
    """
    cmd = ['ctest', '-C', 'debug', '--show-only=json-v1']
    result = subprocess.run(cmd,
                            capture_output = True,
                            check = True,
                            text = True,
                            cwd = build_path)
    return result.stdout
def extract_tests_from_ctest(ctest_output):
    """Return the Catch2 test-name filters registered with CTest.

    ctest_output: the json-v1 string produced by `ctest --show-only=json-v1`.
    """
    ctest_response = json.loads(ctest_output)
    tests = ctest_response['tests']
    test_names = []
    for test in tests:
        test_command = test['command']
        # First part of the command is the binary, second is the filter.
        # If there are less, registration has failed. If there are more,
        # registration has changed and the script needs updating.
        assert len(test_command) == 2
        # fix: dropped a dead `test_name = test_command[1]` assignment
        test_names.append(test_command[1])
    return test_names
def check_DL_PATHS(ctest_output):
    """Assert that each ENVIRONMENT_MODIFICATION property holds both DL_PATHS entries."""
    parsed = json.loads(ctest_output)
    for test in parsed['tests']:
        env_mod_values = [prop['value'] for prop in test['properties']
                          if prop['name'] == 'ENVIRONMENT_MODIFICATION']
        for value in env_mod_values:
            assert len(value) == 2, f"The test provides 2 arguments to DL_PATHS, but instead found {len(value)}"
def escape_catch2_test_name(name):
    """Backslash-escape the characters Catch2's test-spec parser treats specially."""
    escape_table = {ord(special): '\\' + special
                    for special in ('\\', ',', '[', ']')}
    return name.translate(escape_table)
if __name__ == '__main__':
    # Usage: <script> path-to-catch2-cml output-path
    if len(sys.argv) != 3:
        print(f'Usage: {sys.argv[0]} path-to-catch2-cml output-path')
        exit(2)
    catch2_path = sys.argv[1]
    output_base_path = sys.argv[2]
    # The test project's sources live next to this script.
    sources_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    build_path = build_project(sources_dir, output_base_path, catch2_path)

    # Names reported by the Catch2 binary itself, escaped the same way
    # they should appear as CTest filter arguments.
    catch_test_names = [escape_catch2_test_name(name) for name in get_test_names(build_path)]
    ctest_output = get_ctest_listing(build_path)
    ctest_test_names = extract_tests_from_ctest(ctest_output)

    # The two listings must agree in both directions.
    mismatched = 0
    for catch_test in catch_test_names:
        if catch_test not in ctest_test_names:
            print(f"Catch2 test '{catch_test}' not found in CTest")
            mismatched += 1
    for ctest_test in ctest_test_names:
        if ctest_test not in catch_test_names:
            print(f"CTest test '{ctest_test}' not found in Catch2")
            mismatched += 1

    if mismatched:
        print(f"Found {mismatched} mismatched tests catch test names and ctest test commands!")
        exit(1)

    # DL_PATHS is only emitted when the local CMake supports it (3.27+).
    cmake_version = get_cmake_version()
    if cmake_version >= (3, 27):
        check_DL_PATHS(ctest_output)

View file

@ -0,0 +1,16 @@
// Copyright Catch2 Authors
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
// SPDX-License-Identifier: BSL-1.0
#include <catch2/catch_test_macros.hpp>

// Test cases whose names and tags are deliberately awkward, to exercise
// the escaping done when they are registered with CTest.

// Regression: a name full of characters special to CTest/Catch2 test specs
// (backslashes, brackets, quotes, semicolons).
TEST_CASE("@Script[C:\\EPM1A]=x;\"SCALA_ZERO:\"", "[script regressions]"){}

// A plain, short name.
TEST_CASE("Some test") {}

// A very long, multi-line name.
TEST_CASE( "Let's have a test case with a long name. Longer. No, even longer. "
           "Really looooooooooooong. Even longer than that. Multiple lines "
           "worth of test name. Yep, like this." ) {}

// Tags containing ';' and ',' characters.
TEST_CASE( "And now a test case with weird tags.", "[tl;dr][tl;dw][foo,bar]" ) {}

View file

@ -0,0 +1,104 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
import os
import re
import sys
import xml.etree.ElementTree as ET
import subprocess
"""
Test that Catch2 recognizes `XML_OUTPUT_FILE` env variable and creates
a junit reporter that writes to the provided path.
Requires 2 arguments, path to Catch2 binary configured with
`CATCH_CONFIG_BAZEL_SUPPORT`, and the output directory for the output file.
"""
# Validate CLI arguments: the binary under test and a writable output dir.
if len(sys.argv) != 3:
    print("Wrong number of arguments: {}".format(len(sys.argv)))
    print("Usage: {} test-bin-path output-dir".format(sys.argv[0]))
    exit(1)

bin_path = os.path.abspath(sys.argv[1])
output_dir = os.path.abspath(sys.argv[2])
xml_out_path = os.path.join(output_dir, '{}.xml'.format(os.path.basename(bin_path)))

# Ensure no file exists from previous test runs
if os.path.isfile(xml_out_path):
    os.remove(xml_out_path)

print('bin path:', bin_path)
print('xml out path:', xml_out_path)

# Point the Bazel-style env var at our output path; the binary should pick
# it up and write a JUnit report there.
env = os.environ.copy()
env["XML_OUTPUT_FILE"] = xml_out_path

test_passing = True
try:
    ret = subprocess.run(
        bin_path,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=True,
        universal_newlines=True,
        env=env
    )
    stdout = ret.stdout
except subprocess.CalledProcessError as ex:
    # fix: catch CalledProcessError rather than the SubprocessError base
    # class -- only CalledProcessError guarantees returncode/stdout/stderr.
    if ex.returncode == 1:
        # The test cases are allowed to fail.
        test_passing = False
        stdout = ex.stdout
    else:
        print('Could not run "{}"'.format(bin_path))
        print("Return code: {}".format(ex.returncode))
        print("stdout: {}".format(ex.stdout))
        print("stderr: {}".format(ex.stderr))
        raise

# Check for valid XML output
try:
    tree = ET.parse(xml_out_path)
except ET.ParseError as ex:
    print("Invalid XML: '{}'".format(ex))
    raise
except FileNotFoundError as ex:
    print("Could not find '{}'".format(xml_out_path))
    raise

bin_name = os.path.basename(bin_path)
# Check for matching testsuite
# fix: compare against None -- an Element with no children is falsy, so the
# original `if not tree.find(...)` could report a present testsuite missing.
if tree.find('.//testsuite[@name="{}"]'.format(bin_name)) is None:
    print("Could not find '{}' testsuite".format(bin_name))
    exit(2)

# Check that we haven't disabled the default reporter
summary_test_cases = re.findall(r'test cases: \d* \| \d* passed \| \d* failed', stdout)
if len(summary_test_cases) == 0:
    print("Could not find test summary in {}".format(stdout))
    exit(2)

# Cross-check the console summary against the JUnit XML contents.
total, passed, failed = [int(s) for s in summary_test_cases[0].split() if s.isdigit()]
if failed == 0 and not test_passing:
    print("Expected at least 1 test failure!")
    exit(2)
if len(tree.findall('.//testcase')) != total:
    print("Unexpected number of test cases!")
    exit(2)
if len(tree.findall('.//failure')) != failed:
    print("Unexpected number of test failures!")
    exit(2)
if (passed + failed) != total:
    print("Something has gone very wrong, ({} + {}) != {}".format(passed, failed, total))
    exit(2)

View file

@ -0,0 +1,75 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
import os
import re
import sys
import subprocess
"""
Test that Catch2 recognizes the three sharding-related environment variables
and responds accordingly (running only the selected shard, creating the
response file, etc).
Requires 2 arguments, path to Catch2 binary to run and the output directory
for the output file.
"""
# Validate CLI arguments: the binary under test and a writable output dir.
if len(sys.argv) != 3:
    print("Wrong number of arguments: {}".format(len(sys.argv)))
    print("Usage: {} test-bin-path output-dir".format(sys.argv[0]))
    exit(1)

bin_path = os.path.abspath(sys.argv[1])
output_dir = os.path.abspath(sys.argv[2])
info_file_path = os.path.join(output_dir, '{}.shard-support'.format(os.path.basename(bin_path)))

# Ensure no file exists from previous test runs
if os.path.isfile(info_file_path):
    os.remove(info_file_path)

print('bin path:', bin_path)
print('shard confirmation path:', info_file_path)

env = os.environ.copy()
# We will run only one shard, and it should have the passing test.
# This simplifies our work a bit, and if we have any regression in this
# functionality we can make more complex tests later.
env["BAZEL_TEST"] = "1"
env["TEST_SHARD_INDEX"] = "0"
env["TEST_TOTAL_SHARDS"] = "2"
env["TEST_SHARD_STATUS_FILE"] = info_file_path

try:
    ret = subprocess.run(
        bin_path,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=True,
        universal_newlines=True,
        env=env
    )
    stdout = ret.stdout
# fix: catch CalledProcessError rather than the SubprocessError base class
# -- only CalledProcessError guarantees returncode/stdout/stderr.
except subprocess.CalledProcessError as ex:
    print('Could not run "{}"'.format(bin_path))
    print("Return code: {}".format(ex.returncode))
    print("stdout: {}".format(ex.stdout))
    print("stderr: {}".format(ex.stderr))
    raise

# fix: idiomatic 'not in' instead of 'not ... in'
if "All tests passed (1 assertion in 1 test case)" not in stdout:
    print("Did not find expected output in stdout.")
    print("stdout:\n{}".format(stdout))
    exit(1)

# The shard-status file must have been created by the binary.
if not os.path.isfile(info_file_path):
    print("Catch2 did not create expected file at path '{}'".format(info_file_path))
    exit(2)

View file

@ -0,0 +1,50 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
from ConfigureTestsCommon import configure_and_build, run_and_return_output
import os
import re
import sys
"""
Tests the CMake configure option for CATCH_CONFIG_DEFAULT_REPORTER
Requires 2 arguments, path folder where the Catch2's main CMakeLists.txt
exists, and path to where the output files should be stored.
"""
# Validate CLI: the Catch2 source dir and a base output dir are required.
if len(sys.argv) != 3:
    print('Wrong number of arguments: {}'.format(len(sys.argv)))
    print('Usage: {} catch2-top-level-dir base-build-output-dir'.format(sys.argv[0]))
    exit(1)

source_dir = os.path.abspath(sys.argv[1])
build_dir = os.path.join(os.path.abspath(sys.argv[2]), 'CMakeConfigTests', 'DefaultReporter')
output_file = f"{build_dir}/foo.xml"

# We need to escape backslashes in Windows paths, because otherwise they
# are interpreted as escape characters in strings, and cause compilation
# error.
escaped_output_file = output_file.replace('\\', '\\\\')

# Build with the default reporter set to write XML to our output file.
configure_and_build(source_dir,
                    build_dir,
                    [("CATCH_CONFIG_DEFAULT_REPORTER", f"xml::out={escaped_output_file}")])

stdout, _ = run_and_return_output(os.path.join(build_dir, 'tests'), 'SelfTest', ['[approx][custom]'])

if not os.path.exists(output_file):
    print(f'Did not find the {output_file} file')
    exit(2)

# A complete XML run report must end with the closing root tag.
closing_tag = '</Catch2TestRun>'
with open(output_file, 'r', encoding='utf-8') as report:
    report_text = report.read()
if closing_tag not in report_text:
    print(f"Could not find '{closing_tag}' in the file")
    exit(3)

View file

@ -0,0 +1,48 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
from ConfigureTestsCommon import configure_and_build, run_and_return_output
import os
import re
import sys
"""
Tests the CMake configure option for CATCH_CONFIG_DISABLE
Requires 2 arguments, path folder where the Catch2's main CMakeLists.txt
exists, and path to where the output files should be stored.
"""
# Validate CLI: the Catch2 source dir and a base output dir are required.
if len(sys.argv) != 3:
    print('Wrong number of arguments: {}'.format(len(sys.argv)))
    print('Usage: {} catch2-top-level-dir base-build-output-dir'.format(sys.argv[0]))
    exit(1)

catch2_source_path = os.path.abspath(sys.argv[1])
build_dir_path = os.path.join(os.path.abspath(sys.argv[2]), 'CMakeConfigTests', 'Disable')

configure_and_build(catch2_source_path,
                    build_dir_path,
                    [("CATCH_CONFIG_DISABLE", "ON"),
                     # We need to turn off WERROR, because the compilers
                     # can see that the various variables inside test cases
                     # are set but unused.
                     ("CATCH_ENABLE_WERROR", "OFF")])

stdout, _ = run_and_return_output(os.path.join(build_dir_path, 'tests'),
                                  'SelfTest',
                                  ['--allow-running-no-tests'])

# With CATCH_CONFIG_DISABLE the TEST_CASE macros compile away, so the
# binary must report that nothing ran.
summary_line = 'No tests ran'
# fix: idiomatic 'not in' instead of 'not ... in'
if summary_line not in stdout:
    print("Could not find '{}' in the stdout".format(summary_line))
    print('stdout: "{}"'.format(stdout))
    exit(2)

View file

@ -0,0 +1,44 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
from ConfigureTestsCommon import configure_and_build, run_and_return_output
import os
import re
import sys
"""
Tests the CMake configure option for CATCH_CONFIG_DISABLE_STRINGIFICATION
Requires 2 arguments, path folder where the Catch2's main CMakeLists.txt
exists, and path to where the output files should be stored.
"""
# Validate CLI: the Catch2 source dir and a base output dir are required.
if len(sys.argv) != 3:
    print('Wrong number of arguments: {}'.format(len(sys.argv)))
    print('Usage: {} catch2-top-level-dir base-build-output-dir'.format(sys.argv[0]))
    exit(1)

catch2_source_path = os.path.abspath(sys.argv[1])
build_dir_path = os.path.join(os.path.abspath(sys.argv[2]), 'CMakeConfigTests', 'DisableStringification')

configure_and_build(catch2_source_path,
                    build_dir_path,
                    [("CATCH_CONFIG_DISABLE_STRINGIFICATION", "ON")])

stdout, _ = run_and_return_output(os.path.join(build_dir_path, 'tests'),
                                  'SelfTest',
                                  ['-s', '[approx][custom]'])

# With stringification disabled the expansion text is replaced by this marker.
required_output = 'Disabled by CATCH_CONFIG_DISABLE_STRINGIFICATION'
# fix: idiomatic 'not in' instead of 'not ... in'
if required_output not in stdout:
    print("Could not find '{}' in the stdout".format(required_output))
    print('stdout: "{}"'.format(stdout))
    exit(2)

View file

@ -0,0 +1,49 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
from ConfigureTestsCommon import configure_and_build, run_and_return_output
import os
import re
import sys
"""
Tests the CMake configure option for CATCH_CONFIG_EXPERIMENTAL_REDIRECT
Requires 2 arguments, path folder where the Catch2's main CMakeLists.txt
exists, and path to where the output files should be stored.
"""
# Validate CLI: the Catch2 source dir and a base output dir are required.
if len(sys.argv) != 3:
    print('Wrong number of arguments: {}'.format(len(sys.argv)))
    print('Usage: {} catch2-top-level-dir base-build-output-dir'.format(sys.argv[0]))
    exit(1)

catch2_source_path = os.path.abspath(sys.argv[1])
build_dir_path = os.path.join(os.path.abspath(sys.argv[2]), 'CMakeConfigTests', 'ExperimentalRedirect')

configure_and_build(catch2_source_path,
                    build_dir_path,
                    [("CATCH_CONFIG_EXPERIMENTAL_REDIRECT", "ON")])

stdout, _ = run_and_return_output(os.path.join(build_dir_path, 'tests'),
                                  'SelfTest',
                                  ['-r', 'xml', '"has printf"'])

# The print from printf must be within the XML's reporter stdout tag.
required_output = '''\
<StdOut>
loose text artifact
</StdOut>
'''
# fix: idiomatic 'not in' instead of 'not ... in'
if required_output not in stdout:
    print("Could not find '{}' in the stdout".format(required_output))
    print('stdout: "{}"'.format(stdout))
    exit(2)

View file

@ -0,0 +1,79 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
"""
This test script verifies that the testCasePartial{Starting,Ended} reporter
events fire properly. This is done by calling a test binary compiled with
reporter that reports specifically testCase* events, and verifying the
outputs match what we expect.
"""
import subprocess
import sys
# Expected event trace for the section-based test case: one
# TestCaseStarting/Ended pair bracketing four partial runs (#0..#3).
expected_section_output = '''\
TestCaseStarting: section
TestCaseStartingPartial: section#0
TestCasePartialEnded: section#0
TestCaseStartingPartial: section#1
TestCasePartialEnded: section#1
TestCaseStartingPartial: section#2
TestCasePartialEnded: section#2
TestCaseStartingPartial: section#3
TestCasePartialEnded: section#3
TestCaseEnded: section
'''

# Expected event trace for the generator-based test case; same shape as above.
expected_generator_output = '''\
TestCaseStarting: generator
TestCaseStartingPartial: generator#0
TestCasePartialEnded: generator#0
TestCaseStartingPartial: generator#1
TestCasePartialEnded: generator#1
TestCaseStartingPartial: generator#2
TestCasePartialEnded: generator#2
TestCaseStartingPartial: generator#3
TestCasePartialEnded: generator#3
TestCaseEnded: generator
'''
from typing import List
# fix: the return annotation was List[str], but the function returns the raw
# stdout string (ret.stdout).
def get_test_output(test_exe: str, sections: bool) -> str:
    """Run *test_exe* with the 'partial' reporter and return its stdout.

    sections=True selects the section-based test case, otherwise the
    generator-based one.
    """
    cmd = [test_exe, '--reporter', 'partial']
    if sections:
        cmd.append('section')
    else:
        cmd.append('generator')

    ret = subprocess.run(cmd,
                         stdout = subprocess.PIPE,
                         stderr = subprocess.PIPE,
                         timeout = 10,
                         check = True,
                         universal_newlines = True)

    return ret.stdout
def main():
    """Compare section and generator event traces against the expected ones."""
    test_exe, = sys.argv[1:]
    checks = (
        ('Sections', True, expected_section_output),
        ('Generators', False, expected_generator_output),
    )
    for label, use_sections, expected in checks:
        actual = get_test_output(test_exe, sections = use_sections)
        assert actual == expected, (
            '{}\nActual:\n{}\nExpected:\n{}\n'.format(label, actual, expected))

if __name__ == '__main__':
    sys.exit(main())

View file

@ -0,0 +1,77 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
"""
This test script verifies that the random ordering of tests inside
Catch2 is invariant in regards to subsetting. This is done by running
the binary 3 times, once with all tests selected, and twice with smaller
subsets of tests selected, and verifying that the selected tests are in
the same relative order.
"""
import subprocess
import sys
import random
import xml.etree.ElementTree as ET
def none_to_empty_str(e):
    """Map None to "" so optional XML text fields compare uniformly."""
    if e is not None:
        assert type(e) is str
        return e
    return ""
def list_tests(self_test_exe, tags, rng_seed):
    """List (name, tags, classname) tuples in the binary's randomized order.

    tags: list of tag names; combined into a single '[t]~[.]' filter spec.
    rng_seed: value passed to --rng-seed so runs are reproducible.
    """
    cmd = [self_test_exe, '--reporter', 'xml', '--list-tests', '--order', 'rand',
           '--rng-seed', str(rng_seed)]
    tags_arg = ','.join('[{}]~[.]'.format(t) for t in tags)
    if tags_arg:
        cmd.append(tags_arg)
    process = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if stderr:
        # fix: the original concatenated process.stderr (the pipe object,
        # already drained by communicate()) to a str, which itself raised
        # TypeError; use the captured bytes, decoded.
        raise RuntimeError("Unexpected error output:\n" + stderr.decode())

    root = ET.fromstring(stdout)
    result = [(none_to_empty_str(tc.find('Name').text),
               none_to_empty_str(tc.find('Tags').text),
               none_to_empty_str(tc.find('ClassName').text)) for tc in root.findall('./TestCase')]

    if len(result) < 2:
        raise RuntimeError("Unexpectedly few tests listed (got {})".format(
            len(result)))
    return result
def check_is_sublist_of(shorter, longer):
    """Assert *shorter*'s items keep the relative order they have in *longer*.

    Requires *longer* to be strictly larger and free of duplicates.
    """
    assert len(shorter) < len(longer)
    assert len(set(longer)) == len(longer)

    position_of = {item: idx for idx, item in enumerate(longer)}
    for earlier, later in zip(shorter, shorter[1:]):
        assert position_of[earlier] < position_of[later], (
            '{} comes before {} in longer list.\n'
            'Longer: {}\nShorter: {}'.format(later, earlier, longer, shorter))
def main():
    """Verify that tag-restricted listings are order-preserving sublists."""
    self_test_exe, = sys.argv[1:]
    # We want a random seed for the test, but want to avoid 0,
    # because it has special meaning
    seed = random.randint(1, 2 ** 32 - 1)

    one_tag = list_tests(self_test_exe, ['generators'], seed)
    two_tags = list_tests(self_test_exe, ['generators', 'matchers'], seed)
    everything = list_tests(self_test_exe, [], seed)

    # First, verify that restricting to a subset yields the same order
    check_is_sublist_of(two_tags, everything)
    check_is_sublist_of(one_tag, two_tags)

if __name__ == '__main__':
    sys.exit(main())

View file

@ -0,0 +1,165 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
"""
This test script verifies that sharding tests does change which tests are run.
This is done by running the binary multiple times, once to list all the tests,
once per shard to list the tests for that shard, and once again per shard to
execute the tests. The sharded lists are compared to the full list to ensure
none are skipped, duplicated, and that the order remains the same.
"""
import random
import subprocess
import sys
import xml.etree.ElementTree as ET
from collections import namedtuple
from typing import List, Dict
# Seed forwarded to the binary's --rng-seed so all runs share one ordering.
# fix: start at 1 -- seed 0 has special meaning (see the sibling random-order
# test script, which deliberately avoids it for the same reason).
seed = random.randint(1, 2 ** 32 - 1)
# Number of shards the test run is split into.
number_of_shards = 5
def make_base_commandline(self_test_exe):
    """Common argv prefix: xml reporter, seeded random order, generator tests."""
    args = [self_test_exe]
    args += ['--reporter', 'xml']
    args += ['--order', 'rand']
    args += ['--rng-seed', str(seed)]
    args.append("[generators]~[benchmarks]~[.]")
    return args
def list_tests(self_test_exe: str, extra_args: List[str] = None):
    """Return test names the binary would run, honoring any extra arguments."""
    cmd = make_base_commandline(self_test_exe) + ['--list-tests']
    if extra_args:
        cmd.extend(extra_args)

    try:
        ret = subprocess.run(cmd,
                             stdout = subprocess.PIPE,
                             stderr = subprocess.PIPE,
                             timeout = 10,
                             check = True,
                             universal_newlines = True)
    except subprocess.CalledProcessError as ex:
        print('Could not list tests:\n{}'.format(ex.stderr))
        # fix: without re-raising, execution fell through to use the
        # unbound 'ret' below, masking the real failure with a NameError.
        raise

    if ret.stderr:
        raise RuntimeError("Unexpected error output:\n" + ret.stderr)

    root = ET.fromstring(ret.stdout)
    result = [elem.text for elem in root.findall('./TestCase/Name')]

    if len(result) < 2:
        raise RuntimeError("Unexpectedly few tests listed (got {})".format(
            len(result)))
    return result
def execute_tests(self_test_exe: str, extra_args: List[str] = None):
    """Run the tests and return the executed test names in run order."""
    cmd = make_base_commandline(self_test_exe)
    if extra_args:
        cmd.extend(extra_args)

    try:
        ret = subprocess.run(cmd,
                             stdout = subprocess.PIPE,
                             stderr = subprocess.PIPE,
                             timeout = 10,
                             check = True,
                             universal_newlines = True)
    except subprocess.CalledProcessError as ex:
        # fix: message said "list" (copy-paste from list_tests); also
        # re-raise so the unbound 'ret' below is never reached.
        print('Could not run tests:\n{}'.format(ex.stderr))
        raise

    if ret.stderr:
        # fix: referenced the undefined name 'process.stderr' before
        raise RuntimeError("Unexpected error output:\n" + ret.stderr)

    root = ET.fromstring(ret.stdout)
    result = [elem.attrib["name"] for elem in root.findall('./TestCase')]

    if len(result) < 2:
        raise RuntimeError("Unexpectedly few tests listed (got {})".format(
            len(result)))
    return result
def test_sharded_listing(self_test_exe: str) -> Dict[int, List[str]]:
    """
    Asks the test binary for list of all tests, and also for lists of
    tests from shards.
    The combination of shards is then checked whether it corresponds to
    the full list of all tests.
    Returns the dictionary of shard-index => listed tests for later use.
    """
    all_tests = list_tests(self_test_exe)
    # Sanity check: a single shard covering everything must equal the
    # plain, unsharded listing.
    big_shard_tests = list_tests(self_test_exe, ['--shard-count', '1', '--shard-index', '0'])
    assert all_tests == big_shard_tests, (
        "No-sharding test list does not match the listing of big shard:\nNo shard:\n{}\n\nWith shard:\n{}\n".format(
            '\n'.join(all_tests),
            '\n'.join(big_shard_tests)
        )
    )
    # Collect each individual shard's listing, keyed by shard index.
    shard_listings = dict()
    for shard_idx in range(number_of_shards):
        shard_listings[shard_idx] = list_tests(self_test_exe, ['--shard-count', str(number_of_shards), '--shard-index', str(shard_idx)])
    # Together the shards must account for every test exactly once.
    shard_sizes = [len(v) for v in shard_listings.values()]
    assert len(all_tests) == sum(shard_sizes)
    # Check that the shards have roughly the right sizes (e.g. we don't
    # have all tests in single shard and the others are empty)
    differences = [abs(x1 - x2) for x1, x2 in zip(shard_sizes, shard_sizes[1:])]
    assert all(diff <= 1 for diff in differences), "A shard has weird size: {}".format(shard_sizes)
    # Concatenating the shards in index order must reproduce the full
    # listing, i.e. sharding must not reorder tests.
    combined_shards = [inner for outer in shard_listings.values() for inner in outer]
    assert all_tests == combined_shards, (
        "All tests and combined shards disagree.\nNo shard:\n{}\n\nCombined:\n{}\n\n".format(
            '\n'.join(all_tests),
            '\n'.join(combined_shards)
        )
    )
    # Stash the unsharded listing under key -1 for test_sharded_execution.
    shard_listings[-1] = all_tests
    return shard_listings
def test_sharded_execution(self_test_exe: str, listings: Dict[int, List[str]]):
    """
    Runs the test binary and checks that the executed tests match the
    previously listed tests.
    Also does this for various shard indices, and that the combination
    of all shards matches the full run/listing.
    """
    full_run = execute_tests(self_test_exe)
    single_big_shard = execute_tests(self_test_exe, ['--shard-count', '1', '--shard-index', '0'])
    assert full_run == single_big_shard
    assert listings[-1] == full_run

    for idx in range(number_of_shards):
        shard_args = ['--shard-count', str(number_of_shards), '--shard-index', str(idx)]
        assert listings[idx] == execute_tests(self_test_exe, shard_args)
def main():
    """List tests with and without sharding, then run them and cross-check."""
    self_test_exe, = sys.argv[1:]
    test_sharded_execution(self_test_exe, test_sharded_listing(self_test_exe))

if __name__ == '__main__':
    sys.exit(main())