diff options
Diffstat (limited to 'debian/uncrustify-trinity/uncrustify-trinity-0.75.0/tests/test_uncrustify')
7 files changed, 740 insertions, 0 deletions
diff --git a/debian/uncrustify-trinity/uncrustify-trinity-0.75.0/tests/test_uncrustify/__init__.py b/debian/uncrustify-trinity/uncrustify-trinity-0.75.0/tests/test_uncrustify/__init__.py new file mode 100644 index 00000000..bf83b667 --- /dev/null +++ b/debian/uncrustify-trinity/uncrustify-trinity-0.75.0/tests/test_uncrustify/__init__.py @@ -0,0 +1,19 @@ +# Entry point for uncrustify test utilities. +# +# * @author Matthew Woehlke June 2018 +# + +from .ansicolor import printc + +from .config import config, test_dir, all_tests + +from .failure import (Failure, ExecutionFailure, MissingFailure, + MismatchFailure, UnstableFailure) + +from .selector import Selector + +from .test import SourceTest, FormatTest + +from .utilities import (add_test_arguments, add_format_tests_arguments, + add_source_tests_arguments, parse_args, run_tests, + read_format_tests, report, fixup_ctest_path) diff --git a/debian/uncrustify-trinity/uncrustify-trinity-0.75.0/tests/test_uncrustify/ansicolor.py b/debian/uncrustify-trinity/uncrustify-trinity-0.75.0/tests/test_uncrustify/ansicolor.py new file mode 100644 index 00000000..44d6dbd5 --- /dev/null +++ b/debian/uncrustify-trinity/uncrustify-trinity-0.75.0/tests/test_uncrustify/ansicolor.py @@ -0,0 +1,55 @@ +# Print in color, if possible. +# +# * @author Matthew Woehlke June 2018 +# + +# Curses setup might fail... 
try:
    import curses
    import sys

    curses.setupterm()

    if sys.stdout.isatty():
        # Real terminal: decode capability output to text
        def _tparm(p, *args):
            return curses.tparm(p, *args).decode('ascii')

        # Prefer the ANSI capabilities (setaf/setab); fall back to the
        # legacy ones (setf/setb) when the terminal lacks them
        _setf = curses.tigetstr('setaf') or curses.tigetstr('setf')
        _setb = curses.tigetstr('setab') or curses.tigetstr('setb')
        _bold = curses.tigetstr('bold')
        _reset = _tparm(curses.tigetstr('sgr0'))

    else:
        # Output is redirected; emit no escape sequences at all
        def _tparm(p, *args):
            return ''

        _setf = ''
        _setb = ''
        _bold = ''
        _reset = ''

    # -------------------------------------------------------------------------
    def printc(ctext, ntext='', fore=None, back=None, bold=False):
        """Print `ctext` colored/bold (when possible) followed by plain `ntext`.

        `fore` and `back` are terminal color numbers; `bold` enables the bold
        attribute. Attributes are reset before `ntext` is printed.
        """
        reset = ""

        if bold:
            ctext = _tparm(_bold) + ctext
            reset = _reset

        if fore is not None:
            ctext = _tparm(_setf, fore) + ctext
            reset = _reset

        if back is not None:
            # BUGFIX: use the background capability (_setb); this previously
            # used _setf and set the foreground color instead
            ctext = _tparm(_setb, back) + ctext
            reset = _reset

        print(ctext + reset + ntext)

# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

# ...so if something went wrong, provide a fall-back instead
except Exception:
    # -------------------------------------------------------------------------
    def printc(ctext, ntext='', *args, **kwargs):
        # BUGFIX: `ntext` now defaults to '' to match the primary
        # implementation's signature (printc('x') no longer raises here)
        print(ctext + ntext)
#
# * @author Ben Gardner October 2009
# * @author Guy Maurel October 2015
# * @author Matthew Woehlke June 2018
#

import os

# Root of the test tree (the parent directory of this package).
test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

# Every known test group, sorted by lexicographic order.
all_tests = [
    'c-sharp',
    'c',
    'cpp',
    'd',
    'ecma',
    'imported',
    'java',
    'objective-c',
    'pawn',
    'staging',
    'vala',
]

# Terminal color numbers used for reporting, named for readability.
_RED = 1
_GREEN = 2
_YELLOW = 3
_CYAN = 6

# Keyword-argument sets passed to printc() for each outcome category.
FAIL_ATTRS = {'bold': True}
PASS_ATTRS = {'fore': _GREEN}
MISMATCH_ATTRS = {'fore': _RED}
UNSTABLE_ATTRS = {'fore': _CYAN}
SKIP_ATTRS = {'fore': _YELLOW}


# =============================================================================
class config(object):
    # Paths to external tools; the drivers fill these in at startup.
    uncrustify_exe = None
    python_exe = None
    git_exe = 'git'
#
# * @author Matthew Woehlke June 2018
#


# =============================================================================
class Failure(Exception):
    """Root of the test-failure exception hierarchy."""
    pass


# =============================================================================
class TestDeclarationParseError(Failure):
    """A line of a test-list file could not be parsed."""

    # -------------------------------------------------------------------------
    def __init__(self, test_suite, line_number):
        self.test_suite = test_suite
        self.line_number = line_number

    # -------------------------------------------------------------------------
    def __str__(self):
        return ('Error parsing line {0.line_number!r} '
                'from the {0.test_suite!r} test suite').format(self)


# =============================================================================
class ExecutionFailure(Failure):
    """Uncrustify itself exited with an error code."""

    # -------------------------------------------------------------------------
    def __init__(self, exception):
        self.exception = exception

    # -------------------------------------------------------------------------
    def __str__(self):
        return str(self.exception)


# =============================================================================
class MissingFailure(Failure):
    """The expected-output file does not exist."""

    # -------------------------------------------------------------------------
    def __init__(self, exception, missing_path):
        self.exception = exception
        self.missing_path = missing_path

    # -------------------------------------------------------------------------
    def __str__(self):
        return 'Expected output file not found: {0.missing_path!r}'.format(self)


# =============================================================================
class _ComparisonFailure(Failure):
    """Shared state for failures that compare actual vs. expected output."""

    # -------------------------------------------------------------------------
    def __init__(self, expected, actual):
        self.expected_path = expected
        self.actual_path = actual


# =============================================================================
class MismatchFailure(_ComparisonFailure):
    """The formatted output differs from the expected output."""

    # -------------------------------------------------------------------------
    def __str__(self):
        return ('Output {0.actual_path!r} does not match '
                'expected output {0.expected_path!r}').format(self)


# =============================================================================
class UnstableFailure(_ComparisonFailure):
    """The re-run output differs from the first pass's output."""

    # -------------------------------------------------------------------------
    def __str__(self):
        return ('Output {0.actual_path!r} does not match '
                'expected output {0.expected_path!r}').format(self)


# =============================================================================
class UnexpectedlyPassingFailure(_ComparisonFailure):
    """A test marked as expected-to-fail matched the expected output."""

    # -------------------------------------------------------------------------
    def __str__(self):
        return ('Output {0.actual_path!r} unexpectedly matches '
                'expected output {0.expected_path!r}').format(self)
#
# * @author Matthew Woehlke June 2018
#


# =============================================================================
class Selector(object):
    """Matches test names of the form 'group:number' against a user filter.

    The filter string is a comma-separated list of entries, each of the form
    '[group:]number' or '[group:]lower-upper'. An entry without a group part
    applies to every group.
    """

    # -------------------------------------------------------------------------
    def __init__(self, s):
        # Parse each entry into a (group, lower, upper) triple, with
        # group = None meaning "any group"
        self.ranges = []
        for entry in s.split(','):
            group = None
            if ':' in entry:
                group, entry = entry.split(':')
            if '-' in entry:
                lower, upper = (int(v) for v in entry.split('-'))
            else:
                lower = upper = int(entry)
            self.ranges.append((group, lower, upper))

    # -------------------------------------------------------------------------
    def test(self, name):
        """Return True if 'group:num' `name` matches any parsed range."""
        group, num = name.split(':')
        num = int(num)

        return any(g in (None, group) and lo <= num <= hi
                   for g, lo, hi in self.ranges)
#
# * @author Ben Gardner October 2009
# * @author Guy Maurel October 2015
# * @author Matthew Woehlke June 2018
#

import filecmp
import os
import re
import subprocess
import sys
import errno

from .ansicolor import printc
from .config import (config, test_dir, FAIL_ATTRS, PASS_ATTRS,
                     MISMATCH_ATTRS, UNSTABLE_ATTRS)
from .failure import (ExecutionFailure, MismatchFailure, MissingFailure,
                      TestDeclarationParseError, UnexpectedlyPassingFailure,
                      UnstableFailure)


# =============================================================================
class SourceTest(object):
    """A single uncrustify invocation: format one input and compare the
    result against an expected-output file.

    Attributes are populated by build() (or directly, by FormatTest) and
    validated by _check() before run() executes the test.
    """

    # -------------------------------------------------------------------------
    def __init__(self):
        self.test_result_dir = 'results'

        # How a comparison failure is labeled and raised; FormatTest's
        # second (re-run) pass overrides these to report UNSTABLE instead
        self.diff_text = 'MISMATCH'
        self.diff_attrs = MISMATCH_ATTRS
        self.diff_exception = MismatchFailure

    # -------------------------------------------------------------------------
    def _check_attr(self, name):
        # Raise AttributeError if a required attribute is unset or None
        if not hasattr(self, name) or getattr(self, name) is None:
            raise AttributeError(
                'Test is missing required attribute {!r}'.format(name))

    # -------------------------------------------------------------------------
    def _make_abs(self, name, base):
        # Resolve a relative path attribute against <test_dir>/<base>
        path = getattr(self, name)
        if not os.path.isabs(path):
            setattr(self, name, os.path.join(test_dir, base, path))

    # -------------------------------------------------------------------------
    def _diff(self, expected, actual):
        # Show a diff of expected vs. actual using git (works outside a repo
        # thanks to --no-index); flush first so output interleaves correctly
        sys.stdout.flush()
        cmd = [config.git_exe, 'diff', '--no-index', expected, actual]
        subprocess.call(cmd)

    # -------------------------------------------------------------------------
    def build(self, test_input, test_lang, test_config, test_expected):
        """Populate the test attributes from explicit values."""
        self.test_name = os.path.basename(test_input)
        self.test_lang = test_lang
        self.test_input = test_input
        self.test_config = test_config
        self.test_expected = test_expected
        self.test_xfail = False

    # -------------------------------------------------------------------------
    def _check(self):
        # Verify every required attribute has been set before running
        self._check_attr('test_name')
        self._check_attr('test_lang')
        self._check_attr('test_input')
        self._check_attr('test_config')
        self._check_attr('test_expected')
        self._check_attr('test_xfail')

    # -------------------------------------------------------------------------
    def run(self, args):
        """Run uncrustify on the input and compare against the expectation.

        Raises ExecutionFailure (uncrustify errored), MissingFailure
        (expected file absent), UnexpectedlyPassingFailure (XFAIL test
        matched), or self.diff_exception (output mismatch / instability).
        """
        self._check()

        # Result path mirrors the expected file's parent directory name
        _expected = self.test_expected
        _result = os.path.join(args.result_dir, self.test_result_dir,
                               os.path.basename(os.path.dirname(_expected)),
                               os.path.basename(_expected))

        if args.verbose:
            print(self.test_name)
            print(' Language : {}'.format(self.test_lang))
            print(' Input : {}'.format(self.test_input))
            print(' Config : {}'.format(self.test_config))
            print(' Expected : {}'.format(_expected))
            print(' Result : {}'.format(_result))
            print(' XFail : {}'.format(self.test_xfail))

        # Create the result directory; tolerate a concurrent-creation race
        if not os.path.exists(os.path.dirname(_result)):
            try:
                os.makedirs(os.path.dirname(_result))
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

        cmd = [
            config.uncrustify_exe,
            '-q',
            '-l', self.test_lang,
            '-c', self.test_config,
            '-f', self.test_input,
            '-o', _result
        ]
        if args.debug:
            # Full logging plus the parse dump (.unc) for debugging
            cmd += [
                '-LA',
                '-p', _result + '.unc'
            ]

        else:
            cmd += ['-LA']

        if args.show_commands:
            printc('RUN: ', repr(cmd))

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as exc:
            output = exc.output
            if not self.test_xfail:
                print(output.rstrip())
                msg = '{} (Uncrustify error code {})'
                msg = msg.format(self.test_name, exc.returncode)
                printc('FAILED: ', msg, **FAIL_ATTRS)
                raise ExecutionFailure(exc)
            elif args.xdiff:
                print(output.rstrip())
        finally:
            # Preserve uncrustify's output for post-mortem when debugging
            if args.debug:
                with open(_result + '.log', 'wt') as f:
                    f.write(output)

        try:
            has_diff = not filecmp.cmp(_expected, _result)
            if has_diff and not self.test_xfail:
                if args.diff:
                    self._diff(_expected, _result)
                printc('{}: '.format(self.diff_text),
                       self.test_name, **self.diff_attrs)
                raise self.diff_exception(_expected, _result)
            if not has_diff and self.test_xfail:
                # An XFAIL test that matches is itself a failure
                raise UnexpectedlyPassingFailure(_expected, _result)
            if has_diff and self.test_xfail:
                if args.xdiff:
                    self._diff(_expected, _result)
                if not args.show_all:
                    printc('XFAILED: ', self.test_name, **PASS_ATTRS)
        except OSError as exc:
            # filecmp.cmp raises OSError when the expected file is missing
            printc('MISSING: ', self.test_name, **self.diff_attrs)
            raise MissingFailure(exc, _expected)


# =============================================================================
class FormatTest(SourceTest):
    """A two-pass format test: the initial format, then a re-run on the
    first pass's output to verify the formatting is stable."""

    # Attribute names used for each pass (index 0 = initial, 1 = re-run)
    pass_config = ['test_config', 'test_rerun_config']
    pass_input = ['test_input', 'test_expected']
    pass_expected = ['test_expected', 'test_rerun_expected']

    # Declaration line: <num>[~][!] <config> <input> [<lang>]
    # '~' marks an expected failure, '!' marks a dedicated re-run config
    re_test_declaration = re.compile(r'^(?P<num>\d+)(?P<mark>[~!]*)\s+'
                                     r'(?P<config>\S+)\s+(?P<input>\S+)'
                                     r'(?:\s+(?P<lang>\S+))?$')

    # -------------------------------------------------------------------------
    def _build_pass(self, i):
        # Construct the SourceTest for pass `i`
        p = SourceTest()

        p.test_name = self.test_name
        p.test_lang = self.test_lang
        p.test_config = getattr(self, self.pass_config[i])
        p.test_input = getattr(self, self.pass_input[i])
        p.test_expected = getattr(self, self.pass_expected[i])
        p.test_xfail = self.test_xfail
        if i == 1 and not os.path.exists(p.test_expected):
            # No dedicated re-run expectation: the re-run must reproduce
            # the first pass's expected output (stability check)
            p.test_expected = getattr(self, self.pass_expected[0])

        return p

    # -------------------------------------------------------------------------
    def _build_passes(self):
        # Validate attributes, absolutize paths, and build both passes
        self._check()
        self._check_attr('test_rerun_config')
        self._check_attr('test_rerun_expected')

        self._make_abs('test_input', 'input')
        self._make_abs('test_config', 'config')
        self._make_abs('test_expected', 'expected')
        self._make_abs('test_rerun_config', 'config')
        self._make_abs('test_rerun_expected', 'expected')

        self.test_passes = [
            self._build_pass(0),
            self._build_pass(1)]

        # A mismatch on the second pass is reported as UNSTABLE
        self.test_passes[1].test_name = self.test_name + ' (re-run)'
        self.test_passes[1].test_result_dir = 'results_2'
        self.test_passes[1].diff_text = 'UNSTABLE'
        self.test_passes[1].diff_attrs = UNSTABLE_ATTRS
        self.test_passes[1].diff_exception = UnstableFailure

    # -------------------------------------------------------------------------
    def build_from_declaration(self, decl, group, line_number):
        """Populate this test from one line of a test-list file.

        Raises TestDeclarationParseError if the line does not match the
        expected declaration format.
        """
        match = self.re_test_declaration.match(decl)
        if not match:
            raise TestDeclarationParseError(group, line_number)

        num = match.group('num')
        is_rerun = ('!' in match.group('mark'))
        is_xfail = ('~' in match.group('mark'))

        self.test_xfail = is_xfail

        self.test_config = match.group('config')
        self.test_input = match.group('input')

        test_dir = os.path.dirname(self.test_input)
        test_filename = os.path.basename(self.test_input)

        if match.group('lang'):
            self.test_lang = match.group('lang')
        else:
            # Default the language to the input's directory name
            self.test_lang = test_dir

        # Expected output lives next to the input, prefixed with the number
        self.test_expected = os.path.join(
            test_dir, '{}-{}'.format(num, test_filename))

        def rerun_file(name):
            # foo.ext -> foo.rerun.ext
            parts = name.split('.')
            return '.'.join(parts[:-1] + ['rerun'] + parts[-1:])

        if is_rerun:
            self.test_rerun_config = rerun_file(self.test_config)
            self.test_rerun_expected = rerun_file(self.test_expected)
        else:
            self.test_rerun_config = self.test_config
            self.test_rerun_expected = self.test_expected

        self.test_name = '{}:{}'.format(group, num)

        self._build_passes()

    # -------------------------------------------------------------------------
    def build_from_args(self, args):
        """Populate this test from parsed command-line arguments."""
        self.test_name = args.name
        self.test_lang = args.lang
        self.test_input = args.input
        self.test_config = args.config
        self.test_expected = args.expected
        self.test_rerun_config = args.rerun_config or args.config
        self.test_rerun_expected = args.rerun_expected or args.expected
        self.test_xfail = args.xfail

        self._build_passes()

    # -------------------------------------------------------------------------
    def print_as_ctest(self, out_file=sys.stdout):
        """Write add_test()/set_tests_properties() CTest snippets for this
        test to `out_file`."""
        self._check()

        def to_cmake_path(obj):
            # CMake expects forward slashes, even on Windows
            if type(obj) is dict:
                return {k: to_cmake_path(v) for k, v in obj.items()}
            if type(obj) is str:
                return obj.replace(os.sep, '/')
            return obj

        runner = os.path.join(test_dir, 'run_test.py')

        out_file.write(
            ('add_test({test_name}\n' +
             ' "{python_exe}" -S "{test_runner}" "{test_name}"\n' +
             ' --executable "{uncrustify_exe}"\n' +
             ' --lang "{test_lang}"\n' +
             ' --input "{test_input}"\n' +
             ' --config "{test_config}"\n' +
             ' --expected "{test_expected}"\n' +
             ' --rerun-config "{test_rerun_config}"\n' +
             ' --rerun-expected "{test_rerun_expected}"\n' +
             ' -d --git "{git_exe}"\n' +
             '{xfail}' +
             ')\n').format(
                test_runner=to_cmake_path(runner),
                python_exe=to_cmake_path(config.python_exe),
                uncrustify_exe=to_cmake_path(config.uncrustify_exe),
                git_exe=to_cmake_path(config.git_exe),
                xfail=(' --xfail\n' if self.test_xfail else ''),
                **to_cmake_path(self.__dict__)))
        out_file.write(
            ('set_tests_properties({}\n' +
             ' PROPERTIES LABELS "{}"\n)\n').format(
                self.test_name, self.test_name.split(':')[0]))
        #out_file.write(
        #    ('set_tests_properties({}\n' +
        #     ' PROPERTIES DEPENDS "sources_format"\n)\n').format(
        #        self.test_name))

    # -------------------------------------------------------------------------
    def run(self, args):
        """Run both passes (initial format, then the stability re-run)."""
        for p in self.test_passes:
            p.run(args)
#
# * @author Ben Gardner October 2009
# * @author Guy Maurel October 2015
# * @author Matthew Woehlke June 2018
#

import argparse
import os
import subprocess
import sys

from .ansicolor import printc
from .config import config, all_tests, FAIL_ATTRS, PASS_ATTRS, SKIP_ATTRS
from .failure import (Failure, MismatchFailure, UnexpectedlyPassingFailure,
                      UnstableFailure)
from .test import FormatTest


# -----------------------------------------------------------------------------
def _add_common_arguments(parser):
    """Add the options shared by every test driver."""
    parser.add_argument('-c', '--show-commands', action='store_true',
                        help='show commands')

    parser.add_argument('-v', '--verbose', action='store_true',
                        help='show detailed test information')

    parser.add_argument('-d', '--diff', action='store_true',
                        help='show diff on failure')

    parser.add_argument('-x', '--xdiff', action='store_true',
                        help='show diff on expected failure')

    parser.add_argument('-g', '--debug', action='store_true',
                        help='generate debug files (.log, .unc)')

    parser.add_argument('-e', '--executable', type=str, required=True,
                        metavar='PATH',
                        help='uncrustify executable to test')

    parser.add_argument('--git', type=str, default=config.git_exe,
                        metavar='PATH',
                        help='git executable to use to generate diffs')

    parser.add_argument('--result-dir', type=str, default=os.getcwd(),
                        metavar='DIR',
                        help='location to which results will be written')


# -----------------------------------------------------------------------------
def add_test_arguments(parser):
    """Add arguments for the single-test runner (run_test.py)."""
    _add_common_arguments(parser)

    parser.add_argument("name", type=str, metavar='NAME')
    parser.add_argument("--lang", type=str, required=True)
    parser.add_argument("--input", type=str, required=True)
    parser.add_argument("--config", type=str, required=True)
    parser.add_argument("--expected", type=str, required=True)
    # FIX: metavars were copy-paste errors ('INPUT' on --rerun-config and
    # 'CONFIG' on --rerun-expected); use names describing the actual values
    # so --help output is not misleading
    parser.add_argument("--rerun-config", type=str, metavar='CONFIG')
    parser.add_argument("--rerun-expected", type=str, metavar='EXPECTED')
    parser.add_argument("--xfail", action='store_true')


# -----------------------------------------------------------------------------
def add_source_tests_arguments(parser):
    """Add arguments for the source-formatting self-test driver."""
    _add_common_arguments(parser)

    parser.add_argument('-p', '--show-all', action='store_true',
                        help='show passed/skipped tests')


# -----------------------------------------------------------------------------
def add_format_tests_arguments(parser):
    """Add arguments for the format-test-suite driver."""
    _add_common_arguments(parser)

    parser.add_argument('-p', '--show-all', action='store_true',
                        help='show passed/skipped tests')

    parser.add_argument('-r', '--select', metavar='CASE(S)', type=str,
                        help='select tests to be executed')

    parser.add_argument('tests', metavar='TEST', type=str, nargs='*',
                        default=all_tests,
                        help='test(s) to run (default all)')

    # Arguments for generating the CTest script; users should not use these
    # directly
    parser.add_argument("--write-ctest", type=str, help=argparse.SUPPRESS)
    parser.add_argument("--cmake-config", type=str, help=argparse.SUPPRESS)
    parser.add_argument("--python", type=str, help=argparse.SUPPRESS)


# -----------------------------------------------------------------------------
def parse_args(parser):
    """Parse arguments and validate the uncrustify executable.

    Exits the process (status -1) if the executable is missing or fails a
    trivial '--help' sanity check. Returns the parsed arguments.
    """
    args = parser.parse_args()

    if args.git is not None:
        config.git_exe = args.git

    config.uncrustify_exe = args.executable
    if not os.path.exists(config.uncrustify_exe):
        msg = 'Specified uncrustify executable {!r} does not exist'.format(
            config.uncrustify_exe)
        printc("FAILED: ", msg, **FAIL_ATTRS)
        sys.exit(-1)

    # Do a sanity check on the executable
    try:
        with open(os.devnull, 'w') as bitbucket:
            subprocess.check_call([config.uncrustify_exe, '--help'],
                                  stdout=bitbucket)
    except Exception as exc:
        msg = ('Specified uncrustify executable {!r} ' +
               'does not appear to be usable: {!s}').format(
            config.uncrustify_exe, exc)
        printc("FAILED: ", msg, **FAIL_ATTRS)
        sys.exit(-1)

    return args


# -----------------------------------------------------------------------------
def run_tests(tests, args, selector=None):
    """Run each test (honoring `selector`), returning outcome counts.

    The returned dict has keys 'passing', 'failing', 'mismatch', 'unstable'
    and 'xpass'; skipped tests are not counted.
    """
    pass_count = 0
    fail_count = 0
    mismatch_count = 0
    unstable_count = 0
    unexpectedly_passing_count = 0

    for test in tests:
        if selector is not None and not selector.test(test.test_name):
            if args.show_all:
                printc("SKIPPED: ", test.test_name, **SKIP_ATTRS)
            continue

        try:
            test.run(args)
            if args.show_all:
                outcome = 'XFAILED' if test.test_xfail else 'PASSED'
                printc('{}: '.format(outcome), test.test_name, **PASS_ATTRS)
            pass_count += 1
        # Order matters: the specific failures are caught before the
        # catch-all Failure base class
        except UnstableFailure:
            unstable_count += 1
        except MismatchFailure:
            mismatch_count += 1
        except UnexpectedlyPassingFailure:
            unexpectedly_passing_count += 1
        except Failure:
            fail_count += 1

    return {
        'passing': pass_count,
        'failing': fail_count,
        'mismatch': mismatch_count,
        'unstable': unstable_count,
        'xpass': unexpectedly_passing_count
    }


# -----------------------------------------------------------------------------
def report(counts):
    """Print a human-readable summary of run_tests() outcome counts."""
    total = sum(counts.values())
    print('{passing} / {total} tests passed'.format(total=total, **counts))
    if counts['failing'] > 0:
        printc('{failing} tests failed to execute'.format(**counts), 'f',
               **FAIL_ATTRS)
    if counts['mismatch'] > 0:
        printc(
            '{mismatch} tests did not match the expected output'.format(
                **counts), 'm',
            **FAIL_ATTRS)
    if counts['unstable'] > 0:
        printc('{unstable} tests were unstable'.format(**counts), 'u',
               **FAIL_ATTRS)
    if counts['xpass'] > 0:
        printc('{xpass} tests passed but were expected to fail'.format(**counts),
               'x', **FAIL_ATTRS)


# -----------------------------------------------------------------------------
def read_format_tests(filename, group):
    """Read a test-list file and return a list of FormatTest objects.

    Blank lines and '#' comment lines are ignored. Raises
    TestDeclarationParseError (via build_from_declaration) on bad lines.
    """
    tests = []

    print("Processing " + filename)
    with open(filename, 'rt') as f:
        for line_number, line in enumerate(f, 1):
            line = line.strip()
            if not len(line):
                continue
            if line.startswith('#'):
                continue

            test = FormatTest()
            test.build_from_declaration(line, group, line_number)
            tests.append(test)

    return tests


# -----------------------------------------------------------------------------
def fixup_ctest_path(path, config):
    """If `path` ends in a per-configuration directory matching `config`
    (e.g. 'Debug'), replace it with the ${CTEST_CONFIGURATION_TYPE}
    placeholder so the generated CTest script works for any configuration."""
    if config is None:
        return path

    dirname, basename = os.path.split(path)
    if os.path.basename(dirname).lower() == config.lower():
        dirname, junk = os.path.split(dirname)
        return os.path.join(dirname, '${CTEST_CONFIGURATION_TYPE}', basename)

    return path