diff --git a/MANIFEST.in b/MANIFEST.in index e0470dc71..04cefe43b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -24,7 +24,7 @@ include plover_build_utils/*.sh include pyproject.toml include reqs/*.txt include test/__init__.py -include test/utils.py +include test/conftest.py include tox.ini include windows/* # Exclude: CI/Git/GitHub specific files, diff --git a/news.d/api/1302.new.md b/news.d/api/1302.new.md new file mode 100644 index 000000000..900fbc4df --- /dev/null +++ b/news.d/api/1302.new.md @@ -0,0 +1,4 @@ +Add some new helpers to `plover_build_utils.testing`: +- `dictionary_test`: torture tests for dictionary implementations. +- `make_dict`: create a temporary dictionary. +- `parametrize`: parametrize helper for tracking test data source line numbers. diff --git a/news.d/bugfix/1302.core.md b/news.d/bugfix/1302.core.md new file mode 100644 index 000000000..4c4b59211 --- /dev/null +++ b/news.d/bugfix/1302.core.md @@ -0,0 +1,3 @@ +Fix 2 corner cases when handling dictionaries: +- If the class implementation is marked as read-only, then loading from a writable file should still result in a read-only dictionary. +- Don't allow `clear` on a read-only dictionary. 
diff --git a/plover/steno_dictionary.py b/plover/steno_dictionary.py index 217b5c412..25f5754f7 100644 --- a/plover/steno_dictionary.py +++ b/plover/steno_dictionary.py @@ -62,8 +62,9 @@ def load(cls, resource): timestamp = resource_timestamp(filename) d = cls() d._load(filename) - if resource.startswith(ASSET_SCHEME) or \ - not os.access(filename, os.W_OK): + if (cls.readonly or + resource.startswith(ASSET_SCHEME) or + not os.access(filename, os.W_OK)): d.readonly = True d.path = resource d.timestamp = timestamp @@ -96,6 +97,7 @@ def __getitem__(self, key): return self._dict.__getitem__(key) def clear(self): + assert not self.readonly self._dict.clear() self.reverse.clear() self.casereverse.clear() diff --git a/plover_build_utils/testing/__init__.py b/plover_build_utils/testing/__init__.py new file mode 100644 index 000000000..9e768e313 --- /dev/null +++ b/plover_build_utils/testing/__init__.py @@ -0,0 +1,6 @@ +from .blackbox import blackbox_test +from .dict import make_dict +from .output import CaptureOutput +from .parametrize import parametrize +from .steno import steno_to_stroke +from .steno_dictionary import dictionary_test diff --git a/plover_build_utils/testing.py b/plover_build_utils/testing/blackbox.py similarity index 70% rename from plover_build_utils/testing.py rename to plover_build_utils/testing/blackbox.py index 921aef5c9..a84d6197e 100644 --- a/plover_build_utils/testing.py +++ b/plover_build_utils/testing/blackbox.py @@ -5,70 +5,13 @@ import re import textwrap -from plover import system from plover.formatting import Formatter -from plover.steno import Stroke, normalize_steno +from plover.steno import normalize_steno from plover.steno_dictionary import StenoDictionary from plover.translation import Translator - -class CaptureOutput: - - def __init__(self): - self.instructions = [] - self.text = '' - - def send_backspaces(self, n): - assert n <= len(self.text) - self.text = self.text[:-n] - self.instructions.append(('b', n)) - - def send_string(self, 
s): - self.text += s - self.instructions.append(('s', s)) - - def send_key_combination(self, c): - self.instructions.append(('c', c)) - - def send_engine_command(self, c): - self.instructions.append(('e', c)) - - -def steno_to_stroke(steno): - if steno_to_stroke.system != system.NAME: - keys = [] - letters = '' - has_hyphen = False - for k in system.KEYS: - if not has_hyphen and k.startswith('-'): - has_hyphen = True - keys.append(None) - letters += '-' - keys.append(k) - letters += k.strip('-') - steno_to_stroke.keys = keys - steno_to_stroke.letters = letters - steno_to_stroke.system = system.NAME - steno_to_stroke.numbers = { - v.strip('-'): k.strip('-') - for k, v in system.NUMBERS.items() - } - n = -1 - keys = set() - for li, l in enumerate(steno): - rl = steno_to_stroke.numbers.get(l) - if rl is not None: - keys.add('#') - l = rl - n = steno_to_stroke.letters.find(l, n + 1) - if n < 0: - raise ValueError('invalid steno letter at index %u:\n%s\n%s^' % (li, steno, ' ' * li)) - k = steno_to_stroke.keys[n] - if k is not None: - keys.add(k) - return Stroke(keys) - -steno_to_stroke.system = None +from .output import CaptureOutput +from .steno import steno_to_stroke BLACKBOX_OUTPUT_RX = re.compile("r?['\"]") diff --git a/plover_build_utils/testing/dict.py b/plover_build_utils/testing/dict.py new file mode 100644 index 000000000..0dae9ac2c --- /dev/null +++ b/plover_build_utils/testing/dict.py @@ -0,0 +1,20 @@ +from contextlib import contextmanager +from pathlib import Path +import os +import tempfile + + +@contextmanager +def make_dict(tmp_path, contents, extension=None, name=None): + kwargs = {'dir': str(tmp_path)} + if name is not None: + kwargs['prefix'] = name + '_' + if extension is not None: + kwargs['suffix'] = '.' 
+ extension + fd, path = tempfile.mkstemp(**kwargs) + try: + os.write(fd, contents) + os.close(fd) + yield Path(path) + finally: + os.unlink(path) diff --git a/plover_build_utils/testing/output.py b/plover_build_utils/testing/output.py new file mode 100644 index 000000000..967e6e880 --- /dev/null +++ b/plover_build_utils/testing/output.py @@ -0,0 +1,20 @@ +class CaptureOutput: + + def __init__(self): + self.instructions = [] + self.text = '' + + def send_backspaces(self, n): + assert n <= len(self.text) + self.text = self.text[:-n] + self.instructions.append(('b', n)) + + def send_string(self, s): + self.text += s + self.instructions.append(('s', s)) + + def send_key_combination(self, c): + self.instructions.append(('c', c)) + + def send_engine_command(self, c): + self.instructions.append(('e', c)) diff --git a/plover_build_utils/testing/parametrize.py b/plover_build_utils/testing/parametrize.py new file mode 100644 index 000000000..36f08feb5 --- /dev/null +++ b/plover_build_utils/testing/parametrize.py @@ -0,0 +1,38 @@ +import inspect + +import pytest + + +def parametrize(tests, arity=None): + '''Helper for parametrizing pytest tests. + + Expects a list of lambdas, one per test. Each lambda must return + the parameters for its respective test. + + Test identifiers will be automatically generated, from the test + number and its lambda definition line (1.10, 2.12, 3.20, ...). + + If arity is None, the arguments being parametrized will be automatically + set from the function's last arguments, according to the numbers of + parameters for each test. 
+ + ''' + ids = [] + argvalues = [] + for n, t in enumerate(tests): + line = inspect.getsourcelines(t)[1] + ids.append('%u:%u' % (n+1, line)) + argvalues.append(t()) + if arity is None: + arity = len(argvalues[0]) + assert arity > 0 + def decorator(fn): + argnames = list( + parameter.name + for parameter in inspect.signature(fn).parameters.values() + if parameter.default is inspect.Parameter.empty + )[-arity:] + if arity == 1: + argnames = argnames[0] + return pytest.mark.parametrize(argnames, argvalues, ids=ids)(fn) + return decorator diff --git a/plover_build_utils/testing/steno.py b/plover_build_utils/testing/steno.py new file mode 100644 index 000000000..456426e61 --- /dev/null +++ b/plover_build_utils/testing/steno.py @@ -0,0 +1,41 @@ +from plover import system +from plover.steno import Stroke + + +def steno_to_stroke(steno): + # Check if the system changed, or + # we need to perform initial setup. + if steno_to_stroke.system != system.NAME: + keys = [] + letters = '' + has_hyphen = False + for k in system.KEYS: + if not has_hyphen and k.startswith('-'): + has_hyphen = True + keys.append(None) + letters += '-' + keys.append(k) + letters += k.strip('-') + steno_to_stroke.keys = keys + steno_to_stroke.letters = letters + steno_to_stroke.system = system.NAME + steno_to_stroke.numbers = { + v.strip('-'): k.strip('-') + for k, v in system.NUMBERS.items() + } + n = -1 + keys = set() + for li, l in enumerate(steno): + rl = steno_to_stroke.numbers.get(l) + if rl is not None: + keys.add('#') + l = rl + n = steno_to_stroke.letters.find(l, n + 1) + if n < 0: + raise ValueError('invalid steno letter at index %u:\n%s\n%s^' % (li, steno, ' ' * li)) + k = steno_to_stroke.keys[n] + if k is not None: + keys.add(k) + return Stroke(keys) + +steno_to_stroke.system = None diff --git a/plover_build_utils/testing/steno_dictionary.py b/plover_build_utils/testing/steno_dictionary.py new file mode 100644 index 000000000..6b6156f8d --- /dev/null +++ 
b/plover_build_utils/testing/steno_dictionary.py @@ -0,0 +1,483 @@ +from collections import defaultdict +from contextlib import contextmanager +import ast +import functools +import inspect +import os + +import pytest + +from plover.registry import registry +from plover.resource import ASSET_SCHEME +from plover.steno import normalize_steno +from plover.steno_dictionary import StenoDictionary + +from .dict import make_dict +from .parametrize import parametrize + + +class _DictionaryTests: + + @staticmethod + def make_dict(contents): + return contents + + @contextmanager + def tmp_dict(self, tmp_path, contents): + contents = self.make_dict(contents) + with make_dict(tmp_path, contents, extension=self.DICT_EXTENSION) as dict_path: + yield dict_path + + @contextmanager + def sample_dict(self, tmp_path): + with self.tmp_dict(tmp_path, self.DICT_SAMPLE) as dict_path: + yield dict_path + + @staticmethod + def parse_entries(entries): + return { + normalize_steno(k): v + for k, v in ast.literal_eval('{' + entries + '}').items() + } + + def test_readonly_writable_file(self, tmp_path): + ''' + Writable file: match class read-only attribute. + ''' + with self.sample_dict(tmp_path) as dict_path: + d = self.DICT_CLASS.load(str(dict_path)) + assert d.readonly == self.DICT_CLASS.readonly + + def test_readonly_readonly_file(self, tmp_path): + ''' + Read-only file: read-only dictionary. + ''' + with self.sample_dict(tmp_path) as dict_path: + dict_path.chmod(0o440) + try: + d = self.DICT_CLASS.load(str(dict_path)) + assert d.readonly + finally: + # Deleting the file will fail on Windows + # if we don't restore write permission. + dict_path.chmod(0o660) + + def test_readonly_asset(self, tmp_path, monkeypatch): + ''' + Assets are always read-only. 
+ ''' + with self.sample_dict(tmp_path) as dict_path: + fake_asset = ASSET_SCHEME + 'fake:' + dict_path.name + def fake_asset_only(p, r, v=None): + assert (p, r) == ('fake', dict_path.name) + return v + monkeypatch.setattr('pkg_resources.resource_exists', + functools.partial(fake_asset_only, + v=True)) + monkeypatch.setattr('pkg_resources.resource_filename', + functools.partial(fake_asset_only, + v=str(dict_path))) + d = self.DICT_CLASS.load(fake_asset) + assert d.readonly + + VALID_KEY = ('TEFT', '-G') + + @pytest.mark.parametrize('method_name, args', ( + ('__delitem__', (VALID_KEY,)), + ('__setitem__', (VALID_KEY, 'pouet!')), + ('clear' , ()), + ('save' , ()), + ('update' , ()), + )) + def test_readonly_no_change_allowed(self, tmp_path, method_name, args): + ''' + Don't allow changing a read-only dictionary. + ''' + with self.sample_dict(tmp_path) as dict_path: + d = self.DICT_CLASS.load(str(dict_path)) + d.readonly = True + method = getattr(d, method_name) + with pytest.raises(AssertionError): + method(*args) + + def _test_entrypoint(self): + ''' + Check a corresponding `plover.dictionary` entrypoint exists. + ''' + plugin = registry.get_plugin('dictionary', self.DICT_EXTENSION) + assert plugin.obj == self.DICT_CLASS + + DUMMY = object() + MISSING_KEY = "ceci n'est pas une clef" + MISSING_TRANSLATION = "ceci n'est pas une translation" + + def _test_load(self, tmp_path, contents, expected): + ''' + Test `load` implementation. + ''' + with self.tmp_dict(tmp_path, contents) as dict_path: + expected_timestamp = dict_path.stat().st_mtime + if inspect.isclass(expected): + # Expect an exception. + with pytest.raises(expected): + self.DICT_CLASS.load(str(dict_path)) + return + # Expect a successful load.
+ d = self.DICT_CLASS.load(str(dict_path)) + # Parse entries: + entries = self.parse_entries(expected) + # - expected: must be present + expected_entries = {k: v for k, v in entries.items() + if v is not None} + # - unexpected: must not be present + unexpected_entries = {k for k, v in entries.items() + if v is None} + # Basic checks. + assert d.readonly == self.DICT_CLASS.readonly + assert d.timestamp == expected_timestamp + if self.DICT_SUPPORT_SEQUENCE_METHODS: + assert len(d) == len(expected_entries) + assert sorted(d) == sorted(expected_entries) + assert sorted(d.items()) == sorted(expected_entries.items()) + else: + assert len(d) == 0 + assert tuple(d) == () + assert tuple(d.items()) == () + for k, v in expected_entries.items(): + assert k in d + assert d[k] == v + assert d.get(k) == v + assert d.get(k, self.DUMMY) == v + for k in unexpected_entries: + assert k not in d + with pytest.raises(KeyError): + d[k] + assert d.get(k) == None + assert d.get(k, self.DUMMY) == self.DUMMY + assert self.MISSING_KEY not in d + assert d.get(self.MISSING_KEY, self.DUMMY) is self.DUMMY + assert d.get(self.MISSING_KEY) == None + with pytest.raises(KeyError): + d[self.MISSING_KEY] + # Longest key check. + expected_longest_key = functools.reduce(max, (len(k) for k in expected_entries), 0) + assert d.longest_key == expected_longest_key + # Reverse lookup checks. + expected_reverse = defaultdict(set) + if self.DICT_SUPPORT_REVERSE_LOOKUP: + for k, v in expected_entries.items(): + expected_reverse[v].add(k) + for v, key_set in expected_reverse.items(): + assert d.reverse_lookup(v) == key_set + assert d.reverse_lookup(self.MISSING_TRANSLATION) == set() + # Case reverse lookup checks. 
+ expected_casereverse = defaultdict(set) + if self.DICT_SUPPORT_CASEREVERSE_LOOKUP: + for v in expected_entries.values(): + expected_casereverse[v.lower()].add(v) + for v, value_set in expected_casereverse.items(): + assert d.casereverse_lookup(v) == value_set + assert d.casereverse_lookup(self.MISSING_TRANSLATION) == set() + + +class _ReadOnlyDictionaryTests: + + def test_readonly_no_create_allowed(self, tmp_path): + ''' + Don't allow creating a read-only dictionary. + ''' + with self.sample_dict(tmp_path) as dict_path: + with pytest.raises(ValueError): + self.DICT_CLASS.create(str(dict_path)) + + +_TEST_DICTIONARY_UPDATE_DICT = { + ('S-G',): 'something', + ('SPH-G',): 'something', + ('SPH*G',): 'Something', + ('SPH', 'THEUPBG'): 'something', +} +_TEST_DICTIONARY_UPDATE_STENODICT = StenoDictionary() +_TEST_DICTIONARY_UPDATE_STENODICT.update(_TEST_DICTIONARY_UPDATE_DICT) + +class _WritableDictionaryTests: + + def test_longest_key(self): + ''' + Check `longest_key` support (including callbacks handling). 
+ ''' + assert self.DICT_SUPPORT_SEQUENCE_METHODS + notifications = [] + def listener(longest_key): + notifications.append(longest_key) + d = self.DICT_CLASS() + assert d.longest_key == 0 + d.add_longest_key_listener(listener) + d[('S',)] = 'a' + assert d.longest_key == 1 + assert notifications == [1] + d[('S', 'S', 'S', 'S')] = 'b' + assert d.longest_key == 4 + assert notifications == [1, 4] + d[('S', 'S')] = 'c' + assert d.longest_key == 4 + assert d[('S', 'S')] == 'c' + assert notifications == [1, 4] + del d[('S', 'S', 'S', 'S')] + assert d.longest_key == 2 + assert notifications == [1, 4, 2] + del d[('S',)] + assert d.longest_key == 2 + assert notifications == [1, 4, 2] + if self.DICT_SUPPORT_REVERSE_LOOKUP: + assert d.reverse_lookup('c') == {('S', 'S')} + else: + assert d.reverse_lookup('c') == set() + if self.DICT_SUPPORT_CASEREVERSE_LOOKUP: + assert d.casereverse_lookup('c') == {'c'} + else: + assert d.casereverse_lookup('c') == set() + d.clear() + assert d.longest_key == 0 + assert notifications == [1, 4, 2, 0] + assert d.reverse_lookup('c') == set() + assert d.casereverse_lookup('c') == set() + d.remove_longest_key_listener(listener) + d[('S', 'S')] = 'c' + assert d.longest_key == 2 + assert notifications == [1, 4, 2, 0] + + def test_casereverse_del(self): + ''' + Check deletion correctly updates `casereverse_lookup` data. 
+ ''' + d = self.DICT_CLASS() + d[('S-G',)] = 'something' + d[('SPH-G',)] = 'something' + if self.DICT_SUPPORT_CASEREVERSE_LOOKUP: + assert d.casereverse_lookup('something') == {'something'} + else: + assert d.casereverse_lookup('something') == set() + del d[('S-G',)] + if self.DICT_SUPPORT_CASEREVERSE_LOOKUP: + assert d.casereverse_lookup('something') == {'something'} + else: + assert d.casereverse_lookup('something') == set() + del d[('SPH-G',)] + assert d.casereverse_lookup('something') == set() + + @parametrize(( + lambda: (dict(_TEST_DICTIONARY_UPDATE_DICT), False, True), + lambda: (dict(_TEST_DICTIONARY_UPDATE_DICT), False, False), + lambda: (list(_TEST_DICTIONARY_UPDATE_DICT.items()), False, True), + lambda: (list(_TEST_DICTIONARY_UPDATE_DICT.items()), False, False), + lambda: (list(_TEST_DICTIONARY_UPDATE_DICT.items()), True, True), + lambda: (list(_TEST_DICTIONARY_UPDATE_DICT.items()), True, False), + lambda: (_TEST_DICTIONARY_UPDATE_STENODICT, False, True), + lambda: (_TEST_DICTIONARY_UPDATE_STENODICT, False, False), + )) + def test_update(self, update_from, use_iter, start_empty): + ''' + Check `update` does the right thing, including consuming iterators once. 
+ ''' + d = self.DICT_CLASS() + if not start_empty: + d.update({ + ('SPH*G',): 'not something', + ('STHEUPBG',): 'something', + ('EF', 'REU', 'TH*EUPBG'): 'everything', + }) + assert d[('STHEUPBG',)] == 'something' + assert d[('EF', 'REU', 'TH*EUPBG')] == 'everything' + if self.DICT_SUPPORT_REVERSE_LOOKUP: + assert d.reverse_lookup('not something') == {('SPH*G',)} + else: + assert d.reverse_lookup('not something') == set() + if self.DICT_SUPPORT_CASEREVERSE_LOOKUP: + assert d.casereverse_lookup('not something') == {'not something'} + else: + assert d.casereverse_lookup('not something') == set() + assert d.longest_key == 3 + if use_iter: + update_from = iter(update_from) + d.update(update_from) + assert d[('S-G',)] == 'something' + assert d[('SPH-G',)] == 'something' + assert d[('SPH*G',)] == 'Something' + assert d[('SPH', 'THEUPBG')] == 'something' + if not start_empty: + assert d[('STHEUPBG',)] == 'something' + assert d[('EF', 'REU', 'TH*EUPBG')] == 'everything' + assert d.reverse_lookup('not something') == set() + if self.DICT_SUPPORT_REVERSE_LOOKUP: + assert d.reverse_lookup('something') == {('STHEUPBG',), ('S-G',), ('SPH-G',), ('SPH', 'THEUPBG')} + else: + assert d.reverse_lookup('something') == set() + if self.DICT_SUPPORT_CASEREVERSE_LOOKUP: + assert d.casereverse_lookup('something') == {'something', 'Something'} + else: + assert d.casereverse_lookup('something') == set() + assert d.longest_key == 3 + else: + if self.DICT_SUPPORT_REVERSE_LOOKUP: + assert d.reverse_lookup('something') == {('S-G',), ('SPH-G',), ('SPH', 'THEUPBG')} + else: + assert d.reverse_lookup('something') == set() + if self.DICT_SUPPORT_CASEREVERSE_LOOKUP: + assert d.casereverse_lookup('something') == {'something', 'Something'} + else: + assert d.casereverse_lookup('something') == set() + assert d.longest_key == 2 + + INVALID_CONTENTS = b"ceci n'est pas un dictionaire" + + def _test_save(self, tmp_path, entries, expected): + ''' + Test `save` implementation.
+ ''' + dict_entries = self.parse_entries(entries) + with self.tmp_dict(tmp_path, self.INVALID_CONTENTS) as dict_path: + st = dict_path.stat() + old_timestamp = st.st_mtime - 1 + os.utime(str(dict_path), (st.st_atime, old_timestamp)) + # Create... + d = self.DICT_CLASS.create(str(dict_path)) + # ...must not change the target file... + assert d.timestamp == 0 + assert dict_path.read_bytes() == self.INVALID_CONTENTS + # ...even on update... + d.update(dict_entries) + assert d.timestamp == 0 + assert dict_path.read_bytes() == self.INVALID_CONTENTS + # ...until save is called. + d.save() + assert d.timestamp > old_timestamp + if expected is None: + assert dict_path.read_bytes() != self.INVALID_CONTENTS + else: + assert dict_path.read_bytes() == expected + d = self.DICT_CLASS.load(str(dict_path)) + assert sorted(d.items()) == sorted(dict_entries.items()) + + +def _wrap_method(method): + @functools.wraps(method) + def wrapper(*args, **kwargs): + return method(*args, **kwargs) + wrapper.__signature__ = inspect.signature(method) + return wrapper + + +def dictionary_test(cls): + + """ + Torture tests for dictionary implementations. + + Usage: + + ```python + + @dictionary_test + class TestMyDictionaryClass: + + # Mandatory: implementation class. + DICT_CLASS = MyDictionaryClass + + # Mandatory: extension. + DICT_EXTENSION = 'json' + + # Mandatory: valid sample dictionary contents. + DICT_SAMPLE = b'{}' + + # Optional: if `True`, will check the implementation class + # is registered as a valid `plover.dictionary` entrypoint. + DICT_REGISTERED = True + + # Optional: if `False`, `len`, `__iter__`, and `items` + # are not supported, and must respectively return: + # 0 and empty sequences. + # Note: only supported for read-only implementations, + # writable implementations must support all sequence + # methods. + DICT_SUPPORT_SEQUENCE_METHODS = False + + # Optional: if `False`, then `reverse_lookup` is not + # supported, and must return an empty set. 
+ DICT_SUPPORT_REVERSE_LOOKUP = False + + # Optional: if `False`, then `casereverse_lookup` is not + # supported, and must return an empty set. + # Note: if supported, then `DICT_SUPPORT_REVERSE_LOOKUP` + # must be too. + DICT_SUPPORT_CASEREVERSE_LOOKUP = False + + # Optional: load tests. + DICT_LOAD_TESTS = ( + lambda: ( + # Dictionary file contents. + b''' + ''', + # Expected entries, in Python-dictionary like format. + ''' + "TEFT": 'test', + 'TEFT/-G': "testing", + ''' + ), + ) + + # Optional: save tests. + # Note: only for writable implementations. + DICT_SAVE_TESTS = ( + lambda: ( + # Initial entries, in Python-dictionary like format. + ''' + "TEFT": 'test', + 'TEFT/-G': "testing", + ''', + # Expected saved dictionary contents, or `None` + # to skip the byte-for-byte test of the resulting + # file contents. + b''' + ''', + ), + ) + + # Optional: if implemented, will be called to format + # the contents before saving to a dictionary file, + # including for load and save tests. + @staticmethod + def make_dict(contents): + return contents + + """ + + DICT_SUPPORT_SEQUENCE_METHODS = getattr(cls, 'DICT_SUPPORT_SEQUENCE_METHODS', True) + DICT_SUPPORT_REVERSE_LOOKUP = getattr(cls, 'DICT_SUPPORT_REVERSE_LOOKUP', True) + DICT_SUPPORT_CASEREVERSE_LOOKUP = getattr(cls, 'DICT_SUPPORT_CASEREVERSE_LOOKUP', DICT_SUPPORT_REVERSE_LOOKUP) + assert DICT_SUPPORT_REVERSE_LOOKUP or not DICT_SUPPORT_CASEREVERSE_LOOKUP + + base_classes = [cls, _DictionaryTests] + if cls.DICT_CLASS.readonly: + base_classes.append(_ReadOnlyDictionaryTests) + else: + base_classes.append(_WritableDictionaryTests) + + class_dict = { + 'DICT_SUPPORT_SEQUENCE_METHODS': DICT_SUPPORT_SEQUENCE_METHODS, + 'DICT_SUPPORT_REVERSE_LOOKUP': DICT_SUPPORT_REVERSE_LOOKUP, + 'DICT_SUPPORT_CASEREVERSE_LOOKUP': DICT_SUPPORT_CASEREVERSE_LOOKUP, + } + + if getattr(cls, 'DICT_REGISTERED', False): + class_dict['test_entrypoint'] = _wrap_method(_DictionaryTests._test_entrypoint) + + if hasattr(cls, 'DICT_LOAD_TESTS'): + 
class_dict['test_load'] = parametrize(cls.DICT_LOAD_TESTS, arity=2)( + _wrap_method(_DictionaryTests._test_load)) + + if hasattr(cls, 'DICT_SAVE_TESTS'): + assert not cls.DICT_CLASS.readonly + class_dict['test_save'] = parametrize(cls.DICT_SAVE_TESTS, arity=2)( + _wrap_method(_WritableDictionaryTests._test_save)) + + return type(cls.__name__, tuple(base_classes), class_dict) diff --git a/setup.cfg b/setup.cfg index 58941bb57..063125d7b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -46,6 +46,7 @@ packages = plover.oslayer plover.system plover_build_utils + plover_build_utils.testing [options.entry_points] console_scripts = diff --git a/test/__init__.py b/test/__init__.py index c912ab6d2..e69de29bb 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1,48 +0,0 @@ -import inspect - -import pytest - -from plover import system -from plover.config import DEFAULT_SYSTEM_NAME -from plover.registry import registry - - -# Setup registry. -registry.update() -# Setup default system. -system.setup(DEFAULT_SYSTEM_NAME) - - -def parametrize(tests, arity=None): - '''Helper for parametrizing pytest tests. - - Expect a list of lambdas, one per test. Each lambda must return - the parameters for its respecting test. - - Test identifiers will be automatically generated, from the test - number and its lambda definition line (1.10, 2.12, 3.20, ...). - - If arity is None, the arguments being parametrized will be automatically - set from the function last arguments, according to the numbers of - parameters for each test. 
- - ''' - ids = [] - argvalues = [] - for n, t in enumerate(tests): - line = inspect.getsourcelines(t)[1] - ids.append('%u:%u' % (n+1, line)) - argvalues.append(t()) - if arity is None: - arity = len(argvalues[0]) - assert arity > 0 - def decorator(fn): - argnames = list( - parameter.name - for parameter in inspect.signature(fn).parameters.values() - if parameter.default is inspect.Parameter.empty - )[-arity:] - if arity == 1: - argnames = argnames[0] - return pytest.mark.parametrize(argnames, argvalues, ids=ids)(fn) - return decorator diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 000000000..406349724 --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,13 @@ +import pytest + +from plover import system +from plover.config import DEFAULT_SYSTEM_NAME +from plover.registry import registry + + +@pytest.fixture(scope='session', autouse=True) +def setup_plover(): + registry.update() + system.setup(DEFAULT_SYSTEM_NAME) + +pytest.register_assert_rewrite('plover_build_utils.testing') diff --git a/test/test_command.py b/test/test_command.py index 0b0c8403c..64007ddc5 100644 --- a/test/test_command.py +++ b/test/test_command.py @@ -6,9 +6,10 @@ from plover.command.set_config import set_config from plover.config import Config, DictionaryConfig -from test.test_config import DEFAULTS, DEFAULT_KEYMAP -from . 
import parametrize +from plover_build_utils.testing import parametrize + +from .test_config import DEFAULTS, DEFAULT_KEYMAP class FakeEngine: diff --git a/test/test_engine.py b/test/test_engine.py index 3518b4832..02ec5cb74 100644 --- a/test/test_engine.py +++ b/test/test_engine.py @@ -14,10 +14,11 @@ StenotypeBase, ) from plover.machine.keymap import Keymap +from plover.misc import normalize_path from plover.registry import Registry from plover.steno_dictionary import StenoDictionaryCollection -from .utils import make_dict +from plover_build_utils.testing import make_dict class FakeMachine(StenotypeBase): @@ -159,7 +160,7 @@ def test_engine(engine): ] assert FakeMachine.instance is None -def test_loading_dictionaries(engine): +def test_loading_dictionaries(tmp_path, engine): def check_loaded_events(actual_events, expected_events): assert len(actual_events) == len(expected_events) for n, event in enumerate(actual_events): @@ -174,10 +175,14 @@ def check_loaded_events(actual_events, expected_events): for d in event_args[0].dicts ] == expected_events[n], msg with \ - make_dict(b'{}', 'json', 'valid1') as valid_dict_1, \ - make_dict(b'{}', 'json', 'valid2') as valid_dict_2, \ - make_dict(b'', 'json', 'invalid1') as invalid_dict_1, \ - make_dict(b'', 'json', 'invalid2') as invalid_dict_2: + make_dict(tmp_path, b'{}', 'json', 'valid1') as valid_dict_1, \ + make_dict(tmp_path, b'{}', 'json', 'valid2') as valid_dict_2, \ + make_dict(tmp_path, b'', 'json', 'invalid1') as invalid_dict_1, \ + make_dict(tmp_path, b'', 'json', 'invalid2') as invalid_dict_2: + valid_dict_1 = normalize_path(str(valid_dict_1)) + valid_dict_2 = normalize_path(str(valid_dict_2)) + invalid_dict_1 = normalize_path(str(invalid_dict_1)) + invalid_dict_2 = normalize_path(str(invalid_dict_2)) engine.start() for new_dictionaries, *expected_events in ( # Load one valid dictionary. 
diff --git a/test/test_formatting.py b/test/test_formatting.py index ed1d25b69..7bb682b69 100644 --- a/test/test_formatting.py +++ b/test/test_formatting.py @@ -9,9 +9,8 @@ from plover import formatting from plover.formatting import Case -from plover_build_utils.testing import CaptureOutput -from . import parametrize +from plover_build_utils.testing import CaptureOutput, parametrize def action(**kwargs): diff --git a/test/test_json_dict.py b/test/test_json_dict.py index bf56126b0..551108c2c 100644 --- a/test/test_json_dict.py +++ b/test/test_json_dict.py @@ -4,70 +4,82 @@ """Unit tests for json.py.""" -import inspect - -import pytest - from plover.dictionary.json_dict import JsonDictionary -from .utils import make_dict -from . import parametrize +from plover_build_utils.testing import dictionary_test + +def json_load_test(contents, expected): + if isinstance(contents, str): + contents = contents.encode('utf-8') + return contents, expected -LOAD_TESTS = ( - lambda: ('{"S": "a"}', {('S', ): 'a'}), +JSON_LOAD_TESTS = ( + lambda: json_load_test('{"S": "a"}', '"S": "a"'), # Default encoding is utf-8. - lambda: ('{"S": "café"}', {('S', ): 'café'}), + lambda: json_load_test('{"S": "café"}', '"S": "café"'), # But if that fails, the implementation # must automatically retry with latin-1. - lambda: ('{"S": "café"}'.encode('latin-1'), {('S', ): 'café'}), + lambda: json_load_test('{"S": "café"}'.encode('latin-1'), '"S": "café"'), # Invalid JSON. - lambda: ('{"foo", "bar",}', ValueError), + lambda: json_load_test('{"foo", "bar",}', ValueError), # Invalid JSON. - lambda: ('foo', ValueError), + lambda: json_load_test('foo', ValueError), # Cannot convert to dict. - lambda: ('"foo"', ValueError), + lambda: json_load_test('"foo"', ValueError), # Ditto. 
- lambda: ('4.2', TypeError), + lambda: json_load_test('4.2', TypeError), ) -@parametrize(LOAD_TESTS) -def test_load_dictionary(contents, expected): - if isinstance(contents, str): - contents = contents.encode('utf-8') - with make_dict(contents) as filename: - if inspect.isclass(expected): - with pytest.raises(expected): - JsonDictionary.load(filename) - else: - d = JsonDictionary.load(filename) - assert dict(d.items()) == expected +def json_save_test(entries, expected): + return entries, expected.encode('utf-8') - -SAVE_TESTS = ( +JSON_SAVE_TESTS = ( # Simple test. - lambda: ({('S', ): 'a'}, - '{\n"S": "a"\n}\n'), + lambda: json_save_test( + ''' + 'S': 'a', + ''', + '{\n"S": "a"\n}\n' + ), # Check strokes format: '/' separated. - lambda: ({('SAPL', '-PL'): 'sample'}, - '{\n"SAPL/-PL": "sample"\n}\n'), + lambda: json_save_test( + ''' + 'SAPL/-PL': 'sample', + ''', + '{\n"SAPL/-PL": "sample"\n}\n' + ), # Contents should be saved as UTF-8, no escaping. - lambda: ({('S', ): 'café'}, - '{\n"S": "café"\n}\n'), + lambda: json_save_test( + ''' + 'S': 'café', + ''', + '{\n"S": "café"\n}\n' + ), # Check escaping of special characters. - lambda: ({('S', ): '{^"\n\t"^}'}, - '{\n"S": "' + r'{^\"\n\t\"^}' + '"\n}\n'), + lambda: json_save_test( + r''' + 'S': '{^"\n\t"^}', + ''', + '{\n"S": "' + r'{^\"\n\t\"^}' + '"\n}\n' + ), # Keys are sorted on save. 
- lambda: ({('B', ): 'bravo', ('A', ): 'alpha', ('C', ): 'charlie'}, - '{\n"A": "alpha",\n"B": "bravo",\n"C": "charlie"\n}\n'), + lambda: json_save_test( + ''' + 'B': 'bravo', + 'A': 'alpha', + 'C': 'charlie', + ''', + '{\n"A": "alpha",\n"B": "bravo",\n"C": "charlie"\n}\n' + ), ) -@parametrize(SAVE_TESTS) -def test_save_dictionary(contents, expected): - with make_dict(b'foo') as filename: - d = JsonDictionary.create(filename) - d.update(contents) - d.save() - with open(filename, 'rb') as fp: - contents = fp.read().decode('utf-8') - assert contents == expected +@dictionary_test +class TestJsonDictionary: + + DICT_CLASS = JsonDictionary + DICT_EXTENSION = 'json' + DICT_REGISTERED = True + DICT_LOAD_TESTS = JSON_LOAD_TESTS + DICT_SAVE_TESTS = JSON_SAVE_TESTS + DICT_SAMPLE = b'{}' diff --git a/test/test_misc.py b/test/test_misc.py index 83b84cad8..441e27052 100644 --- a/test/test_misc.py +++ b/test/test_misc.py @@ -13,7 +13,7 @@ import plover.oslayer.config as conf from plover.resource import ASSET_SCHEME -from . import parametrize +from plover_build_utils.testing import parametrize def test_popcount_8(): diff --git a/test/test_orthography.py b/test/test_orthography.py index 57a6dfa2f..2a85ab688 100644 --- a/test/test_orthography.py +++ b/test/test_orthography.py @@ -3,7 +3,7 @@ from plover.orthography import add_suffix -from . import parametrize +from plover_build_utils.testing import parametrize ADD_SUFFIX_TESTS = ( diff --git a/test/test_passport.py b/test/test_passport.py index 10061aa87..7cfefc67c 100644 --- a/test/test_passport.py +++ b/test/test_passport.py @@ -7,7 +7,7 @@ from plover.machine.passport import Passport -from . import parametrize +from plover_build_utils.testing import parametrize class MockSerial: diff --git a/test/test_rtfcre_dict.py b/test/test_rtfcre_dict.py index bcee814dd..8a0b17233 100644 --- a/test/test_rtfcre_dict.py +++ b/test/test_rtfcre_dict.py @@ -1,268 +1,290 @@ # Copyright (c) 2013 Hesky Fisher # See LICENSE.txt for details. 
-import ast +import textwrap import pytest from plover.dictionary.rtfcre_dict import RtfDictionary, format_translation -from plover.steno import normalize_steno -from . import parametrize -from .utils import make_dict +from plover_build_utils.testing import dictionary_test, parametrize +@parametrize(( + lambda: ('', ''), + lambda: ('{^in^}', r'\cxds in\cxds '), + lambda: ('{pre^}', r'pre\cxds '), + lambda: ('{pre^} ', r'pre\cxds '), + lambda: ('{pre^} ', r'pre\cxds '), +)) +def test_format_translation(before, expected): + result = format_translation(before) + assert result == expected + + +def rtf_load_test(*spec): + assert 1 <= len(spec) <= 2 + if len(spec) == 2: + # Conversion test. + rtf_entries = r'{\*\cxs S}%s' % spec[0] + dict_entries = '"S": %r' % spec[1] + else: + spec = textwrap.dedent(spec[0]).lstrip() + if not spec: + rtf_entries, dict_entries = '', '' + else: + rtf_entries, dict_entries = tuple(spec.rsplit('\n\n', 1)) + return rtf_entries, dict_entries + RTF_LOAD_TESTS = ( # Empty dictionary. - lambda: ''' + lambda: rtf_load_test( + ''' - ''', + '''), # Only one translation. - lambda: r''' - {\*\cxs SP}translation + lambda: rtf_load_test( + r''' + {\*\cxs SP}translation - 'SP': 'translation', - ''', + 'SP': 'translation', + '''), # One translation on multiple lines. - lambda: pytest.param(''' - {\\*\\cxs SP}\r\ntranslation + lambda: pytest.param(*rtf_load_test( + ''' + {\\*\\cxs SP}\r\ntranslation - 'SP': 'translation' - ''', marks=pytest.mark.xfail), + 'SP': 'translation' + '''), marks=pytest.mark.xfail), # Multiple translations no newlines. - lambda: r''' - {\*\cxs SP}translation{\*\cxs S}translation2 + lambda: rtf_load_test( + r''' + {\*\cxs SP}translation{\*\cxs S}translation2 - 'SP': 'translation', - 'S': 'translation2', - ''', + 'SP': 'translation', + 'S': 'translation2', + '''), # Multiple translations on separate lines. 
- lambda: ''' - {\\*\\cxs SP}translation\r\n{\\*\\cxs S}translation2 + lambda: rtf_load_test( + ''' + {\\*\\cxs SP}translation\r\n{\\*\\cxs S}translation2 - 'SP': 'translation', - 'S': 'translation2', - ''', - lambda: ''' - {\\*\\cxs SP}translation\n{\\*\\cxs S}translation2 + 'SP': 'translation', + 'S': 'translation2', + '''), + lambda: rtf_load_test( + ''' + {\\*\\cxs SP}translation\n{\\*\\cxs S}translation2 - 'SP': 'translation', - 'S': 'translation2', - ''', + 'SP': 'translation', + 'S': 'translation2', + '''), - # Escaped \r and \n handled - lambda: ''' - {\\*\\cxs SP}trans\\\r\\\n + # Escaped \r and \n handled. + lambda: rtf_load_test( + ''' + {\\*\\cxs SP}trans\\\r\\\n - 'SP': 'trans{#Return}{#Return}{#Return}{#Return}', - ''', + 'SP': 'trans{#Return}{#Return}{#Return}{#Return}', + '''), - # Escaped \r\n handled in mid translation - lambda: ''' - {\\*\\cxs SP}trans\\\r\\\nlation + # Escaped \r\n handled in mid translation. + lambda: rtf_load_test( + ''' + {\\*\\cxs SP}trans\\\r\\\nlation - 'SP': 'trans{#Return}{#Return}{#Return}{#Return}lation', - ''', + 'SP': 'trans{#Return}{#Return}{#Return}{#Return}lation', + '''), # Whitespace is preserved in various situations. 
- lambda: r''' - {\*\cxs S}t - - 'S': 't{^ ^}', - ''', - lambda: r''' - {\*\cxs S} t - - 'S': '{^ ^}t', - ''', - lambda: r''' - {\*\cxs S}t {\*\cxs T}t - - 'S': 't{^ ^}', - 'T': 't{^ ^}', - ''', - lambda: ''' - {\\*\\cxs S}t \r\n{\\*\\cxs T}t - - 'S': 't{^ ^}', - 'T': 't{^ ^}', - ''', - lambda: ''' - {\\*\\cxs S}t \r\n{\\*\\cxs T} t \r\n - - 'S': 't{^ ^}', - 'T': ' t ', - ''', + lambda: rtf_load_test( + r''' + {\*\cxs S}t + + 'S': 't{^ ^}', + '''), + lambda: rtf_load_test( + r''' + {\*\cxs S} t + + 'S': '{^ ^}t', + '''), + lambda: rtf_load_test( + r''' + {\*\cxs S}t {\*\cxs T}t + + 'S': 't{^ ^}', + 'T': 't{^ ^}', + '''), + lambda: rtf_load_test( + ''' + {\\*\\cxs S}t \r\n{\\*\\cxs T}t + + 'S': 't{^ ^}', + 'T': 't{^ ^}', + '''), + lambda: rtf_load_test( + ''' + {\\*\\cxs S}t \r\n{\\*\\cxs T} t \r\n + + 'S': 't{^ ^}', + 'T': ' t ', + '''), # Translations are ignored if converter returns None - lambda: r''' - {\*\cxs S}return_none + lambda: rtf_load_test( + r''' + {\*\cxs S}return_none - 'S': 'return_none', - ''', + 'S': 'return_none', + '''), - lambda: r''' - {\*\cxs T}t t t + lambda: rtf_load_test( + r''' + {\*\cxs T}t t t - 'T': 't t t{^ ^}', - ''', + 'T': 't t t{^ ^}', + '''), # Conflicts result on only the last one kept. - lambda: r''' - {\*\cxs T}t - {\*\cxs T}g + lambda: rtf_load_test( + r''' + {\*\cxs T}t + {\*\cxs T}g - 'T': 'g', - ''', - lambda: r''' - {\*\cxs T}t - {\*\cxs T}return_none + 'T': 'g', + '''), + lambda: rtf_load_test( + r''' + {\*\cxs T}t + {\*\cxs T}return_none - 'T': 'return_none', - ''', + 'T': 'return_none', + '''), # Translation conversion tests. 
- lambda: ('', ''), - lambda: (r'\-', '-'), - lambda: (r'\\ ', '\\ '), - lambda: pytest.param((r'\\', '\\'), marks=pytest.mark.xfail), - lambda: (r'\{', '{'), - lambda: (r'\}', '}'), - lambda: (r'\~', '{^ ^}'), - lambda: (r'\_', '-'), - lambda: ('\\\r', '{#Return}{#Return}'), - lambda: ('\\\n', '{#Return}{#Return}'), - lambda: (r'\cxds', '{^}'), - lambda: (r'pre\cxds ', '{pre^}'), - lambda: (r'pre\cxds ', '{pre^} '), - lambda: (r'pre\cxds', '{pre^}'), - lambda: (r'\cxds post', '{^post}'), - lambda: (r'\cxds in\cxds', '{^in^}'), - lambda: (r'\cxds in\cxds ', '{^in^}'), - lambda: (r'\cxfc', '{-|}'), - lambda: (r'\cxfl', '{>}'), - lambda: (r'pre\cxfl', 'pre{>}'), - lambda: (r'\par', '{#Return}{#Return}'), + lambda: rtf_load_test('', ''), + lambda: rtf_load_test(r'\-', '-'), + lambda: rtf_load_test(r'\\ ', '\\ '), + lambda: pytest.param(*rtf_load_test(r'\\', '\\'), marks=pytest.mark.xfail), + lambda: rtf_load_test(r'\{', '{'), + lambda: rtf_load_test(r'\}', '}'), + lambda: rtf_load_test(r'\~', '{^ ^}'), + lambda: rtf_load_test(r'\_', '-'), + lambda: rtf_load_test('\\\r', '{#Return}{#Return}'), + lambda: rtf_load_test('\\\n', '{#Return}{#Return}'), + lambda: rtf_load_test(r'\cxds', '{^}'), + lambda: rtf_load_test(r'pre\cxds ', '{pre^}'), + lambda: rtf_load_test(r'pre\cxds ', '{pre^} '), + lambda: rtf_load_test(r'pre\cxds', '{pre^}'), + lambda: rtf_load_test(r'\cxds post', '{^post}'), + lambda: rtf_load_test(r'\cxds in\cxds', '{^in^}'), + lambda: rtf_load_test(r'\cxds in\cxds ', '{^in^}'), + lambda: rtf_load_test(r'\cxfc', '{-|}'), + lambda: rtf_load_test(r'\cxfl', '{>}'), + lambda: rtf_load_test(r'pre\cxfl', 'pre{>}'), + lambda: rtf_load_test(r'\par', '{#Return}{#Return}'), # Stenovations extensions... 
- lambda: (r'{\*\cxsvatdictflags N}', '{-|}'), - lambda: (r'{\*\cxsvatdictflags LN1}', '{-|}'), + lambda: rtf_load_test(r'{\*\cxsvatdictflags N}', '{-|}'), + lambda: rtf_load_test(r'{\*\cxsvatdictflags LN1}', '{-|}'), # caseCATalyst declares new styles without a preceding \par so we treat # it as an implicit par. - lambda: (r'\s1', '{#Return}{#Return}'), + lambda: rtf_load_test(r'\s1', '{#Return}{#Return}'), # But if the \par is present we don't treat \s as an implicit par. - lambda: (r'\par\s1', '{#Return}{#Return}'), + lambda: rtf_load_test(r'\par\s1', '{#Return}{#Return}'), # Continuation styles are indented too. - lambda: (r'\par\s4', '{#Return}{#Return}{^ ^}'), + lambda: rtf_load_test(r'\par\s4', '{#Return}{#Return}{^ ^}'), # caseCATalyst punctuation. - lambda: (r'.', '{.}'), - lambda: (r'. ', '{.} '), - lambda: (r' . ', ' . '), - lambda: (r'{\cxa Q.}.', 'Q..'), + lambda: rtf_load_test(r'.', '{.}'), + lambda: rtf_load_test(r'. ', '{.} '), + lambda: rtf_load_test(r' . ', ' . '), + lambda: rtf_load_test(r'{\cxa Q.}.', 'Q..'), # Don't mess with period that is part of a word. - lambda: (r'Mr.', 'Mr.'), - lambda: (r'.attribute', '.attribute'), - lambda: (r'{\cxstit contents}', 'contents'), - lambda: (r'{\cxfing c}', '{&c}'), - lambda: (r'{\cxp.}', '{.}'), - lambda: (r'{\cxp .}', '{.}'), - lambda: (r'{\cxp . }', '{.}'), - lambda: (r'{\cxp . }', '{.}'), - lambda: (r'{\cxp !}', '{!}'), - lambda: (r'{\cxp ?}', '{?}'), - lambda: (r'{\cxp ,}', '{,}'), - lambda: (r'{\cxp ;}', '{;}'), - lambda: (r'{\cxp :}', '{:}'), - lambda: ('{\\cxp \'}', '{^\'}'), - lambda: ('{\\cxp -}', '{^-^}'), - lambda: ('{\\cxp /}', '{^/^}'), - lambda: ('{\\cxp... }', '{^... 
^}'), - lambda: ('{\\cxp ") }', '{^") ^}'), - lambda: ('{\\nonexistent }', ''), - lambda: ('{\\nonexistent contents}', 'contents'), - lambda: ('{\\nonexistent cont\\_ents}', 'cont-ents'), - lambda: ('{\\*\\nonexistent }', ''), - lambda: ('{\\*\\nonexistent contents}', ''), - lambda: ('{eclipse command}', '{eclipse command}'), - lambda: ('test text', 'test text'), - lambda: ('test text', 'test{^ ^}text'), - lambda: (r'{\cxconf [{\cxc abc}]}', 'abc'), - lambda: (r'{\cxconf [{\cxc abc}|{\cxc def}]}', 'def'), - lambda: (r'{\cxconf [{\cxc abc}|{\cxc def}|{\cxc ghi}]}', 'ghi'), - lambda: (r'{\cxconf [{\cxc abc}|{\cxc {\cxp... }}]}', '{^... ^}'), - lambda: (r'be\cxds{\*\cxsvatdictentrydate\yr2006\mo5\dy10}', '{be^}'), - lambda: (r'{\nonexistent {\cxp .}}', '{.}'), - lambda: (r'{\*\nonexistent {\cxp .}}', ''), + lambda: rtf_load_test(r'Mr.', 'Mr.'), + lambda: rtf_load_test(r'.attribute', '.attribute'), + lambda: rtf_load_test(r'{\cxstit contents}', 'contents'), + lambda: rtf_load_test(r'{\cxfing c}', '{&c}'), + lambda: rtf_load_test(r'{\cxp.}', '{.}'), + lambda: rtf_load_test(r'{\cxp .}', '{.}'), + lambda: rtf_load_test(r'{\cxp . }', '{.}'), + lambda: rtf_load_test(r'{\cxp . }', '{.}'), + lambda: rtf_load_test(r'{\cxp !}', '{!}'), + lambda: rtf_load_test(r'{\cxp ?}', '{?}'), + lambda: rtf_load_test(r'{\cxp ,}', '{,}'), + lambda: rtf_load_test(r'{\cxp ;}', '{;}'), + lambda: rtf_load_test(r'{\cxp :}', '{:}'), + lambda: rtf_load_test('{\\cxp \'}', '{^\'}'), + lambda: rtf_load_test('{\\cxp -}', '{^-^}'), + lambda: rtf_load_test('{\\cxp /}', '{^/^}'), + lambda: rtf_load_test('{\\cxp... }', '{^... 
^}'), + lambda: rtf_load_test('{\\cxp ") }', '{^") ^}'), + lambda: rtf_load_test('{\\nonexistent }', ''), + lambda: rtf_load_test('{\\nonexistent contents}', 'contents'), + lambda: rtf_load_test('{\\nonexistent cont\\_ents}', 'cont-ents'), + lambda: rtf_load_test('{\\*\\nonexistent }', ''), + lambda: rtf_load_test('{\\*\\nonexistent contents}', ''), + lambda: rtf_load_test('{eclipse command}', '{eclipse command}'), + lambda: rtf_load_test('test text', 'test text'), + lambda: rtf_load_test('test text', 'test{^ ^}text'), + lambda: rtf_load_test(r'{\cxconf [{\cxc abc}]}', 'abc'), + lambda: rtf_load_test(r'{\cxconf [{\cxc abc}|{\cxc def}]}', 'def'), + lambda: rtf_load_test(r'{\cxconf [{\cxc abc}|{\cxc def}|{\cxc ghi}]}', 'ghi'), + lambda: rtf_load_test(r'{\cxconf [{\cxc abc}|{\cxc {\cxp... }}]}', '{^... ^}'), + lambda: rtf_load_test(r'be\cxds{\*\cxsvatdictentrydate\yr2006\mo5\dy10}', '{be^}'), + lambda: rtf_load_test(r'{\nonexistent {\cxp .}}', '{.}'), + lambda: rtf_load_test(r'{\*\nonexistent {\cxp .}}', ''), ) -@parametrize(RTF_LOAD_TESTS, arity=1) -def test_rtf_load(test_case): - if isinstance(test_case, tuple): - # Translation conversion test. 
- rtf_entries = r'{\*\cxs S}' + test_case[0] - dict_entries = { normalize_steno('S'): test_case[1] } - else: - rtf_entries, dict_entries = test_case.rsplit('\n\n', 1) - dict_entries = { - normalize_steno(k): v - for k, v in ast.literal_eval('{' + dict_entries + '}').items() - } - rtf_styles = { - 0: 'Normal', - 1: 'Question', - 2: 'Answer', - 3: 'Colloquy', - 4: 'Continuation Q', - 5: 'Continuation A', - 6: 'Continuation Col', - 7: 'Paren', - 8: 'Centered', - } - rtf = ( - '\r\n'.join( - [r'{\rtf1\ansi\cxdict{\*\cxrev100}{\*\cxsystem Fake Software}'] + - [r'{\s%d %s;}' % (k, v) for k, v in rtf_styles.items()] + - ['}']) - + rtf_entries - + '\r\n}' - ) - with make_dict(rtf.encode('cp1252')) as filename: - d = RtfDictionary.load(filename) - assert dict(d.items()) == dict_entries - - - -@parametrize(( - lambda: ('', ''), - lambda: ('{^in^}', r'\cxds in\cxds '), - lambda: ('{pre^}', r'pre\cxds '), - lambda: ('{pre^} ', r'pre\cxds '), - lambda: ('{pre^} ', r'pre\cxds '), -)) -def test_format_translation(before, expected): - result = format_translation(before) - assert result == expected - - -@parametrize(( - lambda: ({'S/T': '{pre^}'}, - b'{\\rtf1\\ansi{\\*\\cxrev100}\\cxdict{\\*\\cxsystem Plover}' - b'{\\stylesheet{\\s0 Normal;}}\r\n' - b'{\\*\\cxs S///T}pre\\cxds \r\n}\r\n' +RTF_SAVE_TESTS = ( + lambda: ( + ''' + 'S/T': '{pre^}', + ''', + (b'{\\rtf1\\ansi{\\*\\cxrev100}\\cxdict{\\*\\cxsystem Plover}' + b'{\\stylesheet{\\s0 Normal;}}\r\n' + b'{\\*\\cxs S/T}pre\\cxds \r\n}\r\n') ), -)) -def test_save_dictionary(contents, expected): - with make_dict(b'foo') as filename: - d = RtfDictionary.create(filename) - d.update(contents) - d.save() - with open(filename, 'rb') as fp: - contents = fp.read() - assert contents == expected +) + +@dictionary_test +class TestRtfDictionary: + + DICT_CLASS = RtfDictionary + DICT_EXTENSION = 'rtf' + DICT_REGISTERED = True + DICT_LOAD_TESTS = RTF_LOAD_TESTS + DICT_SAVE_TESTS = RTF_SAVE_TESTS + DICT_SAMPLE = '' + + @staticmethod + def 
make_dict(contents): + if isinstance(contents, bytes): + return contents + rtf_styles = { + 0: 'Normal', + 1: 'Question', + 2: 'Answer', + 3: 'Colloquy', + 4: 'Continuation Q', + 5: 'Continuation A', + 6: 'Continuation Col', + 7: 'Paren', + 8: 'Centered', + } + rtf = ( + '\r\n'.join( + [r'{\rtf1\ansi\cxdict{\*\cxrev100}{\*\cxsystem Fake Software}'] + + [r'{\s%d %s;}' % (k, v) for k, v in rtf_styles.items()] + + ['}']) + + contents + + '\r\n}' + ) + return rtf.encode('cp1252') diff --git a/test/test_steno.py b/test/test_steno.py index 0491fa566..639aa4e9f 100644 --- a/test/test_steno.py +++ b/test/test_steno.py @@ -5,7 +5,7 @@ from plover.steno import normalize_steno, Stroke -from . import parametrize +from plover_build_utils.testing import parametrize NORMALIZE_TESTS = ( diff --git a/test/test_steno_dictionary.py b/test/test_steno_dictionary.py index 50fb7179c..ce67f75cf 100644 --- a/test/test_steno_dictionary.py +++ b/test/test_steno_dictionary.py @@ -3,54 +3,11 @@ """Unit tests for steno_dictionary.py.""" -import os -import stat -import tempfile - import pytest from plover.steno_dictionary import StenoDictionary, StenoDictionaryCollection -from . 
import parametrize - - -def test_dictionary(): - notifications = [] - def listener(longest_key): - notifications.append(longest_key) - - d = StenoDictionary() - assert d.longest_key == 0 - - d.add_longest_key_listener(listener) - d[('S',)] = 'a' - assert d.longest_key == 1 - assert notifications == [1] - d[('S', 'S', 'S', 'S')] = 'b' - assert d.longest_key == 4 - assert notifications == [1, 4] - d[('S', 'S')] = 'c' - assert d.longest_key == 4 - assert d[('S', 'S')] == 'c' - assert notifications == [1, 4] - del d[('S', 'S', 'S', 'S')] - assert d.longest_key == 2 - assert notifications == [1, 4, 2] - del d[('S',)] - assert d.longest_key == 2 - assert notifications == [1, 4, 2] - assert d.reverse_lookup('c') == {('S', 'S')} - assert d.casereverse_lookup('c') == {'c'} - d.clear() - assert d.longest_key == 0 - assert notifications == [1, 4, 2, 0] - assert d.reverse_lookup('c') == set() - assert d.casereverse_lookup('c') == set() - - d.remove_longest_key_listener(listener) - d[('S', 'S')] = 'c' - assert d.longest_key == 2 - assert notifications == [1, 4, 2, 0] +from plover_build_utils.testing import dictionary_test def test_dictionary_collection(): @@ -171,17 +128,6 @@ def test_dictionary_collection_longest_key(): assert dc.longest_key == 0 -def test_casereverse_del(): - d = StenoDictionary() - d[('S-G',)] = 'something' - d[('SPH-G',)] = 'something' - assert d.casereverse_lookup('something') == {'something'} - del d[('S-G',)] - assert d.casereverse_lookup('something') == {'something'} - del d[('SPH-G',)] - assert d.casereverse_lookup('something') == set() - - def test_casereverse_lookup(): dc = StenoDictionaryCollection() @@ -259,76 +205,22 @@ def test_dictionary_enabled(): assert dc.reverse_lookup('Testing') == set() -def test_dictionary_readonly(): - class FakeDictionary(StenoDictionary): +@dictionary_test +class TestStenoDictionary: + + class DICT_CLASS(StenoDictionary): + def _load(self, filename): + pass + DICT_EXTENSION = 'dict' + DICT_SAMPLE = b'' + + 
+@dictionary_test +class TestReadOnlyStenoDictionary: + + class DICT_CLASS(StenoDictionary): + readonly = True def _load(self, filename): pass - def _save(self): - raise NotImplementedError - tf = tempfile.NamedTemporaryFile(delete=False) - try: - tf.close() - d = FakeDictionary.load(tf.name) - # Writable file: not readonly. - assert not d.readonly - # Readonly file: readonly dictionary. - os.chmod(tf.name, stat.S_IREAD) - d = FakeDictionary.load(tf.name) - assert d.readonly - finally: - # Deleting the file will fail on Windows - # if we don't restore write permission. - os.chmod(tf.name, stat.S_IWRITE) - os.unlink(tf.name) - # Assets are always readonly. - d = FakeDictionary.load('asset:plover:assets/main.json') - assert d.readonly - - -TEST_DICTIONARY_UPDATE_DICT = { - ('S-G',): 'something', - ('SPH-G',): 'something', - ('SPH*G',): 'Something', - ('SPH', 'THEUPBG'): 'something', -} -TEST_DICTIONARY_UPDATE_STENODICT = StenoDictionary() -TEST_DICTIONARY_UPDATE_STENODICT.update(TEST_DICTIONARY_UPDATE_DICT) - -@pytest.mark.parametrize('update_from, start_empty', ( - (dict(TEST_DICTIONARY_UPDATE_DICT), True), - (dict(TEST_DICTIONARY_UPDATE_DICT), False), - (list(TEST_DICTIONARY_UPDATE_DICT.items()), True), - (list(TEST_DICTIONARY_UPDATE_DICT.items()), False), - (iter(TEST_DICTIONARY_UPDATE_DICT.items()), True), - (iter(TEST_DICTIONARY_UPDATE_DICT.items()), False), - (TEST_DICTIONARY_UPDATE_STENODICT, True), - (TEST_DICTIONARY_UPDATE_STENODICT, False), -)) -def test_dictionary_update(update_from, start_empty): - d = StenoDictionary() - if not start_empty: - d.update({ - ('SPH*G',): 'not something', - ('STHEUPBG',): 'something', - ('EF', 'REU', 'TH*EUPBG'): 'everything', - }) - assert d[('STHEUPBG',)] == 'something' - assert d[('EF', 'REU', 'TH*EUPBG')] == 'everything' - assert d.reverse_lookup('not something') == {('SPH*G',)} - assert d.longest_key == 3 - d.update(update_from) - assert d[('S-G',)] == 'something' - assert d[('SPH-G',)] == 'something' - assert 
d[('SPH*G',)] == 'Something' - assert d[('SPH', 'THEUPBG')] == 'something' - if not start_empty: - assert d[('STHEUPBG',)] == 'something' - assert d[('EF', 'REU', 'TH*EUPBG')] == 'everything' - assert d.reverse_lookup('not something') == set() - assert d.reverse_lookup('something') == {('STHEUPBG',), ('S-G',), ('SPH-G',), ('SPH', 'THEUPBG')} - assert d.casereverse_lookup('something') == {'something', 'Something'} - assert d.longest_key == 3 - else: - assert d.reverse_lookup('something') == {('S-G',), ('SPH-G',), ('SPH', 'THEUPBG')} - assert d.casereverse_lookup('something') == {'something', 'Something'} - assert d.longest_key == 2 + DICT_EXTENSION = 'dict' + DICT_SAMPLE = b'' diff --git a/test/test_translation.py b/test/test_translation.py index 9f0a2fb7e..81e0a7378 100644 --- a/test/test_translation.py +++ b/test/test_translation.py @@ -13,9 +13,7 @@ from plover.translation import Translation, Translator, _State from plover.translation import escape_translation, unescape_translation -from plover_build_utils.testing import steno_to_stroke as stroke - -from . import parametrize +from plover_build_utils.testing import parametrize, steno_to_stroke as stroke if PLATFORM == 'mac': diff --git a/test/utils.py b/test/utils.py deleted file mode 100644 index a9ac9f547..000000000 --- a/test/utils.py +++ /dev/null @@ -1,22 +0,0 @@ -from contextlib import contextmanager -import os -import tempfile - -from plover.misc import normalize_path - - -@contextmanager -def make_dict(contents, extension=None, name=None): - kwargs = { 'delete': False } - if name is not None: - kwargs['prefix'] = name + '_' - if extension is not None: - kwargs['suffix'] = '.' + extension - tf = tempfile.NamedTemporaryFile(**kwargs) - try: - tf.write(contents) - tf.close() - yield normalize_path(tf.name) - finally: - os.unlink(tf.name) -