Skip to content

Commit

Permalink
Skip some tests on PyPy
Browse files Browse the repository at this point in the history
  • Loading branch information
popematt committed Jul 26, 2023
1 parent 5988aca commit 90aead5
Showing 1 changed file with 24 additions and 10 deletions.
34 changes: 24 additions & 10 deletions tests/test_benchmark_cli.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,15 @@
# import json
import os
import sys
import time
# from itertools import chain
from os.path import abspath, join, dirname

# import cbor2

import pytest

from amazon.ion import simpleion
from amazon.ion.equivalence import ion_equals
from amazon.ionbenchmark import ion_benchmark_cli, Format
from amazon.ionbenchmark.Format import format_is_ion, format_is_cbor, format_is_json, rewrite_file_to_format
from amazon.ionbenchmark.ion_benchmark_cli import _str_to_bool, TOOL_VERSION
from tests import parametrize
# from tests.test_simpleion import generate_scalars_text
# from tests.writer_util import SIMPLE_SCALARS_MAP_TEXT

# CLI usage text taken from the benchmark CLI module; used by the
# option-parsing tests below.
doc = ion_benchmark_cli.__doc__
# NOTE(review): appears to be an index into the CLI's result table/options —
# confirm against the collapsed code that uses it.
result_table_option_idx = 3
Expand Down Expand Up @@ -58,7 +54,7 @@ def test_option_version():
assert not error_code
assert out.strip() == TOOL_VERSION


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
@parametrize(
('write', 'buffer'),
('write', 'file'),
Expand All @@ -71,32 +67,36 @@ def test_run_benchmark_spec(args):
assert not error_code



@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_option_write(file=generate_test_path('integers.ion')):
    """Smoke-test the 'write' command: it should exit without an error code."""
    status, _, _ = run_cli(['write', file])
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_option_read(file=generate_test_path('integers.ion')):
    """Smoke-test the 'read' command: the sample file should read successfully."""
    status, _, _ = run_cli(['read', file])
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_option_write_c_extension(file=generate_test_path('integers.ion')):
    """The 'write' command works with the C extension both enabled and disabled."""
    for flag in ('true', 'false'):
        status, _, _ = run_cli(['write', file, '--c-extension', flag])
        assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_option_read_c_extension(file=generate_test_path('integers.ion')):
    """The 'read' command works with the C extension both enabled and disabled."""
    for flag in ('true', 'false'):
        status, _, _ = run_cli(['read', file, '--c-extension', flag])
        assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_option_read_iterations(file=generate_test_path('integers.ion')):
# This is a potentially flaky test due to the overhead of running the CLI as a new process.
start = time.perf_counter()
Expand All @@ -115,6 +115,7 @@ def test_option_read_iterations(file=generate_test_path('integers.ion')):
assert time_2 > time_1


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_option_write_iterations(file=generate_test_path('integers.ion')):
(error_code, _, _) = run_cli(['write', file, '--c-extension', 'true', '--iterations', '100'])
assert not error_code
Expand All @@ -129,36 +130,43 @@ def test_option_write_iterations(file=generate_test_path('integers.ion')):
# execution_with_command(['write', file, '--api', 'load_dump', '--api', 'streaming'])


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_read_duplicated_api(file=generate_test_path('integers.ion')):
    """Passing the same --api value twice must not break the 'read' command."""
    cmd = ['read', file] + ['--api', 'load_dump'] * 2
    status, _, _ = run_cli(cmd)
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_write_duplicated_api(file=generate_test_path('integers.ion')):
    """Passing the same --api value twice must not break the 'write' command."""
    cmd = ['write', file] + ['--api', 'load_dump'] * 2
    status, _, _ = run_cli(cmd)
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_read_multi_format(file=generate_test_path('integers.ion')):
    """'read' accepts several --format options in a single invocation."""
    cmd = ['read', file]
    for fmt in ('ion_text', 'ion_binary'):
        cmd += ['--format', fmt]
    status, _, _ = run_cli(cmd)
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_write_multi_format(file=generate_test_path('integers.ion')):
    """'write' accepts several --format options in a single invocation."""
    cmd = ['write', file]
    for fmt in ('ion_text', 'ion_binary'):
        cmd += ['--format', fmt]
    status, _, _ = run_cli(cmd)
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_read_multi_duplicated_format(file=generate_test_path('integers.ion')):
    """A repeated --format value among several must not break the 'read' command."""
    cmd = ['read', file]
    for fmt in ('ion_text', 'ion_binary', 'ion_text'):
        cmd += ['--format', fmt]
    status, _, _ = run_cli(cmd)
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_write_multi_duplicated_format(file=generate_test_path('integers.ion')):
    """A repeated --format value among several must not break the 'write' command."""
    cmd = ['write', file]
    for fmt in ('ion_text', 'ion_binary', 'ion_text'):
        cmd += ['--format', fmt]
    status, _, _ = run_cli(cmd)
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
@parametrize(
*tuple((f.value for f in Format.Format if Format.format_is_json(f.value)))
)
Expand All @@ -167,6 +175,7 @@ def test_write_json_format(f):
assert not error_code


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
@parametrize(
*tuple((f.value for f in Format.Format if Format.format_is_json(f.value)))
)
Expand All @@ -175,6 +184,7 @@ def test_read_json_format(f):
assert not error_code


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
@parametrize(
*tuple((f.value for f in Format.Format if Format.format_is_cbor(f.value)))
)
Expand All @@ -183,6 +193,7 @@ def test_write_cbor_format(f):
assert not error_code


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
@parametrize(
*tuple((f.value for f in Format.Format if Format.format_is_cbor(f.value)))
)
Expand All @@ -191,14 +202,14 @@ def test_read_cbor_format(f):
assert not error_code



@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
@parametrize('buffer', 'file')
def test_write_io_type(f):
    """The 'write' command accepts both supported --io-type values."""
    target = generate_test_path('integers.ion')
    status, _, _ = run_cli(['write', target, '--io-type', str(f), '--format', 'json'])
    assert not status



@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
@parametrize(*['buffer', 'file'])
def test_read_io_type(f):
(error_code, _, _) = run_cli(['read', '--io-type', f'{f}', '--format', 'ion_text', '--format', 'ion_binary', generate_test_path('integers.ion')])
Expand Down Expand Up @@ -235,16 +246,19 @@ def assert_ion_string_equals(act, exp):
assert ion_equals(simpleion.loads(act), simpleion.loads(exp))


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_compare_without_regression():
    """Comparing a baseline against itself must succeed even with --fail set."""
    baseline = generate_test_path('compare/cats_baseline.ion')
    status, _, _ = run_cli(['compare', baseline, baseline, '--fail'])
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_compare_with_small_regression():
    """A small regression should stay under the threshold, so --fail still passes."""
    baseline = generate_test_path('compare/cats_baseline.ion')
    candidate = generate_test_path('compare/cats_small_regression.ion')
    status, _, _ = run_cli(['compare', baseline, candidate, '--fail'])
    assert not status


@pytest.mark.skipif(sys.implementation.name != 'cpython', reason="benchmark CLI supports only cpython")
def test_compare_with_large_regression():
    """A large regression between baseline and candidate must make 'compare --fail' exit non-zero."""
    # Build the baseline path with generate_test_path, consistent with the
    # sibling compare tests above; the previous hard-coded './tests/...'
    # relative path only resolved when pytest ran from the repository root.
    (error_code, _, _) = run_cli(['compare',
                                  generate_test_path('compare/cats_baseline.ion'),
                                  generate_test_path('compare/cats_large_regression.ion'),
                                  '--fail'])
    assert error_code
Expand Down

0 comments on commit 90aead5

Please sign in to comment.