From f739e3ecf1ab903ce35e6bda6009bc1550775c5c Mon Sep 17 00:00:00 2001
From: Muhammad Haseeb <14217455+mhaseeb123@users.noreply.github.com>
Date: Thu, 14 Nov 2024 19:57:34 +0000
Subject: [PATCH] Revert style changes

---
 python/cudf/cudf/tests/test_parquet.py | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/python/cudf/cudf/tests/test_parquet.py b/python/cudf/cudf/tests/test_parquet.py
index 3ccdf4c6013..ad2f498b5da 100644
--- a/python/cudf/cudf/tests/test_parquet.py
+++ b/python/cudf/cudf/tests/test_parquet.py
@@ -3773,21 +3773,13 @@ def test_parquet_writer_roundtrip_structs_with_arrow_schema(
 @pytest.mark.parametrize("use_pandas_metadata", [True, False])
 @pytest.mark.parametrize("row_groups", [[[0]], None, [[0, 1]]])
 def test_parquet_chunked_reader(
-    chunk_read_limit,
-    pass_read_limit,
-    use_pandas_metadata,
-    row_groups,
+    chunk_read_limit, pass_read_limit, use_pandas_metadata, row_groups
 ):
     df = pd.DataFrame(
-        {
-            "a": [1, 2, 3, None] * 10000,
-            "b": ["av", "qw", None, "xyz"] * 10000,
-        }
+        {"a": [1, 2, 3, None] * 10000, "b": ["av", "qw", None, "xyz"] * 10000}
     )
     buffer = BytesIO()
-    # Write 4 Parquet row groups
     df.to_parquet(buffer, row_group_size=10000)
-    # Check with row_groups specified
     actual = read_parquet_chunked(
         [buffer],
         chunk_read_limit=chunk_read_limit,