enforce wheel size limits, README formatting in CI #4754

Merged: 5 commits, merged Nov 15, 2024. Changes shown are from 4 commits.
ci/build_wheel_cugraph-equivariant.sh (4 additions, 1 deletion)
@@ -3,4 +3,7 @@
 
 set -euo pipefail
 
-./ci/build_wheel.sh cugraph-equivariant python/cugraph-equivariant
+package_dir="python/cugraph-equivariant"
+
+./ci/build_wheel.sh cugraph-equivariant ${package_dir}
+./ci/validate_wheel.sh ${package_dir} dist
ci/build_wheel_cugraph.sh (4 additions, 1 deletion)
@@ -3,6 +3,8 @@
 
 set -euo pipefail
 
+package_dir="python/cugraph"
+
 RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
 
 # Download the pylibcugraph wheel built in the previous step and make it
@@ -31,4 +33,5 @@ esac
 export SKBUILD_CMAKE_ARGS="-DDETECT_CONDA_ENV=OFF;-DFIND_CUGRAPH_CPP=OFF;-DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/${EXTRA_CMAKE_ARGS}"
 export SKBUILD_BUILD_TOOL_ARGS="-j${PARALLEL_LEVEL};-l${PARALLEL_LEVEL}"
 
-./ci/build_wheel.sh cugraph python/cugraph
+./ci/build_wheel.sh cugraph ${package_dir}
+./ci/validate_wheel.sh ${package_dir} final_dist
ci/build_wheel_pylibcugraph.sh (4 additions, 1 deletion)
@@ -3,6 +3,8 @@
 
 set -euo pipefail
 
+package_dir="python/pylibcugraph"
+
 PARALLEL_LEVEL=$(python -c \
 "from math import ceil; from multiprocessing import cpu_count; print(ceil(cpu_count()/4))")
 
@@ -18,4 +20,5 @@ esac
 export SKBUILD_CMAKE_ARGS="-DDETECT_CONDA_ENV=OFF;-DFIND_CUGRAPH_CPP=OFF;-DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/${EXTRA_CMAKE_ARGS}"
 export SKBUILD_BUILD_TOOL_ARGS="-j${PARALLEL_LEVEL};-l${PARALLEL_LEVEL}"
 
-./ci/build_wheel.sh pylibcugraph python/pylibcugraph
+./ci/build_wheel.sh pylibcugraph ${package_dir}
+./ci/validate_wheel.sh ${package_dir} final_dist
ci/validate_wheel.sh (new file, 21 additions)
@@ -0,0 +1,21 @@
#!/bin/bash
# Copyright (c) 2024, NVIDIA CORPORATION.

set -euo pipefail

package_dir=$1
wheel_dir_relative_path=$2

cd "${package_dir}"

rapids-logger "validate packages with 'pydistcheck'"

pydistcheck \
--inspect \
"$(echo ${wheel_dir_relative_path}/*.whl)"

rapids-logger "validate packages with 'twine'"

twine check \
--strict \
"$(echo ${wheel_dir_relative_path}/*.whl)"
python/cugraph-equivariant/pyproject.toml (8 additions)
@@ -70,3 +70,11 @@ include = [
build-backend = "setuptools.build_meta"
dependencies-file = "../../dependencies.yaml"
matrix-entry = "cuda_suffixed=true"

[tool.pydistcheck]
select = [
"distro-too-large-compressed",
]

# PyPI limit is 100 MiB, fail CI before we get too close to that
max_allowed_size_compressed = '75M'
python/cugraph/pyproject.toml (8 additions)
@@ -88,3 +88,11 @@ requires = [
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
dependencies-file = "../../dependencies.yaml"
matrix-entry = "cuda_suffixed=true"

[tool.pydistcheck]
select = [
"distro-too-large-compressed",
]

# detect when package size grows significantly
max_allowed_size_compressed = '1.5G'

Contributor:
Should we do what @betatim proposed for cuml here (and in pylibcugraph) as well, and have separate CUDA 11/12 limits?

Member (Author):
Yeah, with the benefit of a night of sleep I do think that's better, and the little bit of extra complexity in the script is worth it. I'll do that here.

And I'll put up a PR in cuvs doing that too... it had a similarly huge threshold and a big difference across CUDA versions.

Member (Author):
Just put up a proposal for setting these thresholds; let me know what you think.

Contributor:
Looks good to me!
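
For illustration only (the per-CUDA-version change discussed above is not in the 4 commits shown): a rough sketch of how ci/validate_wheel.sh could pick a limit per CUDA major version. It assumes RAPIDS_CUDA_VERSION is available in the environment (as it is for the build scripts above) and that pydistcheck accepts a --max-allowed-size-compressed override on the command line; the thresholds below are placeholders, not values from this PR.

#!/bin/bash
# Copyright (c) 2024, NVIDIA CORPORATION.

set -euo pipefail

package_dir=$1
wheel_dir_relative_path=$2

# Hypothetical split: CUDA 11 and CUDA 12 wheels differ a lot in size, so use
# a different compressed-size ceiling for each (placeholder values below).
if [[ "${RAPIDS_CUDA_VERSION}" == 11.* ]]; then
    PYDISTCHECK_ARGS=(--max-allowed-size-compressed '1.5G')
else
    PYDISTCHECK_ARGS=(--max-allowed-size-compressed '1.3G')
fi

cd "${package_dir}"

rapids-logger "validate packages with 'pydistcheck'"

pydistcheck \
    --inspect \
    "${PYDISTCHECK_ARGS[@]}" \
    "$(echo ${wheel_dir_relative_path}/*.whl)"

rapids-logger "validate packages with 'twine'"

twine check \
    --strict \
    "$(echo ${wheel_dir_relative_path}/*.whl)"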

python/pylibcugraph/pyproject.toml (8 additions)
@@ -78,3 +78,11 @@ requires = [
"rmm==24.12.*,>=0.0.0a0",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
matrix-entry = "cuda_suffixed=true;use_cuda_wheels=true"

[tool.pydistcheck]
select = [
"distro-too-large-compressed",
]

# detect when package size grows significantly
max_allowed_size_compressed = '1.5G'