Refactor benchmark CLI #149

name: Performance Regression Detector

on:
  pull_request:
    branches: [ master ]

jobs:
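  # Reusable workflow that checks the PR's content; its `result` output is
  # what the commented-out gate in detect-regression consumes.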
  pr-content-check:
    uses: ./.github/workflows/PR-content-check.yml

  detect-regression:
    name: Detect Regression
    # Commented out while iterating on the workflow itself.
    # TODO: re-enable once skipping is valued above running all the time.
    # needs: pr-content-check
    # if: ${{ needs.pr-content-check.outputs.result == 'pass' }}
    runs-on: macos-latest
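    # Fan the job out across CPython and PyPy interpreters; fail-fast: false
    # lets the remaining interpreters finish even if one of them fails.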
    strategy:
      matrix:
        python-version: ['3.9', '3.10', 'pypy-3.7', 'pypy-3.8']
      fail-fast: false
    steps:
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      # Generates the data used for benchmarking.
      - name: Checkout ion-data-generator
        uses: actions/checkout@v3
        with:
          repository: amazon-ion/ion-data-generator
          ref: main
          path: ion-data-generator

      - name: Build ion-data-generator
        run: cd ion-data-generator && mvn clean install
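      # The Maven build above produces the snapshot jar under target/ that
      # jar_file points at in the next step.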
      - name: Generate test Ion Data
        env:
          jar_file: ion-data-generator/target/ion-data-generator-1.0-SNAPSHOT.jar
          schema_dir: ion-data-generator/tst/com/amazon/ion/workflow
        run: |
          mkdir -p testData
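          # One Ion binary (.10n) file is generated per schema below; -S 50000
          # presumably controls the amount of data generated (not verified
          # against ion-data-generator's documentation).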
          for test in nestedStruct nestedList sexp realWorldDataSchema01 realWorldDataSchema02 realWorldDataSchema03
          do
            java -jar $jar_file generate -S 50000 --input-ion-schema $schema_dir/${test}.isl testData/${test}.10n
          done
      - name: Upload test Ion Data to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: test Ion Data
          path: testData

      # Generates performance results for the current commit.
      - name: Checkout the current commit of ion-python
        uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          path: ion-python-current
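      # The editable install (-e) lets the benchmark CLI run against this
      # checked-out source; the submodules are initialized first, presumably
      # for the package's native components.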
      - name: Create a virtual environment and set up the package
        working-directory: ./ion-python-current
        run: |
          git submodule init && git submodule update
          python3 -m venv ./venv
          . venv/bin/activate
          pip install -r requirements.txt
          pip install -e .
      - name: Run performance benchmark
        working-directory: ./ion-python-current
        env:
          warmups: 100
          iterations: 1000
          cli_script: amazon/ionbenchmark/ion_benchmark_cli.py
          runner_dir: ${{ github.workspace }}
        run: |
          . venv/bin/activate
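          # Flags as used here: -w warmup runs, --iterations timed runs,
          # -o output directory; --io-type, --format, and --api select the
          # I/O mode, serialization formats, and API under test.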
          for testset in nestedStruct nestedList sexp realWorldDataSchema01 realWorldDataSchema02 realWorldDataSchema03
          do
            python $cli_script read -w $warmups --iterations $iterations -o $runner_dir/benchmarkResults/${testset}_read/new --io-type buffer --format ion_text --format ion_binary --api load_dump $runner_dir/testData/${testset}.10n
            python $cli_script write -w $warmups --iterations $iterations -o $runner_dir/benchmarkResults/${testset}_write/new --io-type buffer --format ion_text --format ion_binary --api load_dump $runner_dir/testData/${testset}.10n
          done
      # Generates performance results for the previous commit.
      - name: Checkout the master branch of ion-python
        uses: actions/checkout@v3
        with:
          ref: master
          path: ion-python-master

      - name: Create a virtual environment and set up the package
        working-directory: ./ion-python-master
        run: |
          git submodule init && git submodule update
          python3 -m venv ./venv
          . venv/bin/activate
          pip install -r requirements.txt
          pip install -e .
      - name: Run performance benchmark
        working-directory: ./ion-python-master
        env:
          warmups: 100
          iterations: 1000
          cli_script: amazon/ionbenchmark/ion_benchmark_cli.py
          runner_dir: ${{ github.workspace }}
        run: |
          . venv/bin/activate
          for testset in nestedStruct nestedList sexp realWorldDataSchema01 realWorldDataSchema02 realWorldDataSchema03
          do
            python $cli_script read -w $warmups --iterations $iterations -o $runner_dir/benchmarkResults/${testset}_read/previous --io-type buffer --format ion_text --format ion_binary --api load_dump $runner_dir/testData/${testset}.10n
            python $cli_script write -w $warmups --iterations $iterations -o $runner_dir/benchmarkResults/${testset}_write/previous --io-type buffer --format ion_text --format ion_binary --api load_dump $runner_dir/testData/${testset}.10n
          done
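      # At this point each benchmarkResults/<testset>_{read,write}/ directory
      # holds a previous/ and new/ result pair, the inputs to the compare
      # step below.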
      # Upload resources, results, and reports.
      - name: Upload new benchmark results directory to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: Benchmark result
          path: benchmarkResults

      - name: Upload test Ion Data to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: test Ion Data
          path: testData
      # Compare results and identify regressions.
      - name: Detect performance regression
        id: regression_result
        run: |
          . $GITHUB_WORKSPACE/ion-python-current/venv/bin/activate
          result=true
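          # `compare` writes a report for each result pair and prints
          # "no regression detected" on stdout when the new results are
          # acceptable; any other output flips the overall result to false.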
          cd benchmarkResults
          for FILE in *
          do
            message=$(python $GITHUB_WORKSPACE/ion-python-current/amazon/ionbenchmark/ion_benchmark_cli.py compare --benchmark-result-previous $FILE/previous --benchmark-result-new $FILE/new $FILE/report | tee /dev/stderr)
            if [ "$message" != "no regression detected" ]; then result=false; fi
          done
echo "::set-output name=regression-result::$result"
if [ "$result" = "true" ]; then echo "No regression detected!" >> $GITHUB_STEP_SUMMARY; fi
      - name: Upload comparison reports to the benchmark results directory
        uses: actions/upload-artifact@v2
        with:
          name: Benchmark result
          path: benchmarkResults
      - name: Fail the workflow if a regression was detected
        env:
          regression_detect: ${{ steps.regression_result.outputs.regression-result }}
        if: ${{ env.regression_detect == 'false' }}
        run: |
          . $GITHUB_WORKSPACE/ion-python-current/venv/bin/activate
          cd benchmarkResults
          echo "**A performance regression was detected in the files below; download the benchmark result artifact(s) for details:**" >> $GITHUB_STEP_SUMMARY
          for FILE in *
          do
            regressionDetection=$(python $GITHUB_WORKSPACE/ion-python-current/amazon/ionbenchmark/ion_benchmark_cli.py compare --benchmark-result-previous $FILE/previous --benchmark-result-new $FILE/new $FILE/report | tee /dev/stderr)
            if [ "$regressionDetection" != "no regression detected" ]; then echo "$FILE" >> $GITHUB_STEP_SUMMARY; fi
          done
          exit 1