test: run bash-completion tests in PR/CI flows (#292)
reubeno authored Feb 3, 2025
1 parent 8dd93aa commit 1e275c0
Showing 2 changed files with 115 additions and 1 deletion.
68 changes: 67 additions & 1 deletion .github/workflows/ci.yaml
@@ -368,6 +368,71 @@ jobs:
            main/benchmarks.txt
            benchmark-results.md
  # Run bash-completion test suite
  bash-completion-tests:
    name: "External tests / bash-completion test suite"
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Checkout brush
        uses: actions/checkout@v4
        with:
          path: "brush"

      - name: Checkout bash-completion
        uses: actions/checkout@v4
        with:
          repository: "scop/bash-completion"
          ref: "2.15.0"
          path: "bash-completion"

      - name: Download prebuilt brush binaries
        uses: actions/download-artifact@v4
        with:
          name: "binaries-x86_64-linux"
          path: "binaries"

      - name: Setup downloads
        run: |
          chmod +x binaries/*
          ls -l binaries

      - name: Install prerequisites for running tests
        run: |
          set -x
          sudo apt-get update -y
          sudo apt-get install -y python3
          python3 -m pip install --user pytest pytest-xdist pytest-md-report pytest-json-report

      - name: "Run test suite (oracle)"
        working-directory: bash-completion/test
        run: |
          pytest -n 128 --no-summary ./t || true

      - name: "Run test suite (brush)"
        env:
          BASH_COMPLETION_TEST_BASH: ${{ github.workspace }}/binaries/brush --noprofile --input-backend=basic
        working-directory: bash-completion/test
        run: |
          pytest -n 128 \
            --json-report \
            --json-report-file=${{ github.workspace }}/test-results-bash-completion.json \
            ./t || true

      - name: "Generate report summary"
        run: |
          python3 brush/scripts/summarize-pytest-results.py \
            -r ${{ github.workspace }}/test-results-bash-completion.json \
            --title="Test Summary: bash-completion test suite" \
            >${{ github.workspace }}/test-results-bash-completion.md

      - name: Upload test report
        uses: actions/upload-artifact@v4
        with:
          name: test-reports-bash-completion
          path: |
            test-results-bash-completion.md
  # Test release binary on a variety of OS platforms.
  os-tests:
    strategy:
@@ -396,7 +461,7 @@ jobs:
        with:
          path: sources

-     - name: Download binaries
+     - name: Download prebuilt brush binaries
        uses: actions/download-artifact@v4
        with:
          name: binaries-x86_64-linux
@@ -421,6 +486,7 @@ jobs:
        run: ${{ matrix.prereqs_command }}

      - name: Run tests
+       shell: bash
        run: |
          export BRUSH_PATH=$PWD/binaries/brush
          export BRUSH_COMPAT_TEST_CASES=$PWD/sources/brush-shell/tests/cases
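For anyone wanting to reproduce the brush leg of this job outside CI, here is a rough, illustrative Python sketch of what the steps above amount to. It assumes the workflow's layout (a built brush binary at ./binaries/brush and a bash-completion 2.15.0 checkout at ./bash-completion) and a local install of pytest with pytest-xdist and pytest-json-report; the pytest flags and the BASH_COMPLETION_TEST_BASH override are lifted from the job, but the script itself is not part of this change.

```python
#!/usr/bin/python3
# Hypothetical local repro of the "Run test suite (brush)" step above.
import os
import subprocess

workspace = os.getcwd()  # stand-in for ${{ github.workspace }}

env = os.environ.copy()
# Point the suite at brush rather than the system bash, as the workflow's
# BASH_COMPLETION_TEST_BASH variable does.
env["BASH_COMPLETION_TEST_BASH"] = (
    f"{workspace}/binaries/brush --noprofile --input-backend=basic"
)

# The workflow appends "|| true" so a failing suite doesn't fail the job;
# check=False mirrors that here.
subprocess.run(
    [
        "pytest", "-n", "128",
        "--json-report",
        f"--json-report-file={workspace}/test-results-bash-completion.json",
        "./t",
    ],
    cwd=f"{workspace}/bash-completion/test",
    env=env,
    check=False,
)
```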
48 changes: 48 additions & 0 deletions scripts/summarize-pytest-results.py
@@ -0,0 +1,48 @@
#!/usr/bin/python3
import argparse
import json

# Parse command-line arguments.
parser = argparse.ArgumentParser(description='Summarize pytest results')
parser.add_argument("-r", "--results", dest="results_file_path", type=str, required=True, help="Path to .json pytest results file")
parser.add_argument("--title", dest="title", type=str, default="Pytest results", help="Title to display")

args = parser.parse_args()

# Load the results file produced by pytest-json-report.
with open(args.results_file_path, "r") as results_file:
    results = json.load(results_file)

summary = results["summary"]

# Pull per-outcome counts from the summary; missing keys count as zero.
error_count = summary.get("error") or 0
fail_count = summary.get("failed") or 0
pass_count = summary.get("passed") or 0
skip_count = summary.get("skipped") or 0
expected_fail_count = summary.get("xfailed") or 0
unexpected_pass_count = summary.get("xpassed") or 0

total_count = summary.get("total") or 0
collected_count = summary.get("collected") or 0
deselected_count = summary.get("deselected") or 0

# Guard against dividing by zero when no tests ran.
denominator = total_count or 1

#
# Output: a Markdown table suitable for a CI job summary.
#

print(f"# {args.title}")

print("| Outcome | Count | Percentage |")
print("| ------------------ | ----------------------: | ---------: |")
print(f"| ✅ Pass | {pass_count} | <span style='color:green'>{pass_count * 100 / denominator:.2f}</span> |")

# Only emit rows for outcomes that actually occurred.
if error_count > 0:
    print(f"| ❗️ Error | {error_count} | <span style='color:red'>{error_count * 100 / denominator:.2f}</span> |")
if fail_count > 0:
    print(f"| ❌ Fail | {fail_count} | <span style='color:red'>{fail_count * 100 / denominator:.2f}</span> |")
if skip_count > 0:
    print(f"| ⏩ Skip | {skip_count} | {skip_count * 100 / denominator:.2f} |")
if expected_fail_count > 0:
    print(f"| ❎ Expected Fail | {expected_fail_count} | {expected_fail_count * 100 / denominator:.2f} |")
if unexpected_pass_count > 0:
    print(f"| ✔️ Unexpected Pass | {unexpected_pass_count} | <span style='color:red'>{unexpected_pass_count * 100 / denominator:.2f}</span> |")

print(f"| 📊 Total | {total_count} | {total_count * 100 / denominator:.2f} |")
