Commit

adding ci files
Kernel Patches Daemon committed Oct 24, 2024
1 parent 2164cae · commit 0feff1f
Showing 45 changed files with 3,017 additions and 18 deletions.
49 changes: 49 additions & 0 deletions .github/actions/veristat_baseline_compare/action.yml
@@ -0,0 +1,49 @@
name: 'run-veristat'
description: 'Run veristat benchmark'
inputs:
  veristat_output:
    description: 'Veristat output filepath'
    required: true
  baseline_name:
    description: 'Veristat baseline cache name'
    required: true
runs:
  using: "composite"
  steps:
    - uses: actions/upload-artifact@v4
      with:
        name: ${{ inputs.baseline_name }}
        if-no-files-found: error
        path: ${{ github.workspace }}/${{ inputs.veristat_output }}

    # For pull request:
    # - get baseline log from cache
    # - compare it to current run
    - if: ${{ github.event_name == 'pull_request' }}
      uses: actions/cache/restore@v4
      with:
        key: ${{ inputs.baseline_name }}
        restore-keys: |
          ${{ inputs.baseline_name }}-
        path: '${{ github.workspace }}/${{ inputs.baseline_name }}'

    - if: ${{ github.event_name == 'pull_request' }}
      name: Show veristat comparison
      shell: bash
      run: ./.github/scripts/compare-veristat-results.sh
      env:
        BASELINE_PATH: ${{ github.workspace }}/${{ inputs.baseline_name }}
        VERISTAT_OUTPUT: ${{ inputs.veristat_output }}

    # For push: just put baseline log to cache
    - if: ${{ github.event_name == 'push' }}
      shell: bash
      run: |
        mv "${{ github.workspace }}/${{ inputs.veristat_output }}" \
           "${{ github.workspace }}/${{ inputs.baseline_name }}"

    - if: ${{ github.event_name == 'push' }}
      uses: actions/cache/save@v4
      with:
        key: ${{ inputs.baseline_name }}-${{ github.run_id }}
        path: '${{ github.workspace }}/${{ inputs.baseline_name }}'
18 changes: 18 additions & 0 deletions .github/scripts/compare-veristat-results.sh
@@ -0,0 +1,18 @@
#!/bin/bash

if [[ ! -f "${BASELINE_PATH}" ]]; then
    echo "# No ${BASELINE_PATH} available" >> "${GITHUB_STEP_SUMMARY}"

    echo "No ${BASELINE_PATH} available"
    echo "Printing veristat results"
    cat "${VERISTAT_OUTPUT}"

    exit
fi

selftests/bpf/veristat \
    --output-format csv \
    --emit file,prog,verdict,states \
    --compare "${BASELINE_PATH}" "${VERISTAT_OUTPUT}" > compare.csv

python3 ./.github/scripts/veristat_compare.py compare.csv
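For context, a minimal sketch of how this script can be exercised outside the composite action above; the file names are placeholders, and it assumes a built selftests/bpf/veristat in the working tree plus the GitHub-provided GITHUB_STEP_SUMMARY variable:

# Hypothetical manual invocation; paths and file names are illustrative only.
export GITHUB_STEP_SUMMARY="$(mktemp)"       # normally provided by the runner
BASELINE_PATH="${PWD}/veristat-baseline" \
VERISTAT_OUTPUT="veristat-output" \
    ./.github/scripts/compare-veristat-results.sh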
196 changes: 196 additions & 0 deletions .github/scripts/matrix.py
@@ -0,0 +1,196 @@
#!/usr/bin/env python3

import os
import dataclasses
import json

from enum import Enum
from typing import Any, Dict, List, Final, Set, Union

MANAGED_OWNER: Final[str] = "kernel-patches"
MANAGED_REPOS: Final[Set[str]] = {
    f"{MANAGED_OWNER}/bpf",
    f"{MANAGED_OWNER}/vmtest",
}
# We need to run on ubuntu 20.04 because our rootfs is based on debian buster and we
# otherwise get library versioning issue such as
# `./test_verifier: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.34' not found (required by ./test_verifier)`
DEFAULT_RUNNER: Final[str] = "ubuntu-20.04"
DEFAULT_LLVM_VERSION: Final[int] = 17
DEFAULT_SELF_HOSTED_RUNNER_TAGS: Final[List[str]] = ["self-hosted", "docker-noble-main"]


class Arch(str, Enum):
    """
    CPU architecture supported by CI.
    """

    AARCH64 = "aarch64"
    S390X = "s390x"
    X86_64 = "x86_64"


class Compiler(str, Enum):
    GCC = "gcc"
    LLVM = "llvm"


@dataclasses.dataclass
class Toolchain:
    compiler: Compiler
    # This is relevant ONLY for LLVM and should not be required for GCC
    version: int

    @property
    def short_name(self) -> str:
        return str(self.compiler.value)

    @property
    def full_name(self) -> str:
        if self.compiler == Compiler.GCC:
            return self.short_name

        return f"{self.short_name}-{self.version}"

    def to_dict(self) -> Dict[str, Union[str, int]]:
        return {
            "name": self.short_name,
            "fullname": self.full_name,
            "version": self.version,
        }


@dataclasses.dataclass
class BuildConfig:
    arch: Arch
    toolchain: Toolchain
    kernel: str = "LATEST"
    run_veristat: bool = False
    parallel_tests: bool = False
    build_release: bool = False

    @property
    def runs_on(self) -> List[str]:
        if is_managed_repo():
            return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [self.arch.value]
        return [DEFAULT_RUNNER]

    @property
    def build_runs_on(self) -> List[str]:
        if is_managed_repo():
            # Build s390x on x86_64
            return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [
                self.arch.value == "s390x" and Arch.X86_64.value or self.arch.value,
            ]
        return [DEFAULT_RUNNER]

    @property
    def tests(self) -> Dict[str, Any]:
        tests_list = [
            "test_progs",
            "test_progs_parallel",
            "test_progs_no_alu32",
            "test_progs_no_alu32_parallel",
            "test_verifier",
        ]

        if self.arch.value != "s390x":
            tests_list.append("test_maps")

        if self.toolchain.version >= 18:
            tests_list.append("test_progs_cpuv4")

        if not self.parallel_tests:
            tests_list = [test for test in tests_list if not test.endswith("parallel")]

        return {"include": [generate_test_config(test) for test in tests_list]}

    def to_dict(self) -> Dict[str, Any]:
        return {
            "arch": self.arch.value,
            "toolchain": self.toolchain.to_dict(),
            "kernel": self.kernel,
            "run_veristat": self.run_veristat,
            "parallel_tests": self.parallel_tests,
            "build_release": self.build_release,
            "runs_on": self.runs_on,
            "tests": self.tests,
            "build_runs_on": self.build_runs_on,
        }


def is_managed_repo() -> bool:
    return (
        os.environ["GITHUB_REPOSITORY_OWNER"] == MANAGED_OWNER
        and os.environ["GITHUB_REPOSITORY"] in MANAGED_REPOS
    )


def set_output(name, value):
    """Write an output variable to the GitHub output file."""
    with open(os.getenv("GITHUB_OUTPUT"), "a", encoding="utf-8") as file:
        file.write(f"{name}={value}\n")


def generate_test_config(test: str) -> Dict[str, Union[str, int]]:
    """Create the configuration for the provided test."""
    is_parallel = test.endswith("_parallel")
    config = {
        "test": test,
        "continue_on_error": is_parallel,
        # While in experimental mode, parallel jobs may get stuck
        # anywhere, including in user space where the kernel won't detect
        # a problem and panic. We add a second layer of (smaller) timeouts
        # here such that if we get stuck in a parallel run, we hit this
        # timeout and fail without affecting the overall job success (as
        # would be the case if we hit the job-wide timeout). For
        # non-experimental jobs, 360 is the default which will be
        # superseded by the overall workflow timeout (but we need to
        # specify something).
        "timeout_minutes": 30 if is_parallel else 360,
    }
    return config


if __name__ == "__main__":
    matrix = [
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
            run_veristat=True,
            parallel_tests=True,
        ),
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.LLVM, version=DEFAULT_LLVM_VERSION),
            build_release=True,
        ),
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.LLVM, version=18),
            build_release=True,
        ),
        BuildConfig(
            arch=Arch.AARCH64,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
        ),
        # BuildConfig(
        #     arch=Arch.AARCH64,
        #     toolchain=Toolchain(
        #         compiler=Compiler.LLVM,
        #         version=DEFAULT_LLVM_VERSION
        #     ),
        # ),
        BuildConfig(
            arch=Arch.S390X,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
        ),
    ]

    # Outside of those repositories we only run on x86_64
    if not is_managed_repo():
        matrix = [config for config in matrix if config.arch == Arch.X86_64]

    json_matrix = json.dumps({"include": [config.to_dict() for config in matrix]})
    print(json_matrix)
    set_output("build_matrix", json_matrix)
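As a rough illustration (not part of the commit), the script can be run locally by faking the variables GitHub Actions would normally provide; the repository values below are placeholders. It prints the JSON matrix to stdout and also appends it to $GITHUB_OUTPUT under the name build_matrix:

# Hypothetical local run of matrix.py; env values are illustrative only.
export GITHUB_REPOSITORY_OWNER="kernel-patches"
export GITHUB_REPOSITORY="kernel-patches/bpf"
export GITHUB_OUTPUT="$(mktemp)"
python3 .github/scripts/matrix.py            # prints the JSON matrix to stdout
grep '^build_matrix=' "${GITHUB_OUTPUT}"     # same JSON, exposed as a step output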
107 changes: 107 additions & 0 deletions .github/scripts/tar-artifact.sh
@@ -0,0 +1,107 @@
#!/bin/bash

set -eux

# Assumptions:
# - $(pwd) is the root of kernel repo we're tarring
# - zstd is installed by default in the runner images

if [ ! -d "${KBUILD_OUTPUT:-}" ]; then
    echo "KBUILD_OUTPUT must be a directory"
    exit 1
fi

arch="${1}"
toolchain="${2}"

# Convert a platform (as returned by uname -m) to the kernel
# arch (as expected by ARCH= env).
platform_to_kernel_arch() {
    case $1 in
        s390x)
            echo "s390"
            ;;
        aarch64)
            echo "arm64"
            ;;
        riscv64)
            echo "riscv"
            ;;
        x86_64)
            echo "x86"
            ;;
        *)
            echo "$1"
            ;;
    esac
}

# Remove intermediate object files that we have no use for. Ideally
# we'd just exclude them from tar below, but it does not provide
# options to express the precise constraints.
find tools/testing/selftests/ -name "*.o" -a ! -name "*.bpf.o" -print0 | \
    xargs --null --max-args=10000 rm

# Strip debug information, which is excessively large (consuming
# bandwidth) while not actually being used (the kernel does not use
# DWARF to symbolize stacktraces).
"${arch}"-linux-gnu-strip --strip-debug "${KBUILD_OUTPUT}"/vmlinux

image_name=$(make ARCH="$(platform_to_kernel_arch "${arch}")" -s image_name)
kbuild_output_file_list=(
    ".config"
    "${image_name}"
    "include/config/auto.conf"
    "include/generated/autoconf.h"
    "vmlinux"
)

# While we are preparing the tarball, move $KBUILD_OUTPUT to a tmp
# location just in case it's inside the repo root
tmp=$(mktemp -d)
mv "${KBUILD_OUTPUT}" "${tmp}"
stashed_kbuild_output=${tmp}/$(basename "${KBUILD_OUTPUT}")

# Note: ${local_kbuild_output} must point to ./kbuild-output because
# of the tar command at the bottom.
local_kbuild_output=$(realpath kbuild-output)
mkdir -p "${local_kbuild_output}"

for file in "${kbuild_output_file_list[@]}"; do
    mkdir -p "$(dirname "${local_kbuild_output}/${file}")"
    cp -a "${stashed_kbuild_output}/${file}" "${local_kbuild_output}/${file}"
done

additional_file_list=()
if [[ -n "${ARCHIVE_MAKE_HELPERS}" ]]; then
    # Package up a bunch of additional infrastructure to support running
    # 'make kernelrelease' and bpf tool checks later on.
    mapfile -t additional_file_list < <(find . -iname Makefile)
    additional_file_list+=(
        "scripts/"
        "tools/testing/selftests/bpf/"
        "tools/include/"
        "tools/bpf/bpftool/"
    )
fi

mkdir -p selftests
cp -r tools/testing/selftests/bpf selftests
if [[ -n "${BUILD_SCHED_EXT_SELFTESTS}" ]]; then
    cp -r tools/testing/selftests/sched_ext selftests
fi

tar -cf - \
    kbuild-output \
    "${additional_file_list[@]}" \
    --exclude '*.cmd' \
    --exclude '*.d' \
    --exclude '*.h' \
    --exclude '*.output' \
    selftests/ \
    | zstd -T0 -19 -o "vmlinux-${arch}-${toolchain}.tar.zst"

# Cleanup and restore the original KBUILD_OUTPUT
# We have to put KBUILD_OUTPUT back to its original location for actions/cache
rm -rf "${local_kbuild_output}"
mv "${stashed_kbuild_output}" "${KBUILD_OUTPUT}"