Skip to content

Commit

Permalink
Add integration_tests.yml
Browse files Browse the repository at this point in the history
  • Loading branch information
TaekyungHeo committed May 16, 2024
1 parent e5616d1 commit 8633424
Show file tree
Hide file tree
Showing 2 changed files with 173 additions and 0 deletions.
26 changes: 26 additions & 0 deletions .github/workflows/integration_tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# CI workflow: run the Chakra end-to-end integration tests on every pull request.
name: Integration Tests

on: pull_request

jobs:
  integration-tests:
    runs-on: ubuntu-latest

    steps:
      # NOTE(review): actions/checkout@v2 and setup-python@v2 are deprecated
      # (node12 runtime) — consider upgrading to v4/v5; verify against repo policy.
      - name: Checkout Code
        uses: actions/checkout@v2

      - name: Setup Python Environment
        uses: actions/setup-python@v2
        with:
          python-version: '3.10'

      # Install dev requirements plus the package itself so the
      # chakra_trace_link / chakra_converter entry points are on PATH.
      - name: Install Dependencies
        run: |
          pip install -r requirements-dev.txt
          pip install .
      # Extract the bundled traces, run the full pipeline for 8 ranks, and
      # validate each rank's completion time within a 5% tolerance.
      - name: Extract and Validate
        run: |
          python3 ci_tools/integration_tests.py --tgz_path tests/data/1.0.2-chakra.0.0.4/llama_pytorch24.05.tgz \
            --num_files 8 --tolerance 0.05 --expected_times_ms 14597 14597 14968 14638 14649 14700 14677 14735
147 changes: 147 additions & 0 deletions ci_tools/integration_tests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
import argparse
import os
import re
import subprocess
import tarfile


def run_command(command: str) -> None:
    """
    Execute a shell command, raising if it exits with a non-zero status.

    Args:
        command (str): The shell command to execute.

    Raises:
        RuntimeError: If the command fails, with the command's stderr
            included in the message.
    """
    print(f"Running command: {command}")
    result = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        cause = subprocess.CalledProcessError(result.returncode, command, result.stdout, result.stderr)
        raise RuntimeError(f"Command failed: {command}\nError: {result.stderr.decode()}") from cause


def extract_tgz(tgz_path: str, extract_to: str) -> None:
    """
    Extracts a .tgz file to the specified directory, refusing unsafe members.

    Args:
        tgz_path (str): Path to the .tgz file.
        extract_to (str): Directory to extract the files to.

    Raises:
        ValueError: If the archive contains a member whose resolved path
            would land outside ``extract_to`` (path traversal via ``..`` or
            absolute names).
    """
    print(f"Extracting {tgz_path} to {extract_to}")
    # Guard against path-traversal: a crafted archive member like
    # "../../etc/cron.d/x" would otherwise be written outside extract_to.
    dest_root = os.path.realpath(extract_to)
    with tarfile.open(tgz_path, "r:gz") as tar:
        for member in tar.getmembers():
            target = os.path.realpath(os.path.join(dest_root, member.name))
            if target != dest_root and not target.startswith(dest_root + os.sep):
                raise ValueError(f"Unsafe path in archive: {member.name}")
        tar.extractall(path=extract_to)


def validate_log(filename: str, expected_time_us: int, tolerance: float) -> None:
    """
    Validates the log file to ensure the last operation completes within the
    expected time with an allowable error.

    Args:
        filename (str): Path to the log file.
        expected_time_us (int): Expected completion time in microseconds.
        tolerance (float): Acceptable error as a decimal fraction (0.05 = 5%).

    Raises:
        ValueError: If the log does not contain the expected output or is
            outside the acceptable time range.
    """
    # Accept both AM and PM timestamps: the original pattern matched only
    # "PM", so runs whose logs carried morning timestamps failed spuriously.
    completion_pattern = re.compile(
        r"INFO \[\d{2}/\d{2}/\d{4} \d{2}:\d{2}:\d{2} [AP]M\] GPU Node ID \d+ completed at (\d+)us"
    )
    last_time = None
    with open(filename, "r") as file:
        for line in file:
            match = completion_pattern.search(line)
            if match:
                # Keep only the final completion time seen in the file.
                last_time = int(match.group(1))

    if last_time is None:
        # Name the file so a CI failure points at the offending rank's log.
        raise ValueError(f"No completion time found in {filename}")

    lower_bound = expected_time_us * (1 - tolerance)
    upper_bound = expected_time_us * (1 + tolerance)

    if not lower_bound <= last_time <= upper_bound:
        raise ValueError(
            f"Completion time in {filename} is {last_time}us; "
            f"expected between {lower_bound}us and {upper_bound}us."
        )
    print(f"Validation successful for {filename}: {last_time}us is within the acceptable range.")


def parse_args():
    """
    Parses command line arguments.

    Returns:
        argparse.Namespace: Parsed options (tgz_path, num_files,
        tolerance, expected_times_ms).
    """
    parser = argparse.ArgumentParser(description="Run integration tests for chakra_trace_link and chakra_converter.")
    # (flag, value type, nargs or None, help text) — all options are required.
    option_specs = (
        ("--tgz_path", str, None, "Path to the tgz file to extract."),
        ("--num_files", int, None, "Number of files (ranks) to process."),
        ("--tolerance", float, None, "Acceptable error percentage as a decimal."),
        ("--expected_times_ms", int, "+", "List of expected times in milliseconds."),
    )
    for flag, value_type, arity, help_text in option_specs:
        if arity is None:
            parser.add_argument(flag, type=value_type, required=True, help=help_text)
        else:
            parser.add_argument(flag, type=value_type, nargs=arity, required=True, help=help_text)
    return parser.parse_args()


def run_trace_link(data_path: str, num_files: int) -> None:
    """
    Runs chakra_trace_link for each pair of input files, in parallel.

    Args:
        data_path (str): The directory where the data files are located.
        num_files (int): The number of file pairs to process.

    Raises:
        RuntimeError: If any chakra_trace_link invocation exits non-zero.
    """
    # BUG FIX: the original appended "&" to each command and then ran a
    # separate `wait` via subprocess — but each command got its own shell,
    # so the backgrounded jobs were orphaned and `wait` (in yet another
    # fresh shell) returned immediately without waiting for anything.
    # Launch with Popen to keep parallelism, then wait on each handle.
    procs = []
    for i in range(num_files):
        command = (
            f"chakra_trace_link --pytorch-et-file {data_path}/chakra_host_et_{i}.json "
            f"--kineto-file {data_path}/kineto_{i}.json "
            f"--output-file {data_path}/chakra_et_plus_{i}.json"
        )
        print(f"Running command: {command}")
        procs.append(
            (command, subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
        )
    for command, proc in procs:
        _, stderr = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError(f"Command failed: {command}\nError: {stderr.decode()}")


def run_converter(data_path: str, num_files: int) -> None:
    """
    Runs chakra_converter for each output of chakra_trace_link, in parallel.

    Args:
        data_path (str): The directory where the output files are located.
        num_files (int): The number of output files to process.

    Raises:
        RuntimeError: If any chakra_converter invocation exits non-zero.
    """
    # BUG FIX: same orphaned-"&" / no-op-"wait" problem as run_trace_link —
    # each backgrounded command lived in its own short-lived shell, so the
    # later `wait` waited for nothing and failures went unnoticed. Use
    # Popen handles and wait on each one explicitly.
    procs = []
    for i in range(num_files):
        command = (
            f"chakra_converter --input_filename {data_path}/chakra_et_plus_{i}.json "
            f"--output_filename {data_path}/chakra_final_{i}.chakra "
            f"--input_type PyTorch --log_filename /tmp/rank_{i}.log"
        )
        print(f"Running command: {command}")
        procs.append(
            (command, subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
        )
    for command, proc in procs:
        _, stderr = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError(f"Command failed: {command}\nError: {stderr.decode()}")


def main() -> None:
    """
    Main function to execute the integration test sequence: extract the
    bundled traces, run trace linking and conversion, then validate each
    rank's log against its expected completion time.
    """
    args = parse_args()
    data_path = "tests/data/llama_pytorch24.05"

    # Unpack the archived traces into the fixture directory.
    extract_tgz(args.tgz_path, data_path)

    # CLI supplies milliseconds; the converter logs report microseconds.
    expected_times_us = [ms * 1000 for ms in args.expected_times_ms]

    run_trace_link(data_path, args.num_files)
    run_converter(data_path, args.num_files)

    # Each rank writes its own log file under /tmp; validate every one.
    for rank in range(args.num_files):
        validate_log(f"/tmp/rank_{rank}.log", expected_times_us[rank], args.tolerance)


if __name__ == "__main__":
    main()

0 comments on commit 8633424

Please sign in to comment.