Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Reformat Python codes with black and introduce Python linting via GitHub actions #13

Closed
wants to merge 9 commits into from
35 changes: 35 additions & 0 deletions .flake8
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
[flake8]
enable-extensions = G
select = B,C,E,F,G,P,SIM1,T4,W,B9,TOR0,TOR1,TOR2
max-line-length = 120
# C408 ignored because we like the dict keyword argument syntax
# E501 is not flexible enough, we're using B950 instead
ignore =
    E203,E305,E402,E501,E721,E741,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
    # fix these lints in the future
    E275,
    # shebang has extra meaning in fbcode lints, so I think it's not worth trying
    # to line this up with executable bit
    EXE001,
    # these ignores are from flake8-bugbear; please fix!
    B007,B008,B017,B019,B023,B028,B903,B904,B905,B906,B907,
    # these ignores are from flake8-comprehensions; please fix!
    C407,
    # these ignores are from flake8-logging-format; please fix!
    G100,G101,G200,G201,G202,
    # these ignores are from flake8-simplify. please fix or ignore with commented reason
    SIM105,SIM108,SIM110,SIM111,SIM113,SIM114,SIM115,SIM116,SIM117,SIM118,SIM119,SIM12,
    # flake8-simplify code styles
    SIM102,SIM103,SIM106,SIM112,
    # TorchFix codes that don't make sense for PyTorch itself:
    # removed and deprecated PyTorch functions.
    TOR001,TOR101,
    # TODO(kit1980): fix all TOR102 issues
    # `torch.load` without `weights_only` parameter is unsafe
    TOR102,
    P201,
per-file-ignores =
    __init__.py: F401
optional-ascii-coding = True
exclude =
    ./.git,
25 changes: 25 additions & 0 deletions .github/workflows/python_lint.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
name: Python Linting

on: [push, pull_request]

jobs:
  lint-and-format:
    runs-on: ubuntu-latest

    steps:
      # v4 runs on a currently supported Node.js runtime; the v2 tags of
      # checkout/setup-python are deprecated by GitHub and emit warnings.
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          # Python 3.8 reached end-of-life in October 2024; lint on a
          # supported interpreter.
          python-version: '3.11'

      - name: Install dependencies
        run: |
          pip install black flake8

      - name: Check code formatting with Black
        run: black --check .

      - name: Run Flake8
        run: flake8 .
116 changes: 57 additions & 59 deletions et_converter/et_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,11 @@
from .flexflow2chakra_converter import FlexFlow2ChakraConverter
from .pytorch2chakra_converter import PyTorch2ChakraConverter


def get_logger(log_filename: str) -> logging.Logger:
formatter = logging.Formatter(
"%(levelname)s [%(asctime)s] %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p")
"%(levelname)s [%(asctime)s] %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p"
)

file_handler = FileHandler(log_filename, mode="w")
file_handler.setLevel(logging.DEBUG)
Expand All @@ -30,64 +31,60 @@ def get_logger(log_filename: str) -> logging.Logger:

return logger


def main() -> None:
parser = argparse.ArgumentParser(
description="Execution Trace Converter"
)
parser = argparse.ArgumentParser(description="Execution Trace Converter")
parser.add_argument(
"--input_type",
type=str,
default=None,
required=True,
help="Input execution trace type"
"--input_type",
type=str,
default=None,
required=True,
help="Input execution trace type",
)
parser.add_argument(
"--input_filename",
type=str,
default=None,
required=True,
help="Input execution trace filename"
"--input_filename",
type=str,
default=None,
required=True,
help="Input execution trace filename",
)
parser.add_argument(
"--output_filename",
type=str,
default=None,
required=True,
help="Output Chakra execution trace filename"
"--output_filename",
type=str,
default=None,
required=True,
help="Output Chakra execution trace filename",
)
parser.add_argument(
"--num_dims",
type=int,
default=None,
required=True,
help="Number of dimensions in the network topology"
"--num_dims",
type=int,
default=None,
required=True,
help="Number of dimensions in the network topology",
)
parser.add_argument(
"--num_npus",
type=int,
default=None,
required="Text" in sys.argv,
help="Number of NPUs in a system"
"--num_npus",
type=int,
default=None,
required="Text" in sys.argv,
help="Number of NPUs in a system",
)
parser.add_argument(
"--num_passes",
type=int,
default=None,
required="Text" in sys.argv,
help="Number of training passes"
"--num_passes",
type=int,
default=None,
required="Text" in sys.argv,
help="Number of training passes",
)
parser.add_argument(
"--npu_frequency",
type=int,
default=None,
required="FlexFlow" in sys.argv,
help="NPU frequency in MHz"
"--npu_frequency",
type=int,
default=None,
required="FlexFlow" in sys.argv,
help="NPU frequency in MHz",
)
parser.add_argument(
"--log_filename",
type=str,
default="debug.log",
help="Log filename"
"--log_filename", type=str, default="debug.log", help="Log filename"
)
args = parser.parse_args()

Expand All @@ -97,27 +94,27 @@ def main() -> None:
try:
if args.input_type == "Text":
converter = Text2ChakraConverter(
args.input_filename,
args.output_filename,
args.num_dims,
args.num_npus,
args.num_passes,
logger)
args.input_filename,
args.output_filename,
args.num_dims,
args.num_npus,
args.num_passes,
logger,
)
converter.convert()
elif args.input_type == "FlexFlow":
converter = FlexFlow2ChakraConverter(
args.input_filename,
args.output_filename,
args.num_dims,
args.npu_frequency,
logger)
args.input_filename,
args.output_filename,
args.num_dims,
args.npu_frequency,
logger,
)
converter.convert()
elif args.input_type == "PyTorch":
converter = PyTorch2ChakraConverter(
args.input_filename,
args.output_filename,
args.num_dims,
logger)
args.input_filename, args.output_filename, args.num_dims, logger
)
converter.convert()
else:
logger.error(f"{args.input_type} unsupported")
Expand All @@ -127,5 +124,6 @@ def main() -> None:
logger.debug(traceback.format_exc())
sys.exit(1)


if __name__ == "__main__":
main()
Loading
Loading