From 9fdfde67d5e56122fe09ab1a1a87b9fe952e733f Mon Sep 17 00:00:00 2001 From: Longtao Wang Date: Tue, 19 Jan 2021 14:03:42 +0800 Subject: [PATCH 001/120] fixed istanbul process logic --- kernel/c_api.h | 55 ------------------------------------ kernel/dlopen.h | 2 +- kernel/model.go | 75 ++++++++++++++++++++++++++++++++++--------------- 3 files changed, 54 insertions(+), 78 deletions(-) delete mode 100644 kernel/c_api.h diff --git a/kernel/c_api.h b/kernel/c_api.h deleted file mode 100644 index 497b5280..00000000 --- a/kernel/c_api.h +++ /dev/null @@ -1,55 +0,0 @@ -/*! - * Copyright (c) 2016 by Contributors - * \file cvm/c_api.h - * \brief C API of CVM symbolic construction and pass. - * Enables construction and transformation of Graph - * in any other host languages. - */ -#ifndef CVM_C_API_H_ -#define CVM_C_API_H_ - -/*! \brief CVM_DLL prefix for windows */ -#ifdef _WIN32 -#ifdef CVM_EXPORTS -#define CVM_DLL __declspec(dllexport) -#else -#define CVM_DLL __declspec(dllimport) -#endif -#else -#define CVM_DLL __attribute__((visibility("default"))) -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -const int SUCCEED = 0; -const int ERROR_LOGIC = 1; -const int ERROR_RUNTIME = 2; - -int CVMAPILoadModel(const char *graph_json, int graph_strlen, - const char *param_bytes, int param_strlen, - void **net, // pass reference of network - int device_type, int device_id); -int CVMAPIFreeModel(void *net); -int CVMAPIInference(void *net, - char *input_data, int input_len, - char *output_data); - -int CVMAPIGetVersion(void *net, char *version); -int CVMAPIGetPreprocessMethod(void *net, char *method); - -int CVMAPIGetInputLength(void *net, unsigned long long *size); -int CVMAPIGetOutputLength(void *net, unsigned long long *size); -int CVMAPIGetInputTypeSize(void *net, unsigned long long *size); -int CVMAPIGetOutputTypeSize(void *net, unsigned long long *size); - -int CVMAPIGetStorageSize(void *net, unsigned long long *gas); -int CVMAPIGetGasFromModel(void *net, unsigned long 
long *gas); -int CVMAPIGetGasFromGraphFile(const char *graph_json, unsigned long long *gas); - -#ifdef __cplusplus -} /* end extern "C" */ -#endif - -#endif // CVM_C_API_H_ diff --git a/kernel/dlopen.h b/kernel/dlopen.h index 939bd7d4..ed9550a4 100644 --- a/kernel/dlopen.h +++ b/kernel/dlopen.h @@ -6,7 +6,7 @@ #include #include #include -#include "c_api.h" +#include "cvm/c_api.h" void* plugin_open(const char* path, char** err) { void* lib = dlopen(path, RTLD_NOW|RTLD_GLOBAL); diff --git a/kernel/model.go b/kernel/model.go index 4cb18451..01d04c19 100644 --- a/kernel/model.go +++ b/kernel/model.go @@ -61,43 +61,74 @@ func (m *Model) GetInputLength() uint64 { return m.input_size } +func (m *Model) preProcessInputData(data []byte) ([]byte, int) { + var err error + + if len(data) < int(m.input_size) { + log.Warn("input length less than input size", + "input length", len(data), "expected", m.input_size) + return nil, ERROR_LOGIC + } + + if data, err = ToAlignedData(data[:m.input_size], int(m.input_byte)); err != nil { + log.Warn("input ToAlignedData invalid", "error", err) + return nil, ERROR_LOGIC + } + + return data, SUCCEED +} + +// TODO(ryt): test it in istanbul version code +// The process logic need more considerations since the contract inference API, aka +// `INFER` and `INFERARRAY`, accepts the memory aligned size bytes array as input, +// which may cause `ERROR_LOGIC` all the time. 
+func (m *Model) preProcessInputDataV2(data []byte) ([]byte, int) { + var err error + + if len(data) != int(m.input_size) { + log.Warn("input length not matched", + "input length", len(data), "expected", m.input_size) + return nil, ERROR_LOGIC + } + + if data, err = ToAlignedData(data, int(m.input_byte)); err != nil { + log.Warn("input ToAlignedData invalid", "error", err) + return nil, ERROR_LOGIC + } + + return data, SUCCEED +} + func (m *Model) Predict(data []byte, cvmVersion int) ([]byte, int) { var ( - output []byte - status int - err error + output []byte + status int + err error + err_code int ) + if cvmVersion == CVM_VERSION_ONE { - if len(data) < int(m.input_size) { - log.Warn("input length less than input size", - "input length", len(data), "expected", m.input_size) - return nil, ERROR_LOGIC - } - if data, err = ToAlignedData(data[:m.input_size], int(m.input_byte)); err != nil { - log.Warn("input ToAlignedData invalid", "error", err) - return nil, ERROR_LOGIC - } + data, err_code = m.preProcessInputData(data) } else { - // TODO(ryt): test it in istanbuer version code - if len(data) != int(m.input_size) { - log.Warn("input length not matched", - "input length", len(data), "expected", m.input_size) - return nil, ERROR_LOGIC - } - if data, err = ToAlignedData(data, int(m.input_byte)); err != nil { - log.Warn("input ToAlignedData invalid", "error", err) - return nil, ERROR_LOGIC - } + data, err_code = m.preProcessInputData(data) + // data, err_code = m.preProcessInputDataV2(data) + } + + if err_code != SUCCEED { + return nil, err_code } + if output, status = m.lib.Inference(m.model, data); status != SUCCEED { return nil, status } + if m.output_byte > 1 { if output, err = SwitchEndian(output, int(m.output_byte)); err != nil { log.Warn("output SwitchEndian invalid", "error", err) return nil, ERROR_LOGIC } } + return output, status } From 82917ece736a8ce2042e67633f14cd5d800bec34 Mon Sep 17 00:00:00 2001 From: Longtao Wang Date: Tue, 19 Jan 2021 14:47:59 +0800 
Subject: [PATCH 002/120] add golang deps: c_api.h --- kernel/c_api.h | 55 +++++++++++++++++++++++++++++++++++++++++++++++++ kernel/dlopen.h | 2 +- 2 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 kernel/c_api.h diff --git a/kernel/c_api.h b/kernel/c_api.h new file mode 100644 index 00000000..497b5280 --- /dev/null +++ b/kernel/c_api.h @@ -0,0 +1,55 @@ +/*! + * Copyright (c) 2016 by Contributors + * \file cvm/c_api.h + * \brief C API of CVM symbolic construction and pass. + * Enables construction and transformation of Graph + * in any other host languages. + */ +#ifndef CVM_C_API_H_ +#define CVM_C_API_H_ + +/*! \brief CVM_DLL prefix for windows */ +#ifdef _WIN32 +#ifdef CVM_EXPORTS +#define CVM_DLL __declspec(dllexport) +#else +#define CVM_DLL __declspec(dllimport) +#endif +#else +#define CVM_DLL __attribute__((visibility("default"))) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +const int SUCCEED = 0; +const int ERROR_LOGIC = 1; +const int ERROR_RUNTIME = 2; + +int CVMAPILoadModel(const char *graph_json, int graph_strlen, + const char *param_bytes, int param_strlen, + void **net, // pass reference of network + int device_type, int device_id); +int CVMAPIFreeModel(void *net); +int CVMAPIInference(void *net, + char *input_data, int input_len, + char *output_data); + +int CVMAPIGetVersion(void *net, char *version); +int CVMAPIGetPreprocessMethod(void *net, char *method); + +int CVMAPIGetInputLength(void *net, unsigned long long *size); +int CVMAPIGetOutputLength(void *net, unsigned long long *size); +int CVMAPIGetInputTypeSize(void *net, unsigned long long *size); +int CVMAPIGetOutputTypeSize(void *net, unsigned long long *size); + +int CVMAPIGetStorageSize(void *net, unsigned long long *gas); +int CVMAPIGetGasFromModel(void *net, unsigned long long *gas); +int CVMAPIGetGasFromGraphFile(const char *graph_json, unsigned long long *gas); + +#ifdef __cplusplus +} /* end extern "C" */ +#endif + +#endif // CVM_C_API_H_ diff --git 
a/kernel/dlopen.h b/kernel/dlopen.h index ed9550a4..939bd7d4 100644 --- a/kernel/dlopen.h +++ b/kernel/dlopen.h @@ -6,7 +6,7 @@ #include #include #include -#include "cvm/c_api.h" +#include "c_api.h" void* plugin_open(const char* path, char** err) { void* lib = dlopen(path, RTLD_NOW|RTLD_GLOBAL); From 27bb582d770c944508dcca33e6bee911788f4373 Mon Sep 17 00:00:00 2001 From: Longtao Wang Date: Mon, 30 Aug 2021 16:50:41 +0800 Subject: [PATCH 003/120] [update]: remove ycm conf file --- .ycm_extra_conf.py | 94 ---------------------------------------------- 1 file changed, 94 deletions(-) delete mode 100644 .ycm_extra_conf.py diff --git a/.ycm_extra_conf.py b/.ycm_extra_conf.py deleted file mode 100644 index ac37c2b2..00000000 --- a/.ycm_extra_conf.py +++ /dev/null @@ -1,94 +0,0 @@ -import os -import json -import ycm_core - -BUILD_DIRECTORY = 'build' -SRC_LANG = { - 'cuda': ['.cuh', '.cu'], - 'c++': ['.c', '.cc', '.cxx', '.cpp', '.h', '.hpp', '.hxx', 'hh'] -} - -def DirectoryOfThisScript(): - return os.path.dirname(os.path.abspath(__file__)) - -def FindCMakeCompilationFile(): - current_dir = DirectoryOfThisScript() - walk_dirs = [ - current_dir, - os.path.join(current_dir, BUILD_DIRECTORY), - ] - for x in os.listdir(os.path.join(current_dir, BUILD_DIRECTORY)): - x = os.path.join(current_dir, BUILD_DIRECTORY, x) - if os.path.isdir(x): - walk_dirs.append(x) - - db_fname = 'compile_commands.json' - walk_files = [os.path.join(x, db_fname) for x in walk_dirs] - files = [x for x in walk_files if os.path.exists(x)] - return files - -def GCC_BIN(flags, binary): - if 'c++' not in binary: - return [] - - with os.popen(binary + " -dumpversion") as f: - version = f.readline().strip() - - flag = "-I/usr/include/c++/" + version - if flag not in flags: - flags.append(flag) - return flags - -def CXXFLAGS(flags, options): - CXX_KEYS = ['-I', '-W', '-D', '-m', '-s', '-f'] - for opt in options: - if opt[:2] in CXX_KEYS and opt not in flags: - flags.append(opt) - return flags - -def 
CMakeFlags(flags): - files = FindCMakeCompilationFile() - if not files: - return flags - - with open(files[0], "r") as fin: - commands = json.load(fin) - - CMAKE_COM_KEY = "command" - for com in commands: - if CMAKE_COM_KEY in com: - com = com[CMAKE_COM_KEY] - com = [x for x in com.split(' ') if x] - flags = GCC_BIN(flags, com[0]) - flags = CXXFLAGS(flags, com) - return flags - -def SourceLangFlags(flags, filename): - ext = os.path.splitext(filename)[-1] - for lang, suffixs in SRC_LANG.items(): - if ext in suffixs and lang not in flags: - flags.extend(['-x', lang]) - return flags - - -COMMON_FLAGS = [ '-I/usr/lib/', '-I/usr/include/'] -INIT_FLAGS = { - 'init_ok': False, - 'flags': COMMON_FLAGS, -} -def Settings(**kwargs): - filename = kwargs['filename'] - if kwargs['language'] != 'cfamily': - return {} - -# def FlagsForFile(filename): - if not INIT_FLAGS['init_ok']: - INIT_FLAGS['init_ok'] = True - INIT_FLAGS['flags'] = CMakeFlags(INIT_FLAGS['flags']) - - final_flags = INIT_FLAGS['flags'] - final_flags = SourceLangFlags(final_flags, filename) - - return { - 'flags': final_flags, - } From a0614b459d048237a19a8dd4a3cee7c1521bdca2 Mon Sep 17 00:00:00 2001 From: Longtao Wang Date: Mon, 30 Aug 2021 17:07:34 +0800 Subject: [PATCH 004/120] [feature]: add common utils and main --- main.py | 34 ++ python/mrt/__init__.py | 4 +- python/mrt/common/__init__.py | 0 python/mrt/common/bash.py | 43 +++ python/mrt/common/cmd.py | 680 ++++++++++++++++++++++++++++++++++ python/mrt/common/dfs.py | 87 +++++ python/mrt/common/log.py | 134 +++++++ python/mrt/common/thread.py | 150 ++++++++ 8 files changed, 1130 insertions(+), 2 deletions(-) create mode 100644 main.py create mode 100644 python/mrt/common/__init__.py create mode 100644 python/mrt/common/bash.py create mode 100644 python/mrt/common/cmd.py create mode 100644 python/mrt/common/dfs.py create mode 100644 python/mrt/common/log.py create mode 100644 python/mrt/common/thread.py diff --git a/main.py b/main.py new file mode 100644 
index 00000000..15b59607 --- /dev/null +++ b/main.py @@ -0,0 +1,34 @@ +import sys +from os import path + +# set up dependencies +__ROOT__ = path.dirname(path.realpath(__file__)) +sys.path.insert(0, path.join(__ROOT__, "python")) + +import logging + +from mrt.common import cmd, log, thread + +LOG_MSG = ",".join(["{}:{}".format(l, n) \ + for l, n in zip(log.LOG_LEVELS, log.LOG_NAMES)]) + +@cmd.option("-v", "--verbosity", metavar="LEVEL", + choices=log.LOG_NAMES, default=log.level2name(log.DEBUG), + help="log verbosity to pring information, " + \ + "available options: {}".format(log.LOG_NAMES) + \ + " by default {}".format(log.level2name(log.DEBUG))) +@cmd.global_options() +def global_func(args): + log.Init(log.name2level(args.verbosity)) + +@cmd.module("", as_main=True, + description=""" +CVM Python Tool +""") +def cvm_main(args): + print("null") + thread.start_services(args) + +if __name__ == "__main__": + logger = logging.getLogger("main") + cmd.Run() diff --git a/python/mrt/__init__.py b/python/mrt/__init__.py index d4f0985e..cf148f2a 100644 --- a/python/mrt/__init__.py +++ b/python/mrt/__init__.py @@ -1,2 +1,2 @@ -from . import transformer -from . import tfm_ops +# from . import transformer +# from . 
import tfm_ops diff --git a/python/mrt/common/__init__.py b/python/mrt/common/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/common/bash.py b/python/mrt/common/bash.py new file mode 100644 index 00000000..5176e96f --- /dev/null +++ b/python/mrt/common/bash.py @@ -0,0 +1,43 @@ +import os +from os import path +import logging +import subprocess + +bash_logger = logging.getLogger("bash") + +BBCODE_ROOT = path.abspath( + path.join(__file__, "../../../..")) + +def make_dirs(dir_path): + os.makedirs(dir_path, exist_ok=True) + +def shell_exec(*commands, check_error=True): + str_com = " ".join([str(c) for c in commands]) + bash_logger.debug(str_com) + code = os.system(str_com) + if check_error and (code != 0): + raise RuntimeError( + "command execute terminated: {}".format(str_com)) + return code + +def check_output(*commands): + return subprocess.check_output(commands, stderr=subprocess.STDOUT) + +class DirEntry: + def __init__(self, target_dir): + self._curr_dir = os.getcwd() + self._tar_dir = target_dir + + def __enter__(self): + bash_logger.debug("cd {}".format(self._tar_dir)) + os.chdir(self._tar_dir) + + def __exit__(self, *args): + bash_logger.debug("cd {}".format(self._curr_dir)) + os.chdir(self._curr_dir) + +# using with python `with` primitive enter block +def enter(target_dir, create=False): + if create: + make_dirs(target_dir) + return DirEntry(target_dir) diff --git a/python/mrt/common/cmd.py b/python/mrt/common/cmd.py new file mode 100644 index 00000000..b5f5e355 --- /dev/null +++ b/python/mrt/common/cmd.py @@ -0,0 +1,680 @@ +""" BBCode CMD Module Design + + The target is to auto inject command line parser into registry + table with the python decorator syntax, and the module could + collect the neccessary options and print usage with `-h` option. + + User try to invoke the main function: `Run` to start customized + application, which will auto run the appropriate module function. 
+ + >>> from bbcode.common import cmd + >>> cmd.Run() + + Interfaces + ========== + @func Run: Mainly program entry function + @func module: Module main entry function, the releated function + will trigger after the command line set name. + + One notable thing to be indicated is that the dependencies + between modules should not contains cycle, or will raise + runtime error. The dependent module's options will be treated + as group reference, and group options will be copied into + current module's group options. + + @param refs: List of module string, auto combine module options + by referece to with current module. + @func group: Group module wrapper function. + @func option: Wrapper function by the `add_argument` in + `argparse.ArgumentParser`. + + Notices: The main entry must be zero or one instance, or will raise + error. + + GroupEntry + ========== + A group entry is represented as optional arguments in command line, + but could execute multi-group main function in single command, + different from the module entry. + + ModuleEntry + =========== + A module entry acts as an sub command at shell, like `git status`. + The module interface contains `mod_ref` and `mod_main`, which refer + to the same `CmdEntry` instance. The entry will be achieved via the + sub parsers method defined in argparse library. + + EntryType + ========= + The options registered by register_option may be collected as + cluster, which has permission access, like public, private, ... + etc. We'd like to support more feasible and extensible usage for + developers, provided the problems occured in naive coding and + reported to us. 
+ @PUBLIC: + @PRIVATE: + + >>> @option("-p", "--pool-size") + >>> @module("cmd.test", permission="PRIVATE") + >>> def cmd_test(args): + >>> pass + +""" + +from __future__ import annotations + +from typing import Sequence, Dict, Set, List +from typing import Callable + +import copy +import json +import logging +from enum import Enum + +import argparse + +from .dfs import dfs_visit + +__all__ = [ + "PUBLIC", "PRIVATE", + "module", "group", "option", "global_options", + "Run"] + +class CmdName: + """ Formatted Cmder Name + + 1. Dot splitted name, aka "cmd.test" + 2. Array for module names, aka ["cmd", "test"] + 3. Group related name, aka "cmd-test", always with prefix:"--" + 4. Argument name parsed from command line, aka "cmd_test" + """ + + def __init__(self, log_name : str): + self.name = str(log_name) + + if isinstance(log_name, CmdName): + self.name = copy.copy(log_name.name) + elif isinstance(log_name, list): + self.name = " ".join(log_name) + elif isinstance(log_name, str): + all_spliter = ["-", "_", "."] + for f in all_spliter: + if f in log_name: + self.name = log_name.replace(f, " ") + break + + def __repr__(self): + return self.name + + # hashable type, can be used at dict type + def __hash__(self): + return hash(self.name) + def __eq__(self, other : CmdName): + if isinstance(other, str): + other = CmdName(other) + return other.name == self.name + + @property + def mod_name(self): + return self.name.replace(" ", ".") + + @property + def mod_array(self): + return [n for n in self.name.split(" ") if n] + + @property + def mod_prefix_arr(self): + return [ + ".".join(self.mod_array[:i+1]) \ + for i in range(len(self.mod_array))] + + @property + def opt_name(self): # command line option name, `--` prefix + return "--" + self.name.replace(" ", "-") + + @property + def code_name(self): # code name after argparser parse. 
+ return self.name.replace(" ", "_") + + @staticmethod + def topo_sort(cmd_names : Sequence[CmdName]) -> Sequence[CmdName]: + return sorted(cmd_names, key=lambda x : len(x.name), reverse=True) + +PUBLIC = 0 +PRIVATE = 1 + +class CmdOption: + def __init__(self, *args, **kw): + self.args = list(args) + self.kw = dict(kw) + + def __repr__(self): + return "%s,%s" % (self.args, self.kw) + +class GroupOption: + def __init__(self, permission, entry): + self.options : Sequence[CmdOption] = [] + self.permission = permission + self.entry = entry + + def add_option(self, *args, **kw): + self.options.append(CmdOption(*args, **kw)) + + def to_string(self): + ser = ", ".join([str(o) for o in self.options]) + type_str = "PUBLIC" if self.permission == PUBLIC else "PRIVATE" + return "%s [%s]" % (type_str, ser) + +class CmdFunction: + def __init__(self, group_opt : GroupOption): + self.options : GroupOption = group_opt + self.func = None + + def __repr__(self): + func_type = "MAIN" if self.func else "PASS" + return "%s (%s)" % (func_type, self.func) + + def __call__(self, *args, **kw): + if self.func is None: + raise RuntimeError("module {}: null module function".format( + self.options.entry.name)) + return self.func(*args, **kw) + + def wrapper(self, func): + if self.func is not None: + raise RuntimeError("module {}: duplicated functions".format( + self.options.entry.name)) + self.func = func + return self + + def move(self) -> CmdFunction: + """ This function move the wrapper function and relative + options into other. this will be empty attr after + invoking. 
+ """ + move_func = CmdFunction(self.options) + move_func.func = self.func + self.func = None + return move_func + + def empty(self) -> bool: + return self.func is None + +class GroupEntry: + def __init__(self, name : CmdName): + self.name = name + self.options : Sequence[GroupOption] = [] + self.params : CmdOption = CmdOption() + self.func : CmdFunction = CmdFunction( + self.group_option(PUBLIC)) + + def register_parser(self, *args, **kw): + self.params.args.extend(args) + self.params.kw.update(kw) + return self + + def to_string(self, new_line = False): + split_str = "\n\t" if new_line else " " + ser = "name=%s " % self.name.name + ser += "params=%s " % str(self.params) + + opt_str = ("," + split_str).join([o.to_string() \ + for o in self.options if o.options]) + ser += "options=[%s%s] " % (split_str, opt_str) + return ser + + def by_main_func(self, permission) -> CmdFunction: + self.func.options.permission = permission + return self.func + + def by_pass_func(self, permission) -> CmdFunction: + return CmdFunction(self.group_option(permission)) + + def group_option(self, permission) -> GroupOption: + self.options.append(GroupOption(permission, self)) + return self.options[-1] + + def public_group_entry(self) -> GroupEntry: + gentry = GroupEntry(self.name) + gentry.options = [opt for opt in self.options \ + if opt.permission == PUBLIC] + gentry.params = self.params + gentry.func = self.func + return gentry + +class ModEntry(GroupEntry): + def __init__(self, name : CmdName): + super(ModEntry, self).__init__(name) + self.references : Set[str] = set() + self.groups : Dict[str, GroupEntry] = {} + # enable global options flag, set via module function + self.enable_global_opt = True + + def __str__(self): + return self.name.name + + def to_string(self, new_line = True): + split_str = "\n\t" if new_line else " " + ser = super(ModEntry, self).to_string() + gser = "".join(["," + split_str + v.to_string() \ + for v in self.groups.values()]) + ser += "groups=[%s]" % gser + 
return ser + + def group_entry(self, group_name : CmdName) -> GroupEntry: + assert isinstance(group_name, CmdName) + if group_name not in self.groups: + self.groups[group_name] = GroupEntry(group_name) + return self.groups[group_name] + + def public_group_entry(self) -> GroupEntry: + gentry = super(ModEntry, self).public_group_entry() + + # disable default module enable option. + # if not self.func.empty(): + # gopt = GroupOption(self.func.options.permission, gentry) + # gopt.add_option( + # self.name.opt_name, + # action="store_true", + # help="enable module " + str(self.name)) + # gentry.options.insert(0, gopt) + + gentry.params = copy.deepcopy(self.params) + gentry.params.args.insert(0, str(self.name)) + # group has no help option + gentry.params.kw.pop("help", None) + return gentry + + def as_groups(self): + common_groups = { self.name: self.public_group_entry() } + for k, v in self.groups.items(): + common_groups[k] = v.public_group_entry() + return common_groups + + def update_groups(self, common_groups): + for k, v in common_groups.items(): + if k not in self.groups: + self.groups[k] = v + +class CmdStorage: + STORE : Dict[CmdName, ModEntry] = {} + PARSERS = {} + GLOBAL_NAME = CmdName("common options") + + @staticmethod + def get_entry(mod_name, default = None) -> ModEntry: + if isinstance(mod_name, str): + mod_name = CmdName(mod_name) + + if mod_name not in CmdStorage.STORE: + if default is None: + default = ModEntry(mod_name) + CmdStorage.STORE[mod_name] = default + return CmdStorage.STORE[mod_name] + + @staticmethod + def refs_analysis(): + graph = CmdStorage.STORE.values() + def refs_generator(entry : ModEntry): + return [CmdStorage.get_entry(n) for n in entry.references] + + def cycling_trigger(dfs_path : List[ModEntry]): + dfs_path.append(dfs_path[0]) + + common_groups = {} + for entry in dfs_path[:-1]: + # TODO: may cause undeterministic behavior, since + # group names may be duplicated. 
+ common_groups.update(entry.as_groups()) + + for idx, entry in enumerate(dfs_path[:-1]): + # remove dependency reference in ref_path + entry.references.remove(dfs_path[idx+1].name.mod_name) + entry.update_groups(common_groups) + + dfs_path.pop(-1) + + def visit_func(entry : ModEntry, ref_size : int, index : int): + if ref_size != index: + return + + # remove refs and update current entry's groups + for ref_entry in refs_generator(entry): + entry.update_groups(ref_entry.as_groups()) + + if entry.name in entry.groups: + del entry.groups[entry.name] + + dfs_visit( + graph, + refs_generator, + cycling_trigger = cycling_trigger, + visit_func = visit_func) + + @staticmethod + def init_parser(parser : argparse.ArgumentParser, + entry : ModEntry, + pre_func): + logger = logging.getLogger("cmd.parser") + + has_main_entry = not entry.func.empty() + for group in entry.groups.values(): + if not group.func.empty(): + has_main_entry = True + # There is no need to create group options and options + if not has_main_entry: + return + + for group in entry.groups.values(): + # skip empty group options + if not group.options: + continue + + try: + gparser = parser.add_argument_group( + *group.params.args, **group.params.kw) + except Exception as e: + logger.error("module({}):group({}): {}".format( + entry.name, group.name, e)) + raise e + for gopt in group.options: + for opt in gopt.options: + try: + gparser.add_argument(*opt.args, **opt.kw) + except argparse.ArgumentError as e: + logger.error("module({}):group({}): {}".format( + entry.name, group.name, e)) + raise e + + for gopt in entry.options: + for opt in gopt.options: + try: + parser.add_argument(*opt.args, **opt.kw) + except argparse.ArgumentError as e: + logger.error("module({}): {}".format( + entry.name, e)) + raise e + + def _func(args): + # invoke prepare function + if entry.enable_global_opt and not pre_func.empty(): + pre_func(args) + + for group in entry.groups.values(): + if group.func.empty(): + continue + if 
getattr(args, group.name.code_name, None): + return group.func(args) + + if not entry.func.empty(): + return entry.func(args) + + raise RuntimeError( + "can not find module [" + entry.name.name + \ + "] main function to run") + parser.set_defaults(func=_func) + + @staticmethod + def init_parser_object(parser_object, mod_name, entry, pre_func): + logger = logging.getLogger("cmd.parser") + + # add subparser + if "sub_parser" not in parser_object: + parser_object["sub_parser"] = \ + parser_object["parser"].add_subparsers( + title = "COMMAND", + description = "supportive sub commands") + + entry.params.kw.setdefault( + "formatter_class", + argparse.RawDescriptionHelpFormatter) + + try: + mod_parser = parser_object["sub_parser"].add_parser( + mod_name, *entry.params.args, **entry.params.kw) + except Exception as e: + logger.error("module({}): {}".format( + entry.name, e)) + raise e + + CmdStorage.init_parser(mod_parser, entry, pre_func) + parser_object[mod_name] = { "parser": mod_parser, } + return mod_parser + + @staticmethod + def init_parsers() -> argparse.ArgumentParser: + CmdStorage.refs_analysis() + + pre_entry = CmdStorage.get_entry(CmdStorage.GLOBAL_NAME) + CmdStorage.STORE.pop(CmdStorage.GLOBAL_NAME) + pre_func = pre_entry.func.move() + pre_groups = pre_entry.as_groups() + + # remove unuseful module path + for name in CmdName.topo_sort(CmdStorage.STORE.keys()): + entry = CmdStorage.STORE[name] + if getattr(entry, "has_main_entry", None): + continue + has_main_entry = not entry.func.empty() + for group in entry.groups.values(): + if not group.func.empty(): + has_main_entry = True + + if has_main_entry: + for mod_name in entry.name.mod_prefix_arr: + if mod_name in CmdStorage.STORE: + setattr(CmdStorage.get_entry(mod_name), + "has_main_entry", True) + else: + del CmdStorage.STORE[name] + + # init root parser descriptions + root_entry = CmdStorage.get_entry("") + root_entry.params.kw.setdefault( + "description", + "bbcode helper script, implemented via python3") + 
root_entry.params.kw.setdefault( + "formatter_class", + argparse.RawDescriptionHelpFormatter) + if root_entry.enable_global_opt: + root_entry.update_groups(pre_groups) + + logger = logging.getLogger("cmd.parser") + try: + root_parser = argparse.ArgumentParser( + *root_entry.params.args, **root_entry.params.kw) + except Exception as e: + logger.error("module({}): {}".format( + root_entry.name, e)) + raise e + + CmdStorage.init_parser(root_parser, root_entry, pre_func) + + # set root parser object + CmdStorage.PARSERS["parser"] = root_parser + + for entry in list(CmdStorage.STORE.values()): + parser_object = CmdStorage.PARSERS + for mod_name, prefix in zip( + entry.name.mod_array, entry.name.mod_prefix_arr): + if mod_name not in parser_object: + mod_entry = CmdStorage.get_entry(prefix) + if mod_entry.enable_global_opt: + mod_entry.update_groups(pre_groups) + CmdStorage.init_parser_object( + parser_object, mod_name, + mod_entry, pre_func) + parser_object = parser_object[mod_name] + return root_parser + + @staticmethod + def get_parser(parser_path) -> argparse.ArgumentParser: + parser_object = CmdStorage.PARSERS + for mod_name in CmdName(parser_path).mod_array: + if mod_name not in parser_object: + raise RuntimeError("cannot find parser: " + parser_path) + parser_object = parser_object[mod_name] + return parser_object["parser"] + + +""" CMD Registration API +""" + +def option(*args, **kw): + """ ArgParse:add_argument function wrapper options + + Parameters + ========== + + name or flags: aka "-f", "--foo" + action: available options are + "store"(default), + "store_const", + "store_true", "store_false", + "append", "append_const", + "count", + "help"(disabled), + "version", "extend" + nargs: + const: + default: store default value, None by default + type: argument type + choices: available options + required: make optional argument required like "-f" + help: print help information + metavar: meta variable in usage + dest: + + """ + def _func(func : CmdFunction): + 
func.options.add_option(*args, **kw) + return func + return _func + +def module(mod_name, *args, + as_main = False, refs = [], + with_global_opt = True, + permission = PUBLIC, **kw): + """ Module Interface + + Root Parser Params + ================== + prog - The name of the program (default: sys.argv[0]) + usage - The string describing the program usage + (default: generated from arguments added to parser) + description - Text to display before the argument help + (default: none) + epilog - Text to display after the argument help (default: none) + parents - A list of ArgumentParser objects whose arguments + should also be included + formatter_class - A class for customizing the help output + prefix_chars - The set of characters that prefix optional + arguments (default: ‘-‘) + fromfile_prefix_chars - The set of characters that prefix + files from which additional arguments should be read + (default: None) + argument_default - The global default value for arguments + (default: None) + conflict_handler - The strategy for resolving conflicting + optionals (usually unnecessary) + add_help - Add a -h/--help option to the parser (default: True) + allow_abbrev - Allows long options to be abbreviated if + the abbreviation is unambiguous. (default: True) + exit_on_error - Determines whether or not ArgumentParser + exits with error info when an error occurs. (default: True) + + Sub Module Params + ================= + title - title for the sub-parser group in help output; + by default “subcommands” if description is provided, + otherwise uses title for positional arguments + description - description for the sub-parser group in help + output, by default None + prog - usage information that will be displayed with + sub-command help, by default the name of the program + and any positional arguments before the subparser argument + parser_class - class which will be used to create sub-parser + instances, by default the class of the current parser + (e.g. 
ArgumentParser) + action - the basic type of action to be taken when this + argument is encountered at the command line + dest - name of the attribute under which sub-command name + will be stored; by default None and no value is stored + required - Whether or not a subcommand must be provided, + by default False (added in 3.7) + help - help for sub-parser group in help output, + by default None + metavar - string presenting available sub-commands in help; + by default it is None and presents sub-commands in + form {cmd1, cmd2, ..} + """ + + mod_entry = CmdStorage.get_entry(mod_name) + mod_entry.enable_global_opt = with_global_opt + mod_entry.references.update(refs) + mod_entry.register_parser(*args, **kw) + if as_main: + return mod_entry.by_main_func(permission).wrapper + return mod_entry.by_pass_func(permission).wrapper + +def group(mod_name, + as_main = False, refs = [], + permission = PRIVATE, + # group parameters + group_name = None, with_short=False, + description=None): + mod_entry = CmdStorage.get_entry(mod_name) + mod_entry.references.update(refs) + def _func(func): + gname = CmdName(func.__name__) + if group_name is not None: + gname = CmdName(group_name) + gentry = mod_entry.group_entry(gname) + + desc = description if description else func.__doc__ + gentry.register_parser( + title=str(gentry.name), + description=desc) + + if as_main: + gfunc = gentry.by_main_func(permission) + short_opt = "-{}".format(gname.name[0]) + opt_args = [short_opt] if with_short else [] + opt_args.append(gentry.name.opt_name) + gfunc.options.add_option( + *opt_args, + action="store_true", + help="enable module " + str(gentry.name)) + else: + gfunc = gentry.by_pass_func(permission) + return gfunc.wrapper(func) + return _func + +def global_options(refs=[]): + return module(CmdStorage.GLOBAL_NAME, refs=refs, as_main=True) + +def parser(name) -> argparse.ArgumentParser: + parser_object = CmdStorage.PARSERS + for mod_name in CmdName(name).mod_array: + if mod_name in parser_object: + 
parser_object = parser_object[mod_name] + else: + raise RuntimeError("parser:{} not found".format(name)) + return parser_object["parser"] + +# Convenient class for user to create args +class Args: + pass + +def Run(): + root_parser = CmdStorage.init_parsers() + args = root_parser.parse_args() + + if getattr(args, "func", None): + args.func(args) + return args + + raise RuntimeError( + "cannot find the mainly function to run, " + + "please set main function via mod_main or group_main." + ) diff --git a/python/mrt/common/dfs.py b/python/mrt/common/dfs.py new file mode 100644 index 00000000..b0a4b099 --- /dev/null +++ b/python/mrt/common/dfs.py @@ -0,0 +1,87 @@ +from typing import List, Callable +from typing import TypeVar + +NodeT = TypeVar("node") +GraphT = List[NodeT] + +RefsFuncT = Callable[[NodeT], GraphT] + +CycleNodeT = GraphT +CyclingFuncT = Callable[[GraphT], None] + +VisitFuncT = Callable[[NodeT, int, int], None] + +def _default_cycling_trigger(graph : CycleNodeT): + raise RuntimeError( + "graph has cycle with path: {}->{}".format( + "->".join([str(n) for n in graph]), + str(graph[0]))) + +def _default_visit_func( + node : NodeT, + ref_size : int, # all child size + # current visit sequence index, range [0, child_size] + index : int): + pass + +def dfs_visit( + graph : GraphT, + refs_generator: RefsFuncT, + visit_func : VisitFuncT = _default_visit_func, + cycling_trigger : CyclingFuncT = _default_cycling_trigger): + """ Abstract Graph DFS Visit Algorithm + + The solution is deep first sequence, with quick leaf + cut of set visited attribute. + + Parameters + ========== + graph: node array, and node should support stringlify. + refs_generator: function with node as parameter, + to get the reference nodes for input node. 
+ """ + visited_nodes : GraphT = [] + for node in list(graph): + _dfs_impl(node, + refs_generator, + visit_func, + cycling_trigger, + visited_nodes) + +def _dfs_impl( + node : NodeT, + refs_generator : RefsFuncT, + visit_func : VisitFuncT, + cycling_trigger : CyclingFuncT, + visited_nodes : GraphT, + dfs_path : GraphT = []): + if node in visited_nodes: + return + + dfs_path.append(node) + + ref_nodes = refs_generator(node) + ref_size = len(ref_nodes) + + for idx, ref_node in enumerate(list(ref_nodes)): + # dfs visit function, interface format + visit_func(node, ref_size, idx) + + # cycling to skip, process after the dfs visit + # for current node + if ref_node in dfs_path: + cycling_trigger(dfs_path) + continue + + _dfs_impl(ref_node, + refs_generator, + visit_func, + cycling_trigger, + visited_nodes, + dfs_path) + + # last visit function, the ref_size ranged in [0, N] + visit_func(node, ref_size, ref_size) + + dfs_path.pop(-1) + visited_nodes.append(node) diff --git a/python/mrt/common/log.py b/python/mrt/common/log.py new file mode 100644 index 00000000..82d6ffc8 --- /dev/null +++ b/python/mrt/common/log.py @@ -0,0 +1,134 @@ +from datetime import datetime +from typing import List + +import logging + +TRACE = logging.DEBUG // 2 +DEBUG = logging.DEBUG +INFO = logging.INFO +WARN = logging.WARNING +ERROR = logging.ERROR +FATAL = logging.CRITICAL + +logging.addLevelName(TRACE, "TRACE") +logging.addLevelName(DEBUG, "DEBUG") +logging.addLevelName(INFO, "INFO") +logging.addLevelName(WARN, "WARN") +logging.addLevelName(ERROR, "ERROR") +logging.addLevelName(FATAL, "FATAL") + +LOG_LEVELS = [TRACE, DEBUG, INFO, WARN, ERROR, FATAL] +LOG_NAMES = [logging.getLevelName(l).strip() for l in LOG_LEVELS] + +def level2name(log_level): + assert log_level in LOG_LEVELS + return LOG_NAMES[LOG_LEVELS.index(log_level)] + +def name2level(log_name): + assert log_name in LOG_NAMES + return LOG_LEVELS[LOG_NAMES.index(log_name)] + + +class ColorFormatter(logging.Formatter): + def 
__init__(self, fmt=None, datefmt=None, style='%'): + super(ColorFormatter, self).__init__(fmt, datefmt, style) + + self._colors = { + "TRACE": "\033[38;5;111m", + "DEBUG": "\033[38;5;111m", + "INFO": "\033[38;5;47m", + "WARN": "\033[38;5;178m", + "ERROR": "\033[38;5;196m", + "FATAL": "\033[30;48;5;196m", + } + self._default = "\033[38;5;15m" + self._reset = "\033[0m" + + def format(self, record): + message = super(ColorFormatter, self).format(record) + log_color = self._colors.get(record.levelname, self._default) + message = log_color + message + self._reset + return message + +class FilterList(logging.Filter): + """ Filter with logging module + + Filter rules as below: + {allow|disable log name} > level no > keywords > + {inheritance from parent log name} > by default filter + """ + def __init__(self, default=False, allows=[], disables=[], + keywords=[], log_level=logging.INFO): + self.rules = {} + self._internal_filter_rule = "_internal_filter_rule" + self.log_level = log_level + self.keywords = keywords + + self.rules[self._internal_filter_rule] = default + for name in allows: + splits = name.split(".") + rules = self.rules + for split in splits: + if split not in rules: + rules[split] = {} + rules = rules[split] + + rules[self._internal_filter_rule] = True + + for name in disables: + splits = name.split(".") + rules = self.rules + for split in splits: + if split not in rules: + rules[split] = {} + rules = rules[split] + + rules[self._internal_filter_rule] = False + + def filter(self, record): + rules = self.rules + rv = rules[self._internal_filter_rule] + + splits = record.name.split(".") + for split in splits: + if split in rules: + rules = rules[split] + if self._internal_filter_rule in rules: + rv = rules[self._internal_filter_rule] + else: + if record.levelno >= self.log_level: + return True + + for keyword in self.keywords: + if keyword in record.getMessage(): + return True + return rv + return rv + +def Init(log_level): + assert log_level in LOG_LEVELS + 
logging.basicConfig(level=log_level) + formatter = ColorFormatter( + fmt="[ %(asctime)s %(name)10s %(levelname)5s ] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S") + + log_filter = FilterList( + log_level=log_level, + default=False) + for handler in logging.root.handlers: + handler.addFilter(log_filter) + handler.setFormatter(formatter) + +if __name__ == "__main__": + from . import cmd + + @cmd.module("log", as_main=True, + help="log test module", permission=cmd.PRIVATE) + def test_main(args): + Init(args) + logging.debug("test") + logging.info("test") + logging.warning("test") + logging.error("test") + + cmd.Run() diff --git a/python/mrt/common/thread.py b/python/mrt/common/thread.py new file mode 100644 index 00000000..a8d08001 --- /dev/null +++ b/python/mrt/common/thread.py @@ -0,0 +1,150 @@ +import os +import sys +import logging +import signal +from threading import Event, Thread, Lock + +__QUIT_EVENT__ = Event() +""" Maintain primitive for naive array of __QUIT_EVENTS__. """ +__LOCK__ = Lock() +__QUIT_EVENTS__ = [] + +logger = logging.getLogger("service") + +# Exit handlers module +__EXIT_HANDLERS__ = [] + +def register_exit_handler(func): + __EXIT_HANDLERS__.append(func) + return func + +def safe_exit(*unused_args): + if __QUIT_EVENT__.is_set(): + logger.warning("Duplicated exit for threads") + return + + print("") + logger.info("shutting down ...") + + __LOCK__.acquire() + __QUIT_EVENT__.set() + for event in __QUIT_EVENTS__: + event.set() + __LOCK__.release() + + for exit_func in __EXIT_HANDLERS__: + t = Thread(target=exit_func) + t.join() + +# register signal processor in global. 
+for sig in ('TERM', 'HUP', 'INT'): + signal.signal( + getattr(signal, 'SIG'+sig), + safe_exit); + +# Interrupt Design +class ThreadInterruptError(Exception): + pass + +def is_interrupted(): + return __QUIT_EVENT__.is_set() + +def interrupt_point(): + if is_interrupted(): + raise ThreadInterruptError() + +def wait_for_event(timeout): + event = Event() + + __LOCK__.acquire() + if is_interrupted(): + event.set() + __QUIT_EVENTS__.append(event) + __LOCK__.release() + + event.wait(timeout) + + __LOCK__.acquire() + __QUIT_EVENTS__.remove(event) + __LOCK__.release() + + return event + +def wait(timeout): + """ Thread wait function, raise error if trigger signal int. """ + if wait_for_event(timeout).is_set(): + raise ThreadInterruptError() + +def interrupt(): + """ Interrupt program, stop all threads including current. """ + safe_exit() + interrupt_point() + +def _thread_safe_func(func): + def error_func(*args, **kw): + try: + func(*args, **kw) + except ThreadInterruptError: + pass + except Exception as e: + logger.error("func:{} exit with error: {}".format( + func.__name__, e)) + safe_exit() + return error_func + +def as_thread_func(func): + def _container(*args, **kwargs): + t : Thread = Thread( + target=_thread_safe_func(func), + args=args, kwargs=kwargs) + t.start() + return t + return _container + +def as_daemon_thread(func): + """ Daemon Thread Wrapper + + Thread will be closed after main thread exits automatically, + quick function to programming in development. 
+ """ + def _container(*args, **kwargs): + t : Thread = Thread( + target=_thread_safe_func(func), + args=args, kwargs=kwargs, + daemon=True) + t.start() + return t + return _container + + +# Service module +__REGISTER_SERVICES__ = {} + +def register_service(name, auto_reload=False, time_out=5): + if name in __REGISTER_SERVICES__: + raise RuntimeError("service:{} has been registered".format( + name)) + + def _func(func): + def _auto_reload(*args, **kw): + func(*args, **kw) + + while auto_reload: + logger.warning( + "service:{} closed, restart in {} seconds".format( + name, time_out)) + + interrupt_point() + if wait_for_event(time_out).is_set(): + return + + func(*args, **kw) + + __REGISTER_SERVICES__[name] = as_thread_func(_auto_reload) + return func + return _func + +def start_services(*args, **kw): + for name, srv_func in __REGISTER_SERVICES__.items(): + logger.debug("start service - {}".format(name)) + srv_func(*args, **kw) From b7acd15c95bc2f7f259f7cd34ac609d0efee651d Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 3 Sep 2021 19:15:07 +0800 Subject: [PATCH 005/120] initialize cvm_main --- main.py | 345 +++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 344 insertions(+), 1 deletion(-) diff --git a/main.py b/main.py index 15b59607..eec5a8e4 100644 --- a/main.py +++ b/main.py @@ -1,5 +1,6 @@ import sys from os import path +from mrt.conf import MRT_MODEL_ROOT # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) @@ -21,12 +22,354 @@ def global_func(args): log.Init(log.name2level(args.verbosity)) + +class DeviceTypeAction: + def __init__(self, option_strings, dest, nargs=None, **kwargs): + pass + def __call__(self, parser, namespace, values, option_string=None): + pass + +@cmd.option("--model-dir", nargs='?', type=str, default=MRT_MODEL_ROOT) +@cmd.option("--default_device_type", nargs='?', type=str, + dest="default_model_ctx", choices=['cpu', 'gpu'], + default='cpu', action=DeviceTypeAction) @cmd.module("", as_main=True, 
description=""" CVM Python Tool """) def cvm_main(args): - print("null") + # default + # verbosity = _get_val(cfg, sec, 'Verbosity', + # dtype=int_t, dval=logging.NOTSET) + # utils.log_init(level=verbosity) + # logger = logging.getLogger("log.main") + model_dir = args.model_dir + if model_dir.startswith("~"): + model_dir = path.expanduser(model_dir) + assert path.exists(model_dir), \ + "Please create the folder `data` first" + model_name = args.model_name + model_prefix = path.join(model_dir, model_name) + default_model_ctx = args.default_model_ctx + # TODO + raise NotImplementedError("to be implemented, proceeding...") + input_shape = _get_val(cfg, sec, 'Input_shape', dtype=tuple_t) + start_pos = {'DEFAULT': 0, 'PREPARE': 1, 'SPLIT_MODEL': 2, \ + 'CALIBRATION': 3, 'QUANTIZATION': 4, \ + 'MERGE_MODEL': 5} + start = _get_val(cfg, sec, 'Start', dtype=str_t, dval='DEFAULT') + _check(start in start_pos.keys(), sec, 'Start', + message="Please choose a value from `%s`" % start_pos.keys()) + start_point = start_pos[start] + + # prepare + sec = 'PREPARE' + sym_file, prm_file = _load_fname(model_prefix, suffix='prepare') + sym_path, prm_path = _load_fname(model_prefix) + if not path.exists(sym_path) or not path.exists(prm_path): + save_model(model_name, data_dir=model_dir, ctx=model_ctx) + # save_model(model_name, sym_path=sym_path, prm_path=prm_path) + + if start_point < 1: + model = Model.load(sym_path, prm_path) + model.prepare(set_batch(input_shape, 1)) + dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) + if dump: + model.save(sym_file, prm_file) + logger.info("`%s` stage finihed" % sec) + elif start_point == 1: + _check(path.exists(sym_file) and path.exists(prm_file), 'DEFAULT', + 'Start', message="Check point of `%s` not found, " % sec + \ + "please move the start point earlier") + model = Model.load(sym_file, prm_file) + logger.info("`%s` stage checked" % sec) + + # split model + sec = 'SPLIT_MODEL' + keys = _get_val(cfg, sec, 'Keys', dtype=ARRAY(str_t), 
dval='') + sym_top_file, prm_top_file = _load_fname(model_prefix, suffix='top') + sym_base_file, prm_base_file = _load_fname(model_prefix, suffix='base') + if keys == '': + _check(start_point != 2, 'DEFAULT', 'Start', + message="Invalid start point") + if start_point <= 1: + logger.info("`%s` stage skipped" % sec) + elif start_point < 2: + base, top = model.split(keys) + dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) + if dump: + top.save(sym_top_file, prm_top_file) + base.save(sym_base_file, prm_base_file) + logger.info("`%s` stage finished" % sec) + elif start_point == 2: + _checkpoint_exist( + sec, *[sym_top_file, prm_top_file, sym_base_file, prm_base_file]) + top = Model.load(sym_top_file, prm_top_file) + base = Model.load(sym_base_file, prm_base_file) + logger.info("`%s` stage checked" % sec) + + # calibration + sec = 'CALIBRATION' + model_name_calib = model_name + '.mrt.calibrate' + batch = _get_val(cfg, sec, 'Batch', dtype=int_t, dval=16) + ds_name = _get_val(cfg, sec, 'Dataset') + dataset_dir = _get_val(cfg, sec, 'Dataset_dir', dval=conf.MRT_DATASET_ROOT) + if start_point < 3: + mrt = model.get_mrt() if keys == '' else base.get_mrt() + calibrate_num = _get_val( + cfg, sec, 'Calibrate_num', dtype=int_t, dval=1) + lambd = _get_val(cfg, sec, 'Lambda', dtype=float_t, dval=None) + shp = set_batch(input_shape, batch) + dataset = ds.DS_REG[ds_name](shp, root=dataset_dir) + data_iter_func = dataset.iter_func() + ctx = _get_ctx(cfg, sec, dctx=model_ctx) + for i in range(calibrate_num): + data, _ = data_iter_func() + mrt.set_data(data) + mrt.calibrate(lambd=lambd, ctx=ctx) + dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) + if dump: + mrt.save(model_name_calib, datadir=model_dir) + logger.info("`%s` stage finished" % sec) + elif start_point == 3: + _checkpoint_exist( + sec, *list(utils.extend_fname( + model_prefix+".mrt.calibrate", with_ext=True))) + mrt = MRT.load(model_name_calib, datadir=model_dir) + if keys != "": + _checkpoint_exist(sec, 
sym_top_file, prm_top_file) + top = Model.load(sym_top_file, prm_top_file) + logger.info("`%s` stage checkd" % sec) + + # quantization + sec = 'QUANTIZATION' + model_name_quant = model_name + '.mrt.quantize' + if start_point < 4: + restore_names = _get_val( + cfg, sec, 'Restore_name', dtype=ARRAY(str_t), dval=[]) + name_to_op = {} + from sym_utils import topo_sort + for sym in topo_sort(mrt.current_model.symbol): + name, op_name = sym.attr('name'), sym.attr('op_name') + if op_name not in name_to_op: + name_to_op[op_name] = [] + name_to_op[op_name].append(name) + new_names = [] + for name in restore_names: + if name.startswith("_OP_") and name[4:] in name_to_op: + for new_name in name_to_op[name[4:]]: + new_names.append(new_name) + else: + new_names.append(name) + restore_names = set(new_names) + if '_ALL_EXCEPT_' in restore_names: + from tfm_base import _pass_manager + from tfm_ops import disabled_restore_ops + + quantize_ops = [op_name for op_name in _pass_manager["quantize"] \ + if op_name not in disabled_restore_ops] + restore_names_new = [] + for sym in topo_sort(mrt.current_model.symbol): + name, op_name = sym.attr('name'), sym.attr('op_name') + if op_name in quantize_ops and \ + name not in restore_names: + restore_names_new.append(name) + restore_names = set(restore_names_new) + for name in restore_names: + mrt.set_restore(name) + input_precision = _get_val( + cfg, sec, 'Input_precision', dtype=int_t, dval=None) + if input_precision is not None: + mrt.set_input_prec(input_precision) + output_precision = _get_val( + cfg, sec, 'Output_precision', dtype=int_t, dval=None) + if output_precision is not None: + mrt.set_output_prec(output_precision) + ctx = _get_ctx(cfg, sec, dctx=model_ctx) + softmax_lambd = _get_val( + cfg, sec, 'Softmax_lambd', dtype=float_t, dval=None) + if softmax_lambd is not None: + mrt.set_softmax_lambd(softmax_lambd) + shift_bits = _get_val( + cfg, sec, 'Shift_bits', dtype=int_t, dval=None) + if shift_bits is not None: + 
mrt.set_shift_bits(shift_bits) + thresholds = _get_val( + cfg, sec, 'Thresholds', dtype=PAIR(str_t, float_t), dval=None) + if thresholds is not None: + for name, threshold in thresholds.items(): + mrt.set_threshold(name, threshold) + mrt.quantize() + inputs_ext = mrt.get_inputs_ext() + dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) + if dump: + mrt.save(model_name_quant, datadir=model_dir) + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + infos = ['oscales: ', oscales, + 'input_ext: ', inputs_ext, + 'input shapes: ', input_shape] + ext_all_file = path.join(model_dir, model_name+".all.quantize.ext") + sim.save_ext(ext_all_file, *infos) + logger.info("`%s` stage finished" % sec) + elif start_point == 4: + _checkpoint_exist( + sec, *list(utils.extend_fname( + model_prefix+'.mrt.quantize', with_ext=True))) + mrt = MRT.load(model_name_quant, datadir=model_dir) + inputs_ext = mrt.get_inputs_ext() + dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) + if keys != "": + _checkpoint_exist(sec, sym_top_file, prm_top_file) + top = Model.load(sym_top_file, prm_top_file) + logger.info("`%s` stage checkd" % sec) + + # merge_model + sec = 'MERGE_MODEL' + sym_all_file, prm_all_file, ext_all_file = _load_fname( + model_prefix, suffix='all.quantize', with_ext=True) + if keys == '': + _check(start_point != 5, 'DEFAULT', 'Start', + message="Invalid start point") + qmodel = mrt.current_model + oscales = mrt.get_output_scales() + logger.info("`%s` stage skipped" % sec) + elif start_point < 5: + qmodel = mrt.current_model + mrt_oscales = mrt.get_output_scales() + model_merger = Model.merger(qmodel, top, mrt.get_maps()) + attribute_deps = _get_val( + cfg, sec, 'Attribute_deps', dtype=PAIR(str_t, str_t, str_t)) + + name_idx = {mrt.get_maps().get( + s.attr("name"), s.attr("name")): i \ + for i, s in enumerate(qmodel.symbol)} + def mergefunc(node, params, graph): + name, op_name = node.attr('name'), node.attr('op_name') + childs, attr = 
sutils.sym_iter( + node.get_children()), node.list_attr() + if op_name in attribute_deps: + attr_deps = attribute_deps[op_name] + for attr_name, v in attr_deps.items(): + val = sutils.get_attr(attr, attr_name, 0) + attr[attr_name] = int(val*mrt_oscales[name_idx[v]]) + node = sutils.get_mxnet_op(op_name)( + *childs, **attr, name=name) + return node + + qmodel = model_merger.merge(callback=mergefunc) + oscale_maps = _get_val( + cfg, sec, 'Oscale_maps', dtype=PAIR(str_t, str_t)) + oscales = model_merger.get_output_scales( + mrt_oscales, oscale_maps) + inputs_ext = mrt.get_inputs_ext() + dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) + if dump: + qmodel.save(sym_all_file, prm_all_file) + infos = ['oscales: ', oscales, + 'input_ext: ', inputs_ext, + 'input shapes: ', input_shape] + sim.save_ext(ext_all_file, *infos) + logger.info("`%s` stage finished" % sec) + else: + _check(start_point == 5, 'DEFAULT', 'Start', + message='Start_point invalid') + qmodel = Model.load(sym_all_file, prm_all_file) + _, oscales, _, inputs_ext, _, _ = sim.load_ext(ext_all_file) + logger.info("`%s` stage checked" % sec) + + # evaluation + sec = 'EVALUATION' + if sec in cfg.sections(): + # dataset_dir = _get_val(cfg, sec, 'Dataset_dir', dval=conf.MRT_DATASET_ROOT) + iter_num = _get_val(cfg, sec, 'Iter_num', dtype=int_t, dval=0) + batch = _get_val(cfg, sec, 'Batch', dtype=int_t, dval=batch) + ctx = _get_ctx(cfg, sec, dctx=model_ctx) + if isinstance(ctx, mx.Context): + ctx = [ctx] + org_model = Model.load(sym_path, prm_path) + graph = org_model.to_graph(ctx=ctx) + dataset = ds.DS_REG[ds_name](set_batch(input_shape, batch)) + data_iter_func = dataset.iter_func() + metric = dataset.metrics() + + baxis = batch_axis(input_shape) + olen = len(org_model.symbol) + def forward(net, data, ctx): + """ Multiple xpu run support. 
+ """ + data = gluon.utils.split_and_load( + data, ctx_list=ctx, batch_axis=baxis, even_split=False) + outs = [net(d) for d in data] + if olen == 1: + outs = nd.concatenate(outs) + else: + outs = [nd.concatenate([outs[i][j] \ + for i in range(len(outs))]) for j in range(olen)] + return outs + + def evalfunc(data, label): + outs = forward(graph, data, ctx=ctx) + acc = dataset.validate(metric, outs, label) + return acc + + ngpus = len(ctx) + _check( + not batch % ngpus, sec, 'Device_ids', + 'Batch must be divisible by the number of gpus') + split_batch = batch//ngpus + rqmodel = reduce_graph(qmodel, { + 'data': set_batch(input_shape, split_batch)}) + qgraph = rqmodel.to_graph(ctx=ctx) + qmetric = dataset.metrics() + + def quantize(data, label): + data = sim.load_real_data(data, 'data', inputs_ext) + outs = forward(qgraph, data, ctx) + outs = outs / oscales[0] if olen == 1 \ + else [(t / oscales[i]) for i, t in enumerate(outs)] + acc = dataset.validate(qmetric, outs, label) + return acc + + if iter_num > 0: + logger.info("Validating...") + utils.multi_validate(evalfunc, data_iter_func, quantize, + iter_num=iter_num, + logger=logging.getLogger('mrt.validate'), + batch_size=batch) + logger.info("`%s` stage finished" % sec) + + # compilation + sec = 'COMPILATION' + if sec in cfg.sections(): + dump_dir = _get_path( + cfg, sec, 'Dump_dir', is_dir=True, dpath=model_dir) + batch = _get_val(cfg, sec, 'Batch', dtype=int_t, dval=batch) + device_type = _get_val(cfg, sec, 'Device_type', dval='cpu') + device_ids = _get_val( + cfg, sec, 'Device_ids', + dtype=ARRAY(int_t), dval=0) + model_name_tfm = model_name + "_cvm" + qmodel.to_cvm(model_name_tfm, datadir=dump_dir, + input_shape=set_batch(input_shape, batch), + target=device_type, device_ids=device_ids) + + dataset = ds.DS_REG[ds_name](set_batch(input_shape, batch)) + dump_data, _ = dataset.iter_func()() + dump_data = sim.load_real_data( + dump_data.astype("float64"), 'data', mrt.get_inputs_ext()) + model_root = 
path.join(dump_dir, model_name_tfm) + np.save(path.join(model_root, "data.npy"), + dump_data.astype('int8').asnumpy()) + infos = { + "inputs_ext": inputs_ext, + "oscales": oscales, + "input_shapes": input_shape, + } + sim.save_ext(path.join(model_root, "ext"), infos) + logger.info("`%s` stage finished" % sec) + + # starting processes thread.start_services(args) if __name__ == "__main__": From 6b3317457aa1cf32116b526fd5d0843a16adca8f Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 6 Sep 2021 18:02:16 +0800 Subject: [PATCH 006/120] update main.py default stage --- main.py | 87 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 48 insertions(+), 39 deletions(-) diff --git a/main.py b/main.py index eec5a8e4..506b0032 100644 --- a/main.py +++ b/main.py @@ -1,48 +1,61 @@ import sys from os import path +import argparse +from typing import Tuple, List, Union +import logging + +import mxnet as mx + from mrt.conf import MRT_MODEL_ROOT +from mrt.common import cmd, log, thread # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) sys.path.insert(0, path.join(__ROOT__, "python")) -import logging - -from mrt.common import cmd, log, thread - LOG_MSG = ",".join(["{}:{}".format(l, n) \ for l, n in zip(log.LOG_LEVELS, log.LOG_NAMES)]) -@cmd.option("-v", "--verbosity", metavar="LEVEL", - choices=log.LOG_NAMES, default=log.level2name(log.DEBUG), - help="log verbosity to pring information, " + \ - "available options: {}".format(log.LOG_NAMES) + \ - " by default {}".format(log.level2name(log.DEBUG))) -@cmd.global_options() -def global_func(args): - log.Init(log.name2level(args.verbosity)) +# @cmd.option("-v", "--verbosity", metavar="LEVEL", + # choices=log.LOG_NAMES, default=log.level2name(log.DEBUG), + # help="log verbosity to pring information, " + \ + # "available options: {}".format(log.LOG_NAMES) + \ + # " by default {}".format(log.level2name(log.DEBUG))) +# @cmd.global_options() +# def global_func(args): + # 
log.Init(log.name2level(args.verbosity)) +def get_ctx(device_type, device_ids, dctx=mx.cpu()): + contex = dctx + if device_type == 'gpu': + contex = mx.gpu(device_ids[0]) if len(device_ids) == 1 \ + else [mx.gpu(i) for i in device_ids] + # if section == 'CALIBRATION': + # _check(type(contex).__name__ != 'list', section, 'Device_ids', + # message='`Device_ids` should be an integer in Calibration') + return contex -class DeviceTypeAction: - def __init__(self, option_strings, dest, nargs=None, **kwargs): - pass - def __call__(self, parser, namespace, values, option_string=None): - pass -@cmd.option("--model-dir", nargs='?', type=str, default=MRT_MODEL_ROOT) -@cmd.option("--default_device_type", nargs='?', type=str, - dest="default_model_ctx", choices=['cpu', 'gpu'], - default='cpu', action=DeviceTypeAction) +# TODO(ryt): option string abbreviation +@cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) +@cmd.option("model_name", type=str) +@cmd.option("--default-device-type", type=str, default='cpu', + choices=['cpu', 'gpu']) +@cmd.option("--default-device-ids", nargs='+', type=int, default=[0]) +@cmd.option("--verbosity", type=str, default='debug', + choices=['none', 'debug', 'info', 'warning', 'error', 'critical']) +@cmd.option("--input-shape", nargs='+', type=int, default=[-1, 3, 224, 224]) +@cmd.option("--start", type=str, default="default", + choices=['default', 'prepare', 'split_model', + 'calibration', 'quantization', 'merge_model']) @cmd.module("", as_main=True, description=""" CVM Python Tool """) def cvm_main(args): - # default - # verbosity = _get_val(cfg, sec, 'Verbosity', - # dtype=int_t, dval=logging.NOTSET) - # utils.log_init(level=verbosity) - # logger = logging.getLogger("log.main") + # default stage + log.Init(log.name2level(args.verbosity.upper())) + logger = logging.getLogger("log.main") model_dir = args.model_dir if model_dir.startswith("~"): model_dir = path.expanduser(model_dir) @@ -50,19 +63,15 @@ def cvm_main(args): "Please create the 
folder `data` first" model_name = args.model_name model_prefix = path.join(model_dir, model_name) - default_model_ctx = args.default_model_ctx - # TODO - raise NotImplementedError("to be implemented, proceeding...") - input_shape = _get_val(cfg, sec, 'Input_shape', dtype=tuple_t) - start_pos = {'DEFAULT': 0, 'PREPARE': 1, 'SPLIT_MODEL': 2, \ - 'CALIBRATION': 3, 'QUANTIZATION': 4, \ - 'MERGE_MODEL': 5} - start = _get_val(cfg, sec, 'Start', dtype=str_t, dval='DEFAULT') - _check(start in start_pos.keys(), sec, 'Start', - message="Please choose a value from `%s`" % start_pos.keys()) - start_point = start_pos[start] - - # prepare + model_ctx = get_ctx(args.default_device_type, args.default_device_ids) + input_shape = args.input_shape + start_pos = { + 'default': 0, 'prepare': 1, 'split_model': 2, + 'calibration': 3, 'quantization': 4, 'merge_model': 5} + start_point = start_pos[args.start] + # TODO(ryt), prepare, split_model, calibration, quantization, merge_model + return + # prepare stage sec = 'PREPARE' sym_file, prm_file = _load_fname(model_prefix, suffix='prepare') sym_path, prm_path = _load_fname(model_prefix) From c7982488fcbbeaebcc402f3d1533a4107104ca72 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 6 Sep 2021 18:27:07 +0800 Subject: [PATCH 007/120] upt --- main.py | 56 ++++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 16 deletions(-) diff --git a/main.py b/main.py index 506b0032..a122978d 100644 --- a/main.py +++ b/main.py @@ -8,6 +8,7 @@ from mrt.conf import MRT_MODEL_ROOT from mrt.common import cmd, log, thread +from mrt.transformer import Model # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) @@ -35,25 +36,45 @@ def get_ctx(device_type, device_ids, dctx=mx.cpu()): # message='`Device_ids` should be an integer in Calibration') return contex +def load_fname(prefix, suffix=None, with_ext=False): + """Get the model files at a given stage. 
+ + Parameters + ---------- + prefix : string + The file path without and extension. + suffix : string + The file suffix with respect to a given stage of MRT. + with_ext: bool + Whether to include ext file. + + Returns + ------- + files : tuple of string + The loaded file names. + """ + suffix = "."+suffix if suffix is not None else "" + return utils.extend_fname(prefix+suffix, with_ext) # TODO(ryt): option string abbreviation @cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) @cmd.option("model_name", type=str) -@cmd.option("--default-device-type", type=str, default='cpu', +@cmd.option("--device-type-default", type=str, default='cpu', choices=['cpu', 'gpu']) -@cmd.option("--default-device-ids", nargs='+', type=int, default=[0]) +@cmd.option("--device-ids-default", nargs='+', type=int, default=[0]) @cmd.option("--verbosity", type=str, default='debug', choices=['none', 'debug', 'info', 'warning', 'error', 'critical']) @cmd.option("--input-shape", nargs='+', type=int, default=[-1, 3, 224, 224]) @cmd.option("--start", type=str, default="default", choices=['default', 'prepare', 'split_model', 'calibration', 'quantization', 'merge_model']) +@cmd.option("--suppress-dump-prepare", type=bool, action='store_false') @cmd.module("", as_main=True, description=""" CVM Python Tool """) def cvm_main(args): - # default stage + # default log.Init(log.name2level(args.verbosity.upper())) logger = logging.getLogger("log.main") model_dir = args.model_dir @@ -69,30 +90,33 @@ def cvm_main(args): 'default': 0, 'prepare': 1, 'split_model': 2, 'calibration': 3, 'quantization': 4, 'merge_model': 5} start_point = start_pos[args.start] - # TODO(ryt), prepare, split_model, calibration, quantization, merge_model - return - # prepare stage - sec = 'PREPARE' - sym_file, prm_file = _load_fname(model_prefix, suffix='prepare') - sym_path, prm_path = _load_fname(model_prefix) + + # prepare + sym_file, prm_file = load_fname(model_prefix, suffix='prepare') + sym_path, prm_path = 
load_fname(model_prefix) if not path.exists(sym_path) or not path.exists(prm_path): save_model(model_name, data_dir=model_dir, ctx=model_ctx) - # save_model(model_name, sym_path=sym_path, prm_path=prm_path) if start_point < 1: model = Model.load(sym_path, prm_path) model.prepare(set_batch(input_shape, 1)) - dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) - if dump: + if not args.suppress_dump_prepare: model.save(sym_file, prm_file) logger.info("`%s` stage finihed" % sec) elif start_point == 1: - _check(path.exists(sym_file) and path.exists(prm_file), 'DEFAULT', - 'Start', message="Check point of `%s` not found, " % sec + \ - "please move the start point earlier") + if not path.exists(sym_file): + raise RuntimeError( + "sym_file: {} of not found".format(sym_file) + + "please specify the --start flag before 'prepare'") + if not path.exists(prm_file): + raise RuntimeError( + "prm_file: {} of not found".format(prm_file) + + "please specify the --start flag before 'prepare'") model = Model.load(sym_file, prm_file) - logger.info("`%s` stage checked" % sec) + logger.info("preparation stage checked") + # TODO(ryt), split_model, calibration, quantization, merge_model + return # split model sec = 'SPLIT_MODEL' keys = _get_val(cfg, sec, 'Keys', dtype=ARRAY(str_t), dval='') From f0ee2364e44ab0b063fe0682f6b59673838c9867 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 6 Sep 2021 18:37:03 +0800 Subject: [PATCH 008/120] update main.py prepare --- main.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/main.py b/main.py index a122978d..647c570b 100644 --- a/main.py +++ b/main.py @@ -9,6 +9,7 @@ from mrt.conf import MRT_MODEL_ROOT from mrt.common import cmd, log, thread from mrt.transformer import Model +from mrt import utils # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) @@ -56,6 +57,23 @@ def load_fname(prefix, suffix=None, with_ext=False): suffix = "."+suffix if suffix is not None else "" return 
utils.extend_fname(prefix+suffix, with_ext) +def set_batch(input_shape, batch): + """Get the input shape with respect to a specified batch value and an original input shape. + + Parameters + ---------- + input_shape : tuple + The input shape with batch axis unset. + batch : int + The batch value. + + Returns + ------- + ishape : tuple + The input shape with the value of batch axis equal to batch. + """ + return [batch if s == -1 else s for s in input_shape] + # TODO(ryt): option string abbreviation @cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) @cmd.option("model_name", type=str) @@ -68,7 +86,7 @@ def load_fname(prefix, suffix=None, with_ext=False): @cmd.option("--start", type=str, default="default", choices=['default', 'prepare', 'split_model', 'calibration', 'quantization', 'merge_model']) -@cmd.option("--suppress-dump-prepare", type=bool, action='store_false') +@cmd.option("--suppress-dump-prepare", action='store_false') @cmd.module("", as_main=True, description=""" CVM Python Tool @@ -84,7 +102,7 @@ def cvm_main(args): "Please create the folder `data` first" model_name = args.model_name model_prefix = path.join(model_dir, model_name) - model_ctx = get_ctx(args.default_device_type, args.default_device_ids) + model_ctx = get_ctx(args.device_type_default, args.device_ids_default) input_shape = args.input_shape start_pos = { 'default': 0, 'prepare': 1, 'split_model': 2, @@ -102,7 +120,7 @@ def cvm_main(args): model.prepare(set_batch(input_shape, 1)) if not args.suppress_dump_prepare: model.save(sym_file, prm_file) - logger.info("`%s` stage finihed" % sec) + logger.info("preparation stage finihed") elif start_point == 1: if not path.exists(sym_file): raise RuntimeError( From 2304b60863f7c97326763d37d28284283dbc300c Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 8 Sep 2021 10:36:48 +0800 Subject: [PATCH 009/120] upt --- main.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/main.py b/main.py index a122978d..e8a7ef7c 100644 --- 
a/main.py +++ b/main.py @@ -61,14 +61,15 @@ def load_fname(prefix, suffix=None, with_ext=False): @cmd.option("model_name", type=str) @cmd.option("--device-type-default", type=str, default='cpu', choices=['cpu', 'gpu']) -@cmd.option("--device-ids-default", nargs='+', type=int, default=[0]) +@cmd.option("--device-ids-default", nargs="+", type=int, default=[0]) @cmd.option("--verbosity", type=str, default='debug', choices=['none', 'debug', 'info', 'warning', 'error', 'critical']) @cmd.option("--input-shape", nargs='+', type=int, default=[-1, 3, 224, 224]) @cmd.option("--start", type=str, default="default", choices=['default', 'prepare', 'split_model', 'calibration', 'quantization', 'merge_model']) -@cmd.option("--suppress-dump-prepare", type=bool, action='store_false') +@cmd.option("--no-dump-prepare", type=bool, action='store_false') +@cmd.option("--keys", nargs="+", type=str) @cmd.module("", as_main=True, description=""" CVM Python Tool From 7050cf2973637524ab7cf5a8476f8594314baa30 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 8 Sep 2021 11:17:12 +0800 Subject: [PATCH 010/120] update main.py split model --- main.py | 51 ++++++++++++++++++++++----------------------------- 1 file changed, 22 insertions(+), 29 deletions(-) diff --git a/main.py b/main.py index 773de20e..85618c84 100644 --- a/main.py +++ b/main.py @@ -10,6 +10,7 @@ from mrt.common import cmd, log, thread from mrt.transformer import Model from mrt import utils +from mrt.gluon_zoo import save_model # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) @@ -86,8 +87,9 @@ def set_batch(input_shape, batch): @cmd.option("--start", type=str, default="default", choices=['default', 'prepare', 'split_model', 'calibration', 'quantization', 'merge_model']) -@cmd.option("--no-dump-prepare", type=bool, action='store_false') -@cmd.option("--keys", nargs="+", type=str) +@cmd.option("--no-dump-prepare", action='store_false') +@cmd.option("--keys", nargs="+", type=str, default="") 
+@cmd.option("--no-dump-splitmodel", action='store_false') @cmd.module("", as_main=True, description=""" CVM Python Tool @@ -119,47 +121,38 @@ def cvm_main(args): if start_point < 1: model = Model.load(sym_path, prm_path) model.prepare(set_batch(input_shape, 1)) - if not args.suppress_dump_prepare: + if not args.no_dump_prepare: model.save(sym_file, prm_file) logger.info("preparation stage finihed") elif start_point == 1: - if not path.exists(sym_file): - raise RuntimeError( - "sym_file: {} of not found".format(sym_file) + - "please specify the --start flag before 'prepare'") - if not path.exists(prm_file): - raise RuntimeError( - "prm_file: {} of not found".format(prm_file) + - "please specify the --start flag before 'prepare'") + for fpath in [sym_file, prm_file]: + if not path.exists(fpath): + raise RuntimeError("file path {} not found".format(fpath)) model = Model.load(sym_file, prm_file) logger.info("preparation stage checked") - # TODO(ryt), split_model, calibration, quantization, merge_model - return # split model - sec = 'SPLIT_MODEL' - keys = _get_val(cfg, sec, 'Keys', dtype=ARRAY(str_t), dval='') - sym_top_file, prm_top_file = _load_fname(model_prefix, suffix='top') - sym_base_file, prm_base_file = _load_fname(model_prefix, suffix='base') - if keys == '': - _check(start_point != 2, 'DEFAULT', 'Start', - message="Invalid start point") - if start_point <= 1: - logger.info("`%s` stage skipped" % sec) + sym_top_file, prm_top_file = load_fname(model_prefix, suffix='top') + sym_base_file, prm_base_file = load_fname(model_prefix, suffix='base') + if args.keys == "": + logger.info("model splitting stage skipped") elif start_point < 2: - base, top = model.split(keys) - dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) - if dump: + base, top = model.split(args.keys) + if args.no_dump_splitmodel: top.save(sym_top_file, prm_top_file) base.save(sym_base_file, prm_base_file) - logger.info("`%s` stage finished" % sec) + logger.info("model splitting stage 
finished") elif start_point == 2: - _checkpoint_exist( - sec, *[sym_top_file, prm_top_file, sym_base_file, prm_base_file]) + for fpath in \ + [sym_top_file, prm_top_file, sym_base_file, prm_base_file]: + if not path.exists(fpath): + raise RuntimeError("file path {} not found".format(fpath)) top = Model.load(sym_top_file, prm_top_file) base = Model.load(sym_base_file, prm_base_file) - logger.info("`%s` stage checked" % sec) + logger.info("model splitting stage checked") + return + # TODO(ryt), calibration, quantization, merge_model # calibration sec = 'CALIBRATION' model_name_calib = model_name + '.mrt.calibrate' From dfea2c5803a7c2e11e8b273cf3f331917b9cb46f Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 8 Sep 2021 15:50:48 +0800 Subject: [PATCH 011/120] upt main.py calibration --- main.py | 88 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 48 insertions(+), 40 deletions(-) diff --git a/main.py b/main.py index 85618c84..e5746120 100644 --- a/main.py +++ b/main.py @@ -6,11 +6,12 @@ import mxnet as mx -from mrt.conf import MRT_MODEL_ROOT +from mrt.conf import MRT_MODEL_ROOT, MRT_DATASET_ROOT from mrt.common import cmd, log, thread from mrt.transformer import Model from mrt import utils from mrt.gluon_zoo import save_model +from mrt import dataset as ds # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) @@ -33,9 +34,6 @@ def get_ctx(device_type, device_ids, dctx=mx.cpu()): if device_type == 'gpu': contex = mx.gpu(device_ids[0]) if len(device_ids) == 1 \ else [mx.gpu(i) for i in device_ids] - # if section == 'CALIBRATION': - # _check(type(contex).__name__ != 'list', section, 'Device_ids', - # message='`Device_ids` should be an integer in Calibration') return contex def load_fname(prefix, suffix=None, with_ext=False): @@ -75,21 +73,31 @@ def set_batch(input_shape, batch): """ return [batch if s == -1 else s for s in input_shape] -# TODO(ryt): option string abbreviation +# TODO: option string abbreviation 
@cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) @cmd.option("model_name", type=str) -@cmd.option("--device-type-default", type=str, default='cpu', - choices=['cpu', 'gpu']) +@cmd.option("--device-type-default", type=str, default="cpu", + choices=["cpu", "gpu"]) @cmd.option("--device-ids-default", nargs="+", type=int, default=[0]) -@cmd.option("--verbosity", type=str, default='debug', - choices=['none', 'debug', 'info', 'warning', 'error', 'critical']) -@cmd.option("--input-shape", nargs='+', type=int, default=[-1, 3, 224, 224]) +@cmd.option("--verbosity", type=str, default="debug", + choices=["none", "debug", "info", "warning", "error", "critical"]) +@cmd.option("--input-shape", nargs="+", type=int, default=[-1, 3, 224, 224]) @cmd.option("--start", type=str, default="default", - choices=['default', 'prepare', 'split_model', - 'calibration', 'quantization', 'merge_model']) -@cmd.option("--no-dump-prepare", action='store_false') + choices=["default", "prepare", "split_model", + "calibrate", "quantize", "merge_model"]) +@cmd.option("--no-dump-prepare", action="store_false") @cmd.option("--keys", nargs="+", type=str, default="") -@cmd.option("--no-dump-splitmodel", action='store_false') +@cmd.option("--no-dump-splitmodel", action="store_false") +@cmd.option("--batch-calibrate", type=int, default=16) +@cmd.option("--num-calibrate", type=int, default=1) +@cmd.option("--lambd", type=int) +@cmd.option("--dataset", type=str, default="imagenet", + choices=list(ds.DS_REG.keys())) +@cmd.option("--dataset-dir", type=str, default=MRT_DATASET_ROOT) +@cmd.option("--device-type-calibrate", type=str, default="cpu", + choices=["cpu", "gpu"]) +@cmd.option("--device-ids-calibrate", nargs="+", type=int, default=[0]) +@cmd.option("--no-dump-calibrate", action="store_false") @cmd.module("", as_main=True, description=""" CVM Python Tool @@ -134,11 +142,12 @@ def cvm_main(args): # split model sym_top_file, prm_top_file = load_fname(model_prefix, suffix='top') sym_base_file, 
prm_base_file = load_fname(model_prefix, suffix='base') - if args.keys == "": + keys = args.keys + if keys == "": logger.info("model splitting stage skipped") elif start_point < 2: - base, top = model.split(args.keys) - if args.no_dump_splitmodel: + base, top = model.split(keys) + if not args.no_dump_splitmodel: top.save(sym_top_file, prm_top_file) base.save(sym_base_file, prm_base_file) logger.info("model splitting stage finished") @@ -151,41 +160,41 @@ def cvm_main(args): base = Model.load(sym_base_file, prm_base_file) logger.info("model splitting stage checked") - return - # TODO(ryt), calibration, quantization, merge_model - # calibration - sec = 'CALIBRATION' + # calibrate model_name_calib = model_name + '.mrt.calibrate' - batch = _get_val(cfg, sec, 'Batch', dtype=int_t, dval=16) - ds_name = _get_val(cfg, sec, 'Dataset') - dataset_dir = _get_val(cfg, sec, 'Dataset_dir', dval=conf.MRT_DATASET_ROOT) + batch = args.batch_calibrate + ds_name = args.dataset if start_point < 3: mrt = model.get_mrt() if keys == '' else base.get_mrt() - calibrate_num = _get_val( - cfg, sec, 'Calibrate_num', dtype=int_t, dval=1) - lambd = _get_val(cfg, sec, 'Lambda', dtype=float_t, dval=None) shp = set_batch(input_shape, batch) - dataset = ds.DS_REG[ds_name](shp, root=dataset_dir) + dataset = ds.DS_REG[ds_name](shp, root=args.dataset_dir) data_iter_func = dataset.iter_func() - ctx = _get_ctx(cfg, sec, dctx=model_ctx) - for i in range(calibrate_num): + device_type_calibrate = args.device_type_calibrate + device_ids_calibrate = args.device_ids_calibrate + ctx = get_ctx( + device_type_calibrate, device_ids_calibrate, dctx=model_ctx) + for i in range(args.num_calibrate): data, _ = data_iter_func() mrt.set_data(data) - mrt.calibrate(lambd=lambd, ctx=ctx) - dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) - if dump: + mrt.calibrate(lambd=args.lambd, ctx=ctx) + if not args.no_dump_calibrate: mrt.save(model_name_calib, datadir=model_dir) - logger.info("`%s` stage finished" % sec) + 
logger.info("calibrate stage finished") elif start_point == 3: - _checkpoint_exist( - sec, *list(utils.extend_fname( - model_prefix+".mrt.calibrate", with_ext=True))) + fpaths = utils.extend_fname(model_prefix) + for fpath in fpaths: + if not path.exists(fpath): + raise RuntimeError("file path {} not found".format(fpath)) mrt = MRT.load(model_name_calib, datadir=model_dir) if keys != "": - _checkpoint_exist(sec, sym_top_file, prm_top_file) + for fpath in [sym_top_file, prm_top_file]: + if not path.exists(fpath): + raise RuntimeError("file path {} not found".format(fpath)) top = Model.load(sym_top_file, prm_top_file) - logger.info("`%s` stage checkd" % sec) + logger.info("calibration stage checkd") + return + # TODO(ryt), calibration, quantization, merge_model # quantization sec = 'QUANTIZATION' model_name_quant = model_name + '.mrt.quantize' @@ -326,7 +335,6 @@ def mergefunc(node, params, graph): # evaluation sec = 'EVALUATION' if sec in cfg.sections(): - # dataset_dir = _get_val(cfg, sec, 'Dataset_dir', dval=conf.MRT_DATASET_ROOT) iter_num = _get_val(cfg, sec, 'Iter_num', dtype=int_t, dval=0) batch = _get_val(cfg, sec, 'Batch', dtype=int_t, dval=batch) ctx = _get_ctx(cfg, sec, dctx=model_ctx) From 95b313c653f548e89a8b314a3c9c84bd005a1eea Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 8 Sep 2021 17:27:00 +0800 Subject: [PATCH 012/120] upt main.py quantize --- main.py | 112 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 59 insertions(+), 53 deletions(-) diff --git a/main.py b/main.py index e5746120..3fb4e59a 100644 --- a/main.py +++ b/main.py @@ -3,15 +3,17 @@ import argparse from typing import Tuple, List, Union import logging +import json import mxnet as mx from mrt.conf import MRT_MODEL_ROOT, MRT_DATASET_ROOT from mrt.common import cmd, log, thread -from mrt.transformer import Model +from mrt.transformer import Model, MRT from mrt import utils from mrt.gluon_zoo import save_model from mrt import dataset as ds +from mrt import sym_utils as 
sutils # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) @@ -98,6 +100,19 @@ def set_batch(input_shape, batch): choices=["cpu", "gpu"]) @cmd.option("--device-ids-calibrate", nargs="+", type=int, default=[0]) @cmd.option("--no-dump-calibrate", action="store_false") +@cmd.option("--restore-names", nargs="+", type=str, default=[]) +@cmd.option("--input-precision", type=int) +@cmd.option("--output-precision", type=int) +@cmd.option("--device-type-quantize", type=str, default="cpu", + choices=["cpu", "gpu"]) +@cmd.option("--device-ids-quantize", nargs="+", type=int, default=[0]) +@cmd.option("--softmax-lambd", type=float) +@cmd.option("--shift-bits", type=int) +@cmd.option("--thresholds", type=str) +@cmd.option("--no-dump-quantize", action="store_false") +@cmd.option("--attribute-deps", type=str) +@cmd.option("--oscale-maps", type=str) +@cmd.option("--no-dump-mergemodel", action="store_false") @cmd.module("", as_main=True, description=""" CVM Python Tool @@ -117,7 +132,7 @@ def cvm_main(args): input_shape = args.input_shape start_pos = { 'default': 0, 'prepare': 1, 'split_model': 2, - 'calibration': 3, 'quantization': 4, 'merge_model': 5} + 'calibrate': 3, 'quantize': 4, 'merge_model': 5} start_point = start_pos[args.start] # prepare @@ -169,10 +184,9 @@ def cvm_main(args): shp = set_batch(input_shape, batch) dataset = ds.DS_REG[ds_name](shp, root=args.dataset_dir) data_iter_func = dataset.iter_func() - device_type_calibrate = args.device_type_calibrate - device_ids_calibrate = args.device_ids_calibrate ctx = get_ctx( - device_type_calibrate, device_ids_calibrate, dctx=model_ctx) + args.device_type_calibrate, args.device_ids_calibrate, + dctx=model_ctx) for i in range(args.num_calibrate): data, _ = data_iter_func() mrt.set_data(data) @@ -193,16 +207,13 @@ def cvm_main(args): top = Model.load(sym_top_file, prm_top_file) logger.info("calibration stage checkd") - return - # TODO(ryt), calibration, quantization, merge_model # quantization sec = 
'QUANTIZATION' model_name_quant = model_name + '.mrt.quantize' if start_point < 4: - restore_names = _get_val( - cfg, sec, 'Restore_name', dtype=ARRAY(str_t), dval=[]) + restore_names = args.restore_names name_to_op = {} - from sym_utils import topo_sort + from mrt.sym_utils import topo_sort for sym in topo_sort(mrt.current_model.symbol): name, op_name = sym.attr('name'), sym.attr('op_name') if op_name not in name_to_op: @@ -231,32 +242,24 @@ def cvm_main(args): restore_names = set(restore_names_new) for name in restore_names: mrt.set_restore(name) - input_precision = _get_val( - cfg, sec, 'Input_precision', dtype=int_t, dval=None) - if input_precision is not None: - mrt.set_input_prec(input_precision) - output_precision = _get_val( - cfg, sec, 'Output_precision', dtype=int_t, dval=None) - if output_precision is not None: - mrt.set_output_prec(output_precision) - ctx = _get_ctx(cfg, sec, dctx=model_ctx) - softmax_lambd = _get_val( - cfg, sec, 'Softmax_lambd', dtype=float_t, dval=None) - if softmax_lambd is not None: - mrt.set_softmax_lambd(softmax_lambd) - shift_bits = _get_val( - cfg, sec, 'Shift_bits', dtype=int_t, dval=None) - if shift_bits is not None: - mrt.set_shift_bits(shift_bits) - thresholds = _get_val( - cfg, sec, 'Thresholds', dtype=PAIR(str_t, float_t), dval=None) + if args.input_precision is not None: + mrt.set_input_prec(args.input_precision) + if args.output_precision is not None: + mrt.set_output_prec(args.output_precision) + ctx = get_ctx( + args.device_type_quantize, args.device_ids_quantize, + dctx=model_ctx) + if args.softmax_lambd is not None: + mrt.set_softmax_lambd(args.softmax_lambd) + if args.shift_bits is not None: + mrt.set_shift_bits(args.shift_bits) + thresholds = json.loads(args.thresholds) if thresholds is not None: for name, threshold in thresholds.items(): mrt.set_threshold(name, threshold) mrt.quantize() inputs_ext = mrt.get_inputs_ext() - dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) - if dump: + if not 
args.no_dump_quantize: mrt.save(model_name_quant, datadir=model_dir) oscales = mrt.get_output_scales() inputs_ext = mrt.get_inputs_ext() @@ -265,35 +268,37 @@ def cvm_main(args): 'input shapes: ', input_shape] ext_all_file = path.join(model_dir, model_name+".all.quantize.ext") sim.save_ext(ext_all_file, *infos) - logger.info("`%s` stage finished" % sec) + logger.info("quantization stage finished") elif start_point == 4: - _checkpoint_exist( - sec, *list(utils.extend_fname( - model_prefix+'.mrt.quantize', with_ext=True))) + fpaths = utils.extend_fname( + model_prefix+".mrt.quantize", with_ext=True) + for fpath in fpaths: + if not path.exists(fpath): + raise RuntimeError("file path {} not found".format(fpath)) mrt = MRT.load(model_name_quant, datadir=model_dir) inputs_ext = mrt.get_inputs_ext() - dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) if keys != "": - _checkpoint_exist(sec, sym_top_file, prm_top_file) + for fpath in [sym_top_file, prm_top_file]: + if not path.exists(fpath): + raise RuntimeError("file path {} not found".format(fpath)) top = Model.load(sym_top_file, prm_top_file) - logger.info("`%s` stage checkd" % sec) + logger.info("quantization stage checkd") + + # TODO(ryt), merge_model # merge_model sec = 'MERGE_MODEL' - sym_all_file, prm_all_file, ext_all_file = _load_fname( + sym_all_file, prm_all_file, ext_all_file = load_fname( model_prefix, suffix='all.quantize', with_ext=True) - if keys == '': - _check(start_point != 5, 'DEFAULT', 'Start', - message="Invalid start point") + if keys == "": qmodel = mrt.current_model oscales = mrt.get_output_scales() - logger.info("`%s` stage skipped" % sec) + logger.info("model merging stage skipped") elif start_point < 5: qmodel = mrt.current_model mrt_oscales = mrt.get_output_scales() model_merger = Model.merger(qmodel, top, mrt.get_maps()) - attribute_deps = _get_val( - cfg, sec, 'Attribute_deps', dtype=PAIR(str_t, str_t, str_t)) + attribute_deps = json.loads(args.attribute_deps) name_idx = 
{mrt.get_maps().get( s.attr("name"), s.attr("name")): i \ @@ -312,26 +317,27 @@ def mergefunc(node, params, graph): return node qmodel = model_merger.merge(callback=mergefunc) - oscale_maps = _get_val( - cfg, sec, 'Oscale_maps', dtype=PAIR(str_t, str_t)) + oscale_maps = json.loads(args.oscale_maps) oscales = model_merger.get_output_scales( mrt_oscales, oscale_maps) inputs_ext = mrt.get_inputs_ext() - dump = _get_val(cfg, sec, 'Dump', dtype=bool_t, dval=False) - if dump: + if not args.no_dump_mergemodel: qmodel.save(sym_all_file, prm_all_file) infos = ['oscales: ', oscales, 'input_ext: ', inputs_ext, 'input shapes: ', input_shape] sim.save_ext(ext_all_file, *infos) - logger.info("`%s` stage finished" % sec) + logger.info("model merging stage finished") else: - _check(start_point == 5, 'DEFAULT', 'Start', - message='Start_point invalid') + for fpath in [sym_all_file, prm_all_file]: + if not path.exists(fpath): + raise RuntimeError("file path {} not found".format(fpath)) qmodel = Model.load(sym_all_file, prm_all_file) _, oscales, _, inputs_ext, _, _ = sim.load_ext(ext_all_file) - logger.info("`%s` stage checked" % sec) + logger.info("model merging stage checked") + return + # TODO # evaluation sec = 'EVALUATION' if sec in cfg.sections(): From 8d003e3d0bacdc3fe53b5ad88ed5348bd1879dbc Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 8 Sep 2021 18:40:16 +0800 Subject: [PATCH 013/120] update main.py evaluate compile --- main.py | 123 ++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 80 insertions(+), 43 deletions(-) diff --git a/main.py b/main.py index 3fb4e59a..62667e35 100644 --- a/main.py +++ b/main.py @@ -6,14 +6,17 @@ import json import mxnet as mx +from mxnet import gluon, ndarray as nd +import numpy as np from mrt.conf import MRT_MODEL_ROOT, MRT_DATASET_ROOT from mrt.common import cmd, log, thread -from mrt.transformer import Model, MRT +from mrt.transformer import Model, MRT, reduce_graph from mrt import utils from mrt.gluon_zoo import 
save_model from mrt import dataset as ds from mrt import sym_utils as sutils +from mrt import sim_quant_helper as sim # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) @@ -38,6 +41,23 @@ def get_ctx(device_type, device_ids, dctx=mx.cpu()): else [mx.gpu(i) for i in device_ids] return contex +def batch_axis(input_shape): + """Get the batch axis entry of an input shape. + + Parameters + ---------- + input_shape : tuple + The data shape related to dataset. + + Returns + ------- + axis : int + The batch axis entry of an input shape. + """ + idx = [i for i, s in enumerate(input_shape) if s == -1] + assert len(idx) == 1 + return idx[0] + def load_fname(prefix, suffix=None, with_ext=False): """Get the model files at a given stage. @@ -85,8 +105,8 @@ def set_batch(input_shape, batch): choices=["none", "debug", "info", "warning", "error", "critical"]) @cmd.option("--input-shape", nargs="+", type=int, default=[-1, 3, 224, 224]) @cmd.option("--start", type=str, default="default", - choices=["default", "prepare", "split_model", - "calibrate", "quantize", "merge_model"]) + choices=["default", "prepare", "splitmodel", + "calibrate", "quantize", "mergemodel"]) @cmd.option("--no-dump-prepare", action="store_false") @cmd.option("--keys", nargs="+", type=str, default="") @cmd.option("--no-dump-splitmodel", action="store_false") @@ -113,6 +133,18 @@ def set_batch(input_shape, batch): @cmd.option("--attribute-deps", type=str) @cmd.option("--oscale-maps", type=str) @cmd.option("--no-dump-mergemodel", action="store_false") +@cmd.option("--evaluate", action="store_true") +@cmd.option("--batch-evaluate", type=int) +@cmd.option("--device-type-evaluate", type=str, default="cpu", + choices=["cpu", "gpu"]) +@cmd.option("--device-ids-evaluate", nargs="+", type=int, default=[0]) +@cmd.option("--num-iter", type=int, default=0) +@cmd.option("--compile", action="store_true") +@cmd.option("--batch-compile", type=int) +@cmd.option("--dump-dir", type=str, default="/data1/tmp") 
+@cmd.option("--device-type-compile", type=str, default="cpu", + choices=["cpu", "gpu"]) +@cmd.option("--device-ids-compile", nargs="+", type=int, default=[0]) @cmd.module("", as_main=True, description=""" CVM Python Tool @@ -131,8 +163,8 @@ def cvm_main(args): model_ctx = get_ctx(args.device_type_default, args.device_ids_default) input_shape = args.input_shape start_pos = { - 'default': 0, 'prepare': 1, 'split_model': 2, - 'calibrate': 3, 'quantize': 4, 'merge_model': 5} + 'default': 0, 'prepare': 1, 'splitmodel': 2, + 'calibrate': 3, 'quantize': 4, 'mergemodel': 5} start_point = start_pos[args.start] # prepare @@ -154,11 +186,15 @@ def cvm_main(args): model = Model.load(sym_file, prm_file) logger.info("preparation stage checked") - # split model + # splitmodel sym_top_file, prm_top_file = load_fname(model_prefix, suffix='top') sym_base_file, prm_base_file = load_fname(model_prefix, suffix='base') keys = args.keys if keys == "": + if start_point == 2: + raise RuntimeError( + "this model does not support model splitting stage" + + "please respecify --start flag") logger.info("model splitting stage skipped") elif start_point < 2: base, top = model.split(keys) @@ -184,6 +220,9 @@ def cvm_main(args): shp = set_batch(input_shape, batch) dataset = ds.DS_REG[ds_name](shp, root=args.dataset_dir) data_iter_func = dataset.iter_func() + if len(args.device_ids_calibrate) > 1: + raise RuntimeError( + "device ids should be an integer in calibration stage") ctx = get_ctx( args.device_type_calibrate, args.device_ids_calibrate, dctx=model_ctx) @@ -207,7 +246,7 @@ def cvm_main(args): top = Model.load(sym_top_file, prm_top_file) logger.info("calibration stage checkd") - # quantization + # quantize sec = 'QUANTIZATION' model_name_quant = model_name + '.mrt.quantize' if start_point < 4: @@ -253,8 +292,8 @@ def cvm_main(args): mrt.set_softmax_lambd(args.softmax_lambd) if args.shift_bits is not None: mrt.set_shift_bits(args.shift_bits) - thresholds = json.loads(args.thresholds) - if 
thresholds is not None: + if args.thresholds is not None: + thresholds = json.loads(args.thresholds) for name, threshold in thresholds.items(): mrt.set_threshold(name, threshold) mrt.quantize() @@ -284,13 +323,14 @@ def cvm_main(args): top = Model.load(sym_top_file, prm_top_file) logger.info("quantization stage checkd") - # TODO(ryt), merge_model - - # merge_model - sec = 'MERGE_MODEL' + # mergemodel sym_all_file, prm_all_file, ext_all_file = load_fname( model_prefix, suffix='all.quantize', with_ext=True) if keys == "": + if start_point == 5: + raise RuntimeError( + "this model does not support model merging stage" + + "please respecify --start flag") qmodel = mrt.current_model oscales = mrt.get_output_scales() logger.info("model merging stage skipped") @@ -336,14 +376,13 @@ def mergefunc(node, params, graph): _, oscales, _, inputs_ext, _, _ = sim.load_ext(ext_all_file) logger.info("model merging stage checked") - return - # TODO - # evaluation - sec = 'EVALUATION' - if sec in cfg.sections(): - iter_num = _get_val(cfg, sec, 'Iter_num', dtype=int_t, dval=0) - batch = _get_val(cfg, sec, 'Batch', dtype=int_t, dval=batch) - ctx = _get_ctx(cfg, sec, dctx=model_ctx) + # evaluate + if args.evaluate: + if args.batch_evaluate is not None: + batch = args.batch_evaluate + ctx = get_ctx( + args.device_type_evaluate, args.device_ids_evaluate, + dctx=model_ctx) if isinstance(ctx, mx.Context): ctx = [ctx] org_model = Model.load(sym_path, prm_path) @@ -373,9 +412,8 @@ def evalfunc(data, label): return acc ngpus = len(ctx) - _check( - not batch % ngpus, sec, 'Device_ids', - 'Batch must be divisible by the number of gpus') + if batch % ngpus: + raise RuntimeError("Batch must be divisible by the number of gpus") split_batch = batch//ngpus rqmodel = reduce_graph(qmodel, { 'data': set_batch(input_shape, split_batch)}) @@ -390,34 +428,33 @@ def quantize(data, label): acc = dataset.validate(qmetric, outs, label) return acc - if iter_num > 0: + if args.num_iter > 0: 
logger.info("Validating...") utils.multi_validate(evalfunc, data_iter_func, quantize, - iter_num=iter_num, + iter_num=args.num_iter, logger=logging.getLogger('mrt.validate'), batch_size=batch) - logger.info("`%s` stage finished" % sec) - - # compilation - sec = 'COMPILATION' - if sec in cfg.sections(): - dump_dir = _get_path( - cfg, sec, 'Dump_dir', is_dir=True, dpath=model_dir) - batch = _get_val(cfg, sec, 'Batch', dtype=int_t, dval=batch) - device_type = _get_val(cfg, sec, 'Device_type', dval='cpu') - device_ids = _get_val( - cfg, sec, 'Device_ids', - dtype=ARRAY(int_t), dval=0) + logger.info("evaluatation stage finished") + + # compile + if args.compile: + if args.batch_compile is not None: + batch = args.batch_compile model_name_tfm = model_name + "_cvm" - qmodel.to_cvm(model_name_tfm, datadir=dump_dir, + if len(args.device_ids_compile) > 1: + raise RuntimeError( + "device ids should be an integer in compilation stage") + device_ids_compile = args.device_ids_compile[0] + qmodel.to_cvm(model_name_tfm, datadir=args.dump_dir, input_shape=set_batch(input_shape, batch), - target=device_type, device_ids=device_ids) + target=args.device_type_compile, + device_ids=device_ids_compile) dataset = ds.DS_REG[ds_name](set_batch(input_shape, batch)) dump_data, _ = dataset.iter_func()() dump_data = sim.load_real_data( dump_data.astype("float64"), 'data', mrt.get_inputs_ext()) - model_root = path.join(dump_dir, model_name_tfm) + model_root = path.join(args.dump_dir, model_name_tfm) np.save(path.join(model_root, "data.npy"), dump_data.astype('int8').asnumpy()) infos = { @@ -426,10 +463,10 @@ def quantize(data, label): "input_shapes": input_shape, } sim.save_ext(path.join(model_root, "ext"), infos) - logger.info("`%s` stage finished" % sec) + logger.info("compilation stage finished") # starting processes - thread.start_services(args) + # thread.start_services(args) if __name__ == "__main__": logger = logging.getLogger("main") From b9398ce993fd9939e28cb374e6fc746128dfd96b Mon Sep 
17 00:00:00 2001 From: ryt Date: Sat, 11 Sep 2021 10:56:18 +0800 Subject: [PATCH 014/120] upt --- main.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/main.py b/main.py index 62667e35..157a6261 100644 --- a/main.py +++ b/main.py @@ -465,9 +465,6 @@ def quantize(data, label): sim.save_ext(path.join(model_root, "ext"), infos) logger.info("compilation stage finished") - # starting processes - # thread.start_services(args) - if __name__ == "__main__": logger = logging.getLogger("main") cmd.Run() From 13ba06eb0f2510e73b6a30d77bc612b28b4f6f42 Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 11 Sep 2021 17:44:52 +0800 Subject: [PATCH 015/120] seprate main.py mrt_prepare mrt_calibrate --- main.py | 295 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 166 insertions(+), 129 deletions(-) diff --git a/main.py b/main.py index 157a6261..91bfd83b 100644 --- a/main.py +++ b/main.py @@ -95,31 +95,135 @@ def set_batch(input_shape, batch): """ return [batch if s == -1 else s for s in input_shape] -# TODO: option string abbreviation -@cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) +def save_ext(fname, logger=logging, **info_map): + try: + info_s = json.dumps(info_map, indent=4) + except: + logger.error("Json seralize invalid with data: {}".format(info_map)) + with open(fname, "w") as f: + f.write(info_s) + +def load_ext(fname, logger=logging): + with open(fname, "r") as f: + try: + info_map = json.load(f) + except: + logger.error("Json deserialize invalid, fname: {}".format(fname)) + return info_map + @cmd.option("model_name", type=str) -@cmd.option("--device-type-default", type=str, default="cpu", - choices=["cpu", "gpu"]) -@cmd.option("--device-ids-default", nargs="+", type=int, default=[0]) +@cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) +@cmd.module("modelprefix") +def get_model_prefix(args): + model_dir = args.model_dir + if model_dir.startswith("~"): + model_dir = path.expanduser(model_dir) + model_name = args.model_name + 
assert path.exists(model_dir), \ + "model_dir: {} does not exist".format(model_dir) + model_prefix = path.join(model_dir, model_name) + return model_prefix + @cmd.option("--verbosity", type=str, default="debug", choices=["none", "debug", "info", "warning", "error", "critical"]) +@cmd.module("logger") +def get_logger(args): + log.Init(log.name2level(args.verbosity.upper())) + logger = logging.getLogger("log.main") + return logger + +@cmd.option("--device-type-prepare", type=str, default="cpu", + choices=["cpu", "gpu"]) +@cmd.option("--device-ids-prepare", nargs="+", type=int, default=[0]) @cmd.option("--input-shape", nargs="+", type=int, default=[-1, 3, 224, 224]) -@cmd.option("--start", type=str, default="default", - choices=["default", "prepare", "splitmodel", - "calibrate", "quantize", "mergemodel"]) -@cmd.option("--no-dump-prepare", action="store_false") -@cmd.option("--keys", nargs="+", type=str, default="") -@cmd.option("--no-dump-splitmodel", action="store_false") +@cmd.option("--split-keys", nargs="+", type=str, default="") +@cmd.module("prepare", as_main=True, refs=["modelprefix", "logger"], + description=""" +MRT Python Tool: preparation stage +""") +def mrt_prepare(args): + model_dir = args.model_dir + model_prefix = get_model_prefix(args) + logger = get_logger(args) + input_shape = args.input_shape + + # preparation + sym_path, prm_path = load_fname(model_prefix) + if not path.exists(sym_path) or not path.exists(prm_path): + save_model( + args.model_name, data_dir=model_dir, + ctx=get_ctx(args.device_type_prepare, args.device_ids_prepare)) + model = Model.load(sym_path, prm_path) + model.prepare(set_batch(input_shape, 1)) + sym_file, prm_file, ext_file = load_fname( + model_prefix, suffix="prepare", with_ext=True) + model.save(sym_file, prm_file) + save_ext( + ext_file, logger=logger, input_shape=input_shape) + logger.info("preparation stage finihed") + + # model splitting + split_keys = args.split_keys + if split_keys: + sym_top_file, prm_top_file = 
load_fname(model_prefix, suffix='top') + sym_base_file, prm_base_file = load_fname( + model_prefix, suffix="base") + base, top = model.split(split_keys) + top.save(sym_top_file, prm_top_file) + base.save(sym_base_file, prm_base_file) + save_ext( + ext_file, logger=logger, input_shape=input_shape, + split_keys=split_keys) + logger.info("model splitting finished") + else: + logger.info("model splitting skipped") + @cmd.option("--batch-calibrate", type=int, default=16) @cmd.option("--num-calibrate", type=int, default=1) @cmd.option("--lambd", type=int) -@cmd.option("--dataset", type=str, default="imagenet", +@cmd.option("--dataset-name", type=str, default="imagenet", choices=list(ds.DS_REG.keys())) @cmd.option("--dataset-dir", type=str, default=MRT_DATASET_ROOT) @cmd.option("--device-type-calibrate", type=str, default="cpu", choices=["cpu", "gpu"]) @cmd.option("--device-ids-calibrate", nargs="+", type=int, default=[0]) -@cmd.option("--no-dump-calibrate", action="store_false") +@cmd.module("calibrate", as_main=True, refs=["modelprefix", "logger"], + description=""" +MRT Python Tool: calibration stage +""") +def mrt_calibrate(args): + model_prefix = get_model_prefix(args) + logger = get_logger(args) + _, _, ext_prepare_file = load_fname( + model_prefix, suffix="prepare", with_ext=True) + info_map = load_ext(ext_prepare_file, logger=logger) + dataset_name = args.dataset_name + + if info_map.get("split_keys", "") == "": + sym_file, prm_file = load_fname(model_prefix, suffix="prepare") + mrt = Model.load(sym_file, prm_file).get_mrt() + else: + sym_base_file, prm_base_file = load_fname( + model_prefix, suffix="base") + mrt = Model.load(sym_base_file, prm_base_file).get_mrt() + shp = set_batch(info_map["input_shape"], args.batch_calibrate) + dataset = ds.DS_REG[dataset_name](shp, root=args.dataset_dir) + data_iter_func = dataset.iter_func() + if len(args.device_ids_calibrate) > 1: + raise RuntimeError( + "device ids should be an integer in calibration stage") + ctx = 
get_ctx(args.device_type_calibrate, args.device_ids_calibrate) + for i in range(args.num_calibrate): + data, _ = data_iter_func() + mrt.set_data(data) + mrt.calibrate(lambd=args.lambd, ctx=ctx) + mrt.save(args.model_name+".mrt.calibrate", datadir=args.model_dir) + _, _, ext_file = load_fname( + model_prefix, suffix="mrt.calibrate", with_ext=True) + info_map["dataset_name"] = dataset_name + save_ext(ext_file, logger=logger, **info_map) + logger.info("calibrate stage finished") + @cmd.option("--restore-names", nargs="+", type=str, default=[]) @cmd.option("--input-precision", type=int) @cmd.option("--output-precision", type=int) @@ -129,125 +233,14 @@ def set_batch(input_shape, batch): @cmd.option("--softmax-lambd", type=float) @cmd.option("--shift-bits", type=int) @cmd.option("--thresholds", type=str) -@cmd.option("--no-dump-quantize", action="store_false") @cmd.option("--attribute-deps", type=str) @cmd.option("--oscale-maps", type=str) -@cmd.option("--no-dump-mergemodel", action="store_false") -@cmd.option("--evaluate", action="store_true") -@cmd.option("--batch-evaluate", type=int) -@cmd.option("--device-type-evaluate", type=str, default="cpu", - choices=["cpu", "gpu"]) -@cmd.option("--device-ids-evaluate", nargs="+", type=int, default=[0]) -@cmd.option("--num-iter", type=int, default=0) -@cmd.option("--compile", action="store_true") -@cmd.option("--batch-compile", type=int) -@cmd.option("--dump-dir", type=str, default="/data1/tmp") -@cmd.option("--device-type-compile", type=str, default="cpu", - choices=["cpu", "gpu"]) -@cmd.option("--device-ids-compile", nargs="+", type=int, default=[0]) -@cmd.module("", as_main=True, +@cmd.module("quantize", as_main=True, description=""" -CVM Python Tool +MRT Python Tool: quantization stage """) -def cvm_main(args): - # default - log.Init(log.name2level(args.verbosity.upper())) - logger = logging.getLogger("log.main") - model_dir = args.model_dir - if model_dir.startswith("~"): - model_dir = path.expanduser(model_dir) - assert 
path.exists(model_dir), \ - "Please create the folder `data` first" - model_name = args.model_name - model_prefix = path.join(model_dir, model_name) - model_ctx = get_ctx(args.device_type_default, args.device_ids_default) - input_shape = args.input_shape - start_pos = { - 'default': 0, 'prepare': 1, 'splitmodel': 2, - 'calibrate': 3, 'quantize': 4, 'mergemodel': 5} - start_point = start_pos[args.start] - - # prepare - sym_file, prm_file = load_fname(model_prefix, suffix='prepare') - sym_path, prm_path = load_fname(model_prefix) - if not path.exists(sym_path) or not path.exists(prm_path): - save_model(model_name, data_dir=model_dir, ctx=model_ctx) - - if start_point < 1: - model = Model.load(sym_path, prm_path) - model.prepare(set_batch(input_shape, 1)) - if not args.no_dump_prepare: - model.save(sym_file, prm_file) - logger.info("preparation stage finihed") - elif start_point == 1: - for fpath in [sym_file, prm_file]: - if not path.exists(fpath): - raise RuntimeError("file path {} not found".format(fpath)) - model = Model.load(sym_file, prm_file) - logger.info("preparation stage checked") - - # splitmodel - sym_top_file, prm_top_file = load_fname(model_prefix, suffix='top') - sym_base_file, prm_base_file = load_fname(model_prefix, suffix='base') - keys = args.keys - if keys == "": - if start_point == 2: - raise RuntimeError( - "this model does not support model splitting stage" + - "please respecify --start flag") - logger.info("model splitting stage skipped") - elif start_point < 2: - base, top = model.split(keys) - if not args.no_dump_splitmodel: - top.save(sym_top_file, prm_top_file) - base.save(sym_base_file, prm_base_file) - logger.info("model splitting stage finished") - elif start_point == 2: - for fpath in \ - [sym_top_file, prm_top_file, sym_base_file, prm_base_file]: - if not path.exists(fpath): - raise RuntimeError("file path {} not found".format(fpath)) - top = Model.load(sym_top_file, prm_top_file) - base = Model.load(sym_base_file, prm_base_file) - 
logger.info("model splitting stage checked") - - # calibrate - model_name_calib = model_name + '.mrt.calibrate' - batch = args.batch_calibrate - ds_name = args.dataset - if start_point < 3: - mrt = model.get_mrt() if keys == '' else base.get_mrt() - shp = set_batch(input_shape, batch) - dataset = ds.DS_REG[ds_name](shp, root=args.dataset_dir) - data_iter_func = dataset.iter_func() - if len(args.device_ids_calibrate) > 1: - raise RuntimeError( - "device ids should be an integer in calibration stage") - ctx = get_ctx( - args.device_type_calibrate, args.device_ids_calibrate, - dctx=model_ctx) - for i in range(args.num_calibrate): - data, _ = data_iter_func() - mrt.set_data(data) - mrt.calibrate(lambd=args.lambd, ctx=ctx) - if not args.no_dump_calibrate: - mrt.save(model_name_calib, datadir=model_dir) - logger.info("calibrate stage finished") - elif start_point == 3: - fpaths = utils.extend_fname(model_prefix) - for fpath in fpaths: - if not path.exists(fpath): - raise RuntimeError("file path {} not found".format(fpath)) - mrt = MRT.load(model_name_calib, datadir=model_dir) - if keys != "": - for fpath in [sym_top_file, prm_top_file]: - if not path.exists(fpath): - raise RuntimeError("file path {} not found".format(fpath)) - top = Model.load(sym_top_file, prm_top_file) - logger.info("calibration stage checkd") - +def mrt_quantize(args): # quantize - sec = 'QUANTIZATION' model_name_quant = model_name + '.mrt.quantize' if start_point < 4: restore_names = args.restore_names @@ -376,7 +369,16 @@ def mergefunc(node, params, graph): _, oscales, _, inputs_ext, _, _ = sim.load_ext(ext_all_file) logger.info("model merging stage checked") - # evaluate +@cmd.option("--batch-evaluate", type=int) +@cmd.option("--device-type-evaluate", type=str, default="cpu", + choices=["cpu", "gpu"]) +@cmd.option("--device-ids-evaluate", nargs="+", type=int, default=[0]) +@cmd.option("--num-iter", type=int, default=0) +@cmd.module("evaluate", as_main=True, + description=""" +MRT Python Tool: 
quantization stage +""") +def mrt_evaluate(args): if args.evaluate: if args.batch_evaluate is not None: batch = args.batch_evaluate @@ -436,7 +438,16 @@ def quantize(data, label): batch_size=batch) logger.info("evaluatation stage finished") - # compile +@cmd.option("--batch-compile", type=int) +@cmd.option("--dump-dir", type=str, default="/data1/tmp") +@cmd.option("--device-type-compile", type=str, default="cpu", + choices=["cpu", "gpu"]) +@cmd.option("--device-ids-compile", nargs="+", type=int, default=[0]) +@cmd.module("compile", as_main=True, + description=""" +MRT Python Tool: compilation stage +""") +def mrt_compile(args): if args.compile: if args.batch_compile is not None: batch = args.batch_compile @@ -465,6 +476,32 @@ def quantize(data, label): sim.save_ext(path.join(model_root, "ext"), infos) logger.info("compilation stage finished") +@cmd.option("--start-after", type=str, + choices=["prepare", "calibrate", "quantize"]) +@cmd.option("--evaluate", action="store_true") +@cmd.option("--compile", action="store_true") +@cmd.module("main", as_main=True, + refs=["prepare", "calibrate", "quantize", + "evaluate", "compile"], + description=""" +MRT Python Tool +""") +def main(args): + start_pos = 0 + start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} + if args.start_after in start_pos_map: + start_pos = start_pos_map[args.start_after] + if start_pos < 1: + mrt_prepare(args) + if start_pos < 2: + mrt_calibrate(args) + if start_pos < 3: + mrt_quantize(args) + if args.evaluate: + mrt_evaluate(args) + if args.compile: + mrt_compile(args) + if __name__ == "__main__": logger = logging.getLogger("main") cmd.Run() From e01660bc47678d1a360e4955844da17b28fd5dd7 Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 11 Sep 2021 19:06:26 +0800 Subject: [PATCH 016/120] separate main.py mrt_quantize (not merge yet) --- main.py | 216 +++++++++++++++++++++++++++----------------------------- 1 file changed, 104 insertions(+), 112 deletions(-) diff --git a/main.py b/main.py index 
91bfd83b..cd24858e 100644 --- a/main.py +++ b/main.py @@ -12,6 +12,7 @@ from mrt.conf import MRT_MODEL_ROOT, MRT_DATASET_ROOT from mrt.common import cmd, log, thread from mrt.transformer import Model, MRT, reduce_graph +from mrt.sym_utils import topo_sort from mrt import utils from mrt.gluon_zoo import save_model from mrt import dataset as ds @@ -95,21 +96,27 @@ def set_batch(input_shape, batch): """ return [batch if s == -1 else s for s in input_shape] -def save_ext(fname, logger=logging, **info_map): +def save_conf(fname, logger=logging, **conf_map): try: - info_s = json.dumps(info_map, indent=4) + info_s = json.dumps(conf_map, indent=4) except: - logger.error("Json seralize invalid with data: {}".format(info_map)) + logger.error("Json seralize invalid with data: {}".format(conf_map)) with open(fname, "w") as f: f.write(info_s) -def load_ext(fname, logger=logging): +def load_conf(fname, logger=logging): with open(fname, "r") as f: try: - info_map = json.load(f) + conf_map = json.load(f) except: logger.error("Json deserialize invalid, fname: {}".format(fname)) - return info_map + return conf_map + +def check_file_existance(*fpaths, logger=logging): + for fpath in fpaths: + if not path.exists(fpath): + logger.error("fpath: {} does not exist".format(fpath)) + raise FileNotFoundError @cmd.option("model_name", type=str) @cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) @@ -142,24 +149,24 @@ def get_logger(args): MRT Python Tool: preparation stage """) def mrt_prepare(args): - model_dir = args.model_dir model_prefix = get_model_prefix(args) logger = get_logger(args) - input_shape = args.input_shape + conf_prep_file = model_prefix + ".prepare.conf" + conf_map = {} # preparation sym_path, prm_path = load_fname(model_prefix) if not path.exists(sym_path) or not path.exists(prm_path): save_model( - args.model_name, data_dir=model_dir, + args.model_name, data_dir=args.model_dir, ctx=get_ctx(args.device_type_prepare, args.device_ids_prepare)) model = 
Model.load(sym_path, prm_path) - model.prepare(set_batch(input_shape, 1)) - sym_file, prm_file, ext_file = load_fname( - model_prefix, suffix="prepare", with_ext=True) - model.save(sym_file, prm_file) - save_ext( - ext_file, logger=logger, input_shape=input_shape) + model.prepare(set_batch(args.input_shape, 1)) + sym_prep_file, prm_prep_file = load_fname( + model_prefix, suffix="prepare") + model.save(sym_prep_file, prm_prep_file) + conf_map["input_shape"] = args.input_shape + save_conf(conf_prep_file, logger=logger, **conf_map) logger.info("preparation stage finihed") # model splitting @@ -171,9 +178,8 @@ def mrt_prepare(args): base, top = model.split(split_keys) top.save(sym_top_file, prm_top_file) base.save(sym_base_file, prm_base_file) - save_ext( - ext_file, logger=logger, input_shape=input_shape, - split_keys=split_keys) + conf_map["split_keys"] = split_keys + save_conf(conf_prep_file, logger=logger, **conf_map) logger.info("model splitting finished") else: logger.info("model splitting skipped") @@ -194,20 +200,23 @@ def mrt_prepare(args): def mrt_calibrate(args): model_prefix = get_model_prefix(args) logger = get_logger(args) - _, _, ext_prepare_file = load_fname( - model_prefix, suffix="prepare", with_ext=True) - info_map = load_ext(ext_prepare_file, logger=logger) - dataset_name = args.dataset_name - - if info_map.get("split_keys", "") == "": - sym_file, prm_file = load_fname(model_prefix, suffix="prepare") + conf_prep_file = model_prefix + ".prepare.conf" + check_file_existance(conf_prep_file, logger=logger) + conf_map = load_conf(conf_prep_file, logger=logger) + + # calibration + if conf_map.get("split_keys", "") == "": + sym_prep_file, prm_prep_file = load_fname( + model_prefix, suffix="prepare") + check_file_existance(sym_prep_file, prm_prep_file, logger=logger) mrt = Model.load(sym_file, prm_file).get_mrt() else: sym_base_file, prm_base_file = load_fname( model_prefix, suffix="base") + check_file_existance(sym_base_file, prm_base_file, logger=logger) 
mrt = Model.load(sym_base_file, prm_base_file).get_mrt() - shp = set_batch(info_map["input_shape"], args.batch_calibrate) - dataset = ds.DS_REG[dataset_name](shp, root=args.dataset_dir) + shp = set_batch(conf_map["input_shape"], args.batch_calibrate) + dataset = ds.DS_REG[args.dataset_name](shp, root=args.dataset_dir) data_iter_func = dataset.iter_func() if len(args.device_ids_calibrate) > 1: raise RuntimeError( @@ -218,10 +227,8 @@ def mrt_calibrate(args): mrt.set_data(data) mrt.calibrate(lambd=args.lambd, ctx=ctx) mrt.save(args.model_name+".mrt.calibrate", datadir=args.model_dir) - _, _, ext_file = load_fname( - model_prefix, suffix="mrt.calibrate", with_ext=True) - info_map["dataset_name"] = dataset_name - save_ext(ext_file, logger=logger, **info_map) + conf_map["dataset_name"] = args.dataset_name + save_conf(model_prefix+".mrt.calibrate.conf", logger=logger, **conf_map) logger.info("calibrate stage finished") @cmd.option("--restore-names", nargs="+", type=str, default=[]) @@ -235,86 +242,78 @@ def mrt_calibrate(args): @cmd.option("--thresholds", type=str) @cmd.option("--attribute-deps", type=str) @cmd.option("--oscale-maps", type=str) -@cmd.module("quantize", as_main=True, +@cmd.module("quantize", as_main=True, refs=["modelprefix", "logger"], description=""" MRT Python Tool: quantization stage """) def mrt_quantize(args): - # quantize - model_name_quant = model_name + '.mrt.quantize' - if start_point < 4: - restore_names = args.restore_names - name_to_op = {} - from mrt.sym_utils import topo_sort + model_prefix = get_model_prefix(args) + logger = get_logger(args) + conf_calib_file = model_prefix + ".mrt.calibrate.conf" + check_file_existance(conf_calib_file, logger=logger) + conf_map = load_conf(conf_calib_file, logger=logger) + sym_calib_file, prm_calib_file, ext_calib_file = load_fname( + model_prefix, suffix="mrt.calibrate", with_ext=True) + check_file_existance( + sym_calib_file, prm_calib_file, ext_calib_file, logger=logger) + mrt = 
MRT.load(args.model_name+".mrt.calibrate", datadir=args.model_dir) + + # restoration configuration + restore_names = args.restore_names + name_to_op = {} + for sym in topo_sort(mrt.current_model.symbol): + name, op_name = sym.attr('name'), sym.attr('op_name') + if op_name not in name_to_op: + name_to_op[op_name] = [] + name_to_op[op_name].append(name) + new_names = [] + for name in restore_names: + if name.startswith("_OP_") and name[4:] in name_to_op: + for new_name in name_to_op[name[4:]]: + new_names.append(new_name) + else: + new_names.append(name) + restore_names = set(new_names) + if '_ALL_EXCEPT_' in restore_names: + from tfm_base import _pass_manager + from tfm_ops import disabled_restore_ops + + quantize_ops = [op_name for op_name in _pass_manager["quantize"] \ + if op_name not in disabled_restore_ops] + restore_names_new = [] for sym in topo_sort(mrt.current_model.symbol): name, op_name = sym.attr('name'), sym.attr('op_name') - if op_name not in name_to_op: - name_to_op[op_name] = [] - name_to_op[op_name].append(name) - new_names = [] - for name in restore_names: - if name.startswith("_OP_") and name[4:] in name_to_op: - for new_name in name_to_op[name[4:]]: - new_names.append(new_name) - else: - new_names.append(name) - restore_names = set(new_names) - if '_ALL_EXCEPT_' in restore_names: - from tfm_base import _pass_manager - from tfm_ops import disabled_restore_ops - - quantize_ops = [op_name for op_name in _pass_manager["quantize"] \ - if op_name not in disabled_restore_ops] - restore_names_new = [] - for sym in topo_sort(mrt.current_model.symbol): - name, op_name = sym.attr('name'), sym.attr('op_name') - if op_name in quantize_ops and \ - name not in restore_names: - restore_names_new.append(name) - restore_names = set(restore_names_new) - for name in restore_names: - mrt.set_restore(name) - if args.input_precision is not None: - mrt.set_input_prec(args.input_precision) - if args.output_precision is not None: - 
mrt.set_output_prec(args.output_precision) - ctx = get_ctx( - args.device_type_quantize, args.device_ids_quantize, - dctx=model_ctx) - if args.softmax_lambd is not None: - mrt.set_softmax_lambd(args.softmax_lambd) - if args.shift_bits is not None: - mrt.set_shift_bits(args.shift_bits) - if args.thresholds is not None: - thresholds = json.loads(args.thresholds) - for name, threshold in thresholds.items(): - mrt.set_threshold(name, threshold) - mrt.quantize() - inputs_ext = mrt.get_inputs_ext() - if not args.no_dump_quantize: - mrt.save(model_name_quant, datadir=model_dir) - oscales = mrt.get_output_scales() - inputs_ext = mrt.get_inputs_ext() - infos = ['oscales: ', oscales, - 'input_ext: ', inputs_ext, - 'input shapes: ', input_shape] - ext_all_file = path.join(model_dir, model_name+".all.quantize.ext") - sim.save_ext(ext_all_file, *infos) - logger.info("quantization stage finished") - elif start_point == 4: - fpaths = utils.extend_fname( - model_prefix+".mrt.quantize", with_ext=True) - for fpath in fpaths: - if not path.exists(fpath): - raise RuntimeError("file path {} not found".format(fpath)) - mrt = MRT.load(model_name_quant, datadir=model_dir) - inputs_ext = mrt.get_inputs_ext() - if keys != "": - for fpath in [sym_top_file, prm_top_file]: - if not path.exists(fpath): - raise RuntimeError("file path {} not found".format(fpath)) - top = Model.load(sym_top_file, prm_top_file) - logger.info("quantization stage checkd") + if op_name in quantize_ops and \ + name not in restore_names: + restore_names_new.append(name) + restore_names = set(restore_names_new) + for name in restore_names: + mrt.set_restore(name) + + # hyper parameters configuration + if args.input_precision is not None: + mrt.set_input_prec(args.input_precision) + if args.output_precision is not None: + mrt.set_output_prec(args.output_precision) + ctx = get_ctx(args.device_type_quantize, args.device_ids_quantize) + if args.softmax_lambd is not None: + mrt.set_softmax_lambd(args.softmax_lambd) + if 
args.shift_bits is not None: + mrt.set_shift_bits(args.shift_bits) + if args.thresholds is not None: + thresholds = json.loads(args.thresholds) + for name, threshold in thresholds.items(): + mrt.set_threshold(name, threshold) + + # quantization + mrt.quantize() + mrt.save(args.model_name + ".mrt.quantize", datadir=args.model_dir) + conf_map["oscales"] = mrt.get_output_scales() + conf_map["inputs_ext"] = mrt.get_inputs_ext() + _, _, ext_quant_file = load_fname( + model_prefix, suffix="mrt.quantize", with_ext=True) + save_conf(ext_quant_file, logger=logger, **conf_map) + logger.info("quantization stage finished") # mergemodel sym_all_file, prm_all_file, ext_all_file = load_fname( @@ -361,13 +360,6 @@ def mergefunc(node, params, graph): 'input shapes: ', input_shape] sim.save_ext(ext_all_file, *infos) logger.info("model merging stage finished") - else: - for fpath in [sym_all_file, prm_all_file]: - if not path.exists(fpath): - raise RuntimeError("file path {} not found".format(fpath)) - qmodel = Model.load(sym_all_file, prm_all_file) - _, oscales, _, inputs_ext, _, _ = sim.load_ext(ext_all_file) - logger.info("model merging stage checked") @cmd.option("--batch-evaluate", type=int) @cmd.option("--device-type-evaluate", type=str, default="cpu", From abce6ebdff728a2a9467dc089cdaa5e354e0dd74 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 13 Sep 2021 11:50:32 +0800 Subject: [PATCH 017/120] seperate main.py mrt_quantize (with merge) --- main.py | 69 ++++++++++++++++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/main.py b/main.py index cd24858e..e6c93c31 100644 --- a/main.py +++ b/main.py @@ -307,36 +307,36 @@ def mrt_quantize(args): # quantization mrt.quantize() - mrt.save(args.model_name + ".mrt.quantize", datadir=args.model_dir) - conf_map["oscales"] = mrt.get_output_scales() - conf_map["inputs_ext"] = mrt.get_inputs_ext() - _, _, ext_quant_file = load_fname( - model_prefix, suffix="mrt.quantize", with_ext=True) - 
save_conf(ext_quant_file, logger=logger, **conf_map) + mrt.save(args.model_name+".mrt.quantize", datadir=args.model_dir) + input_shape = conf_map["input_shape"] + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + infos = ["oscales: ", oscales, + "input_ext: ", inputs_ext, + "input shapes: ", input_shape] + ext_mrt_file = path.join( + args.model_dir, args.model_name+".mrt.quantize.ext") + sim.save_ext(ext_mrt_file, *infos) + save_conf(model_prefix+".mrt.quantize.conf", logger=logger, **conf_map) logger.info("quantization stage finished") - # mergemodel sym_all_file, prm_all_file, ext_all_file = load_fname( - model_prefix, suffix='all.quantize', with_ext=True) - if keys == "": - if start_point == 5: - raise RuntimeError( - "this model does not support model merging stage" + - "please respecify --start flag") - qmodel = mrt.current_model - oscales = mrt.get_output_scales() - logger.info("model merging stage skipped") - elif start_point < 5: + model_prefix, suffix="all.quantize", with_ext=True) + + # mergemodel + split_keys = conf_map["split_keys"] + if split_keys: qmodel = mrt.current_model - mrt_oscales = mrt.get_output_scales() - model_merger = Model.merger(qmodel, top, mrt.get_maps()) + if args.attribute_deps is None: + logger.error("model merging, please specify --attribute_deps") + raise RuntimeError attribute_deps = json.loads(args.attribute_deps) - + mrt_oscales = mrt.get_output_scales() name_idx = {mrt.get_maps().get( s.attr("name"), s.attr("name")): i \ for i, s in enumerate(qmodel.symbol)} def mergefunc(node, params, graph): - name, op_name = node.attr('name'), node.attr('op_name') + name, op_name = node.attr("name"), node.attr("op_name") childs, attr = sutils.sym_iter( node.get_children()), node.list_attr() if op_name in attribute_deps: @@ -347,19 +347,28 @@ def mergefunc(node, params, graph): node = sutils.get_mxnet_op(op_name)( *childs, **attr, name=name) return node - + sym_top_file, prm_top_file = load_fname(model_prefix, 
suffix="top") + check_file_existance(sym_top_file) + check_file_existance(prm_top_file) + top = Model.load(sym_top_file, prm_top_file) + model_merger = Model.merger(qmodel, top, mrt.get_maps()) qmodel = model_merger.merge(callback=mergefunc) + if args.oscale_maps is None: + logger.error("model merging, please specify --oscale_maps") + raise RuntimeError oscale_maps = json.loads(args.oscale_maps) oscales = model_merger.get_output_scales( mrt_oscales, oscale_maps) - inputs_ext = mrt.get_inputs_ext() - if not args.no_dump_mergemodel: - qmodel.save(sym_all_file, prm_all_file) - infos = ['oscales: ', oscales, - 'input_ext: ', inputs_ext, - 'input shapes: ', input_shape] - sim.save_ext(ext_all_file, *infos) - logger.info("model merging stage finished") + qmodel.save(sym_all_file, prm_all_file) + infos = ['oscales: ', oscales, + 'input_ext: ', inputs_ext, + 'input shapes: ', input_shape] + sim.save_ext(ext_all_file, *infos) + logger.info("model merging finished") + else: + mrt.save(args.model_name+".all.quantize", datadir=args.model_dir) + sim.save_ext(ext_all_file, *infos) + logger.info("model merging skipped") @cmd.option("--batch-evaluate", type=int) @cmd.option("--device-type-evaluate", type=str, default="cpu", From 1ca2573c7cdf3a5633535bff63ac6da8d2137c75 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 13 Sep 2021 11:56:38 +0800 Subject: [PATCH 018/120] upt --- main.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/main.py b/main.py index e6c93c31..69d6ad03 100644 --- a/main.py +++ b/main.py @@ -320,9 +320,6 @@ def mrt_quantize(args): save_conf(model_prefix+".mrt.quantize.conf", logger=logger, **conf_map) logger.info("quantization stage finished") - sym_all_file, prm_all_file, ext_all_file = load_fname( - model_prefix, suffix="all.quantize", with_ext=True) - # mergemodel split_keys = conf_map["split_keys"] if split_keys: @@ -359,6 +356,8 @@ def mergefunc(node, params, graph): oscale_maps = json.loads(args.oscale_maps) oscales = 
model_merger.get_output_scales( mrt_oscales, oscale_maps) + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) qmodel.save(sym_all_file, prm_all_file) infos = ['oscales: ', oscales, 'input_ext: ', inputs_ext, @@ -366,8 +365,6 @@ def mergefunc(node, params, graph): sim.save_ext(ext_all_file, *infos) logger.info("model merging finished") else: - mrt.save(args.model_name+".all.quantize", datadir=args.model_dir) - sim.save_ext(ext_all_file, *infos) logger.info("model merging skipped") @cmd.option("--batch-evaluate", type=int) From 1a40de34235dd7d152f04738cf2cb07807420f1a Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 13 Sep 2021 17:22:22 +0800 Subject: [PATCH 019/120] seperate main.py evaluate --- main.py | 189 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 103 insertions(+), 86 deletions(-) diff --git a/main.py b/main.py index 69d6ad03..89b2d686 100644 --- a/main.py +++ b/main.py @@ -115,8 +115,7 @@ def load_conf(fname, logger=logging): def check_file_existance(*fpaths, logger=logging): for fpath in fpaths: if not path.exists(fpath): - logger.error("fpath: {} does not exist".format(fpath)) - raise FileNotFoundError + raise FileNotFoundError("fpath: {} does not exist".format(fpath)) @cmd.option("model_name", type=str) @cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) @@ -185,7 +184,7 @@ def mrt_prepare(args): logger.info("model splitting skipped") @cmd.option("--batch-calibrate", type=int, default=16) -@cmd.option("--num-calibrate", type=int, default=1) +@cmd.option("--calibrate-num", type=int, default=1) @cmd.option("--lambd", type=int) @cmd.option("--dataset-name", type=str, default="imagenet", choices=list(ds.DS_REG.keys())) @@ -209,7 +208,7 @@ def mrt_calibrate(args): sym_prep_file, prm_prep_file = load_fname( model_prefix, suffix="prepare") check_file_existance(sym_prep_file, prm_prep_file, logger=logger) - mrt = Model.load(sym_file, prm_file).get_mrt() + mrt = 
Model.load(sym_prep_file, prm_prep_file).get_mrt() else: sym_base_file, prm_base_file = load_fname( model_prefix, suffix="base") @@ -222,13 +221,13 @@ def mrt_calibrate(args): raise RuntimeError( "device ids should be an integer in calibration stage") ctx = get_ctx(args.device_type_calibrate, args.device_ids_calibrate) - for i in range(args.num_calibrate): + for i in range(args.calibrate_num): data, _ = data_iter_func() mrt.set_data(data) mrt.calibrate(lambd=args.lambd, ctx=ctx) mrt.save(args.model_name+".mrt.calibrate", datadir=args.model_dir) conf_map["dataset_name"] = args.dataset_name - save_conf(model_prefix+".mrt.calibrate.conf", logger=logger, **conf_map) + save_conf(model_prefix+".calibrate.conf", logger=logger, **conf_map) logger.info("calibrate stage finished") @cmd.option("--restore-names", nargs="+", type=str, default=[]) @@ -249,7 +248,7 @@ def mrt_calibrate(args): def mrt_quantize(args): model_prefix = get_model_prefix(args) logger = get_logger(args) - conf_calib_file = model_prefix + ".mrt.calibrate.conf" + conf_calib_file = model_prefix + ".calibrate.conf" check_file_existance(conf_calib_file, logger=logger) conf_map = load_conf(conf_calib_file, logger=logger) sym_calib_file, prm_calib_file, ext_calib_file = load_fname( @@ -257,6 +256,7 @@ def mrt_quantize(args): check_file_existance( sym_calib_file, prm_calib_file, ext_calib_file, logger=logger) mrt = MRT.load(args.model_name+".mrt.calibrate", datadir=args.model_dir) + conf_quant_file = model_prefix + ".quantize.conf" # restoration configuration restore_names = args.restore_names @@ -311,22 +311,17 @@ def mrt_quantize(args): input_shape = conf_map["input_shape"] oscales = mrt.get_output_scales() inputs_ext = mrt.get_inputs_ext() - infos = ["oscales: ", oscales, - "input_ext: ", inputs_ext, - "input shapes: ", input_shape] - ext_mrt_file = path.join( - args.model_dir, args.model_name+".mrt.quantize.ext") - sim.save_ext(ext_mrt_file, *infos) - save_conf(model_prefix+".mrt.quantize.conf", 
logger=logger, **conf_map) + infos = [oscales, inputs_ext] + ext_all_file = model_prefix + ".all.quantize.ext" + sim.save_ext(ext_all_file, *infos) + save_conf(conf_quant_file, logger=logger, **conf_map) logger.info("quantization stage finished") # mergemodel - split_keys = conf_map["split_keys"] - if split_keys: + if conf_map.get("split_keys", "") != "": qmodel = mrt.current_model if args.attribute_deps is None: - logger.error("model merging, please specify --attribute_deps") - raise RuntimeError + raise RuntimeError("model merging, please specify --attribute_deps") attribute_deps = json.loads(args.attribute_deps) mrt_oscales = mrt.get_output_scales() name_idx = {mrt.get_maps().get( @@ -345,24 +340,21 @@ def mergefunc(node, params, graph): *childs, **attr, name=name) return node sym_top_file, prm_top_file = load_fname(model_prefix, suffix="top") - check_file_existance(sym_top_file) - check_file_existance(prm_top_file) + check_file_existance(sym_top_file, prm_top_file, logger=logger) top = Model.load(sym_top_file, prm_top_file) model_merger = Model.merger(qmodel, top, mrt.get_maps()) qmodel = model_merger.merge(callback=mergefunc) if args.oscale_maps is None: - logger.error("model merging, please specify --oscale_maps") - raise RuntimeError + raise RuntimeError("model merging, please specify --oscale_maps") oscale_maps = json.loads(args.oscale_maps) oscales = model_merger.get_output_scales( mrt_oscales, oscale_maps) sym_all_file, prm_all_file, ext_all_file = load_fname( model_prefix, suffix="all.quantize", with_ext=True) qmodel.save(sym_all_file, prm_all_file) - infos = ['oscales: ', oscales, - 'input_ext: ', inputs_ext, - 'input shapes: ', input_shape] + infos = [oscales, inputs_ext] sim.save_ext(ext_all_file, *infos) + save_conf(conf_quant_file, logger=logger, **conf_map) logger.info("model merging finished") else: logger.info("model merging skipped") @@ -371,70 +363,95 @@ def mergefunc(node, params, graph): @cmd.option("--device-type-evaluate", type=str, 
default="cpu", choices=["cpu", "gpu"]) @cmd.option("--device-ids-evaluate", nargs="+", type=int, default=[0]) -@cmd.option("--num-iter", type=int, default=0) -@cmd.module("evaluate", as_main=True, +@cmd.option("--iter-num", type=int, default=0) +@cmd.module("evaluate", as_main=True, refs=["modelprefix", "logger"], description=""" -MRT Python Tool: quantization stage +MRT Python Tool: evaluation stage """) def mrt_evaluate(args): - if args.evaluate: - if args.batch_evaluate is not None: - batch = args.batch_evaluate - ctx = get_ctx( - args.device_type_evaluate, args.device_ids_evaluate, - dctx=model_ctx) - if isinstance(ctx, mx.Context): - ctx = [ctx] - org_model = Model.load(sym_path, prm_path) - graph = org_model.to_graph(ctx=ctx) - dataset = ds.DS_REG[ds_name](set_batch(input_shape, batch)) - data_iter_func = dataset.iter_func() - metric = dataset.metrics() - - baxis = batch_axis(input_shape) - olen = len(org_model.symbol) - def forward(net, data, ctx): - """ Multiple xpu run support. 
- """ - data = gluon.utils.split_and_load( - data, ctx_list=ctx, batch_axis=baxis, even_split=False) - outs = [net(d) for d in data] - if olen == 1: - outs = nd.concatenate(outs) - else: - outs = [nd.concatenate([outs[i][j] \ - for i in range(len(outs))]) for j in range(olen)] - return outs - - def evalfunc(data, label): - outs = forward(graph, data, ctx=ctx) - acc = dataset.validate(metric, outs, label) - return acc - - ngpus = len(ctx) - if batch % ngpus: - raise RuntimeError("Batch must be divisible by the number of gpus") - split_batch = batch//ngpus - rqmodel = reduce_graph(qmodel, { - 'data': set_batch(input_shape, split_batch)}) - qgraph = rqmodel.to_graph(ctx=ctx) - qmetric = dataset.metrics() - - def quantize(data, label): - data = sim.load_real_data(data, 'data', inputs_ext) - outs = forward(qgraph, data, ctx) - outs = outs / oscales[0] if olen == 1 \ - else [(t / oscales[i]) for i, t in enumerate(outs)] - acc = dataset.validate(qmetric, outs, label) - return acc - - if args.num_iter > 0: - logger.info("Validating...") - utils.multi_validate(evalfunc, data_iter_func, quantize, - iter_num=args.num_iter, - logger=logging.getLogger('mrt.validate'), - batch_size=batch) - logger.info("evaluatation stage finished") + model_prefix = get_model_prefix(args) + logger = get_logger(args) + batch = args.batch_evaluate + conf_quant_file = model_prefix + ".quantize.conf" + check_file_existance(conf_quant_file, logger=logger) + conf_map = load_conf(conf_quant_file, logger=logger) + ctx = get_ctx( + args.device_type_evaluate, args.device_ids_evaluate) + if isinstance(ctx, mx.Context): + ctx = [ctx] + + # forward function for the orginal model + omodel = Model.load(*load_fname(model_prefix)) + graph = omodel.to_graph(ctx=ctx) + dataset_name = conf_map["dataset_name"] + input_shape = conf_map["input_shape"] + dataset = ds.DS_REG[dataset_name](set_batch(input_shape, batch)) + data_iter_func = dataset.iter_func() + metric = dataset.metrics() + baxis = batch_axis(input_shape) 
+ olen = len(omodel.symbol) + + def forward(net, data, ctx): + """ Multiple xpu run support. + """ + data = gluon.utils.split_and_load( + data, ctx_list=ctx, batch_axis=baxis, even_split=False) + outs = [net(d) for d in data] + if olen == 1: + outs = nd.concatenate(outs) + else: + outs = [nd.concatenate([outs[i][j] \ + for i in range(len(outs))]) for j in range(olen)] + return outs + + def evalfunc(data, label): + outs = forward(graph, data, ctx=ctx) + acc = dataset.validate(metric, outs, label) + return acc + + # forward function for the quantized model + num_xpus = len(ctx) + if batch % num_xpus: + raise RuntimeError("Batch must be divisible by the number of xpus") + split_batch = batch // num_xpus + if conf_map.get("split_keys", "") != "": + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) + check_file_existance( + sym_all_file, prm_all_file, ext_all_file, logger=logger) + qmodel = Model.load(sym_all_file, prm_all_file) + oscales, inputs_ext = sim.load_ext(ext_all_file) + else: + sym_quant_file, prm_quant_file, ext_quant_file = load_fname( + model_prefix, suffix="mrt.quantize", with_ext=True) + check_file_existance( + sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) + mrt = MRT.load(args.model_name+".mrt.quantize", datadir=args.model_dir) + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + qmodel = mrt.current_model + rqmodel = reduce_graph(qmodel, { + 'data': set_batch(input_shape, split_batch)}) + qgraph = rqmodel.to_graph(ctx=ctx) + qmetric = dataset.metrics() + + def quantize(data, label): + data = sim.load_real_data(data, 'data', inputs_ext) + outs = forward(qgraph, data, ctx) + outs = outs / oscales[0] if olen == 1 \ + else [(t / oscales[i]) for i, t in enumerate(outs)] + acc = dataset.validate(qmetric, outs, label) + return acc + + # evaluate + if args.iter_num > 0: + logger.info("Validating...") + utils.multi_validate( + evalfunc, data_iter_func, quantize, 
iter_num=args.iter_num, + logger=logging.getLogger('mrt.validate'), batch_size=batch) + logger.info("evaluatation stage finished") + else: + logger.info("evaluatation stage skipped") @cmd.option("--batch-compile", type=int) @cmd.option("--dump-dir", type=str, default="/data1/tmp") From 1ffb87cec583db2882491acb5f7aafe65a7e11b4 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 13 Sep 2021 18:20:12 +0800 Subject: [PATCH 020/120] upt main.py mrt module --- main.py | 60 +++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 21 deletions(-) diff --git a/main.py b/main.py index 89b2d686..4c7a5e7e 100644 --- a/main.py +++ b/main.py @@ -35,9 +35,18 @@ # def global_func(args): # log.Init(log.name2level(args.verbosity)) -def get_ctx(device_type, device_ids, dctx=mx.cpu()): +default_device_type = "cpu" +default_device_ids = [0] +default_ctx = mx.cpu() +default_batch = 16 + +def get_ctx(device_type, device_ids, dctx=default_ctx): + if device_type is None: + device_type = default_device_type + if device_ids is None: + device_ids = default_device_ids contex = dctx - if device_type == 'gpu': + if device_type == "gpu": contex = mx.gpu(device_ids[0]) if len(device_ids) == 1 \ else [mx.gpu(i) for i in device_ids] return contex @@ -138,9 +147,8 @@ def get_logger(args): logger = logging.getLogger("log.main") return logger -@cmd.option("--device-type-prepare", type=str, default="cpu", - choices=["cpu", "gpu"]) -@cmd.option("--device-ids-prepare", nargs="+", type=int, default=[0]) +@cmd.option("--device-type-prepare", type=str, choices=["cpu", "gpu"]) +@cmd.option("--device-ids-prepare", nargs="+", type=int) @cmd.option("--input-shape", nargs="+", type=int, default=[-1, 3, 224, 224]) @cmd.option("--split-keys", nargs="+", type=str, default="") @cmd.module("prepare", as_main=True, refs=["modelprefix", "logger"], @@ -183,20 +191,21 @@ def mrt_prepare(args): else: logger.info("model splitting skipped") -@cmd.option("--batch-calibrate", type=int, default=16) 
+@cmd.option("--batch-calibrate", type=int) @cmd.option("--calibrate-num", type=int, default=1) @cmd.option("--lambd", type=int) @cmd.option("--dataset-name", type=str, default="imagenet", choices=list(ds.DS_REG.keys())) @cmd.option("--dataset-dir", type=str, default=MRT_DATASET_ROOT) -@cmd.option("--device-type-calibrate", type=str, default="cpu", - choices=["cpu", "gpu"]) -@cmd.option("--device-ids-calibrate", nargs="+", type=int, default=[0]) +@cmd.option("--device-type-calibrate", type=str, choices=["cpu", "gpu"]) +@cmd.option("--device-ids-calibrate", nargs="+", type=int) @cmd.module("calibrate", as_main=True, refs=["modelprefix", "logger"], description=""" MRT Python Tool: calibration stage """) def mrt_calibrate(args): + batch = default_batch if args.batch_calibrate is None \ + else args.batch_calibrate model_prefix = get_model_prefix(args) logger = get_logger(args) conf_prep_file = model_prefix + ".prepare.conf" @@ -214,7 +223,7 @@ def mrt_calibrate(args): model_prefix, suffix="base") check_file_existance(sym_base_file, prm_base_file, logger=logger) mrt = Model.load(sym_base_file, prm_base_file).get_mrt() - shp = set_batch(conf_map["input_shape"], args.batch_calibrate) + shp = set_batch(conf_map["input_shape"], batch) dataset = ds.DS_REG[args.dataset_name](shp, root=args.dataset_dir) data_iter_func = dataset.iter_func() if len(args.device_ids_calibrate) > 1: @@ -233,9 +242,8 @@ def mrt_calibrate(args): @cmd.option("--restore-names", nargs="+", type=str, default=[]) @cmd.option("--input-precision", type=int) @cmd.option("--output-precision", type=int) -@cmd.option("--device-type-quantize", type=str, default="cpu", - choices=["cpu", "gpu"]) -@cmd.option("--device-ids-quantize", nargs="+", type=int, default=[0]) +@cmd.option("--device-type-quantize", type=str, choices=["cpu", "gpu"]) +@cmd.option("--device-ids-quantize", nargs="+", type=int) @cmd.option("--softmax-lambd", type=float) @cmd.option("--shift-bits", type=int) @cmd.option("--thresholds", type=str) 
@@ -360,9 +368,8 @@ def mergefunc(node, params, graph): logger.info("model merging skipped") @cmd.option("--batch-evaluate", type=int) -@cmd.option("--device-type-evaluate", type=str, default="cpu", - choices=["cpu", "gpu"]) -@cmd.option("--device-ids-evaluate", nargs="+", type=int, default=[0]) +@cmd.option("--device-type-evaluate", type=str, choices=["cpu", "gpu"]) +@cmd.option("--device-ids-evaluate", nargs="+", type=int) @cmd.option("--iter-num", type=int, default=0) @cmd.module("evaluate", as_main=True, refs=["modelprefix", "logger"], description=""" @@ -371,12 +378,12 @@ def mergefunc(node, params, graph): def mrt_evaluate(args): model_prefix = get_model_prefix(args) logger = get_logger(args) - batch = args.batch_evaluate + batch = default_batch if args.batch_evaluate is None \ + else args.batch_evaluate conf_quant_file = model_prefix + ".quantize.conf" check_file_existance(conf_quant_file, logger=logger) conf_map = load_conf(conf_quant_file, logger=logger) - ctx = get_ctx( - args.device_type_evaluate, args.device_ids_evaluate) + ctx = get_ctx(args.device_type_evaluate, args.device_ids_evaluate) if isinstance(ctx, mx.Context): ctx = [ctx] @@ -493,17 +500,28 @@ def mrt_compile(args): @cmd.option("--start-after", type=str, choices=["prepare", "calibrate", "quantize"]) +@cmd.option("--device-type", type=str, default=default_device_type, + choices=["cpu", "gpu"]) +@cmd.option("--device-ids", nargs="+", type=int, default=default_device_ids) +@cmd.option("--batch", type=int, default=default_batch) @cmd.option("--evaluate", action="store_true") @cmd.option("--compile", action="store_true") -@cmd.module("main", as_main=True, +@cmd.module("mrt", as_main=True, refs=["prepare", "calibrate", "quantize", "evaluate", "compile"], description=""" MRT Python Tool """) -def main(args): +def mrt_main(args): + # setting up attributes for all passes + for prefix in ["batch", "device_type", "device_ids"]: + for attr in dir(args): + if attr.startswith(prefix+"_") and getattr(args, 
attr) is None: + setattr(args, attr, getattr(args, prefix)) + start_pos = 0 start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} + return if args.start_after in start_pos_map: start_pos = start_pos_map[args.start_after] if start_pos < 1: From 5bc3bde7385d755188aa65762c22d2c446418aea Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 13 Sep 2021 19:09:57 +0800 Subject: [PATCH 021/120] seperate main.py compile --- main.py | 82 +++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 54 insertions(+), 28 deletions(-) diff --git a/main.py b/main.py index 4c7a5e7e..b01106e0 100644 --- a/main.py +++ b/main.py @@ -465,38 +465,64 @@ def quantize(data, label): @cmd.option("--device-type-compile", type=str, default="cpu", choices=["cpu", "gpu"]) @cmd.option("--device-ids-compile", nargs="+", type=int, default=[0]) -@cmd.module("compile", as_main=True, +@cmd.module("compile", as_main=True, refs=["modelprefix", "logger"], description=""" MRT Python Tool: compilation stage """) def mrt_compile(args): - if args.compile: - if args.batch_compile is not None: - batch = args.batch_compile - model_name_tfm = model_name + "_cvm" - if len(args.device_ids_compile) > 1: - raise RuntimeError( - "device ids should be an integer in compilation stage") - device_ids_compile = args.device_ids_compile[0] - qmodel.to_cvm(model_name_tfm, datadir=args.dump_dir, - input_shape=set_batch(input_shape, batch), - target=args.device_type_compile, - device_ids=device_ids_compile) - - dataset = ds.DS_REG[ds_name](set_batch(input_shape, batch)) - dump_data, _ = dataset.iter_func()() - dump_data = sim.load_real_data( - dump_data.astype("float64"), 'data', mrt.get_inputs_ext()) - model_root = path.join(args.dump_dir, model_name_tfm) - np.save(path.join(model_root, "data.npy"), - dump_data.astype('int8').asnumpy()) - infos = { - "inputs_ext": inputs_ext, - "oscales": oscales, - "input_shapes": input_shape, - } - sim.save_ext(path.join(model_root, "ext"), infos) - logger.info("compilation 
stage finished") + model_prefix = get_model_prefix(args) + logger = get_logger(args) + batch = default_batch if args.batch_compile is None \ + else args.batch_compile + conf_quant_file = model_prefix + ".quantize.conf" + check_file_existance(conf_quant_file, logger=logger) + conf_map = load_conf(conf_quant_file, logger=logger) + if args.device_type_compile is None: + args.device_type_compile = default_device_type + if args.device_ids_compile is None: + args.device_ids_compile = default_device_ids + if len(args.device_ids_compile) > 1: + raise RuntimeError( + "device ids should be an integer in compilation stage") + input_shape = conf_map["input_shape"] + + # compilation + model_name_tfm = args.model_name + "_cvm" + device_ids_compile = args.device_ids_compile[0] + if conf_map.get("split_keys", "") != "": + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) + check_file_existance( + sym_all_file, prm_all_file, ext_all_file, logger=logger) + qmodel = Model.load(sym_all_file, prm_all_file) + oscales, inputs_ext = sim.load_ext(ext_all_file) + else: + sym_quant_file, prm_quant_file, ext_quant_file = load_fname( + model_prefix, suffix="mrt.quantize", with_ext=True) + check_file_existance( + sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) + mrt = MRT.load(args.model_name+".mrt.quantize", datadir=args.model_dir) + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + qmodel = mrt.current_model + qmodel.to_cvm( + model_name_tfm, datadir=args.dump_dir, + input_shape=set_batch(input_shape, batch), + target=args.device_type_compile, device_ids=device_ids_compile) + dataset = ds.DS_REG[conf_map["dataset_name"]](set_batch(input_shape, batch)) + dump_data, _ = dataset.iter_func()() + dump_data = sim.load_real_data( + dump_data.astype("float64"), "data", mrt.get_inputs_ext()) + model_root = path.join(args.dump_dir, model_name_tfm) + np.save( + path.join(model_root, "data.npy"), 
dump_data.astype("int8").asnumpy()) + infos = { + "inputs_ext": inputs_ext, + "oscales": oscales, + "input_shapes": input_shape, + } + sim.save_ext(path.join(model_root, "ext"), infos) + logger.info("compilation stage finished") @cmd.option("--start-after", type=str, choices=["prepare", "calibrate", "quantize"]) From 4719b1da650308ab6696f2fb58250a55f5e4a454 Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 18 Sep 2021 10:29:29 +0800 Subject: [PATCH 022/120] fix broadcast_div --- main.py | 2 +- python/mrt/tfm_ops.py | 36 +++++------------------------------- 2 files changed, 6 insertions(+), 32 deletions(-) diff --git a/main.py b/main.py index b01106e0..84a035ef 100644 --- a/main.py +++ b/main.py @@ -472,7 +472,7 @@ def quantize(data, label): def mrt_compile(args): model_prefix = get_model_prefix(args) logger = get_logger(args) - batch = default_batch if args.batch_compile is None \ + batch = 1 if args.batch_compile is None \ else args.batch_compile conf_quant_file = model_prefix + ".quantize.conf" check_file_existance(conf_quant_file, logger=logger) diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index 66964a4c..a3859b04 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -1206,37 +1206,11 @@ def quantize(self, op, **kwargs): return _quantize_scale(op, **kwargs) -# @register_pass("calculate_ops") -# @register_pass("fuse_transpose") -# @register_pass("rewrite") -# @register_pass("prepare_for_compile") -# @register_pass("compile") -# @register_transformer("broadcast_div") -# class BroadcastDiv(Transformer): -# def quantize(self, op, **kwargs): -# precs, scales = kwargs["precs"], kwargs["scales"] -# th_dict = kwargs["th_dict"] -# name, op_name = op.attr("name"), op.attr("op_name") -# X, Y = sym_iter(op.get_children()) -# xn, yn = X.attr("name"), Y.attr("name") -# -# xs, ys = scales[xn], scales[yn] -# th = th_dict[name] -# -# if get_bit(th*xs/ys) > MAX_BIT: -# ys = xs / scale(th, MAX_BIT) -# yprec = min(get_bit(th_dict[yn] * ys), MAX_BIT) -# Y, _, 
ys = requant( -# Y, yprec, oname=N.n("denominator"), **kwargs) -# -# xs = scale(th, MAX_BIT) * ys -# xprec = get_bit(th_dict[xn] * xs) -# X, _, xs = requant( -# X, xprec, oscale=xs, oname=N.n("numerator"), **kwargs) -# -# oscale = scales[name] = xs / ys -# precs[name][OUT_KEY] = get_bit(th * oscale) -# return get_mxnet_op(op_name)(X, Y, name=name) +@register_pass("prepare_for_compile") +@register_pass("compile") +@register_transformer("broadcast_div") +class BroadcastDiv(Transformer): + pass @register_pass("calculate_ops") From c35fae0d8847253d81280dacccf53d1eeb62eb4b Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Sep 2021 15:41:43 +0800 Subject: [PATCH 023/120] upt --- main.py | 39 ++------------ python/mrt/mrt_passes.py | 110 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 115 insertions(+), 34 deletions(-) create mode 100644 python/mrt/mrt_passes.py diff --git a/main.py b/main.py index 84a035ef..b68e8125 100644 --- a/main.py +++ b/main.py @@ -18,6 +18,7 @@ from mrt import dataset as ds from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim +import mrt.mrt_passes as mpass # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) @@ -156,40 +157,10 @@ def get_logger(args): MRT Python Tool: preparation stage """) def mrt_prepare(args): - model_prefix = get_model_prefix(args) - logger = get_logger(args) - conf_prep_file = model_prefix + ".prepare.conf" - conf_map = {} - - # preparation - sym_path, prm_path = load_fname(model_prefix) - if not path.exists(sym_path) or not path.exists(prm_path): - save_model( - args.model_name, data_dir=args.model_dir, - ctx=get_ctx(args.device_type_prepare, args.device_ids_prepare)) - model = Model.load(sym_path, prm_path) - model.prepare(set_batch(args.input_shape, 1)) - sym_prep_file, prm_prep_file = load_fname( - model_prefix, suffix="prepare") - model.save(sym_prep_file, prm_prep_file) - conf_map["input_shape"] = args.input_shape - save_conf(conf_prep_file, logger=logger, **conf_map) - 
logger.info("preparation stage finihed") - - # model splitting - split_keys = args.split_keys - if split_keys: - sym_top_file, prm_top_file = load_fname(model_prefix, suffix='top') - sym_base_file, prm_base_file = load_fname( - model_prefix, suffix="base") - base, top = model.split(split_keys) - top.save(sym_top_file, prm_top_file) - base.save(sym_base_file, prm_base_file) - conf_map["split_keys"] = split_keys - save_conf(conf_prep_file, logger=logger, **conf_map) - logger.info("model splitting finished") - else: - logger.info("model splitting skipped") + mpass.mrt_prepare( + args.model_dir, args.model_name, args.verbosity, + args.device_type_prepare, args.device_ids_prepare, + args.input_shape, args.split_keys) @cmd.option("--batch-calibrate", type=int) @cmd.option("--calibrate-num", type=int, default=1) diff --git a/python/mrt/mrt_passes.py b/python/mrt/mrt_passes.py new file mode 100644 index 00000000..33a3e7d5 --- /dev/null +++ b/python/mrt/mrt_passes.py @@ -0,0 +1,110 @@ +from os import path +import logging +import json + +from mrt.gluon_zoo import save_model +from mrt.common import log +from mrt import utils +from mrt.transformer import Model + +def get_model_prefix(model_dir, model_name): + if model_dir.startswith("~"): + model_dir = path.expanduser(model_dir) + assert path.exists(model_dir), \ + "model_dir: {} does not exist".format(model_dir) + model_prefix = path.join(model_dir, model_name) + return model_prefix + +def get_logger(verbosity): + log.Init(log.name2level(verbosity.upper())) + logger = logging.getLogger("log.main") + return logger + +def set_batch(input_shape, batch): + """Get the input shape with respect to a specified batch value and an original input shape. + + Parameters + ---------- + input_shape : tuple + The input shape with batch axis unset. + batch : int + The batch value. + + Returns + ------- + ishape : tuple + The input shape with the value of batch axis equal to batch. 
+ """ + return [batch if s == -1 else s for s in input_shape] + +def load_fname(prefix, suffix=None, with_ext=False): + """Get the model files at a given stage. + + Parameters + ---------- + prefix : string + The file path without and extension. + suffix : string + The file suffix with respect to a given stage of MRT. + with_ext: bool + Whether to include ext file. + + Returns + ------- + files : tuple of string + The loaded file names. + """ + suffix = "."+suffix if suffix is not None else "" + return utils.extend_fname(prefix+suffix, with_ext) + +def save_conf(fname, logger=logging, **conf_map): + try: + info_s = json.dumps(conf_map, indent=4) + except: + logger.error("Json seralize invalid with data: {}".format(conf_map)) + raise RuntimeError + with open(fname, "w") as f: + f.write(info_s) + +def mrt_prepare( + model_dir, model_name, verbosity, device_type_prepare, + device_ids_prepare, input_shape, split_keys): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_prep_file = model_prefix + ".prepare.conf" + conf_map = {} + + # preparation + sym_path, prm_path = load_fname(model_prefix) + if not path.exists(sym_path) or not path.exists(prm_path): + save_model( + model_name, data_dir=model_dir, + ctx=get_ctx(device_type_prepare, device_ids_prepare)) + model = Model.load(sym_path, prm_path) + model.prepare(set_batch(input_shape, 1)) + sym_prep_file, prm_prep_file = load_fname( + model_prefix, suffix="prepare") + model.save(sym_prep_file, prm_prep_file) + conf_map["input_shape"] = input_shape + save_conf(conf_prep_file, logger=logger, **conf_map) + logger.info("preparation stage finihed") + + # model splitting + if split_keys: + sym_top_file, prm_top_file = load_fname(model_prefix, suffix='top') + sym_base_file, prm_base_file = load_fname( + model_prefix, suffix="base") + base, top = model.split(split_keys) + top.save(sym_top_file, prm_top_file) + base.save(sym_base_file, prm_base_file) + conf_map["split_keys"] = 
split_keys + save_conf(conf_prep_file, logger=logger, **conf_map) + logger.info("model splitting finished") + else: + logger.info("model splitting skipped") + +def mrt_calibrate(): + pass + +def mrt_quantize(): + pass From 5cebcc220b1d003d56991c5d8257882aa5c87798 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Sep 2021 16:19:01 +0800 Subject: [PATCH 024/120] upt --- main.py | 48 +++-------------- python/mrt/mrt_passes.py | 110 --------------------------------------- 2 files changed, 8 insertions(+), 150 deletions(-) delete mode 100644 python/mrt/mrt_passes.py diff --git a/main.py b/main.py index b68e8125..003edb49 100644 --- a/main.py +++ b/main.py @@ -18,7 +18,7 @@ from mrt import dataset as ds from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim -import mrt.mrt_passes as mpass +import mrt.mrt_entry as mentry # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) @@ -39,7 +39,6 @@ default_device_type = "cpu" default_device_ids = [0] default_ctx = mx.cpu() -default_batch = 16 def get_ctx(device_type, device_ids, dctx=default_ctx): if device_type is None: @@ -157,12 +156,12 @@ def get_logger(args): MRT Python Tool: preparation stage """) def mrt_prepare(args): - mpass.mrt_prepare( + mentry.mrt_prepare( args.model_dir, args.model_name, args.verbosity, args.device_type_prepare, args.device_ids_prepare, args.input_shape, args.split_keys) -@cmd.option("--batch-calibrate", type=int) +@cmd.option("--batch-calibrate", type=int, default=mentry.default_batch) @cmd.option("--calibrate-num", type=int, default=1) @cmd.option("--lambd", type=int) @cmd.option("--dataset-name", type=str, default="imagenet", @@ -175,40 +174,10 @@ def mrt_prepare(args): MRT Python Tool: calibration stage """) def mrt_calibrate(args): - batch = default_batch if args.batch_calibrate is None \ - else args.batch_calibrate - model_prefix = get_model_prefix(args) - logger = get_logger(args) - conf_prep_file = model_prefix + ".prepare.conf" - 
check_file_existance(conf_prep_file, logger=logger) - conf_map = load_conf(conf_prep_file, logger=logger) - - # calibration - if conf_map.get("split_keys", "") == "": - sym_prep_file, prm_prep_file = load_fname( - model_prefix, suffix="prepare") - check_file_existance(sym_prep_file, prm_prep_file, logger=logger) - mrt = Model.load(sym_prep_file, prm_prep_file).get_mrt() - else: - sym_base_file, prm_base_file = load_fname( - model_prefix, suffix="base") - check_file_existance(sym_base_file, prm_base_file, logger=logger) - mrt = Model.load(sym_base_file, prm_base_file).get_mrt() - shp = set_batch(conf_map["input_shape"], batch) - dataset = ds.DS_REG[args.dataset_name](shp, root=args.dataset_dir) - data_iter_func = dataset.iter_func() - if len(args.device_ids_calibrate) > 1: - raise RuntimeError( - "device ids should be an integer in calibration stage") - ctx = get_ctx(args.device_type_calibrate, args.device_ids_calibrate) - for i in range(args.calibrate_num): - data, _ = data_iter_func() - mrt.set_data(data) - mrt.calibrate(lambd=args.lambd, ctx=ctx) - mrt.save(args.model_name+".mrt.calibrate", datadir=args.model_dir) - conf_map["dataset_name"] = args.dataset_name - save_conf(model_prefix+".calibrate.conf", logger=logger, **conf_map) - logger.info("calibrate stage finished") + mentry.mrt_calibrate( + args.model_dir, args.model_name, args.verbosity, args.dataset_name, + args.dataset_dir, args.device_type_calibrate, args.device_ids_calibrate, + args.calibrate_num, args.lambd, batch=args.batch_calibrate) @cmd.option("--restore-names", nargs="+", type=str, default=[]) @cmd.option("--input-precision", type=int) @@ -500,7 +469,7 @@ def mrt_compile(args): @cmd.option("--device-type", type=str, default=default_device_type, choices=["cpu", "gpu"]) @cmd.option("--device-ids", nargs="+", type=int, default=default_device_ids) -@cmd.option("--batch", type=int, default=default_batch) +@cmd.option("--batch", type=int, default=mentry.default_batch) @cmd.option("--evaluate", 
action="store_true") @cmd.option("--compile", action="store_true") @cmd.module("mrt", as_main=True, @@ -518,7 +487,6 @@ def mrt_main(args): start_pos = 0 start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} - return if args.start_after in start_pos_map: start_pos = start_pos_map[args.start_after] if start_pos < 1: diff --git a/python/mrt/mrt_passes.py b/python/mrt/mrt_passes.py deleted file mode 100644 index 33a3e7d5..00000000 --- a/python/mrt/mrt_passes.py +++ /dev/null @@ -1,110 +0,0 @@ -from os import path -import logging -import json - -from mrt.gluon_zoo import save_model -from mrt.common import log -from mrt import utils -from mrt.transformer import Model - -def get_model_prefix(model_dir, model_name): - if model_dir.startswith("~"): - model_dir = path.expanduser(model_dir) - assert path.exists(model_dir), \ - "model_dir: {} does not exist".format(model_dir) - model_prefix = path.join(model_dir, model_name) - return model_prefix - -def get_logger(verbosity): - log.Init(log.name2level(verbosity.upper())) - logger = logging.getLogger("log.main") - return logger - -def set_batch(input_shape, batch): - """Get the input shape with respect to a specified batch value and an original input shape. - - Parameters - ---------- - input_shape : tuple - The input shape with batch axis unset. - batch : int - The batch value. - - Returns - ------- - ishape : tuple - The input shape with the value of batch axis equal to batch. - """ - return [batch if s == -1 else s for s in input_shape] - -def load_fname(prefix, suffix=None, with_ext=False): - """Get the model files at a given stage. - - Parameters - ---------- - prefix : string - The file path without and extension. - suffix : string - The file suffix with respect to a given stage of MRT. - with_ext: bool - Whether to include ext file. - - Returns - ------- - files : tuple of string - The loaded file names. 
- """ - suffix = "."+suffix if suffix is not None else "" - return utils.extend_fname(prefix+suffix, with_ext) - -def save_conf(fname, logger=logging, **conf_map): - try: - info_s = json.dumps(conf_map, indent=4) - except: - logger.error("Json seralize invalid with data: {}".format(conf_map)) - raise RuntimeError - with open(fname, "w") as f: - f.write(info_s) - -def mrt_prepare( - model_dir, model_name, verbosity, device_type_prepare, - device_ids_prepare, input_shape, split_keys): - model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) - conf_prep_file = model_prefix + ".prepare.conf" - conf_map = {} - - # preparation - sym_path, prm_path = load_fname(model_prefix) - if not path.exists(sym_path) or not path.exists(prm_path): - save_model( - model_name, data_dir=model_dir, - ctx=get_ctx(device_type_prepare, device_ids_prepare)) - model = Model.load(sym_path, prm_path) - model.prepare(set_batch(input_shape, 1)) - sym_prep_file, prm_prep_file = load_fname( - model_prefix, suffix="prepare") - model.save(sym_prep_file, prm_prep_file) - conf_map["input_shape"] = input_shape - save_conf(conf_prep_file, logger=logger, **conf_map) - logger.info("preparation stage finihed") - - # model splitting - if split_keys: - sym_top_file, prm_top_file = load_fname(model_prefix, suffix='top') - sym_base_file, prm_base_file = load_fname( - model_prefix, suffix="base") - base, top = model.split(split_keys) - top.save(sym_top_file, prm_top_file) - base.save(sym_base_file, prm_base_file) - conf_map["split_keys"] = split_keys - save_conf(conf_prep_file, logger=logger, **conf_map) - logger.info("model splitting finished") - else: - logger.info("model splitting skipped") - -def mrt_calibrate(): - pass - -def mrt_quantize(): - pass From cdb058311eda54f7d678f80fdbb39fa31bfc75c5 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Sep 2021 16:20:22 +0800 Subject: [PATCH 025/120] upt --- python/mrt/mrt_entry.py | 173 ++++++++++++++++++++++++++++++++++++++++ 1 file 
changed, 173 insertions(+) create mode 100644 python/mrt/mrt_entry.py diff --git a/python/mrt/mrt_entry.py b/python/mrt/mrt_entry.py new file mode 100644 index 00000000..2ea67316 --- /dev/null +++ b/python/mrt/mrt_entry.py @@ -0,0 +1,173 @@ +from os import path +import logging +import json + +import mxnet as mx + +from mrt.gluon_zoo import save_model +from mrt.common import log +from mrt import utils +from mrt.transformer import Model +from mrt import dataset as ds + +default_batch = 16 +default_ctx = mx.cpu() + +def get_model_prefix(model_dir, model_name): + if model_dir.startswith("~"): + model_dir = path.expanduser(model_dir) + assert path.exists(model_dir), \ + "model_dir: {} does not exist".format(model_dir) + model_prefix = path.join(model_dir, model_name) + return model_prefix + +def get_logger(verbosity): + log.Init(log.name2level(verbosity.upper())) + logger = logging.getLogger("log.main") + return logger + +def set_batch(input_shape, batch): + """Get the input shape with respect to a specified batch value and an original input shape. + + Parameters + ---------- + input_shape : tuple + The input shape with batch axis unset. + batch : int + The batch value. + + Returns + ------- + ishape : tuple + The input shape with the value of batch axis equal to batch. + """ + return [batch if s == -1 else s for s in input_shape] + +def load_fname(prefix, suffix=None, with_ext=False): + """Get the model files at a given stage. + + Parameters + ---------- + prefix : string + The file path without and extension. + suffix : string + The file suffix with respect to a given stage of MRT. + with_ext: bool + Whether to include ext file. + + Returns + ------- + files : tuple of string + The loaded file names. 
+ """ + suffix = "."+suffix if suffix is not None else "" + return utils.extend_fname(prefix+suffix, with_ext) + +def save_conf(fname, logger=logging, **conf_map): + try: + info_s = json.dumps(conf_map, indent=4) + except: + logger.error("Json seralize invalid with data: {}".format(conf_map)) + raise RuntimeError + with open(fname, "w") as f: + f.write(info_s) + +def load_conf(fname, logger=logging): + with open(fname, "r") as f: + try: + conf_map = json.load(f) + except: + logger.error("Json deserialize invalid, fname: {}".format(fname)) + return conf_map + +def check_file_existance(*fpaths, logger=logging): + for fpath in fpaths: + if not path.exists(fpath): + raise FileNotFoundError("fpath: {} does not exist".format(fpath)) + +def get_ctx(device_type, device_ids, dctx=default_ctx): + if device_type is None: + device_type = default_device_type + if device_ids is None: + device_ids = default_device_ids + contex = dctx + if device_type == "gpu": + contex = mx.gpu(device_ids[0]) if len(device_ids) == 1 \ + else [mx.gpu(i) for i in device_ids] + return contex + +def mrt_prepare( + model_dir, model_name, verbosity, device_type, device_ids, input_shape, + split_keys): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_prep_file = model_prefix + ".prepare.conf" + conf_map = {} + + # preparation + sym_path, prm_path = load_fname(model_prefix) + if not path.exists(sym_path) or not path.exists(prm_path): + save_model( + model_name, data_dir=model_dir, + ctx=get_ctx(device_type, device_ids)) + model = Model.load(sym_path, prm_path) + model.prepare(set_batch(input_shape, 1)) + sym_prep_file, prm_prep_file = load_fname( + model_prefix, suffix="prepare") + model.save(sym_prep_file, prm_prep_file) + conf_map["input_shape"] = input_shape + save_conf(conf_prep_file, logger=logger, **conf_map) + logger.info("preparation stage finihed") + + # model splitting + if split_keys: + sym_top_file, prm_top_file = load_fname(model_prefix, 
suffix='top') + sym_base_file, prm_base_file = load_fname( + model_prefix, suffix="base") + base, top = model.split(split_keys) + top.save(sym_top_file, prm_top_file) + base.save(sym_base_file, prm_base_file) + conf_map["split_keys"] = split_keys + save_conf(conf_prep_file, logger=logger, **conf_map) + logger.info("model splitting finished") + else: + logger.info("model splitting skipped") + +def mrt_calibrate( + model_dir, model_name, verbosity, dataset_name, dataset_dir, + device_type, device_ids, calibrate_num, lambd, batch=default_batch): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_prep_file = model_prefix + ".prepare.conf" + check_file_existance(conf_prep_file, logger=logger) + conf_map = load_conf(conf_prep_file, logger=logger) + + # calibration + if conf_map.get("split_keys", "") == "": + sym_prep_file, prm_prep_file = load_fname( + model_prefix, suffix="prepare") + check_file_existance(sym_prep_file, prm_prep_file, logger=logger) + mrt = Model.load(sym_prep_file, prm_prep_file).get_mrt() + else: + sym_base_file, prm_base_file = load_fname( + model_prefix, suffix="base") + check_file_existance(sym_base_file, prm_base_file, logger=logger) + mrt = Model.load(sym_base_file, prm_base_file).get_mrt() + shp = set_batch(conf_map["input_shape"], batch) + dataset = ds.DS_REG[dataset_name](shp, root=dataset_dir) + data_iter_func = dataset.iter_func() + if len(device_ids) > 1: + raise RuntimeError( + "device ids should be an integer in calibration stage") + ctx = get_ctx(device_type, device_ids) + for i in range(calibrate_num): + data, _ = data_iter_func() + mrt.set_data(data) + mrt.calibrate(lambd=lambd, ctx=ctx) + mrt.save(model_name+".mrt.calibrate", datadir=model_dir) + conf_map["dataset_name"] = dataset_name + save_conf(model_prefix+".calibrate.conf", logger=logger, **conf_map) + logger.info("calibrate stage finished") + +def mrt_quantize(): + pass From 365f5db01a6d40b996ed61b79c54994fa6029121 Mon Sep 17 
00:00:00 2001 From: ryt Date: Wed, 22 Sep 2021 16:39:47 +0800 Subject: [PATCH 026/120] upt --- main.py | 117 ++------------------------------------- python/mrt/mrt_entry.py | 120 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 122 insertions(+), 115 deletions(-) diff --git a/main.py b/main.py index 003edb49..c495f55b 100644 --- a/main.py +++ b/main.py @@ -194,118 +194,11 @@ def mrt_calibrate(args): MRT Python Tool: quantization stage """) def mrt_quantize(args): - model_prefix = get_model_prefix(args) - logger = get_logger(args) - conf_calib_file = model_prefix + ".calibrate.conf" - check_file_existance(conf_calib_file, logger=logger) - conf_map = load_conf(conf_calib_file, logger=logger) - sym_calib_file, prm_calib_file, ext_calib_file = load_fname( - model_prefix, suffix="mrt.calibrate", with_ext=True) - check_file_existance( - sym_calib_file, prm_calib_file, ext_calib_file, logger=logger) - mrt = MRT.load(args.model_name+".mrt.calibrate", datadir=args.model_dir) - conf_quant_file = model_prefix + ".quantize.conf" - - # restoration configuration - restore_names = args.restore_names - name_to_op = {} - for sym in topo_sort(mrt.current_model.symbol): - name, op_name = sym.attr('name'), sym.attr('op_name') - if op_name not in name_to_op: - name_to_op[op_name] = [] - name_to_op[op_name].append(name) - new_names = [] - for name in restore_names: - if name.startswith("_OP_") and name[4:] in name_to_op: - for new_name in name_to_op[name[4:]]: - new_names.append(new_name) - else: - new_names.append(name) - restore_names = set(new_names) - if '_ALL_EXCEPT_' in restore_names: - from tfm_base import _pass_manager - from tfm_ops import disabled_restore_ops - - quantize_ops = [op_name for op_name in _pass_manager["quantize"] \ - if op_name not in disabled_restore_ops] - restore_names_new = [] - for sym in topo_sort(mrt.current_model.symbol): - name, op_name = sym.attr('name'), sym.attr('op_name') - if op_name in quantize_ops and \ - name not in restore_names: - 
restore_names_new.append(name) - restore_names = set(restore_names_new) - for name in restore_names: - mrt.set_restore(name) - - # hyper parameters configuration - if args.input_precision is not None: - mrt.set_input_prec(args.input_precision) - if args.output_precision is not None: - mrt.set_output_prec(args.output_precision) - ctx = get_ctx(args.device_type_quantize, args.device_ids_quantize) - if args.softmax_lambd is not None: - mrt.set_softmax_lambd(args.softmax_lambd) - if args.shift_bits is not None: - mrt.set_shift_bits(args.shift_bits) - if args.thresholds is not None: - thresholds = json.loads(args.thresholds) - for name, threshold in thresholds.items(): - mrt.set_threshold(name, threshold) - - # quantization - mrt.quantize() - mrt.save(args.model_name+".mrt.quantize", datadir=args.model_dir) - input_shape = conf_map["input_shape"] - oscales = mrt.get_output_scales() - inputs_ext = mrt.get_inputs_ext() - infos = [oscales, inputs_ext] - ext_all_file = model_prefix + ".all.quantize.ext" - sim.save_ext(ext_all_file, *infos) - save_conf(conf_quant_file, logger=logger, **conf_map) - logger.info("quantization stage finished") - - # mergemodel - if conf_map.get("split_keys", "") != "": - qmodel = mrt.current_model - if args.attribute_deps is None: - raise RuntimeError("model merging, please specify --attribute_deps") - attribute_deps = json.loads(args.attribute_deps) - mrt_oscales = mrt.get_output_scales() - name_idx = {mrt.get_maps().get( - s.attr("name"), s.attr("name")): i \ - for i, s in enumerate(qmodel.symbol)} - def mergefunc(node, params, graph): - name, op_name = node.attr("name"), node.attr("op_name") - childs, attr = sutils.sym_iter( - node.get_children()), node.list_attr() - if op_name in attribute_deps: - attr_deps = attribute_deps[op_name] - for attr_name, v in attr_deps.items(): - val = sutils.get_attr(attr, attr_name, 0) - attr[attr_name] = int(val*mrt_oscales[name_idx[v]]) - node = sutils.get_mxnet_op(op_name)( - *childs, **attr, name=name) - 
return node - sym_top_file, prm_top_file = load_fname(model_prefix, suffix="top") - check_file_existance(sym_top_file, prm_top_file, logger=logger) - top = Model.load(sym_top_file, prm_top_file) - model_merger = Model.merger(qmodel, top, mrt.get_maps()) - qmodel = model_merger.merge(callback=mergefunc) - if args.oscale_maps is None: - raise RuntimeError("model merging, please specify --oscale_maps") - oscale_maps = json.loads(args.oscale_maps) - oscales = model_merger.get_output_scales( - mrt_oscales, oscale_maps) - sym_all_file, prm_all_file, ext_all_file = load_fname( - model_prefix, suffix="all.quantize", with_ext=True) - qmodel.save(sym_all_file, prm_all_file) - infos = [oscales, inputs_ext] - sim.save_ext(ext_all_file, *infos) - save_conf(conf_quant_file, logger=logger, **conf_map) - logger.info("model merging finished") - else: - logger.info("model merging skipped") + mentry.mrt_quantize( + args.model_dir, args.model_name, args.verbosity, args.restore_names, + args.input_precision, args.output_precision, args.device_type_quantize, + args.device_ids_quantize, args.softmax_lambd, args.shift_bits, + args.thresholds, args.attribute_deps, args.oscale_maps) @cmd.option("--batch-evaluate", type=int) @cmd.option("--device-type-evaluate", type=str, choices=["cpu", "gpu"]) diff --git a/python/mrt/mrt_entry.py b/python/mrt/mrt_entry.py index 2ea67316..620e36af 100644 --- a/python/mrt/mrt_entry.py +++ b/python/mrt/mrt_entry.py @@ -7,8 +7,10 @@ from mrt.gluon_zoo import save_model from mrt.common import log from mrt import utils -from mrt.transformer import Model +from mrt.transformer import Model, MRT from mrt import dataset as ds +from mrt import sym_utils as sutils +from mrt import sim_quant_helper as sim default_batch = 16 default_ctx = mx.cpu() @@ -169,5 +171,117 @@ def mrt_calibrate( save_conf(model_prefix+".calibrate.conf", logger=logger, **conf_map) logger.info("calibrate stage finished") -def mrt_quantize(): - pass +def mrt_quantize( + model_dir, model_name, 
verbosity, restore_names, input_precision, + output_precision, device_type, device_ids, softmax_lambd, shift_bits, + thresholds, attribute_deps, oscale_maps): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_calib_file = model_prefix + ".calibrate.conf" + check_file_existance(conf_calib_file, logger=logger) + conf_map = load_conf(conf_calib_file, logger=logger) + sym_calib_file, prm_calib_file, ext_calib_file = load_fname( + model_prefix, suffix="mrt.calibrate", with_ext=True) + check_file_existance( + sym_calib_file, prm_calib_file, ext_calib_file, logger=logger) + mrt = MRT.load(model_name+".mrt.calibrate", datadir=model_dir) + conf_quant_file = model_prefix + ".quantize.conf" + + # restoration configuration + name_to_op = {} + for sym in sutils.topo_sort(mrt.current_model.symbol): + name, op_name = sym.attr('name'), sym.attr('op_name') + if op_name not in name_to_op: + name_to_op[op_name] = [] + name_to_op[op_name].append(name) + new_names = [] + for name in restore_names: + if name.startswith("_OP_") and name[4:] in name_to_op: + for new_name in name_to_op[name[4:]]: + new_names.append(new_name) + else: + new_names.append(name) + restore_names = set(new_names) + if '_ALL_EXCEPT_' in restore_names: + from tfm_base import _pass_manager + from tfm_ops import disabled_restore_ops + + quantize_ops = [op_name for op_name in _pass_manager["quantize"] \ + if op_name not in disabled_restore_ops] + restore_names_new = [] + for sym in sutils.topo_sort(mrt.current_model.symbol): + name, op_name = sym.attr('name'), sym.attr('op_name') + if op_name in quantize_ops and \ + name not in restore_names: + restore_names_new.append(name) + restore_names = set(restore_names_new) + for name in restore_names: + mrt.set_restore(name) + + # hyper parameters configuration + if input_precision is not None: + mrt.set_input_prec(input_precision) + if output_precision is not None: + mrt.set_output_prec(output_precision) + ctx = 
get_ctx(device_type, device_ids) + if softmax_lambd is not None: + mrt.set_softmax_lambd(softmax_lambd) + if shift_bits is not None: + mrt.set_shift_bits(shift_bits) + if thresholds is not None: + thresholds = json.loads(thresholds) + for name, threshold in thresholds.items(): + mrt.set_threshold(name, threshold) + + # quantization + mrt.quantize() + mrt.save(model_name+".mrt.quantize", datadir=model_dir) + input_shape = conf_map["input_shape"] + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + infos = [oscales, inputs_ext] + ext_all_file = model_prefix + ".all.quantize.ext" + sim.save_ext(ext_all_file, *infos) + save_conf(conf_quant_file, logger=logger, **conf_map) + logger.info("quantization stage finished") + + # mergemodel + if conf_map.get("split_keys", "") != "": + qmodel = mrt.current_model + if attribute_deps is None: + raise RuntimeError("model merging, please specify --attribute_deps") + attribute_deps = json.loads(attribute_deps) + mrt_oscales = mrt.get_output_scales() + name_idx = {mrt.get_maps().get( + s.attr("name"), s.attr("name")): i \ + for i, s in enumerate(qmodel.symbol)} + def mergefunc(node, params, graph): + name, op_name = node.attr("name"), node.attr("op_name") + childs, attr = sutils.sym_iter( + node.get_children()), node.list_attr() + if op_name in attribute_deps: + attr_deps = attribute_deps[op_name] + for attr_name, v in attr_deps.items(): + val = sutils.get_attr(attr, attr_name, 0) + attr[attr_name] = int(val*mrt_oscales[name_idx[v]]) + node = sutils.get_mxnet_op(op_name)( + *childs, **attr, name=name) + return node + sym_top_file, prm_top_file = load_fname(model_prefix, suffix="top") + check_file_existance(sym_top_file, prm_top_file, logger=logger) + top = Model.load(sym_top_file, prm_top_file) + model_merger = Model.merger(qmodel, top, mrt.get_maps()) + qmodel = model_merger.merge(callback=mergefunc) + if oscale_maps is None: + raise RuntimeError("model merging, please specify --oscale_maps") + oscale_maps = 
json.loads(oscale_maps) + oscales = model_merger.get_output_scales(mrt_oscales, oscale_maps) + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) + qmodel.save(sym_all_file, prm_all_file) + infos = [oscales, inputs_ext] + sim.save_ext(ext_all_file, *infos) + save_conf(conf_quant_file, logger=logger, **conf_map) + logger.info("model merging finished") + else: + logger.info("model merging skipped") From 5d23e2ad043dd07f9faee6d0da528ffd59447c7d Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Sep 2021 16:54:51 +0800 Subject: [PATCH 027/120] upt --- main.py | 106 ++------------------------ python/mrt/mrt_entry.py | 160 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 164 insertions(+), 102 deletions(-) diff --git a/main.py b/main.py index c495f55b..5115d239 100644 --- a/main.py +++ b/main.py @@ -51,23 +51,6 @@ def get_ctx(device_type, device_ids, dctx=default_ctx): else [mx.gpu(i) for i in device_ids] return contex -def batch_axis(input_shape): - """Get the batch axis entry of an input shape. - - Parameters - ---------- - input_shape : tuple - The data shape related to dataset. - - Returns - ------- - axis : int - The batch axis entry of an input shape. - """ - idx = [i for i, s in enumerate(input_shape) if s == -1] - assert len(idx) == 1 - return idx[0] - def load_fname(prefix, suffix=None, with_ext=False): """Get the model files at a given stage. 
@@ -200,7 +183,7 @@ def mrt_quantize(args): args.device_ids_quantize, args.softmax_lambd, args.shift_bits, args.thresholds, args.attribute_deps, args.oscale_maps) -@cmd.option("--batch-evaluate", type=int) +@cmd.option("--batch-evaluate", type=int, default=mentry.default_batch) @cmd.option("--device-type-evaluate", type=str, choices=["cpu", "gpu"]) @cmd.option("--device-ids-evaluate", nargs="+", type=int) @cmd.option("--iter-num", type=int, default=0) @@ -209,89 +192,10 @@ def mrt_quantize(args): MRT Python Tool: evaluation stage """) def mrt_evaluate(args): - model_prefix = get_model_prefix(args) - logger = get_logger(args) - batch = default_batch if args.batch_evaluate is None \ - else args.batch_evaluate - conf_quant_file = model_prefix + ".quantize.conf" - check_file_existance(conf_quant_file, logger=logger) - conf_map = load_conf(conf_quant_file, logger=logger) - ctx = get_ctx(args.device_type_evaluate, args.device_ids_evaluate) - if isinstance(ctx, mx.Context): - ctx = [ctx] - - # forward function for the orginal model - omodel = Model.load(*load_fname(model_prefix)) - graph = omodel.to_graph(ctx=ctx) - dataset_name = conf_map["dataset_name"] - input_shape = conf_map["input_shape"] - dataset = ds.DS_REG[dataset_name](set_batch(input_shape, batch)) - data_iter_func = dataset.iter_func() - metric = dataset.metrics() - baxis = batch_axis(input_shape) - olen = len(omodel.symbol) - - def forward(net, data, ctx): - """ Multiple xpu run support. 
- """ - data = gluon.utils.split_and_load( - data, ctx_list=ctx, batch_axis=baxis, even_split=False) - outs = [net(d) for d in data] - if olen == 1: - outs = nd.concatenate(outs) - else: - outs = [nd.concatenate([outs[i][j] \ - for i in range(len(outs))]) for j in range(olen)] - return outs - - def evalfunc(data, label): - outs = forward(graph, data, ctx=ctx) - acc = dataset.validate(metric, outs, label) - return acc - - # forward function for the quantized model - num_xpus = len(ctx) - if batch % num_xpus: - raise RuntimeError("Batch must be divisible by the number of xpus") - split_batch = batch // num_xpus - if conf_map.get("split_keys", "") != "": - sym_all_file, prm_all_file, ext_all_file = load_fname( - model_prefix, suffix="all.quantize", with_ext=True) - check_file_existance( - sym_all_file, prm_all_file, ext_all_file, logger=logger) - qmodel = Model.load(sym_all_file, prm_all_file) - oscales, inputs_ext = sim.load_ext(ext_all_file) - else: - sym_quant_file, prm_quant_file, ext_quant_file = load_fname( - model_prefix, suffix="mrt.quantize", with_ext=True) - check_file_existance( - sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) - mrt = MRT.load(args.model_name+".mrt.quantize", datadir=args.model_dir) - oscales = mrt.get_output_scales() - inputs_ext = mrt.get_inputs_ext() - qmodel = mrt.current_model - rqmodel = reduce_graph(qmodel, { - 'data': set_batch(input_shape, split_batch)}) - qgraph = rqmodel.to_graph(ctx=ctx) - qmetric = dataset.metrics() - - def quantize(data, label): - data = sim.load_real_data(data, 'data', inputs_ext) - outs = forward(qgraph, data, ctx) - outs = outs / oscales[0] if olen == 1 \ - else [(t / oscales[i]) for i, t in enumerate(outs)] - acc = dataset.validate(qmetric, outs, label) - return acc - - # evaluate - if args.iter_num > 0: - logger.info("Validating...") - utils.multi_validate( - evalfunc, data_iter_func, quantize, iter_num=args.iter_num, - logger=logging.getLogger('mrt.validate'), batch_size=batch) - 
logger.info("evaluatation stage finished") - else: - logger.info("evaluatation stage skipped") + mentry.mrt_evaluate( + args.model_dir, args.model_name, args.verbosity, + args.device_type_evaluate, args.device_ids_evaluate, args.iter_num, + batch=args.batch_evaluate) @cmd.option("--batch-compile", type=int) @cmd.option("--dump-dir", type=str, default="/data1/tmp") diff --git a/python/mrt/mrt_entry.py b/python/mrt/mrt_entry.py index 620e36af..12a6601b 100644 --- a/python/mrt/mrt_entry.py +++ b/python/mrt/mrt_entry.py @@ -3,11 +3,12 @@ import json import mxnet as mx +from mxnet import gluon, ndarray as nd from mrt.gluon_zoo import save_model from mrt.common import log from mrt import utils -from mrt.transformer import Model, MRT +from mrt.transformer import Model, MRT, reduce_graph from mrt import dataset as ds from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim @@ -98,6 +99,23 @@ def get_ctx(device_type, device_ids, dctx=default_ctx): else [mx.gpu(i) for i in device_ids] return contex +def get_batch_axis(input_shape): + """Get the batch axis entry of an input shape. + + Parameters + ---------- + input_shape : tuple + The data shape related to dataset. + + Returns + ------- + axis : int + The batch axis entry of an input shape. 
+ """ + idx = [i for i, s in enumerate(input_shape) if s == -1] + assert len(idx) == 1 + return idx[0] + def mrt_prepare( model_dir, model_name, verbosity, device_type, device_ids, input_shape, split_keys): @@ -285,3 +303,143 @@ def mergefunc(node, params, graph): logger.info("model merging finished") else: logger.info("model merging skipped") + +def mrt_evaluate( + model_dir, model_name, verbosity, device_type, device_ids, iter_num, + batch=default_batch): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_quant_file = model_prefix + ".quantize.conf" + check_file_existance(conf_quant_file, logger=logger) + conf_map = load_conf(conf_quant_file, logger=logger) + ctx = get_ctx(device_type, device_ids) + if isinstance(ctx, mx.Context): + ctx = [ctx] + + # forward function for the orginal model + omodel = Model.load(*load_fname(model_prefix)) + graph = omodel.to_graph(ctx=ctx) + dataset_name = conf_map["dataset_name"] + input_shape = conf_map["input_shape"] + dataset = ds.DS_REG[dataset_name](set_batch(input_shape, batch)) + data_iter_func = dataset.iter_func() + metric = dataset.metrics() + baxis = get_batch_axis(input_shape) + olen = len(omodel.symbol) + + def forward(net, data, ctx): + """ Multiple xpu run support. 
+ """ + data = gluon.utils.split_and_load( + data, ctx_list=ctx, batch_axis=baxis, even_split=False) + outs = [net(d) for d in data] + if olen == 1: + outs = nd.concatenate(outs) + else: + outs = [nd.concatenate([outs[i][j] \ + for i in range(len(outs))]) for j in range(olen)] + return outs + + def evalfunc(data, label): + outs = forward(graph, data, ctx=ctx) + acc = dataset.validate(metric, outs, label) + return acc + + # forward function for the quantized model + num_xpus = len(ctx) + if batch % num_xpus: + raise RuntimeError("Batch must be divisible by the number of xpus") + split_batch = batch // num_xpus + if conf_map.get("split_keys", "") != "": + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) + check_file_existance( + sym_all_file, prm_all_file, ext_all_file, logger=logger) + qmodel = Model.load(sym_all_file, prm_all_file) + oscales, inputs_ext = sim.load_ext(ext_all_file) + else: + sym_quant_file, prm_quant_file, ext_quant_file = load_fname( + model_prefix, suffix="mrt.quantize", with_ext=True) + check_file_existance( + sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) + mrt = MRT.load(model_name+".mrt.quantize", datadir=model_dir) + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + qmodel = mrt.current_model + rqmodel = reduce_graph(qmodel, { + 'data': set_batch(input_shape, split_batch)}) + qgraph = rqmodel.to_graph(ctx=ctx) + qmetric = dataset.metrics() + + def quantize(data, label): + data = sim.load_real_data(data, 'data', inputs_ext) + outs = forward(qgraph, data, ctx) + outs = outs / oscales[0] if olen == 1 \ + else [(t / oscales[i]) for i, t in enumerate(outs)] + acc = dataset.validate(qmetric, outs, label) + return acc + + # evaluate + if iter_num > 0: + logger.info("Validating...") + utils.multi_validate( + evalfunc, data_iter_func, quantize, iter_num=iter_num, + logger=logging.getLogger('mrt.validate'), batch_size=batch) + logger.info("evaluatation 
stage finished") + else: + logger.info("evaluatation stage skipped") + +# def mrt_compile(args): + # model_prefix = get_model_prefix(args) + # logger = get_logger(args) + # batch = 1 if args.batch_compile is None \ + # else args.batch_compile + # conf_quant_file = model_prefix + ".quantize.conf" + # check_file_existance(conf_quant_file, logger=logger) + # conf_map = load_conf(conf_quant_file, logger=logger) + # if args.device_type_compile is None: + # args.device_type_compile = default_device_type + # if args.device_ids_compile is None: + # args.device_ids_compile = default_device_ids + # if len(args.device_ids_compile) > 1: + # raise RuntimeError( + # "device ids should be an integer in compilation stage") + # input_shape = conf_map["input_shape"] + + # # compilation + # model_name_tfm = args.model_name + "_cvm" + # device_ids_compile = args.device_ids_compile[0] + # if conf_map.get("split_keys", "") != "": + # sym_all_file, prm_all_file, ext_all_file = load_fname( + # model_prefix, suffix="all.quantize", with_ext=True) + # check_file_existance( + # sym_all_file, prm_all_file, ext_all_file, logger=logger) + # qmodel = Model.load(sym_all_file, prm_all_file) + # oscales, inputs_ext = sim.load_ext(ext_all_file) + # else: + # sym_quant_file, prm_quant_file, ext_quant_file = load_fname( + # model_prefix, suffix="mrt.quantize", with_ext=True) + # check_file_existance( + # sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) + # mrt = MRT.load(args.model_name+".mrt.quantize", datadir=args.model_dir) + # oscales = mrt.get_output_scales() + # inputs_ext = mrt.get_inputs_ext() + # qmodel = mrt.current_model + # qmodel.to_cvm( + # model_name_tfm, datadir=args.dump_dir, + # input_shape=set_batch(input_shape, batch), + # target=args.device_type_compile, device_ids=device_ids_compile) + # dataset = ds.DS_REG[conf_map["dataset_name"]](set_batch(input_shape, batch)) + # dump_data, _ = dataset.iter_func()() + # dump_data = sim.load_real_data( + # 
dump_data.astype("float64"), "data", mrt.get_inputs_ext()) + # model_root = path.join(args.dump_dir, model_name_tfm) + # np.save( + # path.join(model_root, "data.npy"), dump_data.astype("int8").asnumpy()) + # infos = { + # "inputs_ext": inputs_ext, + # "oscales": oscales, + # "input_shapes": input_shape, + # } + # sim.save_ext(path.join(model_root, "ext"), infos) + # logger.info("compilation stage finished") From aec80dd7449db40dcc52662107715369b010d989 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Sep 2021 17:14:55 +0800 Subject: [PATCH 028/120] upt --- main.py | 59 ++-------------------- python/mrt/mrt_entry.py | 108 ++++++++++++++++++++-------------------- 2 files changed, 59 insertions(+), 108 deletions(-) diff --git a/main.py b/main.py index 5115d239..0126f872 100644 --- a/main.py +++ b/main.py @@ -197,7 +197,7 @@ def mrt_evaluate(args): args.device_type_evaluate, args.device_ids_evaluate, args.iter_num, batch=args.batch_evaluate) -@cmd.option("--batch-compile", type=int) +@cmd.option("--batch-compile", type=int, default=mentry.default_batch) @cmd.option("--dump-dir", type=str, default="/data1/tmp") @cmd.option("--device-type-compile", type=str, default="cpu", choices=["cpu", "gpu"]) @@ -207,59 +207,10 @@ def mrt_evaluate(args): MRT Python Tool: compilation stage """) def mrt_compile(args): - model_prefix = get_model_prefix(args) - logger = get_logger(args) - batch = 1 if args.batch_compile is None \ - else args.batch_compile - conf_quant_file = model_prefix + ".quantize.conf" - check_file_existance(conf_quant_file, logger=logger) - conf_map = load_conf(conf_quant_file, logger=logger) - if args.device_type_compile is None: - args.device_type_compile = default_device_type - if args.device_ids_compile is None: - args.device_ids_compile = default_device_ids - if len(args.device_ids_compile) > 1: - raise RuntimeError( - "device ids should be an integer in compilation stage") - input_shape = conf_map["input_shape"] - - # compilation - model_name_tfm = 
args.model_name + "_cvm" - device_ids_compile = args.device_ids_compile[0] - if conf_map.get("split_keys", "") != "": - sym_all_file, prm_all_file, ext_all_file = load_fname( - model_prefix, suffix="all.quantize", with_ext=True) - check_file_existance( - sym_all_file, prm_all_file, ext_all_file, logger=logger) - qmodel = Model.load(sym_all_file, prm_all_file) - oscales, inputs_ext = sim.load_ext(ext_all_file) - else: - sym_quant_file, prm_quant_file, ext_quant_file = load_fname( - model_prefix, suffix="mrt.quantize", with_ext=True) - check_file_existance( - sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) - mrt = MRT.load(args.model_name+".mrt.quantize", datadir=args.model_dir) - oscales = mrt.get_output_scales() - inputs_ext = mrt.get_inputs_ext() - qmodel = mrt.current_model - qmodel.to_cvm( - model_name_tfm, datadir=args.dump_dir, - input_shape=set_batch(input_shape, batch), - target=args.device_type_compile, device_ids=device_ids_compile) - dataset = ds.DS_REG[conf_map["dataset_name"]](set_batch(input_shape, batch)) - dump_data, _ = dataset.iter_func()() - dump_data = sim.load_real_data( - dump_data.astype("float64"), "data", mrt.get_inputs_ext()) - model_root = path.join(args.dump_dir, model_name_tfm) - np.save( - path.join(model_root, "data.npy"), dump_data.astype("int8").asnumpy()) - infos = { - "inputs_ext": inputs_ext, - "oscales": oscales, - "input_shapes": input_shape, - } - sim.save_ext(path.join(model_root, "ext"), infos) - logger.info("compilation stage finished") + mentry.mrt_compile( + args.model_dir, args.model_name, args.verbosity, args.dump_dir, + device_type=args.device_type_compile, + device_ids=args.device_ids_compile, batch=args.batch_compile) @cmd.option("--start-after", type=str, choices=["prepare", "calibrate", "quantize"]) diff --git a/python/mrt/mrt_entry.py b/python/mrt/mrt_entry.py index 12a6601b..23ad04cf 100644 --- a/python/mrt/mrt_entry.py +++ b/python/mrt/mrt_entry.py @@ -4,6 +4,7 @@ import mxnet as mx from mxnet 
import gluon, ndarray as nd +import numpy as np from mrt.gluon_zoo import save_model from mrt.common import log @@ -13,6 +14,8 @@ from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim +default_device_type = "cpu" +default_device_ids = [0] default_batch = 16 default_ctx = mx.cpu() @@ -389,57 +392,54 @@ def quantize(data, label): else: logger.info("evaluatation stage skipped") -# def mrt_compile(args): - # model_prefix = get_model_prefix(args) - # logger = get_logger(args) - # batch = 1 if args.batch_compile is None \ - # else args.batch_compile - # conf_quant_file = model_prefix + ".quantize.conf" - # check_file_existance(conf_quant_file, logger=logger) - # conf_map = load_conf(conf_quant_file, logger=logger) - # if args.device_type_compile is None: - # args.device_type_compile = default_device_type - # if args.device_ids_compile is None: - # args.device_ids_compile = default_device_ids - # if len(args.device_ids_compile) > 1: - # raise RuntimeError( - # "device ids should be an integer in compilation stage") - # input_shape = conf_map["input_shape"] - - # # compilation - # model_name_tfm = args.model_name + "_cvm" - # device_ids_compile = args.device_ids_compile[0] - # if conf_map.get("split_keys", "") != "": - # sym_all_file, prm_all_file, ext_all_file = load_fname( - # model_prefix, suffix="all.quantize", with_ext=True) - # check_file_existance( - # sym_all_file, prm_all_file, ext_all_file, logger=logger) - # qmodel = Model.load(sym_all_file, prm_all_file) - # oscales, inputs_ext = sim.load_ext(ext_all_file) - # else: - # sym_quant_file, prm_quant_file, ext_quant_file = load_fname( - # model_prefix, suffix="mrt.quantize", with_ext=True) - # check_file_existance( - # sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) - # mrt = MRT.load(args.model_name+".mrt.quantize", datadir=args.model_dir) - # oscales = mrt.get_output_scales() - # inputs_ext = mrt.get_inputs_ext() - # qmodel = mrt.current_model - # qmodel.to_cvm( - # 
model_name_tfm, datadir=args.dump_dir, - # input_shape=set_batch(input_shape, batch), - # target=args.device_type_compile, device_ids=device_ids_compile) - # dataset = ds.DS_REG[conf_map["dataset_name"]](set_batch(input_shape, batch)) - # dump_data, _ = dataset.iter_func()() - # dump_data = sim.load_real_data( - # dump_data.astype("float64"), "data", mrt.get_inputs_ext()) - # model_root = path.join(args.dump_dir, model_name_tfm) - # np.save( - # path.join(model_root, "data.npy"), dump_data.astype("int8").asnumpy()) - # infos = { - # "inputs_ext": inputs_ext, - # "oscales": oscales, - # "input_shapes": input_shape, - # } - # sim.save_ext(path.join(model_root, "ext"), infos) - # logger.info("compilation stage finished") +def mrt_compile( + model_dir, model_name, verbosity, dump_dir, + batch=default_batch, device_type=default_device_type, + device_ids=default_device_ids): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_quant_file = model_prefix + ".quantize.conf" + check_file_existance(conf_quant_file, logger=logger) + conf_map = load_conf(conf_quant_file, logger=logger) + if len(device_ids) > 1: + raise RuntimeError( + "device ids should be an integer in compilation stage") + input_shape = conf_map["input_shape"] + + # compilation + model_name_tfm = model_name + "_cvm" + device_ids_compile = device_ids[0] + if conf_map.get("split_keys", "") != "": + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) + check_file_existance( + sym_all_file, prm_all_file, ext_all_file, logger=logger) + qmodel = Model.load(sym_all_file, prm_all_file) + oscales, inputs_ext = sim.load_ext(ext_all_file) + else: + sym_quant_file, prm_quant_file, ext_quant_file = load_fname( + model_prefix, suffix="mrt.quantize", with_ext=True) + check_file_existance( + sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) + mrt = MRT.load(model_name+".mrt.quantize", datadir=model_dir) + oscales 
= mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + qmodel = mrt.current_model + qmodel.to_cvm( + model_name_tfm, datadir=dump_dir, + input_shape=set_batch(input_shape, batch), target=device_type, + device_ids=device_ids_compile) + dataset = ds.DS_REG[conf_map["dataset_name"]](set_batch(input_shape, batch)) + dump_data, _ = dataset.iter_func()() + dump_data = sim.load_real_data( + dump_data.astype("float64"), "data", mrt.get_inputs_ext()) + model_root = path.join(dump_dir, model_name_tfm) + np.save( + path.join(model_root, "data.npy"), dump_data.astype("int8").asnumpy()) + infos = { + "inputs_ext": inputs_ext, + "oscales": oscales, + "input_shapes": input_shape, + } + sim.save_ext(path.join(model_root, "ext"), infos) + logger.info("compilation stage finished") From f11829818decda3acba7c99a9a1bfd0f061e1e9e Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Sep 2021 18:55:59 +0800 Subject: [PATCH 029/120] upt --- main.py | 94 ++++----------------------------------------------------- 1 file changed, 6 insertions(+), 88 deletions(-) diff --git a/main.py b/main.py index 0126f872..9045461a 100644 --- a/main.py +++ b/main.py @@ -36,99 +36,17 @@ # def global_func(args): # log.Init(log.name2level(args.verbosity)) -default_device_type = "cpu" -default_device_ids = [0] -default_ctx = mx.cpu() - -def get_ctx(device_type, device_ids, dctx=default_ctx): - if device_type is None: - device_type = default_device_type - if device_ids is None: - device_ids = default_device_ids - contex = dctx - if device_type == "gpu": - contex = mx.gpu(device_ids[0]) if len(device_ids) == 1 \ - else [mx.gpu(i) for i in device_ids] - return contex - -def load_fname(prefix, suffix=None, with_ext=False): - """Get the model files at a given stage. - - Parameters - ---------- - prefix : string - The file path without and extension. - suffix : string - The file suffix with respect to a given stage of MRT. - with_ext: bool - Whether to include ext file. 
- - Returns - ------- - files : tuple of string - The loaded file names. - """ - suffix = "."+suffix if suffix is not None else "" - return utils.extend_fname(prefix+suffix, with_ext) - -def set_batch(input_shape, batch): - """Get the input shape with respect to a specified batch value and an original input shape. - - Parameters - ---------- - input_shape : tuple - The input shape with batch axis unset. - batch : int - The batch value. - - Returns - ------- - ishape : tuple - The input shape with the value of batch axis equal to batch. - """ - return [batch if s == -1 else s for s in input_shape] - -def save_conf(fname, logger=logging, **conf_map): - try: - info_s = json.dumps(conf_map, indent=4) - except: - logger.error("Json seralize invalid with data: {}".format(conf_map)) - with open(fname, "w") as f: - f.write(info_s) - -def load_conf(fname, logger=logging): - with open(fname, "r") as f: - try: - conf_map = json.load(f) - except: - logger.error("Json deserialize invalid, fname: {}".format(fname)) - return conf_map - -def check_file_existance(*fpaths, logger=logging): - for fpath in fpaths: - if not path.exists(fpath): - raise FileNotFoundError("fpath: {} does not exist".format(fpath)) - @cmd.option("model_name", type=str) @cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) @cmd.module("modelprefix") def get_model_prefix(args): - model_dir = args.model_dir - if model_dir.startswith("~"): - model_dir = path.expanduser(model_dir) - model_name = args.model_name - assert path.exists(model_dir), \ - "model_dir: {} does not exist".format(model_dir) - model_prefix = path.join(model_dir, model_name) - return model_prefix + pass @cmd.option("--verbosity", type=str, default="debug", choices=["none", "debug", "info", "warning", "error", "critical"]) @cmd.module("logger") def get_logger(args): - log.Init(log.name2level(args.verbosity.upper())) - logger = logging.getLogger("log.main") - return logger + pass @cmd.option("--device-type-prepare", type=str, 
choices=["cpu", "gpu"]) @cmd.option("--device-ids-prepare", nargs="+", type=int) @@ -214,15 +132,15 @@ def mrt_compile(args): @cmd.option("--start-after", type=str, choices=["prepare", "calibrate", "quantize"]) -@cmd.option("--device-type", type=str, default=default_device_type, +@cmd.option("--device-type", type=str, default=mentry.default_device_type, choices=["cpu", "gpu"]) -@cmd.option("--device-ids", nargs="+", type=int, default=default_device_ids) +@cmd.option("--device-ids", nargs="+", type=int, + default=mentry.default_device_ids) @cmd.option("--batch", type=int, default=mentry.default_batch) @cmd.option("--evaluate", action="store_true") @cmd.option("--compile", action="store_true") @cmd.module("mrt", as_main=True, - refs=["prepare", "calibrate", "quantize", - "evaluate", "compile"], + refs=["prepare", "calibrate", "quantize", "evaluate", "compile"], description=""" MRT Python Tool """) From 455ac7d73b935bbd6d5c293940dec1be19f43f7e Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Sep 2021 18:58:46 +0800 Subject: [PATCH 030/120] upt --- main.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/main.py b/main.py index 9045461a..09076082 100644 --- a/main.py +++ b/main.py @@ -137,8 +137,8 @@ def mrt_compile(args): @cmd.option("--device-ids", nargs="+", type=int, default=mentry.default_device_ids) @cmd.option("--batch", type=int, default=mentry.default_batch) -@cmd.option("--evaluate", action="store_true") -@cmd.option("--compile", action="store_true") +@cmd.option("--run-evaluate", action="store_true") +@cmd.option("--run-compile", action="store_true") @cmd.module("mrt", as_main=True, refs=["prepare", "calibrate", "quantize", "evaluate", "compile"], description=""" @@ -150,7 +150,6 @@ def mrt_main(args): for attr in dir(args): if attr.startswith(prefix+"_") and getattr(args, attr) is None: setattr(args, attr, getattr(args, prefix)) - start_pos = 0 start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} if args.start_after in 
start_pos_map: @@ -161,9 +160,9 @@ def mrt_main(args): mrt_calibrate(args) if start_pos < 3: mrt_quantize(args) - if args.evaluate: + if args.run_evaluate: mrt_evaluate(args) - if args.compile: + if args.run_compile: mrt_compile(args) if __name__ == "__main__": From cc838123ced974ad6754e4538a7bc6c0593c9457 Mon Sep 17 00:00:00 2001 From: ryt Date: Sun, 26 Sep 2021 19:02:29 +0800 Subject: [PATCH 031/120] upt yaml congifuration --- main.py | 26 ++++++++++---------- main2.py | 20 ++++++++++++++++ python/mrt/conf.py | 3 +++ python/mrt/defaults.py | 54 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 90 insertions(+), 13 deletions(-) create mode 100644 main2.py create mode 100644 python/mrt/defaults.py diff --git a/main.py b/main.py index 09076082..f8b9c494 100644 --- a/main.py +++ b/main.py @@ -56,7 +56,7 @@ def get_logger(args): description=""" MRT Python Tool: preparation stage """) -def mrt_prepare(args): +def cmd_prepare(args): mentry.mrt_prepare( args.model_dir, args.model_name, args.verbosity, args.device_type_prepare, args.device_ids_prepare, @@ -74,7 +74,7 @@ def mrt_prepare(args): description=""" MRT Python Tool: calibration stage """) -def mrt_calibrate(args): +def cmd_calibrate(args): mentry.mrt_calibrate( args.model_dir, args.model_name, args.verbosity, args.dataset_name, args.dataset_dir, args.device_type_calibrate, args.device_ids_calibrate, @@ -94,7 +94,7 @@ def mrt_calibrate(args): description=""" MRT Python Tool: quantization stage """) -def mrt_quantize(args): +def cmd_quantize(args): mentry.mrt_quantize( args.model_dir, args.model_name, args.verbosity, args.restore_names, args.input_precision, args.output_precision, args.device_type_quantize, @@ -109,7 +109,7 @@ def mrt_quantize(args): description=""" MRT Python Tool: evaluation stage """) -def mrt_evaluate(args): +def cmd_evaluate(args): mentry.mrt_evaluate( args.model_dir, args.model_name, args.verbosity, args.device_type_evaluate, args.device_ids_evaluate, args.iter_num, @@ -124,7 +124,7 
@@ def mrt_evaluate(args): description=""" MRT Python Tool: compilation stage """) -def mrt_compile(args): +def cmd_compile(args): mentry.mrt_compile( args.model_dir, args.model_name, args.verbosity, args.dump_dir, device_type=args.device_type_compile, @@ -139,12 +139,12 @@ def mrt_compile(args): @cmd.option("--batch", type=int, default=mentry.default_batch) @cmd.option("--run-evaluate", action="store_true") @cmd.option("--run-compile", action="store_true") -@cmd.module("mrt", as_main=True, +@cmd.module("cmd", as_main=True, refs=["prepare", "calibrate", "quantize", "evaluate", "compile"], description=""" -MRT Python Tool +MRT CMD Tool """) -def mrt_main(args): +def cmd_main(args): # setting up attributes for all passes for prefix in ["batch", "device_type", "device_ids"]: for attr in dir(args): @@ -155,15 +155,15 @@ def mrt_main(args): if args.start_after in start_pos_map: start_pos = start_pos_map[args.start_after] if start_pos < 1: - mrt_prepare(args) + cmd_prepare(args) if start_pos < 2: - mrt_calibrate(args) + cmd_calibrate(args) if start_pos < 3: - mrt_quantize(args) + cmd_quantize(args) if args.run_evaluate: - mrt_evaluate(args) + cmd_evaluate(args) if args.run_compile: - mrt_compile(args) + cmd_compile(args) if __name__ == "__main__": logger = logging.getLogger("main") diff --git a/main2.py b/main2.py new file mode 100644 index 00000000..a9c21cde --- /dev/null +++ b/main2.py @@ -0,0 +1,20 @@ +import sys +from os import path + +from mrt.defaults import get_cfg_defaults +from mrt.conf import YAML_ROOT +from mrt import mrt_entry as mentry + +if __name__ == "__main__": + assert len(sys.argv) == 2, len(sys.argv) + model_name = sys.argv[1] + yaml_file = path.join(YAML_ROOT, model_name+".yaml") + cfg = get_cfg_defaults() + cfg.merge_from_file(yaml_file) + cfg.freeze() + + + # if cfg.SYSTEM.NUM_GPUS > 0: + # my_project.setup_multi_gpu_support() + + # model = my_project.create_model(cfg) diff --git a/python/mrt/conf.py b/python/mrt/conf.py index 49089ed7..e552e731 
100644 --- a/python/mrt/conf.py +++ b/python/mrt/conf.py @@ -9,3 +9,6 @@ if not os.path.exists(MRT_DATASET_ROOT): os.makedirs(MRT_DATASET_ROOT) + +YAML_ROOT = os.path.expanduser("~/mrt_yaml_root") +os.makedirs(YAML_ROOT, exist_ok=True) diff --git a/python/mrt/defaults.py b/python/mrt/defaults.py new file mode 100644 index 00000000..6f6e3edc --- /dev/null +++ b/python/mrt/defaults.py @@ -0,0 +1,54 @@ +from yacs.config import CfgNode as CN +from . import conf +from mrt import mrt_entry as mentry + +MRT_CFG = CN() + +MRT_CFG.COMMON = CN() +MRT_CFG.COMMON.MODEL_DIR = conf.MRT_MODEL_ROOT +MRT_CFG.COMMON.MODEL_NAME = conf.MRT_MODEL_ROOT +MRT_CFG.COMMON.VERBOSITY = "debug" + +MRT_CFG.PREPARE = CN() +MRT_CFG.PREPARE.DEVICE_TYPE = "cpu" +MRT_CFG.PREPARE.DEVICE_IDS = [0] +MRT_CFG.PREPARE.INPUT_SHAPE = [-1, 3, 224, 224] +MRT_CFG.PREPARE.SPLIT_KEYS = "" + +MRT_CFG.CALIBRATE = CN() +MRT_CFG.CALIBRATE.BATCH = mentry.default_batch +MRT_CFG.CALIBRATE.NUM = 1 +MRT_CFG.CALIBRATE.LAMBD = None +MRT_CFG.CALIBRATE.DATASET_NAME = "imagenet" +MRT_CFG.CALIBRATE.DATASET_DIR = conf.MRT_DATASET_ROOT +MRT_CFG.CALIBRATE.DEVICE_TYPE = "cpu" +MRT_CFG.CALIBRATE.DEVICE_IDS = [0] + +MRT_CFG.QUANTIZE = CN() +MRT_CFG.QUANTIZE.RESTORE_NAMES = [] +MRT_CFG.QUANTIZE.INPUT_PRECISION = None +MRT_CFG.QUANTIZE.OUTPUT_PRECISION = None +MRT_CFG.QUANTIZE.DEVICE_TYPE = "cpu" +MRT_CFG.QUANTIZE.DEVICE_IDS = [0] +MRT_CFG.QUANTIZE.SOFTMAX_LAMBD = None +MRT_CFG.QUANTIZE.SHIFT_BITS = None +MRT_CFG.QUANTIZE.THRESHOLDS = None +MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = None +MRT_CFG.QUANTIZE.OSCALE_MAPS = None + +MRT_CFG.EVALUATE = CN() +MRT_CFG.EVALUATE.BATCH = mentry.default_batch +MRT_CFG.EVALUATE.DEVICE_TYPE = "cpu" +MRT_CFG.EVALUATE.DEVICE_IDS = [0] +MRT_CFG.EVALUATE.ITER_NUM = 10 + +MRT_CFG.COMPILE = CN() +MRT_CFG.COMPILE.DUMP_DIR = "/data1/tmp" +MRT_CFG.COMPILE.DEVICE_TYPE = "cpu" +MRT_CFG.COMPILE.DEVICE_IDS = [0] + +def get_cfg_defaults(): + """Get a yacs CfgNode object with default values for mrt.""" + # Return a clone 
so that the defaults will not be altered + # This is for the "local variable" use pattern + return MRT_CFG.clone() From 7230ca8e24a907fc3831923f423f535cc38248b9 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 27 Sep 2021 11:51:56 +0800 Subject: [PATCH 032/120] upt --- main2.py | 76 ++++++++++++++++++++++++++++++++++++++---- python/mrt/defaults.py | 28 ++++++++++------ 2 files changed, 86 insertions(+), 18 deletions(-) diff --git a/main2.py b/main2.py index a9c21cde..9a0e1943 100644 --- a/main2.py +++ b/main2.py @@ -5,16 +5,78 @@ from mrt.conf import YAML_ROOT from mrt import mrt_entry as mentry +def yaml_prepare(CM, CN): + mentry.mrt_prepare( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DEVICE_TYPE, + CN.DEVICE_IDS, CN.INPUT_SHAPE, CN.SPLIT_KEYS) + +def yaml_calibrate(CM, CN): + mentry.mrt_calibrate( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DATASET_NAME, + CN.DATASET_DIR, CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.NUM_CALIBRATE, + CN.LAMBD, batch=CN. BATCH) + +def yaml_quantize(CM, CN): + mentry.mrt_quantize( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.RESTORE_NAMES, + CN.INPUT_PRECISION, CN.OUTPUT_PRECISION, CN.DEVICE_TYPE, CN.DEVICE_IDS, + CN.SOFTMAX_LAMBD, CN.SHIFT_BITS, CN.THRESHOLDS, CN.ATTRIBUTE_DEPS, + CN.OSCALE_MAPS) + +def yaml_evaluate(CM, CN): + mentry.mrt_evaluate( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DEVICE_TYPE, CN.DEVICE_IDS, + CN.ITER_NUM, batch=CN.BATCH) + +def yaml_compile(CM, CN): + mentry.mrt_compile( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DUMP_DIR, + device_type=CN.DEVICE_TYPE, device_ids=CN.DEVICE_IDS, batch=CN.BATCH) + +def yaml_main(cfg): + for prefix in ["BATCH", "DEVICE_TYPE", "DEVICE_IDS"]: + for subcfg in [ + cfg.PREPARE, cfg.CALIBRATE, cfg.QUANTIZE, cfg.EVALUATE, + cfg.COMPILE]: + for attr in dir(subcfg): + if attr == prefix and getattr(subcfg, prefix) is None: + setattr(subcfg, prefix, getattr(cfg.COMMON, prefix) + start_pos = 0 + start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} + if 
cfg.COMMON.START_AFTER in start_pos_map: + start_pos = start_pos_map[cfg.COMMON.START_AFTER] + if start_pos < 1: + yaml_prepare(cfg.COMMON, cfg.PREPARE) + if start_pos < 2: + yaml_calibrate(cfg.COMMON, cfg.CALIBRATE) + if start_pos < 3: + yaml_quantize(cfg.COMMON, cfg.QUANTIZE) + if cfg.COMMON.RUN_EVALUATE: + yaml_evaluate(cfg.COMMON, cfg.EVALUATE) + if cfg.COMMON.RUN_COMPILE: + yaml_compile(cfg.COMMON, cfg.COMPILE) + if __name__ == "__main__": - assert len(sys.argv) == 2, len(sys.argv) + assert len(sys.argv) >= 2, len(sys.argv) model_name = sys.argv[1] yaml_file = path.join(YAML_ROOT, model_name+".yaml") cfg = get_cfg_defaults() cfg.merge_from_file(yaml_file) cfg.freeze() - - - # if cfg.SYSTEM.NUM_GPUS > 0: - # my_project.setup_multi_gpu_support() - - # model = my_project.create_model(cfg) + if len(sys.argv) == 3: + entry_name = sys.argv[2] + yaml_func_name = "mrt_{}".format(entry_name) + if not hasattr(mentry, yaml_func_name): + raise RuntimeError( + "invalid entry_name: {}, yaml_func_name: {}".format( + entry_name, yaml_func_name)) + yaml_func = getattr(mentry, yaml_func_name) + cfg_node_name = entry_name.upper() + if not hasattr(cfg, cfg_node_name): + raise RuntimeError( + "invalid entry_name: {}, cfg_node_name: {}".format( + entry_name, cfg_node_name)) + cfg_node = getattr(cfg, cfg_node_name) + yaml_func(cfg.COMMON, cfg_node) + else: + yaml_main(cfg) diff --git a/python/mrt/defaults.py b/python/mrt/defaults.py index 6f6e3edc..84801727 100644 --- a/python/mrt/defaults.py +++ b/python/mrt/defaults.py @@ -8,28 +8,34 @@ MRT_CFG.COMMON.MODEL_DIR = conf.MRT_MODEL_ROOT MRT_CFG.COMMON.MODEL_NAME = conf.MRT_MODEL_ROOT MRT_CFG.COMMON.VERBOSITY = "debug" +MRT_CFG.COMMON.START_AFTER = None +MRT_CFG.COMMON.DEVICE_TYPE = mentry.default_device_type +MRT_CFG.COMMON.DEVICE_IDS = mentry.default_device_ids +MRT_CFG.COMMON.BATCH = mentry.default_batch +MRT_CFG.COMMON.RUN_EVALUATE = True +MRT_CFG.COMMON.RUN_COMPILE = True MRT_CFG.PREPARE = CN() -MRT_CFG.PREPARE.DEVICE_TYPE = "cpu" 
-MRT_CFG.PREPARE.DEVICE_IDS = [0] +MRT_CFG.PREPARE.DEVICE_TYPE = mentry.default_device_type +MRT_CFG.PREPARE.DEVICE_IDS = mentry.default_device_ids MRT_CFG.PREPARE.INPUT_SHAPE = [-1, 3, 224, 224] MRT_CFG.PREPARE.SPLIT_KEYS = "" MRT_CFG.CALIBRATE = CN() MRT_CFG.CALIBRATE.BATCH = mentry.default_batch -MRT_CFG.CALIBRATE.NUM = 1 +MRT_CFG.CALIBRATE.NUM_CALIB = 1 MRT_CFG.CALIBRATE.LAMBD = None MRT_CFG.CALIBRATE.DATASET_NAME = "imagenet" MRT_CFG.CALIBRATE.DATASET_DIR = conf.MRT_DATASET_ROOT -MRT_CFG.CALIBRATE.DEVICE_TYPE = "cpu" -MRT_CFG.CALIBRATE.DEVICE_IDS = [0] +MRT_CFG.CALIBRATE.DEVICE_TYPE = mentry.default_device_type +MRT_CFG.CALIBRATE.DEVICE_IDS = mentry.default_device_ids MRT_CFG.QUANTIZE = CN() MRT_CFG.QUANTIZE.RESTORE_NAMES = [] MRT_CFG.QUANTIZE.INPUT_PRECISION = None MRT_CFG.QUANTIZE.OUTPUT_PRECISION = None -MRT_CFG.QUANTIZE.DEVICE_TYPE = "cpu" -MRT_CFG.QUANTIZE.DEVICE_IDS = [0] +MRT_CFG.QUANTIZE.DEVICE_TYPE = mentry.default_device_type +MRT_CFG.QUANTIZE.DEVICE_IDS = mentry.default_device_ids MRT_CFG.QUANTIZE.SOFTMAX_LAMBD = None MRT_CFG.QUANTIZE.SHIFT_BITS = None MRT_CFG.QUANTIZE.THRESHOLDS = None @@ -38,14 +44,14 @@ MRT_CFG.EVALUATE = CN() MRT_CFG.EVALUATE.BATCH = mentry.default_batch -MRT_CFG.EVALUATE.DEVICE_TYPE = "cpu" -MRT_CFG.EVALUATE.DEVICE_IDS = [0] +MRT_CFG.EVALUATE.DEVICE_TYPE = mentry.default_device_type +MRT_CFG.EVALUATE.DEVICE_IDS = mentry.default_device_ids MRT_CFG.EVALUATE.ITER_NUM = 10 MRT_CFG.COMPILE = CN() MRT_CFG.COMPILE.DUMP_DIR = "/data1/tmp" -MRT_CFG.COMPILE.DEVICE_TYPE = "cpu" -MRT_CFG.COMPILE.DEVICE_IDS = [0] +MRT_CFG.COMPILE.DEVICE_TYPE = mentry.default_device_type +MRT_CFG.COMPILE.DEVICE_IDS = mentry.default_device_ids def get_cfg_defaults(): """Get a yacs CfgNode object with default values for mrt.""" From b1e6a0b7c770cf2081f72af3762907a477cae888 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 27 Sep 2021 15:29:24 +0800 Subject: [PATCH 033/120] upt --- main2.py | 19 ++++++++++--------- python/mrt/{defaults.py => yaml_defaults.py} | 1 + 
2 files changed, 11 insertions(+), 9 deletions(-) rename python/mrt/{defaults.py => yaml_defaults.py} (98%) diff --git a/main2.py b/main2.py index 9a0e1943..aac1bd05 100644 --- a/main2.py +++ b/main2.py @@ -1,10 +1,12 @@ import sys from os import path -from mrt.defaults import get_cfg_defaults +from mrt.yaml_defaults import get_cfg_defaults from mrt.conf import YAML_ROOT from mrt import mrt_entry as mentry +thismodule = sys.modules[__name__] + def yaml_prepare(CM, CN): mentry.mrt_prepare( CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DEVICE_TYPE, @@ -13,7 +15,7 @@ def yaml_prepare(CM, CN): def yaml_calibrate(CM, CN): mentry.mrt_calibrate( CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DATASET_NAME, - CN.DATASET_DIR, CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.NUM_CALIBRATE, + CN.DATASET_DIR, CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.NUM_CALIB, CN.LAMBD, batch=CN. BATCH) def yaml_quantize(CM, CN): @@ -35,12 +37,11 @@ def yaml_compile(CM, CN): def yaml_main(cfg): for prefix in ["BATCH", "DEVICE_TYPE", "DEVICE_IDS"]: - for subcfg in [ - cfg.PREPARE, cfg.CALIBRATE, cfg.QUANTIZE, cfg.EVALUATE, - cfg.COMPILE]: + for subcfg in [cfg.PREPARE, cfg.CALIBRATE, cfg.QUANTIZE, + cfg.EVALUATE, cfg.COMPILE]: for attr in dir(subcfg): if attr == prefix and getattr(subcfg, prefix) is None: - setattr(subcfg, prefix, getattr(cfg.COMMON, prefix) + setattr(subcfg, prefix, getattr(cfg.COMMON, prefix)) start_pos = 0 start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} if cfg.COMMON.START_AFTER in start_pos_map: @@ -65,12 +66,12 @@ def yaml_main(cfg): cfg.freeze() if len(sys.argv) == 3: entry_name = sys.argv[2] - yaml_func_name = "mrt_{}".format(entry_name) - if not hasattr(mentry, yaml_func_name): + yaml_func_name = "yaml_{}".format(entry_name) + if not hasattr(thismodule, yaml_func_name): raise RuntimeError( "invalid entry_name: {}, yaml_func_name: {}".format( entry_name, yaml_func_name)) - yaml_func = getattr(mentry, yaml_func_name) + yaml_func = getattr(thismodule, yaml_func_name) cfg_node_name = 
entry_name.upper() if not hasattr(cfg, cfg_node_name): raise RuntimeError( diff --git a/python/mrt/defaults.py b/python/mrt/yaml_defaults.py similarity index 98% rename from python/mrt/defaults.py rename to python/mrt/yaml_defaults.py index 84801727..b9f694b5 100644 --- a/python/mrt/defaults.py +++ b/python/mrt/yaml_defaults.py @@ -49,6 +49,7 @@ MRT_CFG.EVALUATE.ITER_NUM = 10 MRT_CFG.COMPILE = CN() +MRT_CFG.COMPILE.BATCH = 1 MRT_CFG.COMPILE.DUMP_DIR = "/data1/tmp" MRT_CFG.COMPILE.DEVICE_TYPE = mentry.default_device_type MRT_CFG.COMPILE.DEVICE_IDS = mentry.default_device_ids From 230bd9052af643f0a8014db8670a937d0c67584b Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 27 Sep 2021 18:59:15 +0800 Subject: [PATCH 034/120] upt --- main2.py | 20 ++++++++++++++++---- python/mrt/conf.py | 3 --- python/mrt/mrt_entry.py | 2 +- python/mrt/yaml_defaults.py | 2 +- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/main2.py b/main2.py index aac1bd05..3f3da7ea 100644 --- a/main2.py +++ b/main2.py @@ -2,7 +2,6 @@ from os import path from mrt.yaml_defaults import get_cfg_defaults -from mrt.conf import YAML_ROOT from mrt import mrt_entry as mentry thismodule = sys.modules[__name__] @@ -19,6 +18,14 @@ def yaml_calibrate(CM, CN): CN.LAMBD, batch=CN. 
BATCH) def yaml_quantize(CM, CN): + if CN.is_frozen(): + CN.defrost() + for attr in ["THRESHOLDS", "ATTRIBUTE_DEPS", "OSCALE_MAPS"]: + v = getattr(CN, attr) + if v is not None: + setattr(CN, attr, v[1:-1]) + if not CN.is_frozen(): + CN.freeze() mentry.mrt_quantize( CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.RESTORE_NAMES, CN.INPUT_PRECISION, CN.OUTPUT_PRECISION, CN.DEVICE_TYPE, CN.DEVICE_IDS, @@ -36,12 +43,16 @@ def yaml_compile(CM, CN): device_type=CN.DEVICE_TYPE, device_ids=CN.DEVICE_IDS, batch=CN.BATCH) def yaml_main(cfg): + if cfg.is_frozen(): + cfg.defrost() for prefix in ["BATCH", "DEVICE_TYPE", "DEVICE_IDS"]: for subcfg in [cfg.PREPARE, cfg.CALIBRATE, cfg.QUANTIZE, cfg.EVALUATE, cfg.COMPILE]: for attr in dir(subcfg): if attr == prefix and getattr(subcfg, prefix) is None: setattr(subcfg, prefix, getattr(cfg.COMMON, prefix)) + if not cfg.is_frozen(): + cfg.freeze() start_pos = 0 start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} if cfg.COMMON.START_AFTER in start_pos_map: @@ -58,9 +69,10 @@ def yaml_main(cfg): yaml_compile(cfg.COMMON, cfg.COMPILE) if __name__ == "__main__": - assert len(sys.argv) >= 2, len(sys.argv) - model_name = sys.argv[1] - yaml_file = path.join(YAML_ROOT, model_name+".yaml") + assert len(sys.argv) in [2,3], len(sys.argv) + yaml_file = sys.argv[1] + if yaml_file.startswith("~"): + yaml_file = path.expanduser(yaml_file) cfg = get_cfg_defaults() cfg.merge_from_file(yaml_file) cfg.freeze() diff --git a/python/mrt/conf.py b/python/mrt/conf.py index e552e731..49089ed7 100644 --- a/python/mrt/conf.py +++ b/python/mrt/conf.py @@ -9,6 +9,3 @@ if not os.path.exists(MRT_DATASET_ROOT): os.makedirs(MRT_DATASET_ROOT) - -YAML_ROOT = os.path.expanduser("~/mrt_yaml_root") -os.makedirs(YAML_ROOT, exist_ok=True) diff --git a/python/mrt/mrt_entry.py b/python/mrt/mrt_entry.py index 23ad04cf..2f729b4b 100644 --- a/python/mrt/mrt_entry.py +++ b/python/mrt/mrt_entry.py @@ -432,7 +432,7 @@ def mrt_compile( dataset = 
ds.DS_REG[conf_map["dataset_name"]](set_batch(input_shape, batch)) dump_data, _ = dataset.iter_func()() dump_data = sim.load_real_data( - dump_data.astype("float64"), "data", mrt.get_inputs_ext()) + dump_data.astype("float64"), "data", inputs_ext) model_root = path.join(dump_dir, model_name_tfm) np.save( path.join(model_root, "data.npy"), dump_data.astype("int8").asnumpy()) diff --git a/python/mrt/yaml_defaults.py b/python/mrt/yaml_defaults.py index b9f694b5..35f0b0d0 100644 --- a/python/mrt/yaml_defaults.py +++ b/python/mrt/yaml_defaults.py @@ -40,7 +40,7 @@ MRT_CFG.QUANTIZE.SHIFT_BITS = None MRT_CFG.QUANTIZE.THRESHOLDS = None MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = None -MRT_CFG.QUANTIZE.OSCALE_MAPS = None +MRT_CFG.QUANTIZE.OSCALE_MAPS = "" MRT_CFG.EVALUATE = CN() MRT_CFG.EVALUATE.BATCH = mentry.default_batch From ec8c1037bb5266b1cf91549f6fa3ed67a41eaeb9 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 27 Sep 2021 19:01:42 +0800 Subject: [PATCH 035/120] add mrt user doc --- docs/mrt/mrt_user_guide.md | 202 +++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 docs/mrt/mrt_user_guide.md diff --git a/docs/mrt/mrt_user_guide.md b/docs/mrt/mrt_user_guide.md new file mode 100644 index 00000000..75dcd4b0 --- /dev/null +++ b/docs/mrt/mrt_user_guide.md @@ -0,0 +1,202 @@ +[TOC] + +# YAML Configuration Examples + +## alexnet + +```yaml +COMMON: + MODEL_NAME: alexnet + VERBOSITY: debug + START_AFTER: calibrate + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [2] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [2] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10 + +``` + +## ssd_512_voc_resnet50_v1_voc + +```yaml +COMMON: + MODEL_NAME: ssd_512_resnet50_v1_voc + VERBOSITY: info + RUN_EVALUATE: True +PREPARE: + INPUT_SHAPE: [-1,3,512,512] + SPLIT_KEYS: "[ssd0_multiperclassdecoder0_zeros_like0, 
ssd0_multiperclassdecoder0_slice_axis0, ssd0_normalizedboxcenterdecoder0_concat0]" +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: voc + DEVICE_TYPE: gpu + DEVICE_IDS: [2] +QUANTIZE: + OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [2] + THRESHOLDS: "\"{\"data\":2.64,\"ssd0_multiperclassdecoder0_slice_axis0\":1}\"" + ATTRIBUTE_DEPS: "\"{\"_greater_scalar\": {\"scalar\": \"ssd0_multiperclassdecoder0_slice_axis0\"}, \"_contrib_box_nms\": {\"valid_thresh\": \"ssd0_multiperclassdecoder0_slice_axis0\"}}\"" + OSCALE_MAPS: "\"{\"ssd0_slice_axis41\": \"ssd0_multiperclassdecoder0_zeros_like0\", \"ssd0_slice_axis42\": \"ssd0_multiperclassdecoder0_slice_axis0\", \"ssd0_slice_axis43\": \"ssd0_normalizedboxcenterdecoder0_concat0\"}\"" +EVALUATE: + BATCH: 15 + DEVICE_TYPE: gpu + DEVICE_IDS: "[0,1,2]" + ITER_NUM: 10 + +``` + +# CMD Configuration Examples + +## alexnet + +### cmd_prepare + +```bash +python main.py prepare alexnet +``` + +### cmd_calibrate + +```bash +python main.py calibrate alexnet \ +--batch-calibrate 16 \ +--lambd 16 \ +--calibrate-num 1 \ +--dataset-name imagenet \ +--device-type-calibrate gpu \ +--device-ids-calibrate 2 +``` + +### cmd_quantize + +```bash +python main.py quantize alexnet \ +--input-precision 8 \ +--output-precision 8 \ +--device-type-quantize gpu \ +--device-ids-quantize 2 +``` + +### cmd_evaluate + +```bash +python main.py evaluate alexnet \ +--batch-evaluate 160 \ +--device-type-evaluate gpu \ +--device-ids-evaluate 0 \ +--iter-num 10 +``` + +### cmd_compile + +```bash +python main.py compile alexnet +``` + +### cmd_main + +```bash +python main.py cmd alexnet \ +--batch-calibrate 16 \ +--lambd 16 \ +--calibrate-num 1 \ +--dataset-name imagenet \ +--device-type-calibrate gpu \ +--device-ids-calibrate 2 \ +--input-precision 8 \ +--output-precision 8 \ +--device-type-quantize gpu \ +--device-ids-quantize 2 \ +--batch-evaluate 160 \ +--device-type-evaluate gpu \ +--device-ids-evaluate 0 \ +--iter-num 10 \ +--run-evaluate \ 
+--run-compile +``` + +## ssd_512_voc_resnet50_v1_voc + +### cmd_prepare + +```bash +python main.py prepare ssd_512_resnet50_v1_voc \ +--verbosity info \ +--split-keys \ +ssd0_multiperclassdecoder0_zeros_like0 \ +ssd0_multiperclassdecoder0_slice_axis0 \ +ssd0_normalizedboxcenterdecoder0_concat0 \ +--input-shape -1 3 512 512 +``` + +### cmd_calibrate + +```bash +python main.py calibrate ssd_512_resnet50_v1_voc \ +--verbosity info \ +--dataset-name voc \ +--device-type-calibrate gpu \ +--device-ids-calibrate 2 +``` + +### cmd_quantize + +```bash +python main.py quantize ssd_512_resnet50_v1_voc \ +--verbosity info \ +--thresholds \ +"{ \ + \"data\": 2.64, \ + \"ssd0_multiperclassdecoder0_slice_axis0\": 1 \ +}" \ +--output-precision 30 \ +--attribute-deps \ +"{ \ + \"_greater_scalar\": { \ + \"scalar\": \"ssd0_multiperclassdecoder0_slice_axis0\" \ + }, \ + \"_contrib_box_nms\": { \ + \"valid_thresh\": \"ssd0_multiperclassdecoder0_slice_axis0\" \ + } \ +}" \ +--oscale-maps \ +"{ \ + \"ssd0_slice_axis41\": \"ssd0_multiperclassdecoder0_zeros_like0\", \ + \"ssd0_slice_axis42\": \"ssd0_multiperclassdecoder0_slice_axis0\", \ + \"ssd0_slice_axis43\": \"ssd0_normalizedboxcenterdecoder0_concat0\" \ +}" +``` + +### cmd_evaluate + +```bash +python main.py evaluate ssd_512_resnet50_v1_voc \ +--verbosity info \ +--batch-evaluate 14 \ +--device-type-evaluate gpu \ +--device-ids-evaluate 0 1 2 3 \ +--iter-num 100 +``` + +### cmd_compile + +```bash +python main.py compile ssd_512_resnet50_v1_voc +``` + From 37478c3c8bfd8e38669951c7219c5102b6c35948 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 29 Sep 2021 14:54:53 +0800 Subject: [PATCH 036/120] upt doc and todo --- docs/mrt/mrt_user_guide.md | 11 +++++++++++ main2.py | 3 +++ python/mrt/mrt_entry.py | 5 +++++ 3 files changed, 19 insertions(+) diff --git a/docs/mrt/mrt_user_guide.md b/docs/mrt/mrt_user_guide.md index 75dcd4b0..93041170 100644 --- a/docs/mrt/mrt_user_guide.md +++ b/docs/mrt/mrt_user_guide.md @@ -1,5 +1,16 @@ [TOC] +# 
Introduction + +evoke passes by `mrt_entry.py` + +1. for debugging purpose: `mrt_prepare`, `mrt_calibrate`, `mrt_quantize` +2. for evaluation purpose: `mrt_evaluate` +3. for compilation purpose: `mrt_compile` +4. for the whole process, the configuration module will combine the afore mentioned processes. + +Currently supported configuration format by MRT: `yaml`, `argparse`, `ini`(not integrated into mrt_entry.py yet) + # YAML Configuration Examples ## alexnet diff --git a/main2.py b/main2.py index 3f3da7ea..0e5d696d 100644 --- a/main2.py +++ b/main2.py @@ -6,6 +6,9 @@ thismodule = sys.modules[__name__] +#TODO yaml merge argparse, research: searching, stk +#TODO define in usage loc + def yaml_prepare(CM, CN): mentry.mrt_prepare( CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DEVICE_TYPE, diff --git a/python/mrt/mrt_entry.py b/python/mrt/mrt_entry.py index 2f729b4b..78c5205e 100644 --- a/python/mrt/mrt_entry.py +++ b/python/mrt/mrt_entry.py @@ -14,6 +14,11 @@ from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim +#TODO v3 folder +#TODO rename: v3 pass, prepare.py,.. 
+#TODO join default conf, process +#TODO main jungle server # python server, flask + default_device_type = "cpu" default_device_ids = [0] default_batch = 16 From 81327bd23385ac3b760e15e54357b443822c0be9 Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 9 Oct 2021 16:11:32 +0800 Subject: [PATCH 037/120] upt --- main.py | 2 +- main2.py | 4 ++-- python/mrt/{ => V3}/mrt_entry.py | 1 - python/mrt/{ => V3}/yaml_defaults.py | 4 ++-- 4 files changed, 5 insertions(+), 6 deletions(-) rename python/mrt/{ => V3}/mrt_entry.py (99%) rename python/mrt/{ => V3}/yaml_defaults.py (97%) diff --git a/main.py b/main.py index f8b9c494..19b3bfea 100644 --- a/main.py +++ b/main.py @@ -18,7 +18,7 @@ from mrt import dataset as ds from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim -import mrt.mrt_entry as mentry +import mrt.V3.mrt_entry as mentry # set up dependencies __ROOT__ = path.dirname(path.realpath(__file__)) diff --git a/main2.py b/main2.py index 0e5d696d..6ee67eea 100644 --- a/main2.py +++ b/main2.py @@ -1,8 +1,8 @@ import sys from os import path -from mrt.yaml_defaults import get_cfg_defaults -from mrt import mrt_entry as mentry +from mrt.V3.yaml_defaults import get_cfg_defaults +import mrt.V3.mrt_entry as mentry thismodule = sys.modules[__name__] diff --git a/python/mrt/mrt_entry.py b/python/mrt/V3/mrt_entry.py similarity index 99% rename from python/mrt/mrt_entry.py rename to python/mrt/V3/mrt_entry.py index 78c5205e..3abeaa7d 100644 --- a/python/mrt/mrt_entry.py +++ b/python/mrt/V3/mrt_entry.py @@ -14,7 +14,6 @@ from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim -#TODO v3 folder #TODO rename: v3 pass, prepare.py,.. 
#TODO join default conf, process #TODO main jungle server # python server, flask diff --git a/python/mrt/yaml_defaults.py b/python/mrt/V3/yaml_defaults.py similarity index 97% rename from python/mrt/yaml_defaults.py rename to python/mrt/V3/yaml_defaults.py index 35f0b0d0..f77a7349 100644 --- a/python/mrt/yaml_defaults.py +++ b/python/mrt/V3/yaml_defaults.py @@ -1,6 +1,6 @@ from yacs.config import CfgNode as CN -from . import conf -from mrt import mrt_entry as mentry +from mrt import conf +import mrt.V3.mrt_entry as mentry MRT_CFG = CN() From f5f0a609f0ebf02b24d651f2022741cbfe286e9a Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 9 Oct 2021 16:16:02 +0800 Subject: [PATCH 038/120] upt --- docs/mrt/mrt_user_guide.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/docs/mrt/mrt_user_guide.md b/docs/mrt/mrt_user_guide.md index 93041170..9fcafff4 100644 --- a/docs/mrt/mrt_user_guide.md +++ b/docs/mrt/mrt_user_guide.md @@ -15,6 +15,8 @@ Currently supported configuration format by MRT: `yaml`, `argparse`, `ini`(not i ## alexnet +Config `alexnet.yaml` as follows. 
+ ```yaml COMMON: MODEL_NAME: alexnet @@ -41,6 +43,14 @@ EVALUATE: ``` +run command + +```python +python main2.py ~/mrt_yaml_root/alexnet.yaml +``` + + + ## ssd_512_voc_resnet50_v1_voc ```yaml @@ -72,10 +82,20 @@ EVALUATE: ``` +run command + +```python +python main2.py ~/mrt_yaml_root/ssd.yaml +``` + # CMD Configuration Examples ## alexnet +```python +python main.py cmd alexnet +``` + ### cmd_prepare ```bash From 7d3023884789b1bd8dc3362fd9608fc760f0f625 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 13 Oct 2021 16:43:24 +0800 Subject: [PATCH 039/120] upt --- main2.py | 59 ++--- python/mrt/V3/calibrate.py | 60 +++++ python/mrt/V3/evaluate.py | 105 ++++++++ python/mrt/V3/mrt_compile.py | 72 ++++++ python/mrt/V3/mrt_entry.py | 431 --------------------------------- python/mrt/V3/prepare.py | 58 +++++ python/mrt/V3/quantize.py | 151 ++++++++++++ python/mrt/V3/utils.py | 131 ++++++++++ python/mrt/V3/yaml_defaults.py | 61 ----- 9 files changed, 593 insertions(+), 535 deletions(-) create mode 100644 python/mrt/V3/calibrate.py create mode 100644 python/mrt/V3/evaluate.py create mode 100644 python/mrt/V3/mrt_compile.py create mode 100644 python/mrt/V3/prepare.py create mode 100644 python/mrt/V3/quantize.py create mode 100644 python/mrt/V3/utils.py delete mode 100644 python/mrt/V3/yaml_defaults.py diff --git a/main2.py b/main2.py index 6ee67eea..c93b0c6a 100644 --- a/main2.py +++ b/main2.py @@ -1,49 +1,20 @@ import sys from os import path -from mrt.V3.yaml_defaults import get_cfg_defaults +from mrt.V3.mrt_pass import get_cfg_defaults import mrt.V3.mrt_entry as mentry +from mrt.V3 import \ + preparation, calibration, quantization, evaluation, compilation thismodule = sys.modules[__name__] #TODO yaml merge argparse, research: searching, stk -#TODO define in usage loc -def yaml_prepare(CM, CN): - mentry.mrt_prepare( - CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DEVICE_TYPE, - CN.DEVICE_IDS, CN.INPUT_SHAPE, CN.SPLIT_KEYS) - -def yaml_calibrate(CM, CN): - mentry.mrt_calibrate( - 
CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DATASET_NAME, - CN.DATASET_DIR, CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.NUM_CALIB, - CN.LAMBD, batch=CN. BATCH) - -def yaml_quantize(CM, CN): - if CN.is_frozen(): - CN.defrost() - for attr in ["THRESHOLDS", "ATTRIBUTE_DEPS", "OSCALE_MAPS"]: - v = getattr(CN, attr) - if v is not None: - setattr(CN, attr, v[1:-1]) - if not CN.is_frozen(): - CN.freeze() - mentry.mrt_quantize( - CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.RESTORE_NAMES, - CN.INPUT_PRECISION, CN.OUTPUT_PRECISION, CN.DEVICE_TYPE, CN.DEVICE_IDS, - CN.SOFTMAX_LAMBD, CN.SHIFT_BITS, CN.THRESHOLDS, CN.ATTRIBUTE_DEPS, - CN.OSCALE_MAPS) - -def yaml_evaluate(CM, CN): - mentry.mrt_evaluate( - CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DEVICE_TYPE, CN.DEVICE_IDS, - CN.ITER_NUM, batch=CN.BATCH) - -def yaml_compile(CM, CN): - mentry.mrt_compile( - CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DUMP_DIR, - device_type=CN.DEVICE_TYPE, device_ids=CN.DEVICE_IDS, batch=CN.BATCH) +# def yaml_calibrate(CM, CN): + # mentry.mrt_calibrate( + # CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DATASET_NAME, + # CN.DATASET_DIR, CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.NUM_CALIB, + # CN.LAMBD, batch=CN. 
BATCH) def yaml_main(cfg): if cfg.is_frozen(): @@ -81,12 +52,14 @@ def yaml_main(cfg): cfg.freeze() if len(sys.argv) == 3: entry_name = sys.argv[2] - yaml_func_name = "yaml_{}".format(entry_name) - if not hasattr(thismodule, yaml_func_name): - raise RuntimeError( - "invalid entry_name: {}, yaml_func_name: {}".format( - entry_name, yaml_func_name)) - yaml_func = getattr(thismodule, yaml_func_name) + # if not hasattr(thismodule, entry_name): + # raise RuntimeError( + # "invalid entry_name: {}, entry_name: {}".format( + # entry_name, entry_name)) + subpass_module = getattr(thismodule, entry_name) + cls_name = entry_name.upper()[0] + entry_name[1:] + subpass_cls = getattr(subpass_module, cls_name) + yaml_impl_func = getattr(subpass_cls, "yaml_impl") cfg_node_name = entry_name.upper() if not hasattr(cfg, cfg_node_name): raise RuntimeError( diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py new file mode 100644 index 00000000..ebd7a0e0 --- /dev/null +++ b/python/mrt/V3/calibrate.py @@ -0,0 +1,60 @@ +from yacs.config import CfgNode as CN + +from mrt import conf +from mrt.V3.utils import ( + MRT_CFG, default_device_type, default_device_ids, default_batch, + get_model_prefix, get_logger, set_batch, load_fname, save_conf, + load_conf, check_file_existance, get_ctx) + +MRT_CFG.CALIBRATION = CN() +MRT_CFG.CALIBRATION.BATCH = default_batch, +MRT_CFG.CALIBRATION.NUM_CALIB = 1, +MRT_CFG.CALIBRATION.LAMBD = None, +MRT_CFG.CALIBRATION.DATASET_NAME = "imagenet", +MRT_CFG.CALIBRATION.DATASET_DIR = conf.MRT_DATASET_ROOT, +MRT_CFG.CALIBRATION.DEVICE_TYPE = default_device_type, +MRT_CFG.CALIBRATION.DEVICE_IDS = default_device_ids, + +def calibrate( + model_dir, model_name, verbosity, dataset_name, dataset_dir, + device_type, device_ids, calibrate_num, lambd, batch=default_batch): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_prep_file = model_prefix + ".prepare.conf" + check_file_existance(conf_prep_file, 
logger=logger) + conf_map = load_conf(conf_prep_file, logger=logger) + + # calibration + if conf_map.get("split_keys", "") == "": + sym_prep_file, prm_prep_file = load_fname( + model_prefix, suffix="prepare") + check_file_existance(sym_prep_file, prm_prep_file, logger=logger) + mrt = Model.load(sym_prep_file, prm_prep_file).get_mrt() + else: + sym_base_file, prm_base_file = load_fname( + model_prefix, suffix="base") + check_file_existance(sym_base_file, prm_base_file, logger=logger) + mrt = Model.load(sym_base_file, prm_base_file).get_mrt() + shp = set_batch(conf_map["input_shape"], batch) + dataset = ds.DS_REG[dataset_name](shp, root=dataset_dir) + data_iter_func = dataset.iter_func() + if len(device_ids) > 1: + raise RuntimeError( + "device ids should be an integer in calibration stage") + ctx = get_ctx(device_type, device_ids) + for i in range(calibrate_num): + data, _ = data_iter_func() + mrt.set_data(data) + mrt.calibrate(lambd=lambd, ctx=ctx) + mrt.save(model_name+".mrt.calibrate", datadir=model_dir) + conf_map["dataset_name"] = dataset_name + save_conf(model_prefix+".calibrate.conf", logger=logger, **conf_map) + logger.info("calibrate stage finished") + +def yaml_calibrate(): + CM = MRT_CFG.COMMON + CN = MRT_CFG.CALIBRATE + calibrate( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DATASET_NAME, + CN.DATASET_DIR, CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.NUM_CALIB, + CN.LAMBD, batch=CN.BATCH) diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py new file mode 100644 index 00000000..71884d8b --- /dev/null +++ b/python/mrt/V3/evaluate.py @@ -0,0 +1,105 @@ +from yacs.config import CfgNode as CN + +from mrt import conf +from mrt.V3.utils import ( + MRT_CFG, default_device_type, default_device_ids, default_batch, + get_model_prefix, get_logger, set_batch, load_fname, + load_conf, check_file_existance, get_ctx, get_batch_axis) + +MRT_CFG.EVALUATE = CN() +MRT_CFG.EVALUATE.BATCH = default_batch +MRT_CFG.EVALUATE.DEVICE_TYPE = default_device_type 
+MRT_CFG.EVALUATE.DEVICE_IDS = default_device_ids +MRT_CFG.EVALUATE.ITER_NUM = 10 + +def evaluate( + model_dir, model_name, verbosity, device_type, device_ids, iter_num, + batch=default_batch): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_quant_file = model_prefix + ".quantize.conf" + check_file_existance(conf_quant_file, logger=logger) + conf_map = load_conf(conf_quant_file, logger=logger) + ctx = get_ctx(device_type, device_ids) + if isinstance(ctx, mx.Context): + ctx = [ctx] + + # forward function for the orginal model + omodel = Model.load(*load_fname(model_prefix)) + graph = omodel.to_graph(ctx=ctx) + dataset_name = conf_map["dataset_name"] + input_shape = conf_map["input_shape"] + dataset = ds.DS_REG[dataset_name](set_batch(input_shape, batch)) + data_iter_func = dataset.iter_func() + metric = dataset.metrics() + baxis = get_batch_axis(input_shape) + olen = len(omodel.symbol) + + def forward(net, data, ctx): + """ Multiple xpu run support. 
+ """ + data = gluon.utils.split_and_load( + data, ctx_list=ctx, batch_axis=baxis, even_split=False) + outs = [net(d) for d in data] + if olen == 1: + outs = nd.concatenate(outs) + else: + outs = [nd.concatenate([outs[i][j] \ + for i in range(len(outs))]) for j in range(olen)] + return outs + + def evalfunc(data, label): + outs = forward(graph, data, ctx=ctx) + acc = dataset.validate(metric, outs, label) + return acc + + # forward function for the quantized model + num_xpus = len(ctx) + if batch % num_xpus: + raise RuntimeError("Batch must be divisible by the number of xpus") + split_batch = batch // num_xpus + if conf_map.get("split_keys", "") != "": + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) + check_file_existance( + sym_all_file, prm_all_file, ext_all_file, logger=logger) + qmodel = Model.load(sym_all_file, prm_all_file) + oscales, inputs_ext = sim.load_ext(ext_all_file) + else: + sym_quant_file, prm_quant_file, ext_quant_file = load_fname( + model_prefix, suffix="mrt.quantize", with_ext=True) + check_file_existance( + sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) + mrt = MRT.load(model_name+".mrt.quantize", datadir=model_dir) + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + qmodel = mrt.current_model + rqmodel = reduce_graph(qmodel, { + 'data': set_batch(input_shape, split_batch)}) + qgraph = rqmodel.to_graph(ctx=ctx) + qmetric = dataset.metrics() + + def quantize(data, label): + data = sim.load_real_data(data, 'data', inputs_ext) + outs = forward(qgraph, data, ctx) + outs = outs / oscales[0] if olen == 1 \ + else [(t / oscales[i]) for i, t in enumerate(outs)] + acc = dataset.validate(qmetric, outs, label) + return acc + + # evaluate + if iter_num > 0: + logger.info("Validating...") + utils.multi_validate( + evalfunc, data_iter_func, quantize, iter_num=iter_num, + logger=logging.getLogger('mrt.validate'), batch_size=batch) + logger.info("evaluatation 
stage finished") + else: + logger.info("evaluatation stage skipped") + +def yaml_evaluate(): + CM = MRT_CFG.COMMON + CN = MRT_CFG.EVALUATE + evaluate( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DEVICE_TYPE, CN.DEVICE_IDS, + CN.ITER_NUM, batch=CN.BATCH) diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py new file mode 100644 index 00000000..20150da6 --- /dev/null +++ b/python/mrt/V3/mrt_compile.py @@ -0,0 +1,72 @@ +from os import path +from yacs.config import CfgNode as CN + +from mrt import conf +from mrt.V3.utils import ( + MRT_CFG, default_device_type, default_device_ids, default_batch, + get_model_prefix, get_logger, set_batch, load_fname, + load_conf, check_file_existance) + +MRT_CFG.COMPILE = CN() +MRT_CFG.COMPILE.BATCH = 1 +MRT_CFG.COMPILE.DUMP_DIR = "/data1/tmp" +MRT_CFG.COMPILE.DEVICE_TYPE = default_device_type +MRT_CFG.COMPILE.DEVICE_IDS = default_device_ids + +def mrt_compile( + model_dir, model_name, verbosity, dump_dir, + batch=default_batch, device_type=default_device_type, + device_ids=default_device_ids): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_quant_file = model_prefix + ".quantize.conf" + check_file_existance(conf_quant_file, logger=logger) + conf_map = load_conf(conf_quant_file, logger=logger) + if len(device_ids) > 1: + raise RuntimeError( + "device ids should be an integer in compilation stage") + input_shape = conf_map["input_shape"] + + model_name_tfm = model_name + "_cvm" + device_ids_compile = device_ids[0] + if conf_map.get("split_keys", "") != "": + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) + check_file_existance( + sym_all_file, prm_all_file, ext_all_file, logger=logger) + qmodel = Model.load(sym_all_file, prm_all_file) + oscales, inputs_ext = sim.load_ext(ext_all_file) + else: + sym_quant_file, prm_quant_file, ext_quant_file = load_fname( + model_prefix, suffix="mrt.quantize", 
with_ext=True) + check_file_existance( + sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) + mrt = MRT.load(model_name+".mrt.quantize", datadir=model_dir) + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + qmodel = mrt.current_model + qmodel.to_cvm( + model_name_tfm, datadir=dump_dir, + input_shape=set_batch(input_shape, batch), target=device_type, + device_ids=device_ids_compile) + dataset = ds.DS_REG[conf_map["dataset_name"]](set_batch(input_shape, batch)) + dump_data, _ = dataset.iter_func()() + dump_data = sim.load_real_data( + dump_data.astype("float64"), "data", inputs_ext) + model_root = path.join(dump_dir, model_name_tfm) + np.save( + path.join(model_root, "data.npy"), dump_data.astype("int8").asnumpy()) + infos = { + "inputs_ext": inputs_ext, + "oscales": oscales, + "input_shapes": input_shape, + } + sim.save_ext(path.join(model_root, "ext"), infos) + logger.info("compilation stage finished") + +def yaml_compile(): + CM = MRT_CFG.COMMON + CN = MRT_CFG.COMPILE + mrt_compile( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DUMP_DIR, + device_type=CN.DEVICE_TYPE, device_ids=CN.DEVICE_IDS, batch=CN.BATCH) diff --git a/python/mrt/V3/mrt_entry.py b/python/mrt/V3/mrt_entry.py index 3abeaa7d..24e11a52 100644 --- a/python/mrt/V3/mrt_entry.py +++ b/python/mrt/V3/mrt_entry.py @@ -14,436 +14,5 @@ from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim -#TODO rename: v3 pass, prepare.py,.. 
-#TODO join default conf, process #TODO main jungle server # python server, flask -default_device_type = "cpu" -default_device_ids = [0] -default_batch = 16 -default_ctx = mx.cpu() - -def get_model_prefix(model_dir, model_name): - if model_dir.startswith("~"): - model_dir = path.expanduser(model_dir) - assert path.exists(model_dir), \ - "model_dir: {} does not exist".format(model_dir) - model_prefix = path.join(model_dir, model_name) - return model_prefix - -def get_logger(verbosity): - log.Init(log.name2level(verbosity.upper())) - logger = logging.getLogger("log.main") - return logger - -def set_batch(input_shape, batch): - """Get the input shape with respect to a specified batch value and an original input shape. - - Parameters - ---------- - input_shape : tuple - The input shape with batch axis unset. - batch : int - The batch value. - - Returns - ------- - ishape : tuple - The input shape with the value of batch axis equal to batch. - """ - return [batch if s == -1 else s for s in input_shape] - -def load_fname(prefix, suffix=None, with_ext=False): - """Get the model files at a given stage. - - Parameters - ---------- - prefix : string - The file path without and extension. - suffix : string - The file suffix with respect to a given stage of MRT. - with_ext: bool - Whether to include ext file. - - Returns - ------- - files : tuple of string - The loaded file names. 
- """ - suffix = "."+suffix if suffix is not None else "" - return utils.extend_fname(prefix+suffix, with_ext) - -def save_conf(fname, logger=logging, **conf_map): - try: - info_s = json.dumps(conf_map, indent=4) - except: - logger.error("Json seralize invalid with data: {}".format(conf_map)) - raise RuntimeError - with open(fname, "w") as f: - f.write(info_s) - -def load_conf(fname, logger=logging): - with open(fname, "r") as f: - try: - conf_map = json.load(f) - except: - logger.error("Json deserialize invalid, fname: {}".format(fname)) - return conf_map - -def check_file_existance(*fpaths, logger=logging): - for fpath in fpaths: - if not path.exists(fpath): - raise FileNotFoundError("fpath: {} does not exist".format(fpath)) - -def get_ctx(device_type, device_ids, dctx=default_ctx): - if device_type is None: - device_type = default_device_type - if device_ids is None: - device_ids = default_device_ids - contex = dctx - if device_type == "gpu": - contex = mx.gpu(device_ids[0]) if len(device_ids) == 1 \ - else [mx.gpu(i) for i in device_ids] - return contex - -def get_batch_axis(input_shape): - """Get the batch axis entry of an input shape. - - Parameters - ---------- - input_shape : tuple - The data shape related to dataset. - - Returns - ------- - axis : int - The batch axis entry of an input shape. 
- """ - idx = [i for i, s in enumerate(input_shape) if s == -1] - assert len(idx) == 1 - return idx[0] - -def mrt_prepare( - model_dir, model_name, verbosity, device_type, device_ids, input_shape, - split_keys): - model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) - conf_prep_file = model_prefix + ".prepare.conf" - conf_map = {} - - # preparation - sym_path, prm_path = load_fname(model_prefix) - if not path.exists(sym_path) or not path.exists(prm_path): - save_model( - model_name, data_dir=model_dir, - ctx=get_ctx(device_type, device_ids)) - model = Model.load(sym_path, prm_path) - model.prepare(set_batch(input_shape, 1)) - sym_prep_file, prm_prep_file = load_fname( - model_prefix, suffix="prepare") - model.save(sym_prep_file, prm_prep_file) - conf_map["input_shape"] = input_shape - save_conf(conf_prep_file, logger=logger, **conf_map) - logger.info("preparation stage finihed") - - # model splitting - if split_keys: - sym_top_file, prm_top_file = load_fname(model_prefix, suffix='top') - sym_base_file, prm_base_file = load_fname( - model_prefix, suffix="base") - base, top = model.split(split_keys) - top.save(sym_top_file, prm_top_file) - base.save(sym_base_file, prm_base_file) - conf_map["split_keys"] = split_keys - save_conf(conf_prep_file, logger=logger, **conf_map) - logger.info("model splitting finished") - else: - logger.info("model splitting skipped") - -def mrt_calibrate( - model_dir, model_name, verbosity, dataset_name, dataset_dir, - device_type, device_ids, calibrate_num, lambd, batch=default_batch): - model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) - conf_prep_file = model_prefix + ".prepare.conf" - check_file_existance(conf_prep_file, logger=logger) - conf_map = load_conf(conf_prep_file, logger=logger) - - # calibration - if conf_map.get("split_keys", "") == "": - sym_prep_file, prm_prep_file = load_fname( - model_prefix, suffix="prepare") - check_file_existance(sym_prep_file, 
prm_prep_file, logger=logger) - mrt = Model.load(sym_prep_file, prm_prep_file).get_mrt() - else: - sym_base_file, prm_base_file = load_fname( - model_prefix, suffix="base") - check_file_existance(sym_base_file, prm_base_file, logger=logger) - mrt = Model.load(sym_base_file, prm_base_file).get_mrt() - shp = set_batch(conf_map["input_shape"], batch) - dataset = ds.DS_REG[dataset_name](shp, root=dataset_dir) - data_iter_func = dataset.iter_func() - if len(device_ids) > 1: - raise RuntimeError( - "device ids should be an integer in calibration stage") - ctx = get_ctx(device_type, device_ids) - for i in range(calibrate_num): - data, _ = data_iter_func() - mrt.set_data(data) - mrt.calibrate(lambd=lambd, ctx=ctx) - mrt.save(model_name+".mrt.calibrate", datadir=model_dir) - conf_map["dataset_name"] = dataset_name - save_conf(model_prefix+".calibrate.conf", logger=logger, **conf_map) - logger.info("calibrate stage finished") - -def mrt_quantize( - model_dir, model_name, verbosity, restore_names, input_precision, - output_precision, device_type, device_ids, softmax_lambd, shift_bits, - thresholds, attribute_deps, oscale_maps): - model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) - conf_calib_file = model_prefix + ".calibrate.conf" - check_file_existance(conf_calib_file, logger=logger) - conf_map = load_conf(conf_calib_file, logger=logger) - sym_calib_file, prm_calib_file, ext_calib_file = load_fname( - model_prefix, suffix="mrt.calibrate", with_ext=True) - check_file_existance( - sym_calib_file, prm_calib_file, ext_calib_file, logger=logger) - mrt = MRT.load(model_name+".mrt.calibrate", datadir=model_dir) - conf_quant_file = model_prefix + ".quantize.conf" - - # restoration configuration - name_to_op = {} - for sym in sutils.topo_sort(mrt.current_model.symbol): - name, op_name = sym.attr('name'), sym.attr('op_name') - if op_name not in name_to_op: - name_to_op[op_name] = [] - name_to_op[op_name].append(name) - new_names = [] - for name in 
restore_names: - if name.startswith("_OP_") and name[4:] in name_to_op: - for new_name in name_to_op[name[4:]]: - new_names.append(new_name) - else: - new_names.append(name) - restore_names = set(new_names) - if '_ALL_EXCEPT_' in restore_names: - from tfm_base import _pass_manager - from tfm_ops import disabled_restore_ops - - quantize_ops = [op_name for op_name in _pass_manager["quantize"] \ - if op_name not in disabled_restore_ops] - restore_names_new = [] - for sym in sutils.topo_sort(mrt.current_model.symbol): - name, op_name = sym.attr('name'), sym.attr('op_name') - if op_name in quantize_ops and \ - name not in restore_names: - restore_names_new.append(name) - restore_names = set(restore_names_new) - for name in restore_names: - mrt.set_restore(name) - - # hyper parameters configuration - if input_precision is not None: - mrt.set_input_prec(input_precision) - if output_precision is not None: - mrt.set_output_prec(output_precision) - ctx = get_ctx(device_type, device_ids) - if softmax_lambd is not None: - mrt.set_softmax_lambd(softmax_lambd) - if shift_bits is not None: - mrt.set_shift_bits(shift_bits) - if thresholds is not None: - thresholds = json.loads(thresholds) - for name, threshold in thresholds.items(): - mrt.set_threshold(name, threshold) - - # quantization - mrt.quantize() - mrt.save(model_name+".mrt.quantize", datadir=model_dir) - input_shape = conf_map["input_shape"] - oscales = mrt.get_output_scales() - inputs_ext = mrt.get_inputs_ext() - infos = [oscales, inputs_ext] - ext_all_file = model_prefix + ".all.quantize.ext" - sim.save_ext(ext_all_file, *infos) - save_conf(conf_quant_file, logger=logger, **conf_map) - logger.info("quantization stage finished") - - # mergemodel - if conf_map.get("split_keys", "") != "": - qmodel = mrt.current_model - if attribute_deps is None: - raise RuntimeError("model merging, please specify --attribute_deps") - attribute_deps = json.loads(attribute_deps) - mrt_oscales = mrt.get_output_scales() - name_idx = 
{mrt.get_maps().get( - s.attr("name"), s.attr("name")): i \ - for i, s in enumerate(qmodel.symbol)} - def mergefunc(node, params, graph): - name, op_name = node.attr("name"), node.attr("op_name") - childs, attr = sutils.sym_iter( - node.get_children()), node.list_attr() - if op_name in attribute_deps: - attr_deps = attribute_deps[op_name] - for attr_name, v in attr_deps.items(): - val = sutils.get_attr(attr, attr_name, 0) - attr[attr_name] = int(val*mrt_oscales[name_idx[v]]) - node = sutils.get_mxnet_op(op_name)( - *childs, **attr, name=name) - return node - sym_top_file, prm_top_file = load_fname(model_prefix, suffix="top") - check_file_existance(sym_top_file, prm_top_file, logger=logger) - top = Model.load(sym_top_file, prm_top_file) - model_merger = Model.merger(qmodel, top, mrt.get_maps()) - qmodel = model_merger.merge(callback=mergefunc) - if oscale_maps is None: - raise RuntimeError("model merging, please specify --oscale_maps") - oscale_maps = json.loads(oscale_maps) - oscales = model_merger.get_output_scales(mrt_oscales, oscale_maps) - sym_all_file, prm_all_file, ext_all_file = load_fname( - model_prefix, suffix="all.quantize", with_ext=True) - qmodel.save(sym_all_file, prm_all_file) - infos = [oscales, inputs_ext] - sim.save_ext(ext_all_file, *infos) - save_conf(conf_quant_file, logger=logger, **conf_map) - logger.info("model merging finished") - else: - logger.info("model merging skipped") - -def mrt_evaluate( - model_dir, model_name, verbosity, device_type, device_ids, iter_num, - batch=default_batch): - model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) - conf_quant_file = model_prefix + ".quantize.conf" - check_file_existance(conf_quant_file, logger=logger) - conf_map = load_conf(conf_quant_file, logger=logger) - ctx = get_ctx(device_type, device_ids) - if isinstance(ctx, mx.Context): - ctx = [ctx] - - # forward function for the orginal model - omodel = Model.load(*load_fname(model_prefix)) - graph = 
omodel.to_graph(ctx=ctx) - dataset_name = conf_map["dataset_name"] - input_shape = conf_map["input_shape"] - dataset = ds.DS_REG[dataset_name](set_batch(input_shape, batch)) - data_iter_func = dataset.iter_func() - metric = dataset.metrics() - baxis = get_batch_axis(input_shape) - olen = len(omodel.symbol) - - def forward(net, data, ctx): - """ Multiple xpu run support. - """ - data = gluon.utils.split_and_load( - data, ctx_list=ctx, batch_axis=baxis, even_split=False) - outs = [net(d) for d in data] - if olen == 1: - outs = nd.concatenate(outs) - else: - outs = [nd.concatenate([outs[i][j] \ - for i in range(len(outs))]) for j in range(olen)] - return outs - - def evalfunc(data, label): - outs = forward(graph, data, ctx=ctx) - acc = dataset.validate(metric, outs, label) - return acc - - # forward function for the quantized model - num_xpus = len(ctx) - if batch % num_xpus: - raise RuntimeError("Batch must be divisible by the number of xpus") - split_batch = batch // num_xpus - if conf_map.get("split_keys", "") != "": - sym_all_file, prm_all_file, ext_all_file = load_fname( - model_prefix, suffix="all.quantize", with_ext=True) - check_file_existance( - sym_all_file, prm_all_file, ext_all_file, logger=logger) - qmodel = Model.load(sym_all_file, prm_all_file) - oscales, inputs_ext = sim.load_ext(ext_all_file) - else: - sym_quant_file, prm_quant_file, ext_quant_file = load_fname( - model_prefix, suffix="mrt.quantize", with_ext=True) - check_file_existance( - sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) - mrt = MRT.load(model_name+".mrt.quantize", datadir=model_dir) - oscales = mrt.get_output_scales() - inputs_ext = mrt.get_inputs_ext() - qmodel = mrt.current_model - rqmodel = reduce_graph(qmodel, { - 'data': set_batch(input_shape, split_batch)}) - qgraph = rqmodel.to_graph(ctx=ctx) - qmetric = dataset.metrics() - - def quantize(data, label): - data = sim.load_real_data(data, 'data', inputs_ext) - outs = forward(qgraph, data, ctx) - outs = outs / 
oscales[0] if olen == 1 \ - else [(t / oscales[i]) for i, t in enumerate(outs)] - acc = dataset.validate(qmetric, outs, label) - return acc - - # evaluate - if iter_num > 0: - logger.info("Validating...") - utils.multi_validate( - evalfunc, data_iter_func, quantize, iter_num=iter_num, - logger=logging.getLogger('mrt.validate'), batch_size=batch) - logger.info("evaluatation stage finished") - else: - logger.info("evaluatation stage skipped") - -def mrt_compile( - model_dir, model_name, verbosity, dump_dir, - batch=default_batch, device_type=default_device_type, - device_ids=default_device_ids): - model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) - conf_quant_file = model_prefix + ".quantize.conf" - check_file_existance(conf_quant_file, logger=logger) - conf_map = load_conf(conf_quant_file, logger=logger) - if len(device_ids) > 1: - raise RuntimeError( - "device ids should be an integer in compilation stage") - input_shape = conf_map["input_shape"] - - # compilation - model_name_tfm = model_name + "_cvm" - device_ids_compile = device_ids[0] - if conf_map.get("split_keys", "") != "": - sym_all_file, prm_all_file, ext_all_file = load_fname( - model_prefix, suffix="all.quantize", with_ext=True) - check_file_existance( - sym_all_file, prm_all_file, ext_all_file, logger=logger) - qmodel = Model.load(sym_all_file, prm_all_file) - oscales, inputs_ext = sim.load_ext(ext_all_file) - else: - sym_quant_file, prm_quant_file, ext_quant_file = load_fname( - model_prefix, suffix="mrt.quantize", with_ext=True) - check_file_existance( - sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) - mrt = MRT.load(model_name+".mrt.quantize", datadir=model_dir) - oscales = mrt.get_output_scales() - inputs_ext = mrt.get_inputs_ext() - qmodel = mrt.current_model - qmodel.to_cvm( - model_name_tfm, datadir=dump_dir, - input_shape=set_batch(input_shape, batch), target=device_type, - device_ids=device_ids_compile) - dataset = 
ds.DS_REG[conf_map["dataset_name"]](set_batch(input_shape, batch)) - dump_data, _ = dataset.iter_func()() - dump_data = sim.load_real_data( - dump_data.astype("float64"), "data", inputs_ext) - model_root = path.join(dump_dir, model_name_tfm) - np.save( - path.join(model_root, "data.npy"), dump_data.astype("int8").asnumpy()) - infos = { - "inputs_ext": inputs_ext, - "oscales": oscales, - "input_shapes": input_shape, - } - sim.save_ext(path.join(model_root, "ext"), infos) - logger.info("compilation stage finished") diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py new file mode 100644 index 00000000..1fbbe952 --- /dev/null +++ b/python/mrt/V3/prepare.py @@ -0,0 +1,58 @@ +from os import path +from yacs.config import CfgNode as CN + +from mrt.gluon_zoo import save_model +from mrt.transformer import Model +from mrt.V3.utils import ( + MRT_CFG, default_device_type, default_device_ids, + get_model_prefix, get_logger, set_batch, load_fname, save_conf, get_ctx) + +MRT_CFG.PREPARE= CN() +MRT_CFG.PREPARE.DEVICE_TYPE = default_device_type +MRT_CFG.PREPARE.DEVICE_IDS = default_device_ids +MRT_CFG.PREPARE.INPUT_SHAPE = [-1, 3, 224, 224] +MRT_CFG.PREPARE.SPLIT_KEYS = "" + +def prepare( + model_dir, model_name, verbosity, device_type, device_ids, input_shape, + split_keys): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_prep_file = model_prefix + ".prepare.conf" + conf_map = {} + + # preparation + sym_path, prm_path = load_fname(model_prefix) + if not path.exists(sym_path) or not path.exists(prm_path): + save_model( + model_name, data_dir=model_dir, + ctx=get_ctx(device_type, device_ids)) + model = Model.load(sym_path, prm_path) + model.prepare(set_batch(input_shape, 1)) + sym_prep_file, prm_prep_file = load_fname( + model_prefix, suffix="prepare") + model.save(sym_prep_file, prm_prep_file) + conf_map["input_shape"] = input_shape + save_conf(conf_prep_file, logger=logger, **conf_map) + logger.info("preparation stage 
finihed") + + # model splitting + if split_keys: + sym_top_file, prm_top_file = load_fname(model_prefix, suffix='top') + sym_base_file, prm_base_file = load_fname( + model_prefix, suffix="base") + base, top = model.split(split_keys) + top.save(sym_top_file, prm_top_file) + base.save(sym_base_file, prm_base_file) + conf_map["split_keys"] = split_keys + save_conf(conf_prep_file, logger=logger, **conf_map) + logger.info("model splitting finished") + else: + logger.info("model splitting skipped") + +def yaml_prepare(): + CM = MRT_CFG.COMMON + CN = MRT_CFG.PREPARE + prepare( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, + CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.INPUT_SHAPE, CN.SPLIT_KEYS) diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py new file mode 100644 index 00000000..64024704 --- /dev/null +++ b/python/mrt/V3/quantize.py @@ -0,0 +1,151 @@ +from yacs.config import CfgNode as CN + +from mrt import conf +from mrt.V3.utils import ( + MRT_CFG, default_device_type, default_device_ids, + get_model_prefix, get_logger, load_fname, save_conf, + load_conf, check_file_existance, get_ctx) + +MRT_CFG.QUANTIZE = CN() +MRT_CFG.QUANTIZE.RESTORE_NAMES = [] +MRT_CFG.QUANTIZE.INPUT_PRECISION = None +MRT_CFG.QUANTIZE.OUTPUT_PRECISION = None +MRT_CFG.QUANTIZE.DEVICE_TYPE = default_device_type +MRT_CFG.QUANTIZE.DEVICE_IDS = default_device_ids +MRT_CFG.QUANTIZE.SOFTMAX_LAMBD = None +MRT_CFG.QUANTIZE.SHIFT_BITS = None +MRT_CFG.QUANTIZE.THRESHOLDS = None +MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = None +MRT_CFG.QUANTIZE.OSCALE_MAPS = "" + +def quantize( + model_dir, model_name, verbosity, restore_names, input_precision, + output_precision, device_type, device_ids, softmax_lambd, shift_bits, + thresholds, attribute_deps, oscale_maps): + model_prefix = get_model_prefix(model_dir, model_name) + logger = get_logger(verbosity) + conf_calib_file = model_prefix + ".calibrate.conf" + check_file_existance(conf_calib_file, logger=logger) + conf_map = load_conf(conf_calib_file, logger=logger) + 
sym_calib_file, prm_calib_file, ext_calib_file = load_fname( + model_prefix, suffix="mrt.calibrate", with_ext=True) + check_file_existance( + sym_calib_file, prm_calib_file, ext_calib_file, logger=logger) + mrt = MRT.load(model_name+".mrt.calibrate", datadir=model_dir) + conf_quant_file = model_prefix + ".quantize.conf" + + # restoration configuration + name_to_op = {} + for sym in sutils.topo_sort(mrt.current_model.symbol): + name, op_name = sym.attr('name'), sym.attr('op_name') + if op_name not in name_to_op: + name_to_op[op_name] = [] + name_to_op[op_name].append(name) + new_names = [] + for name in restore_names: + if name.startswith("_OP_") and name[4:] in name_to_op: + for new_name in name_to_op[name[4:]]: + new_names.append(new_name) + else: + new_names.append(name) + restore_names = set(new_names) + if '_ALL_EXCEPT_' in restore_names: + from tfm_base import _pass_manager + from tfm_ops import disabled_restore_ops + + quantize_ops = [op_name for op_name in _pass_manager["quantize"] \ + if op_name not in disabled_restore_ops] + restore_names_new = [] + for sym in sutils.topo_sort(mrt.current_model.symbol): + name, op_name = sym.attr('name'), sym.attr('op_name') + if op_name in quantize_ops and \ + name not in restore_names: + restore_names_new.append(name) + restore_names = set(restore_names_new) + for name in restore_names: + mrt.set_restore(name) + + # hyper parameters configuration + if input_precision is not None: + mrt.set_input_prec(input_precision) + if output_precision is not None: + mrt.set_output_prec(output_precision) + ctx = get_ctx(device_type, device_ids) + if softmax_lambd is not None: + mrt.set_softmax_lambd(softmax_lambd) + if shift_bits is not None: + mrt.set_shift_bits(shift_bits) + if thresholds is not None: + thresholds = json.loads(thresholds) + for name, threshold in thresholds.items(): + mrt.set_threshold(name, threshold) + + # quantization + mrt.quantize() + mrt.save(model_name+".mrt.quantize", datadir=model_dir) + input_shape = 
conf_map["input_shape"] + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + infos = [oscales, inputs_ext] + ext_all_file = model_prefix + ".all.quantize.ext" + sim.save_ext(ext_all_file, *infos) + save_conf(conf_quant_file, logger=logger, **conf_map) + logger.info("quantization stage finished") + + # mergemodel + if conf_map.get("split_keys", "") != "": + qmodel = mrt.current_model + if attribute_deps is None: + raise RuntimeError("model merging, please specify --attribute_deps") + attribute_deps = json.loads(attribute_deps) + mrt_oscales = mrt.get_output_scales() + name_idx = {mrt.get_maps().get( + s.attr("name"), s.attr("name")): i \ + for i, s in enumerate(qmodel.symbol)} + def mergefunc(node, params, graph): + name, op_name = node.attr("name"), node.attr("op_name") + childs, attr = sutils.sym_iter( + node.get_children()), node.list_attr() + if op_name in attribute_deps: + attr_deps = attribute_deps[op_name] + for attr_name, v in attr_deps.items(): + val = sutils.get_attr(attr, attr_name, 0) + attr[attr_name] = int(val*mrt_oscales[name_idx[v]]) + node = sutils.get_mxnet_op(op_name)( + *childs, **attr, name=name) + return node + sym_top_file, prm_top_file = load_fname(model_prefix, suffix="top") + check_file_existance(sym_top_file, prm_top_file, logger=logger) + top = Model.load(sym_top_file, prm_top_file) + model_merger = Model.merger(qmodel, top, mrt.get_maps()) + qmodel = model_merger.merge(callback=mergefunc) + if oscale_maps is None: + raise RuntimeError("model merging, please specify --oscale_maps") + oscale_maps = json.loads(oscale_maps) + oscales = model_merger.get_output_scales(mrt_oscales, oscale_maps) + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) + qmodel.save(sym_all_file, prm_all_file) + infos = [oscales, inputs_ext] + sim.save_ext(ext_all_file, *infos) + save_conf(conf_quant_file, logger=logger, **conf_map) + logger.info("model merging finished") + else: + 
logger.info("model merging skipped") + +def yaml_quantize(): + CM = MRT_CFG.COMMON + CN = MRT_CFG.QUANTIZE + if CN.is_frozen(): + CN.defrost() + for attr in ["THRESHOLDS", "ATTRIBUTE_DEPS", "OSCALE_MAPS"]: + v = getattr(CN, attr) + if v is not None: + setattr(CN, attr, v[1:-1]) + if not CN.is_frozen(): + CN.freeze() + quantize( + CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.RESTORE_NAMES, + CN.INPUT_PRECISION, CN.OUTPUT_PRECISION, CN.DEVICE_TYPE, CN.DEVICE_IDS, + CN.SOFTMAX_LAMBD, CN.SHIFT_BITS, CN.THRESHOLDS, CN.ATTRIBUTE_DEPS, + CN.OSCALE_MAPS) diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py new file mode 100644 index 00000000..249d1fac --- /dev/null +++ b/python/mrt/V3/utils.py @@ -0,0 +1,131 @@ +from os import path +import logging +import json +from yacs.config import CfgNode as CN + +import mxnet as mx + +from mrt import conf + +default_device_type = "cpu" +default_device_ids = [0] +default_batch = 16 +default_ctx = mx.cpu() + +# TODO: jiazhen branch code design +MRT_CFG = CN() +MRT_CFG.COMMON = CN() +MRT_CFG.COMMON.MODEL_DIR = conf.MRT_MODEL_ROOT +MRT_CFG.COMMON.MODEL_NAME = conf.MRT_MODEL_ROOT +MRT_CFG.COMMON.VERBOSITY = "debug" +MRT_CFG.COMMON.START_AFTER = None +MRT_CFG.COMMON.DEVICE_TYPE = default_device_type +MRT_CFG.COMMON.DVICE_IDS = default_device_ids +MRT_CFG.COMMON.BATCH = default_batch +MRT_CFG.COMMON.RUN_EVALUATE = True +MRT_CFG.COMMON.RUN_COMPILE = True + +def get_model_prefix(model_dir, model_name): + if model_dir.startswith("~"): + model_dir = path.expanduser(model_dir) + assert path.exists(model_dir), \ + "model_dir: {} does not exist".format(model_dir) + model_prefix = path.join(model_dir, model_name) + return model_prefix + +def get_logger(verbosity): + log.Init(log.name2level(verbosity.upper())) + logger = logging.getLogger("log.main") + return logger + +def set_batch(input_shape, batch): + """Get the input shape with respect to a specified batch value and an original input shape. 
+ + Parameters + ---------- + input_shape : tuple + The input shape with batch axis unset. + batch : int + The batch value. + + Returns + ------- + ishape : tuple + The input shape with the value of batch axis equal to batch. + """ + return [batch if s == -1 else s for s in input_shape] + +def load_fname(prefix, suffix=None, with_ext=False): + """Get the model files at a given stage. + + Parameters + ---------- + prefix : string + The file path without and extension. + suffix : string + The file suffix with respect to a given stage of MRT. + with_ext: bool + Whether to include ext file. + + Returns + ------- + files : tuple of string + The loaded file names. + """ + suffix = "."+suffix if suffix is not None else "" + +def save_conf(fname, logger=logging, **conf_map): + try: + info_s = json.dumps(conf_map, indent=4) + except: + logger.error("Json seralize invalid with data: {}".format(conf_map)) + raise RuntimeError + with open(fname, "w") as f: + f.write(info_s) + +def load_conf(fname, logger=logging): + with open(fname, "r") as f: + try: + conf_map = json.load(f) + except: + logger.error("Json deserialize invalid, fname: {}".format(fname)) + return conf_map + +def check_file_existance(*fpaths, logger=logging): + for fpath in fpaths: + if not path.exists(fpath): + raise FileNotFoundError("fpath: {} does not exist".format(fpath)) + +def get_ctx(device_type, device_ids, dctx=default_ctx): + if device_type is None: + device_type = default_device_type + if device_ids is None: + device_ids = default_device_ids + contex = dctx + if device_type == "gpu": + contex = mx.gpu(device_ids[0]) if len(device_ids) == 1 \ + else [mx.gpu(i) for i in device_ids] + return contex + +def get_batch_axis(input_shape): + """Get the batch axis entry of an input shape. + + Parameters + ---------- + input_shape : tuple + The data shape related to dataset. + + Returns + ------- + axis : int + The batch axis entry of an input shape. 
+ """ + idx = [i for i, s in enumerate(input_shape) if s == -1] + assert len(idx) == 1 + return idx[0] + +def get_cfg_defaults(): + """Get a yacs CfgNode object with default values for mrt.""" + # Return a clone so that the defaults will not be altered + # This is for the "local variable" use pattern + return MRT_CFG.clone() diff --git a/python/mrt/V3/yaml_defaults.py b/python/mrt/V3/yaml_defaults.py deleted file mode 100644 index f77a7349..00000000 --- a/python/mrt/V3/yaml_defaults.py +++ /dev/null @@ -1,61 +0,0 @@ -from yacs.config import CfgNode as CN -from mrt import conf -import mrt.V3.mrt_entry as mentry - -MRT_CFG = CN() - -MRT_CFG.COMMON = CN() -MRT_CFG.COMMON.MODEL_DIR = conf.MRT_MODEL_ROOT -MRT_CFG.COMMON.MODEL_NAME = conf.MRT_MODEL_ROOT -MRT_CFG.COMMON.VERBOSITY = "debug" -MRT_CFG.COMMON.START_AFTER = None -MRT_CFG.COMMON.DEVICE_TYPE = mentry.default_device_type -MRT_CFG.COMMON.DEVICE_IDS = mentry.default_device_ids -MRT_CFG.COMMON.BATCH = mentry.default_batch -MRT_CFG.COMMON.RUN_EVALUATE = True -MRT_CFG.COMMON.RUN_COMPILE = True - -MRT_CFG.PREPARE = CN() -MRT_CFG.PREPARE.DEVICE_TYPE = mentry.default_device_type -MRT_CFG.PREPARE.DEVICE_IDS = mentry.default_device_ids -MRT_CFG.PREPARE.INPUT_SHAPE = [-1, 3, 224, 224] -MRT_CFG.PREPARE.SPLIT_KEYS = "" - -MRT_CFG.CALIBRATE = CN() -MRT_CFG.CALIBRATE.BATCH = mentry.default_batch -MRT_CFG.CALIBRATE.NUM_CALIB = 1 -MRT_CFG.CALIBRATE.LAMBD = None -MRT_CFG.CALIBRATE.DATASET_NAME = "imagenet" -MRT_CFG.CALIBRATE.DATASET_DIR = conf.MRT_DATASET_ROOT -MRT_CFG.CALIBRATE.DEVICE_TYPE = mentry.default_device_type -MRT_CFG.CALIBRATE.DEVICE_IDS = mentry.default_device_ids - -MRT_CFG.QUANTIZE = CN() -MRT_CFG.QUANTIZE.RESTORE_NAMES = [] -MRT_CFG.QUANTIZE.INPUT_PRECISION = None -MRT_CFG.QUANTIZE.OUTPUT_PRECISION = None -MRT_CFG.QUANTIZE.DEVICE_TYPE = mentry.default_device_type -MRT_CFG.QUANTIZE.DEVICE_IDS = mentry.default_device_ids -MRT_CFG.QUANTIZE.SOFTMAX_LAMBD = None -MRT_CFG.QUANTIZE.SHIFT_BITS = None 
-MRT_CFG.QUANTIZE.THRESHOLDS = None -MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = None -MRT_CFG.QUANTIZE.OSCALE_MAPS = "" - -MRT_CFG.EVALUATE = CN() -MRT_CFG.EVALUATE.BATCH = mentry.default_batch -MRT_CFG.EVALUATE.DEVICE_TYPE = mentry.default_device_type -MRT_CFG.EVALUATE.DEVICE_IDS = mentry.default_device_ids -MRT_CFG.EVALUATE.ITER_NUM = 10 - -MRT_CFG.COMPILE = CN() -MRT_CFG.COMPILE.BATCH = 1 -MRT_CFG.COMPILE.DUMP_DIR = "/data1/tmp" -MRT_CFG.COMPILE.DEVICE_TYPE = mentry.default_device_type -MRT_CFG.COMPILE.DEVICE_IDS = mentry.default_device_ids - -def get_cfg_defaults(): - """Get a yacs CfgNode object with default values for mrt.""" - # Return a clone so that the defaults will not be altered - # This is for the "local variable" use pattern - return MRT_CFG.clone() From d0e1e26a7a18f4a93b79691906daa7995914753f Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 13 Oct 2021 16:43:59 +0800 Subject: [PATCH 040/120] upt --- python/mrt/V3/mrt_entry.py | 1 - python/mrt/V3/utils.py | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/python/mrt/V3/mrt_entry.py b/python/mrt/V3/mrt_entry.py index 24e11a52..1e86153d 100644 --- a/python/mrt/V3/mrt_entry.py +++ b/python/mrt/V3/mrt_entry.py @@ -14,5 +14,4 @@ from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim -#TODO main jungle server # python server, flask diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 249d1fac..01b409f2 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -7,12 +7,13 @@ from mrt import conf +# TODO: jiazhen branch code design +#TODO main jungle server # python server, flask + default_device_type = "cpu" default_device_ids = [0] default_batch = 16 default_ctx = mx.cpu() - -# TODO: jiazhen branch code design MRT_CFG = CN() MRT_CFG.COMMON = CN() MRT_CFG.COMMON.MODEL_DIR = conf.MRT_MODEL_ROOT From 495bd0cabf4c052452f4a2edf5abebc6cd2791ae Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 13 Oct 2021 17:19:29 +0800 Subject: [PATCH 041/120] upt --- main2.py | 
32 +++++++++++--------------------- python/mrt/V3/calibrate.py | 16 ++++++++-------- python/mrt/V3/mrt_compile.py | 2 +- python/mrt/V3/utils.py | 1 + 4 files changed, 21 insertions(+), 30 deletions(-) diff --git a/main2.py b/main2.py index c93b0c6a..6b070e4f 100644 --- a/main2.py +++ b/main2.py @@ -1,21 +1,11 @@ import sys from os import path -from mrt.V3.mrt_pass import get_cfg_defaults -import mrt.V3.mrt_entry as mentry -from mrt.V3 import \ - preparation, calibration, quantization, evaluation, compilation +from mrt.V3.utils import get_cfg_defaults +from mrt.V3 import prepare, calibrate, quantize, evaluate, mrt_compile thismodule = sys.modules[__name__] -#TODO yaml merge argparse, research: searching, stk - -# def yaml_calibrate(CM, CN): - # mentry.mrt_calibrate( - # CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DATASET_NAME, - # CN.DATASET_DIR, CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.NUM_CALIB, - # CN.LAMBD, batch=CN. BATCH) - def yaml_main(cfg): if cfg.is_frozen(): cfg.defrost() @@ -52,20 +42,20 @@ def yaml_main(cfg): cfg.freeze() if len(sys.argv) == 3: entry_name = sys.argv[2] - # if not hasattr(thismodule, entry_name): - # raise RuntimeError( - # "invalid entry_name: {}, entry_name: {}".format( - # entry_name, entry_name)) - subpass_module = getattr(thismodule, entry_name) - cls_name = entry_name.upper()[0] + entry_name[1:] - subpass_cls = getattr(subpass_module, cls_name) - yaml_impl_func = getattr(subpass_cls, "yaml_impl") - cfg_node_name = entry_name.upper() + if not hasattr(thismodule, entry_name): + raise RuntimeError("invalid entry_name: {}".format(entry_name)) + mrt_module = getattr(thismodule, entry_name) + yaml_func = getattr(mrt_module, "yaml_{}".format(entry_name)) + if entry_name == "compile": + cfg_node_name = "COMPILE" + else: + cfg_node_name = entry_name.upper() if not hasattr(cfg, cfg_node_name): raise RuntimeError( "invalid entry_name: {}, cfg_node_name: {}".format( entry_name, cfg_node_name)) cfg_node = getattr(cfg, cfg_node_name) + yaml_func() 
yaml_func(cfg.COMMON, cfg_node) else: yaml_main(cfg) diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index ebd7a0e0..cd22341a 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -6,14 +6,14 @@ get_model_prefix, get_logger, set_batch, load_fname, save_conf, load_conf, check_file_existance, get_ctx) -MRT_CFG.CALIBRATION = CN() -MRT_CFG.CALIBRATION.BATCH = default_batch, -MRT_CFG.CALIBRATION.NUM_CALIB = 1, -MRT_CFG.CALIBRATION.LAMBD = None, -MRT_CFG.CALIBRATION.DATASET_NAME = "imagenet", -MRT_CFG.CALIBRATION.DATASET_DIR = conf.MRT_DATASET_ROOT, -MRT_CFG.CALIBRATION.DEVICE_TYPE = default_device_type, -MRT_CFG.CALIBRATION.DEVICE_IDS = default_device_ids, +MRT_CFG.CALIBRATE = CN() +MRT_CFG.CALIBRATE.BATCH = default_batch +MRT_CFG.CALIBRATE.NUM_CALIB = 1 +MRT_CFG.CALIBRATE.LAMBD = None +MRT_CFG.CALIBRATE.DATASET_NAME = "imagenet" +MRT_CFG.CALIBRATE.DATASET_DIR = conf.MRT_DATASET_ROOT +MRT_CFG.CALIBRATE.DEVICE_TYPE = default_device_type +MRT_CFG.CALIBRATE.DEVICE_IDS = default_device_ids def calibrate( model_dir, model_name, verbosity, dataset_name, dataset_dir, diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index 20150da6..7d4de884 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -64,7 +64,7 @@ def mrt_compile( sim.save_ext(path.join(model_root, "ext"), infos) logger.info("compilation stage finished") -def yaml_compile(): +def yaml_mrt_compile(): CM = MRT_CFG.COMMON CN = MRT_CFG.COMPILE mrt_compile( diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 01b409f2..1f854001 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -9,6 +9,7 @@ # TODO: jiazhen branch code design #TODO main jungle server # python server, flask +#TODO yaml merge argparse, research: searching, stk default_device_type = "cpu" default_device_ids = [0] From 632fbeb1646766cd9c524156b0f30e47ac1568c1 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 13 Oct 2021 19:06:45 +0800 
Subject: [PATCH 042/120] upt --- main2.py | 23 ++++++++++++----------- python/mrt/V3/prepare.py | 4 +--- python/mrt/V3/utils.py | 12 ++++++++---- 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/main2.py b/main2.py index 6b070e4f..dbd925b1 100644 --- a/main2.py +++ b/main2.py @@ -46,16 +46,17 @@ def yaml_main(cfg): raise RuntimeError("invalid entry_name: {}".format(entry_name)) mrt_module = getattr(thismodule, entry_name) yaml_func = getattr(mrt_module, "yaml_{}".format(entry_name)) - if entry_name == "compile": - cfg_node_name = "COMPILE" - else: - cfg_node_name = entry_name.upper() - if not hasattr(cfg, cfg_node_name): - raise RuntimeError( - "invalid entry_name: {}, cfg_node_name: {}".format( - entry_name, cfg_node_name)) - cfg_node = getattr(cfg, cfg_node_name) - yaml_func() - yaml_func(cfg.COMMON, cfg_node) + CM = cfg.COMMON + CN = cfg.PREPARE + # if entry_name == "compile": + # cfg_node_name = "COMPILE" + # else: + # cfg_node_name = entry_name.upper() + # if not hasattr(cfg, cfg_node_name): + # raise RuntimeError( + # "invalid entry_name: {}, cfg_node_name: {}".format( + # entry_name, cfg_node_name)) + # cfg_node = getattr(cfg, cfg_node_name) + yaml_func(CM, CN) else: yaml_main(cfg) diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index 1fbbe952..3ea21b8d 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -50,9 +50,7 @@ def prepare( else: logger.info("model splitting skipped") -def yaml_prepare(): - CM = MRT_CFG.COMMON - CN = MRT_CFG.PREPARE +def yaml_prepare(CM, CN): prepare( CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.INPUT_SHAPE, CN.SPLIT_KEYS) diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 1f854001..b4792abf 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -6,23 +6,26 @@ import mxnet as mx from mrt import conf +from mrt.common import log +from mrt.utils import extend_fname # TODO: jiazhen branch code design -#TODO main jungle server 
# python server, flask -#TODO yaml merge argparse, research: searching, stk +# TODO main jungle server # python server, flask +# TODO yaml merge argparse, research: searching, stk default_device_type = "cpu" default_device_ids = [0] default_batch = 16 default_ctx = mx.cpu() + MRT_CFG = CN() MRT_CFG.COMMON = CN() MRT_CFG.COMMON.MODEL_DIR = conf.MRT_MODEL_ROOT -MRT_CFG.COMMON.MODEL_NAME = conf.MRT_MODEL_ROOT +MRT_CFG.COMMON.MODEL_NAME = "" MRT_CFG.COMMON.VERBOSITY = "debug" MRT_CFG.COMMON.START_AFTER = None MRT_CFG.COMMON.DEVICE_TYPE = default_device_type -MRT_CFG.COMMON.DVICE_IDS = default_device_ids +MRT_CFG.COMMON.DEVICE_IDS = default_device_ids MRT_CFG.COMMON.BATCH = default_batch MRT_CFG.COMMON.RUN_EVALUATE = True MRT_CFG.COMMON.RUN_COMPILE = True @@ -75,6 +78,7 @@ def load_fname(prefix, suffix=None, with_ext=False): The loaded file names. """ suffix = "."+suffix if suffix is not None else "" + return extend_fname(prefix+suffix, with_ext) def save_conf(fname, logger=logging, **conf_map): try: From efac7da32a9498e0bfb30aa3c56c4594354016f5 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 15 Oct 2021 10:50:19 +0800 Subject: [PATCH 043/120] upt --- docs/mrt/mrt_user_guide.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/mrt/mrt_user_guide.md b/docs/mrt/mrt_user_guide.md index 9fcafff4..5891edc7 100644 --- a/docs/mrt/mrt_user_guide.md +++ b/docs/mrt/mrt_user_guide.md @@ -40,7 +40,6 @@ EVALUATE: DEVICE_TYPE: gpu DEVICE_IDS: [0] ITER_NUM: 10 - ``` run command From 315d48ed42419f273651678f039e9c768d3c236e Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 15 Oct 2021 11:42:38 +0800 Subject: [PATCH 044/120] upt --- main2.py | 21 +++++++++------------ python/mrt/V3/calibrate.py | 13 +++++++------ python/mrt/V3/evaluate.py | 18 ++++++++++++------ python/mrt/V3/mrt_compile.py | 15 +++++++++------ python/mrt/V3/mrt_entry.py | 17 ----------------- python/mrt/V3/prepare.py | 7 ++++--- python/mrt/V3/quantize.py | 29 +++++++++++++++-------------- python/mrt/V3/utils.py | 1 - 8 
files changed, 56 insertions(+), 65 deletions(-) delete mode 100644 python/mrt/V3/mrt_entry.py diff --git a/main2.py b/main2.py index dbd925b1..97247702 100644 --- a/main2.py +++ b/main2.py @@ -42,21 +42,18 @@ def yaml_main(cfg): cfg.freeze() if len(sys.argv) == 3: entry_name = sys.argv[2] + if entry_name == "compile": + entry_name = "mrt_compile" if not hasattr(thismodule, entry_name): raise RuntimeError("invalid entry_name: {}".format(entry_name)) mrt_module = getattr(thismodule, entry_name) yaml_func = getattr(mrt_module, "yaml_{}".format(entry_name)) - CM = cfg.COMMON - CN = cfg.PREPARE - # if entry_name == "compile": - # cfg_node_name = "COMPILE" - # else: - # cfg_node_name = entry_name.upper() - # if not hasattr(cfg, cfg_node_name): - # raise RuntimeError( - # "invalid entry_name: {}, cfg_node_name: {}".format( - # entry_name, cfg_node_name)) - # cfg_node = getattr(cfg, cfg_node_name) - yaml_func(CM, CN) + cm_cfg = cfg.COMMON + if entry_name == "mrt_compile": + cfg_name = "COMPILE" + else: + cfg_name = entry_name.upper() + pass_cfg = getattr(cfg, cfg_name) + yaml_func(cm_cfg, pass_cfg) else: yaml_main(cfg) diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index cd22341a..2aeb7ccc 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -1,5 +1,7 @@ from yacs.config import CfgNode as CN +from mrt.transformer import Model +from mrt import dataset as ds from mrt import conf from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, @@ -51,10 +53,9 @@ def calibrate( save_conf(model_prefix+".calibrate.conf", logger=logger, **conf_map) logger.info("calibrate stage finished") -def yaml_calibrate(): - CM = MRT_CFG.COMMON - CN = MRT_CFG.CALIBRATE +def yaml_calibrate(cm_cfg, pass_cfg): calibrate( - CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DATASET_NAME, - CN.DATASET_DIR, CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.NUM_CALIB, - CN.LAMBD, batch=CN.BATCH) + cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, 
cm_cfg.VERBOSITY, + pass_cfg.DATASET_NAME, pass_cfg.DATASET_DIR, pass_cfg.DEVICE_TYPE, + pass_cfg.DEVICE_IDS, pass_cfg.NUM_CALIB, pass_cfg.LAMBD, + batch=pass_cfg.BATCH) diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index 71884d8b..a5becc61 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -1,6 +1,13 @@ from yacs.config import CfgNode as CN +import logging -from mrt import conf +import mxnet as mx +from mxnet import gluon, ndarray as nd + +from mrt.transformer import Model, MRT, reduce_graph +from mrt import dataset as ds +from mrt import utils +from mrt import sim_quant_helper as sim from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, get_model_prefix, get_logger, set_batch, load_fname, @@ -97,9 +104,8 @@ def quantize(data, label): else: logger.info("evaluatation stage skipped") -def yaml_evaluate(): - CM = MRT_CFG.COMMON - CN = MRT_CFG.EVALUATE +def yaml_evaluate(cm_cfg, pass_cfg): evaluate( - CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DEVICE_TYPE, CN.DEVICE_IDS, - CN.ITER_NUM, batch=CN.BATCH) + cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, cm_cfg.VERBOSITY, + pass_cfg.DEVICE_TYPE, pass_cfg.DEVICE_IDS, pass_cfg.ITER_NUM, + batch=pass_cfg.BATCH) diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index 7d4de884..d1f5c821 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -1,7 +1,11 @@ from os import path from yacs.config import CfgNode as CN -from mrt import conf +import numpy as np + +from mrt.transformer import Model, MRT +from mrt import dataset as ds +from mrt import sim_quant_helper as sim from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, get_model_prefix, get_logger, set_batch, load_fname, @@ -64,9 +68,8 @@ def mrt_compile( sim.save_ext(path.join(model_root, "ext"), infos) logger.info("compilation stage finished") -def yaml_mrt_compile(): - CM = MRT_CFG.COMMON - CN = MRT_CFG.COMPILE +def 
yaml_mrt_compile(cm_cfg, pass_cfg): mrt_compile( - CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.DUMP_DIR, - device_type=CN.DEVICE_TYPE, device_ids=CN.DEVICE_IDS, batch=CN.BATCH) + cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, cm_cfg.VERBOSITY, + pass_cfg.DUMP_DIR, device_type=pass_cfg.DEVICE_TYPE, + device_ids=pass_cfg.DEVICE_IDS, batch=pass_cfg.BATCH) diff --git a/python/mrt/V3/mrt_entry.py b/python/mrt/V3/mrt_entry.py deleted file mode 100644 index 1e86153d..00000000 --- a/python/mrt/V3/mrt_entry.py +++ /dev/null @@ -1,17 +0,0 @@ -from os import path -import logging -import json - -import mxnet as mx -from mxnet import gluon, ndarray as nd -import numpy as np - -from mrt.gluon_zoo import save_model -from mrt.common import log -from mrt import utils -from mrt.transformer import Model, MRT, reduce_graph -from mrt import dataset as ds -from mrt import sym_utils as sutils -from mrt import sim_quant_helper as sim - - diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index 3ea21b8d..6ebd2e39 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -50,7 +50,8 @@ def prepare( else: logger.info("model splitting skipped") -def yaml_prepare(CM, CN): +def yaml_prepare(cm_cfg, pass_cfg): prepare( - CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, - CN.DEVICE_TYPE, CN.DEVICE_IDS, CN.INPUT_SHAPE, CN.SPLIT_KEYS) + cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, cm_cfg.VERBOSITY, + pass_cfg.DEVICE_TYPE, pass_cfg.DEVICE_IDS, pass_cfg.INPUT_SHAPE, + pass_cfg.SPLIT_KEYS) diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index 64024704..5b834845 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -1,6 +1,8 @@ from yacs.config import CfgNode as CN -from mrt import conf +from mrt.transformer import Model, MRT +from mrt import sym_utils as sutils +from mrt import sim_quant_helper as sim from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, get_model_prefix, get_logger, load_fname, save_conf, @@ -133,19 +135,18 @@ def 
mergefunc(node, params, graph): else: logger.info("model merging skipped") -def yaml_quantize(): - CM = MRT_CFG.COMMON - CN = MRT_CFG.QUANTIZE - if CN.is_frozen(): - CN.defrost() +def yaml_quantize(cm_cfg, pass_cfg): + if pass_cfg.is_frozen(): + pass_cfg.defrost() for attr in ["THRESHOLDS", "ATTRIBUTE_DEPS", "OSCALE_MAPS"]: - v = getattr(CN, attr) + v = getattr(pass_cfg, attr) if v is not None: - setattr(CN, attr, v[1:-1]) - if not CN.is_frozen(): - CN.freeze() + setattr(pass_cfg, attr, v[1:-1]) + if not pass_cfg.is_frozen(): + pass_cfg.freeze() quantize( - CM.MODEL_DIR, CM.MODEL_NAME, CM.VERBOSITY, CN.RESTORE_NAMES, - CN.INPUT_PRECISION, CN.OUTPUT_PRECISION, CN.DEVICE_TYPE, CN.DEVICE_IDS, - CN.SOFTMAX_LAMBD, CN.SHIFT_BITS, CN.THRESHOLDS, CN.ATTRIBUTE_DEPS, - CN.OSCALE_MAPS) + cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, cm_cfg.VERBOSITY, + pass_cfg.RESTORE_NAMES, pass_cfg.INPUT_PRECISION, + pass_cfg.OUTPUT_PRECISION, pass_cfg.DEVICE_TYPE, pass_cfg.DEVICE_IDS, + pass_cfg.SOFTMAX_LAMBD, pass_cfg.SHIFT_BITS, pass_cfg.THRESHOLDS, + pass_cfg.ATTRIBUTE_DEPS, pass_cfg.OSCALE_MAPS) diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index b4792abf..0695137e 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -11,7 +11,6 @@ # TODO: jiazhen branch code design # TODO main jungle server # python server, flask -# TODO yaml merge argparse, research: searching, stk default_device_type = "cpu" default_device_ids = [0] From 6cc0e26c4bfd76f1e8e0b706590ea9b43f5c2bc1 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 15 Oct 2021 11:49:56 +0800 Subject: [PATCH 045/120] upt --- main2.py | 11 +++++------ python/mrt/V3/quantize.py | 1 + 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/main2.py b/main2.py index 97247702..e185f94d 100644 --- a/main2.py +++ b/main2.py @@ -22,15 +22,15 @@ def yaml_main(cfg): if cfg.COMMON.START_AFTER in start_pos_map: start_pos = start_pos_map[cfg.COMMON.START_AFTER] if start_pos < 1: - yaml_prepare(cfg.COMMON, cfg.PREPARE) + 
prepare.yaml_prepare(cfg.COMMON, cfg.PREPARE) if start_pos < 2: - yaml_calibrate(cfg.COMMON, cfg.CALIBRATE) + calibrate.yaml_calibrate(cfg.COMMON, cfg.CALIBRATE) if start_pos < 3: - yaml_quantize(cfg.COMMON, cfg.QUANTIZE) + quantize.yaml_quantize(cfg.COMMON, cfg.QUANTIZE) if cfg.COMMON.RUN_EVALUATE: - yaml_evaluate(cfg.COMMON, cfg.EVALUATE) + evaluate.yaml_evaluate(cfg.COMMON, cfg.EVALUATE) if cfg.COMMON.RUN_COMPILE: - yaml_compile(cfg.COMMON, cfg.COMPILE) + mrt_compile.yaml_mrt_compile(cfg.COMMON, cfg.COMPILE) if __name__ == "__main__": assert len(sys.argv) in [2,3], len(sys.argv) @@ -47,7 +47,6 @@ def yaml_main(cfg): if not hasattr(thismodule, entry_name): raise RuntimeError("invalid entry_name: {}".format(entry_name)) mrt_module = getattr(thismodule, entry_name) - yaml_func = getattr(mrt_module, "yaml_{}".format(entry_name)) cm_cfg = cfg.COMMON if entry_name == "mrt_compile": cfg_name = "COMPILE" diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index 5b834845..6c3664e6 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -1,4 +1,5 @@ from yacs.config import CfgNode as CN +import json from mrt.transformer import Model, MRT from mrt import sym_utils as sutils From 67eb13eaf93c886845c0315c7212eac4cf27eead Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 15 Oct 2021 11:50:12 +0800 Subject: [PATCH 046/120] upt --- docs/mrt/mrt_user_guide.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/mrt/mrt_user_guide.md b/docs/mrt/mrt_user_guide.md index 5891edc7..0d96c2ea 100644 --- a/docs/mrt/mrt_user_guide.md +++ b/docs/mrt/mrt_user_guide.md @@ -48,7 +48,20 @@ run command python main2.py ~/mrt_yaml_root/alexnet.yaml ``` +or run either of the following commands for each pass. 
+```bash +# preparation +python main2.py ~/mrt_yaml_root/alexnet.yaml prepare +# calibration +python main2.py ~/mrt_yaml_root/alexnet.yaml calibrate +# quantization +python main2.py ~/mrt_yaml_root/alexnet.yaml quantize +# evaluation +python main2.py ~/mrt_yaml_root/alexnet.yaml evaluate +# compilation +python main2.py ~/mrt_yaml_root/alexnet.yaml compile +``` ## ssd_512_voc_resnet50_v1_voc From 7dcdf710dc461d4b27712f51ab274e670c5716e1 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 15 Oct 2021 19:07:05 +0800 Subject: [PATCH 047/120] update frontend --- .gitignore | 3 + python/mrt/frontend/frontend/__init__.py | 0 python/mrt/frontend/frontend/asgi.py | 16 +++ python/mrt/frontend/frontend/settings.py | 125 ++++++++++++++++++ python/mrt/frontend/frontend/urls.py | 22 +++ python/mrt/frontend/frontend/wsgi.py | 16 +++ python/mrt/frontend/manage.py | 22 +++ python/mrt/frontend/polls/__init__.py | 0 python/mrt/frontend/polls/admin.py | 3 + python/mrt/frontend/polls/apps.py | 6 + .../mrt/frontend/polls/migrations/__init__.py | 0 python/mrt/frontend/polls/models.py | 3 + python/mrt/frontend/polls/tests.py | 3 + python/mrt/frontend/polls/urls.py | 7 + python/mrt/frontend/polls/views.py | 7 + 15 files changed, 233 insertions(+) create mode 100644 python/mrt/frontend/frontend/__init__.py create mode 100644 python/mrt/frontend/frontend/asgi.py create mode 100644 python/mrt/frontend/frontend/settings.py create mode 100644 python/mrt/frontend/frontend/urls.py create mode 100644 python/mrt/frontend/frontend/wsgi.py create mode 100755 python/mrt/frontend/manage.py create mode 100644 python/mrt/frontend/polls/__init__.py create mode 100644 python/mrt/frontend/polls/admin.py create mode 100644 python/mrt/frontend/polls/apps.py create mode 100644 python/mrt/frontend/polls/migrations/__init__.py create mode 100644 python/mrt/frontend/polls/models.py create mode 100644 python/mrt/frontend/polls/tests.py create mode 100644 python/mrt/frontend/polls/urls.py create mode 100644 
python/mrt/frontend/polls/views.py diff --git a/.gitignore b/.gitignore index cbce3515..754e0a02 100644 --- a/.gitignore +++ b/.gitignore @@ -55,3 +55,6 @@ out/* docs/html docs/doctrees docs/doxygen_output + +# django +python/mrt/frontend/db.sqlite3 diff --git a/python/mrt/frontend/frontend/__init__.py b/python/mrt/frontend/frontend/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/frontend/frontend/asgi.py b/python/mrt/frontend/frontend/asgi.py new file mode 100644 index 00000000..7f108ee6 --- /dev/null +++ b/python/mrt/frontend/frontend/asgi.py @@ -0,0 +1,16 @@ +""" +ASGI config for frontend project. + +It exposes the ASGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ +""" + +import os + +from django.core.asgi import get_asgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') + +application = get_asgi_application() diff --git a/python/mrt/frontend/frontend/settings.py b/python/mrt/frontend/frontend/settings.py new file mode 100644 index 00000000..bf92f355 --- /dev/null +++ b/python/mrt/frontend/frontend/settings.py @@ -0,0 +1,125 @@ +""" +Django settings for frontend project. + +Generated by 'django-admin startproject' using Django 3.2.8. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/3.2/ref/settings/ +""" + +from pathlib import Path + +# Build paths inside the project like this: BASE_DIR / 'subdir'. +BASE_DIR = Path(__file__).resolve().parent.parent + + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! 
+SECRET_KEY = 'django-insecure-)2)3=6b0g%w4-^g675xpd(@+^9l4(*z(3s)m=4)l&td0sc=290' + +# SECURITY WARNING: don't run with debug turned on in production! +DEBUG = True + +ALLOWED_HOSTS = [] + + +# Application definition + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +ROOT_URLCONF = 'frontend.urls' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +WSGI_APPLICATION = 'frontend.wsgi.application' + + +# Database +# https://docs.djangoproject.com/en/3.2/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': BASE_DIR / 'db.sqlite3', + } +} + + +# Password validation +# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + }, +] + + +# 
Internationalization +# https://docs.djangoproject.com/en/3.2/topics/i18n/ + +LANGUAGE_CODE = 'en-us' + +TIME_ZONE = 'UTC' + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + + +# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/3.2/howto/static-files/ + +STATIC_URL = '/static/' + +# Default primary key field type +# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field + +DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' diff --git a/python/mrt/frontend/frontend/urls.py b/python/mrt/frontend/frontend/urls.py new file mode 100644 index 00000000..90ced762 --- /dev/null +++ b/python/mrt/frontend/frontend/urls.py @@ -0,0 +1,22 @@ +"""frontend URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en/3.2/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. Add an import: from other_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. Import the include() function: from django.urls import include, path + 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) +""" +from django.contrib import admin +from django.urls import include, path + +urlpatterns = [ + path('polls/', include('polls.urls')), + path('admin/', admin.site.urls), +] diff --git a/python/mrt/frontend/frontend/wsgi.py b/python/mrt/frontend/frontend/wsgi.py new file mode 100644 index 00000000..0b586f31 --- /dev/null +++ b/python/mrt/frontend/frontend/wsgi.py @@ -0,0 +1,16 @@ +""" +WSGI config for frontend project. + +It exposes the WSGI callable as a module-level variable named ``application``. 
+ +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ +""" + +import os + +from django.core.wsgi import get_wsgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') + +application = get_wsgi_application() diff --git a/python/mrt/frontend/manage.py b/python/mrt/frontend/manage.py new file mode 100755 index 00000000..c9c8c1fa --- /dev/null +++ b/python/mrt/frontend/manage.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +"""Django's command-line utility for administrative tasks.""" +import os +import sys + + +def main(): + """Run administrative tasks.""" + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" + ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/python/mrt/frontend/polls/__init__.py b/python/mrt/frontend/polls/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/frontend/polls/admin.py b/python/mrt/frontend/polls/admin.py new file mode 100644 index 00000000..8c38f3f3 --- /dev/null +++ b/python/mrt/frontend/polls/admin.py @@ -0,0 +1,3 @@ +from django.contrib import admin + +# Register your models here. 
diff --git a/python/mrt/frontend/polls/apps.py b/python/mrt/frontend/polls/apps.py new file mode 100644 index 00000000..5a5f94ca --- /dev/null +++ b/python/mrt/frontend/polls/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class PollsConfig(AppConfig): + default_auto_field = 'django.db.models.BigAutoField' + name = 'polls' diff --git a/python/mrt/frontend/polls/migrations/__init__.py b/python/mrt/frontend/polls/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/frontend/polls/models.py b/python/mrt/frontend/polls/models.py new file mode 100644 index 00000000..71a83623 --- /dev/null +++ b/python/mrt/frontend/polls/models.py @@ -0,0 +1,3 @@ +from django.db import models + +# Create your models here. diff --git a/python/mrt/frontend/polls/tests.py b/python/mrt/frontend/polls/tests.py new file mode 100644 index 00000000..7ce503c2 --- /dev/null +++ b/python/mrt/frontend/polls/tests.py @@ -0,0 +1,3 @@ +from django.test import TestCase + +# Create your tests here. diff --git a/python/mrt/frontend/polls/urls.py b/python/mrt/frontend/polls/urls.py new file mode 100644 index 00000000..2fcca0db --- /dev/null +++ b/python/mrt/frontend/polls/urls.py @@ -0,0 +1,7 @@ +from django.urls import path + +from . import views + +urlpatterns = [ + path('', views.index, name='index') +] diff --git a/python/mrt/frontend/polls/views.py b/python/mrt/frontend/polls/views.py new file mode 100644 index 00000000..faeda46e --- /dev/null +++ b/python/mrt/frontend/polls/views.py @@ -0,0 +1,7 @@ +from django.shortcuts import render + +# Create your views here. +from django.http import HttpResponse + +def index(request): + return HttpResponse("Hello, world. 
You're at the ppolls index.") From 3fe6d04297d6566add230fb2ed5a152ca7cb0994 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 20 Oct 2021 19:25:01 +0800 Subject: [PATCH 048/120] simplify yaml interfaces for mrt.v3 --- main2.py | 18 +++++++++------ python/mrt/V3/calibrate.py | 22 +++++++++--------- python/mrt/V3/evaluate.py | 18 +++++++-------- python/mrt/V3/mrt_compile.py | 19 ++++++++-------- python/mrt/V3/prepare.py | 18 +++++++-------- python/mrt/V3/quantize.py | 43 +++++++++++++++++++----------------- 6 files changed, 73 insertions(+), 65 deletions(-) diff --git a/main2.py b/main2.py index e185f94d..ec126231 100644 --- a/main2.py +++ b/main2.py @@ -2,7 +2,11 @@ from os import path from mrt.V3.utils import get_cfg_defaults -from mrt.V3 import prepare, calibrate, quantize, evaluate, mrt_compile +from mrt.V3.prepare import prepare +from mrt.V3.calibrate import calibrate +from mrt.V3.quantize import quantize +from mrt.V3.evaluate import evaluate +from mrt.V3.mrt_compile import mrt_compile thismodule = sys.modules[__name__] @@ -22,15 +26,15 @@ def yaml_main(cfg): if cfg.COMMON.START_AFTER in start_pos_map: start_pos = start_pos_map[cfg.COMMON.START_AFTER] if start_pos < 1: - prepare.yaml_prepare(cfg.COMMON, cfg.PREPARE) + prepare(cfg.COMMON, cfg.PREPARE) if start_pos < 2: - calibrate.yaml_calibrate(cfg.COMMON, cfg.CALIBRATE) + calibrate(cfg.COMMON, cfg.CALIBRATE) if start_pos < 3: - quantize.yaml_quantize(cfg.COMMON, cfg.QUANTIZE) + quantize(cfg.COMMON, cfg.QUANTIZE) if cfg.COMMON.RUN_EVALUATE: - evaluate.yaml_evaluate(cfg.COMMON, cfg.EVALUATE) + evaluate(cfg.COMMON, cfg.EVALUATE) if cfg.COMMON.RUN_COMPILE: - mrt_compile.yaml_mrt_compile(cfg.COMMON, cfg.COMPILE) + mrt_compile(cfg.COMMON, cfg.COMPILE) if __name__ == "__main__": assert len(sys.argv) in [2,3], len(sys.argv) @@ -46,7 +50,7 @@ def yaml_main(cfg): entry_name = "mrt_compile" if not hasattr(thismodule, entry_name): raise RuntimeError("invalid entry_name: {}".format(entry_name)) - mrt_module = 
getattr(thismodule, entry_name) + yaml_func = getattr(thismodule, entry_name) cm_cfg = cfg.COMMON if entry_name == "mrt_compile": cfg_name = "COMPILE" diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index 2aeb7ccc..ca34a050 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -17,9 +17,18 @@ MRT_CFG.CALIBRATE.DEVICE_TYPE = default_device_type MRT_CFG.CALIBRATE.DEVICE_IDS = default_device_ids -def calibrate( - model_dir, model_name, verbosity, dataset_name, dataset_dir, - device_type, device_ids, calibrate_num, lambd, batch=default_batch): +def calibrate(cm_cfg, pass_cfg): + model_dir = cm_cfg.MODEL_DIR + model_name = cm_cfg.MODEL_NAME + verbosity = cm_cfg.VERBOSITY + dataset_name = pass_cfg.DATASET_NAME + dataset_dir = pass_cfg.DATASET_DIR + device_type = pass_cfg.DEVICE_TYPE + device_ids = pass_cfg.DEVICE_IDS + calibrate_num = pass_cfg.NUM_CALIB + lambd = pass_cfg.LAMBD + batch=pass_cfg.BATCH + model_prefix = get_model_prefix(model_dir, model_name) logger = get_logger(verbosity) conf_prep_file = model_prefix + ".prepare.conf" @@ -52,10 +61,3 @@ def calibrate( conf_map["dataset_name"] = dataset_name save_conf(model_prefix+".calibrate.conf", logger=logger, **conf_map) logger.info("calibrate stage finished") - -def yaml_calibrate(cm_cfg, pass_cfg): - calibrate( - cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, cm_cfg.VERBOSITY, - pass_cfg.DATASET_NAME, pass_cfg.DATASET_DIR, pass_cfg.DEVICE_TYPE, - pass_cfg.DEVICE_IDS, pass_cfg.NUM_CALIB, pass_cfg.LAMBD, - batch=pass_cfg.BATCH) diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index a5becc61..a534701f 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -19,9 +19,15 @@ MRT_CFG.EVALUATE.DEVICE_IDS = default_device_ids MRT_CFG.EVALUATE.ITER_NUM = 10 -def evaluate( - model_dir, model_name, verbosity, device_type, device_ids, iter_num, - batch=default_batch): +def evaluate(cm_cfg, pass_cfg): + model_dir = cm_cfg.MODEL_DIR + model_name = 
cm_cfg.MODEL_NAME + verbosity = cm_cfg.VERBOSITY + device_type = pass_cfg.DEVICE_TYPE + device_ids = pass_cfg.DEVICE_IDS + iter_num = pass_cfg.ITER_NUM + batch = pass_cfg.BATCH + model_prefix = get_model_prefix(model_dir, model_name) logger = get_logger(verbosity) conf_quant_file = model_prefix + ".quantize.conf" @@ -103,9 +109,3 @@ def quantize(data, label): logger.info("evaluatation stage finished") else: logger.info("evaluatation stage skipped") - -def yaml_evaluate(cm_cfg, pass_cfg): - evaluate( - cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, cm_cfg.VERBOSITY, - pass_cfg.DEVICE_TYPE, pass_cfg.DEVICE_IDS, pass_cfg.ITER_NUM, - batch=pass_cfg.BATCH) diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index d1f5c821..698bfe49 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -17,10 +17,15 @@ MRT_CFG.COMPILE.DEVICE_TYPE = default_device_type MRT_CFG.COMPILE.DEVICE_IDS = default_device_ids -def mrt_compile( - model_dir, model_name, verbosity, dump_dir, - batch=default_batch, device_type=default_device_type, - device_ids=default_device_ids): +def mrt_compile(cm_cfg, pass_cfg): + model_dir = cm_cfg.MODEL_DIR + model_name = cm_cfg.MODEL_NAME + verbosity = cm_cfg.VERBOSITY + dump_dir = pass_cfg.DUMP_DIR + device_type = pass_cfg.DEVICE_TYPE + device_ids = pass_cfg.DEVICE_IDS + batch = pass_cfg.BATCH + model_prefix = get_model_prefix(model_dir, model_name) logger = get_logger(verbosity) conf_quant_file = model_prefix + ".quantize.conf" @@ -67,9 +72,3 @@ def mrt_compile( } sim.save_ext(path.join(model_root, "ext"), infos) logger.info("compilation stage finished") - -def yaml_mrt_compile(cm_cfg, pass_cfg): - mrt_compile( - cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, cm_cfg.VERBOSITY, - pass_cfg.DUMP_DIR, device_type=pass_cfg.DEVICE_TYPE, - device_ids=pass_cfg.DEVICE_IDS, batch=pass_cfg.BATCH) diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index 6ebd2e39..d04936ce 100644 --- a/python/mrt/V3/prepare.py +++ 
b/python/mrt/V3/prepare.py @@ -13,9 +13,15 @@ MRT_CFG.PREPARE.INPUT_SHAPE = [-1, 3, 224, 224] MRT_CFG.PREPARE.SPLIT_KEYS = "" -def prepare( - model_dir, model_name, verbosity, device_type, device_ids, input_shape, - split_keys): +def prepare(cm_cfg, pass_cfg): + model_dir = cm_cfg.MODEL_DIR + model_name = cm_cfg.MODEL_NAME + verbosity = cm_cfg.VERBOSITY + device_type = pass_cfg.DEVICE_TYPE + device_ids = pass_cfg.DEVICE_IDS + input_shape = pass_cfg.INPUT_SHAPE + split_keys = pass_cfg.SPLIT_KEYS + model_prefix = get_model_prefix(model_dir, model_name) logger = get_logger(verbosity) conf_prep_file = model_prefix + ".prepare.conf" @@ -49,9 +55,3 @@ def prepare( logger.info("model splitting finished") else: logger.info("model splitting skipped") - -def yaml_prepare(cm_cfg, pass_cfg): - prepare( - cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, cm_cfg.VERBOSITY, - pass_cfg.DEVICE_TYPE, pass_cfg.DEVICE_IDS, pass_cfg.INPUT_SHAPE, - pass_cfg.SPLIT_KEYS) diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index 6c3664e6..23c41006 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -21,10 +21,29 @@ MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = None MRT_CFG.QUANTIZE.OSCALE_MAPS = "" -def quantize( - model_dir, model_name, verbosity, restore_names, input_precision, - output_precision, device_type, device_ids, softmax_lambd, shift_bits, - thresholds, attribute_deps, oscale_maps): +def quantize(cm_cfg, pass_cfg): + if pass_cfg.is_frozen(): + pass_cfg.defrost() + for attr in ["THRESHOLDS", "ATTRIBUTE_DEPS", "OSCALE_MAPS"]: + v = getattr(pass_cfg, attr) + if v is not None: + setattr(pass_cfg, attr, v[1:-1]) + if not pass_cfg.is_frozen(): + pass_cfg.freeze() + model_dir = cm_cfg.MODEL_DIR + model_name = cm_cfg.MODEL_NAME + verbosity = cm_cfg.VERBOSITY + restore_names = pass_cfg.RESTORE_NAMES + input_precision = pass_cfg.INPUT_PRECISION + output_precision = pass_cfg.OUTPUT_PRECISION + device_type = pass_cfg.DEVICE_TYPE + device_ids = pass_cfg.DEVICE_IDS + softmax_lambd 
= pass_cfg.SOFTMAX_LAMBD + shift_bits = pass_cfg.SHIFT_BITS + thresholds = pass_cfg.THRESHOLDS + attribute_deps = pass_cfg.ATTRIBUTE_DEPS + oscale_maps = pass_cfg.OSCALE_MAPS + model_prefix = get_model_prefix(model_dir, model_name) logger = get_logger(verbosity) conf_calib_file = model_prefix + ".calibrate.conf" @@ -135,19 +154,3 @@ def mergefunc(node, params, graph): logger.info("model merging finished") else: logger.info("model merging skipped") - -def yaml_quantize(cm_cfg, pass_cfg): - if pass_cfg.is_frozen(): - pass_cfg.defrost() - for attr in ["THRESHOLDS", "ATTRIBUTE_DEPS", "OSCALE_MAPS"]: - v = getattr(pass_cfg, attr) - if v is not None: - setattr(pass_cfg, attr, v[1:-1]) - if not pass_cfg.is_frozen(): - pass_cfg.freeze() - quantize( - cm_cfg.MODEL_DIR, cm_cfg.MODEL_NAME, cm_cfg.VERBOSITY, - pass_cfg.RESTORE_NAMES, pass_cfg.INPUT_PRECISION, - pass_cfg.OUTPUT_PRECISION, pass_cfg.DEVICE_TYPE, pass_cfg.DEVICE_IDS, - pass_cfg.SOFTMAX_LAMBD, pass_cfg.SHIFT_BITS, pass_cfg.THRESHOLDS, - pass_cfg.ATTRIBUTE_DEPS, pass_cfg.OSCALE_MAPS) From 8264e4e056a874eba93b25f29df87c937cf4abd7 Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 23 Oct 2021 11:35:41 +0800 Subject: [PATCH 049/120] upt --- python/mrt/frontend/frontend/settings.py | 1 + python/mrt/frontend/polls/admin.py | 4 +++- python/mrt/frontend/polls/models.py | 21 ++++++++++++++++++++- python/mrt/frontend/polls/views.py | 9 +++++++-- 4 files changed, 31 insertions(+), 4 deletions(-) diff --git a/python/mrt/frontend/frontend/settings.py b/python/mrt/frontend/frontend/settings.py index bf92f355..c13db220 100644 --- a/python/mrt/frontend/frontend/settings.py +++ b/python/mrt/frontend/frontend/settings.py @@ -31,6 +31,7 @@ # Application definition INSTALLED_APPS = [ + 'polls.apps.PollsConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', diff --git a/python/mrt/frontend/polls/admin.py b/python/mrt/frontend/polls/admin.py index 8c38f3f3..6af8ff67 100644 --- 
a/python/mrt/frontend/polls/admin.py +++ b/python/mrt/frontend/polls/admin.py @@ -1,3 +1,5 @@ from django.contrib import admin -# Register your models here. +from .models import Question + +admin.site.register(Question) diff --git a/python/mrt/frontend/polls/models.py b/python/mrt/frontend/polls/models.py index 71a83623..f881ce92 100644 --- a/python/mrt/frontend/polls/models.py +++ b/python/mrt/frontend/polls/models.py @@ -1,3 +1,22 @@ +import datetime + from django.db import models +from django.utils import timezone + + +class Question(models.Model): + question_text = models.CharField(max_length=200) + pub_date = models.DateTimeField('date published') + def __str__(self): + return self.question_text + def was_published_recently(self): + return self.pub_date >= \ + timezone.now() - datetime.timedelta(days=1) + -# Create your models here. +class Choice(models.Model): + question = models.ForeignKey(Question, on_delete=models.CASCADE) + choice_text = models.CharField(max_length=200) + votes = models.IntegerField(default=0) + def __str__(self): + return self.choice_text diff --git a/python/mrt/frontend/polls/views.py b/python/mrt/frontend/polls/views.py index faeda46e..dc608b0b 100644 --- a/python/mrt/frontend/polls/views.py +++ b/python/mrt/frontend/polls/views.py @@ -1,7 +1,12 @@ from django.shortcuts import render # Create your views here. -from django.http import HttpResponse +from django.http import HttpResponse, HttpResponseRedirect +from django.urls import reverse def index(request): - return HttpResponse("Hello, world. You're at the ppolls index.") + return HttpResponse("Hello, world. 
You're at the polls index.") + +def redirect_to_year(request): + year = 2006 + return HttpResponseRedirect(reverse('news-year-archieve'), args=(year,)) From 3847863b416f6a0bf149bdca51522e935d45bd8e Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 23 Oct 2021 15:37:15 +0800 Subject: [PATCH 050/120] fix config file --- python/mrt/V3/prepare.py | 2 +- python/mrt/V3/quantize.py | 31 ++++++++++--------------------- 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index d04936ce..bfc10266 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -11,7 +11,7 @@ MRT_CFG.PREPARE.DEVICE_TYPE = default_device_type MRT_CFG.PREPARE.DEVICE_IDS = default_device_ids MRT_CFG.PREPARE.INPUT_SHAPE = [-1, 3, 224, 224] -MRT_CFG.PREPARE.SPLIT_KEYS = "" +MRT_CFG.PREPARE.SPLIT_KEYS = [] def prepare(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index 23c41006..47d2358d 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -1,5 +1,4 @@ from yacs.config import CfgNode as CN -import json from mrt.transformer import Model, MRT from mrt import sym_utils as sutils @@ -17,19 +16,11 @@ MRT_CFG.QUANTIZE.DEVICE_IDS = default_device_ids MRT_CFG.QUANTIZE.SOFTMAX_LAMBD = None MRT_CFG.QUANTIZE.SHIFT_BITS = None -MRT_CFG.QUANTIZE.THRESHOLDS = None -MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = None -MRT_CFG.QUANTIZE.OSCALE_MAPS = "" +MRT_CFG.QUANTIZE.THRESHOLDS = [] +MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = [] +MRT_CFG.QUANTIZE.OSCALE_MAPS = [] def quantize(cm_cfg, pass_cfg): - if pass_cfg.is_frozen(): - pass_cfg.defrost() - for attr in ["THRESHOLDS", "ATTRIBUTE_DEPS", "OSCALE_MAPS"]: - v = getattr(pass_cfg, attr) - if v is not None: - setattr(pass_cfg, attr, v[1:-1]) - if not pass_cfg.is_frozen(): - pass_cfg.freeze() model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY @@ -40,9 +31,10 @@ def quantize(cm_cfg, pass_cfg): 
device_ids = pass_cfg.DEVICE_IDS softmax_lambd = pass_cfg.SOFTMAX_LAMBD shift_bits = pass_cfg.SHIFT_BITS - thresholds = pass_cfg.THRESHOLDS - attribute_deps = pass_cfg.ATTRIBUTE_DEPS - oscale_maps = pass_cfg.OSCALE_MAPS + thresholds = {opn: th for opn, th in pass_cfg.THRESHOLDS} + attribute_deps = {attr: {sattr: opn for sattr, opn in attr_map} \ + for attr, attr_map in pass_cfg.ATTRIBUTE_DEPS} + oscale_maps = {opn1: opn2 for opn1, opn2 in pass_cfg.OSCALE_MAPS} model_prefix = get_model_prefix(model_dir, model_name) logger = get_logger(verbosity) @@ -97,8 +89,7 @@ def quantize(cm_cfg, pass_cfg): mrt.set_softmax_lambd(softmax_lambd) if shift_bits is not None: mrt.set_shift_bits(shift_bits) - if thresholds is not None: - thresholds = json.loads(thresholds) + if thresholds != {}: for name, threshold in thresholds.items(): mrt.set_threshold(name, threshold) @@ -117,9 +108,8 @@ def quantize(cm_cfg, pass_cfg): # mergemodel if conf_map.get("split_keys", "") != "": qmodel = mrt.current_model - if attribute_deps is None: + if attribute_deps == {}: raise RuntimeError("model merging, please specify --attribute_deps") - attribute_deps = json.loads(attribute_deps) mrt_oscales = mrt.get_output_scales() name_idx = {mrt.get_maps().get( s.attr("name"), s.attr("name")): i \ @@ -141,9 +131,8 @@ def mergefunc(node, params, graph): top = Model.load(sym_top_file, prm_top_file) model_merger = Model.merger(qmodel, top, mrt.get_maps()) qmodel = model_merger.merge(callback=mergefunc) - if oscale_maps is None: + if oscale_maps == {}: raise RuntimeError("model merging, please specify --oscale_maps") - oscale_maps = json.loads(oscale_maps) oscales = model_merger.get_output_scales(mrt_oscales, oscale_maps) sym_all_file, prm_all_file, ext_all_file = load_fname( model_prefix, suffix="all.quantize", with_ext=True) From ae14392389b2edbfee70bc6796b33c2c61cfff42 Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 23 Oct 2021 15:37:40 +0800 Subject: [PATCH 051/120] upt doc --- docs/mrt/mrt_user_guide.md | 37 
++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/docs/mrt/mrt_user_guide.md b/docs/mrt/mrt_user_guide.md index 0d96c2ea..f85914fd 100644 --- a/docs/mrt/mrt_user_guide.md +++ b/docs/mrt/mrt_user_guide.md @@ -71,8 +71,12 @@ COMMON: VERBOSITY: info RUN_EVALUATE: True PREPARE: - INPUT_SHAPE: [-1,3,512,512] - SPLIT_KEYS: "[ssd0_multiperclassdecoder0_zeros_like0, ssd0_multiperclassdecoder0_slice_axis0, ssd0_normalizedboxcenterdecoder0_concat0]" + INPUT_SHAPE: [-1, 3, 512, 512] + SPLIT_KEYS: [ + "ssd0_multiperclassdecoder0_zeros_like0", + "ssd0_multiperclassdecoder0_slice_axis0", + "ssd0_normalizedboxcenterdecoder0_concat0" + ] CALIBRATE: NUM_CALIB: 1 LAMBD: 16 @@ -83,15 +87,34 @@ QUANTIZE: OUTPUT_PRECISION: 30 DEVICE_TYPE: gpu DEVICE_IDS: [2] - THRESHOLDS: "\"{\"data\":2.64,\"ssd0_multiperclassdecoder0_slice_axis0\":1}\"" - ATTRIBUTE_DEPS: "\"{\"_greater_scalar\": {\"scalar\": \"ssd0_multiperclassdecoder0_slice_axis0\"}, \"_contrib_box_nms\": {\"valid_thresh\": \"ssd0_multiperclassdecoder0_slice_axis0\"}}\"" - OSCALE_MAPS: "\"{\"ssd0_slice_axis41\": \"ssd0_multiperclassdecoder0_zeros_like0\", \"ssd0_slice_axis42\": \"ssd0_multiperclassdecoder0_slice_axis0\", \"ssd0_slice_axis43\": \"ssd0_normalizedboxcenterdecoder0_concat0\"}\"" + THRESHOLDS: [ + ["data", 2.64], + ["ssd0_multiperclassdecoder0_slice_axis0", 1], + ] + ATTRIBUTE_DEPS: [ + [ + "_greater_scalar", + [ + ["scalar", "ssd0_multiperclassdecoder0_slice_axis0"], + ] + ], + [ + "_contrib_box_nms", + [ + ["valid_thresh", "ssd0_multiperclassdecoder0_slice_axis0"], + ] + ], + ] + OSCALE_MAPS: [ + ["ssd0_slice_axis41", "ssd0_multiperclassdecoder0_zeros_like0"], + ["ssd0_slice_axis42", "ssd0_multiperclassdecoder0_slice_axis0"], + ["ssd0_slice_axis43", "ssd0_normalizedboxcenterdecoder0_concat0"], + ] EVALUATE: BATCH: 15 DEVICE_TYPE: gpu - DEVICE_IDS: "[0,1,2]" + DEVICE_IDS: [0, 1, 2] ITER_NUM: 10 - ``` run command From 8b47af3183d7b3b235c2f9903ece9f7ac2d6d6ca Mon Sep 17 
00:00:00 2001 From: ryt Date: Wed, 27 Oct 2021 19:03:38 +0800 Subject: [PATCH 052/120] upt --- python/mrt/V3/__init__.py | 6 ++++++ python/mrt/V3/calibrate.py | 19 +++++++++++++++++-- python/mrt/V3/evaluate.py | 8 ++++++++ python/mrt/V3/mrt_compile.py | 14 ++++++++++++-- python/mrt/V3/prepare.py | 17 +++++++++++++++-- python/mrt/V3/quantize.py | 16 +++++++++++++++- python/mrt/V3/utils.py | 12 ++++++++++++ 7 files changed, 85 insertions(+), 7 deletions(-) create mode 100644 python/mrt/V3/__init__.py diff --git a/python/mrt/V3/__init__.py b/python/mrt/V3/__init__.py new file mode 100644 index 00000000..0f25c4d0 --- /dev/null +++ b/python/mrt/V3/__init__.py @@ -0,0 +1,6 @@ +import utils +import prepare +import calibrate +import quantize +import evaluate +import mrt_compile diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index ca34a050..7ee517b0 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -6,17 +6,32 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, get_model_prefix, get_logger, set_batch, load_fname, save_conf, - load_conf, check_file_existance, get_ctx) + load_conf, check_file_existance, get_ctx, parser) + +default_num_calib = 1 MRT_CFG.CALIBRATE = CN() MRT_CFG.CALIBRATE.BATCH = default_batch -MRT_CFG.CALIBRATE.NUM_CALIB = 1 +MRT_CFG.CALIBRATE.NUM_CALIB = default_num_calib MRT_CFG.CALIBRATE.LAMBD = None MRT_CFG.CALIBRATE.DATASET_NAME = "imagenet" MRT_CFG.CALIBRATE.DATASET_DIR = conf.MRT_DATASET_ROOT MRT_CFG.CALIBRATE.DEVICE_TYPE = default_device_type MRT_CFG.CALIBRATE.DEVICE_IDS = default_device_ids +parser.add_argument("--batch-calibrate", type=int, default=default_batch) +parser.add_argument("--num-calib", type=int, default=default_num_calib) +parser.add_argument("--lambd", type=float) +parser.add_argument( + "--dataset-name", type=str, default="imagenet", + choices=list(ds.DS_REG.keys())) +parser.add_argument("--dataset-dir", type=str, default=conf.MRT_DATASET_ROOT) 
+parser.add_argument( + "--device-type-calibrate", type=str, choices=["cpu", "gpu"], + default=default_device_type) +parser.add_argument( + "--device-ids-calibrate", type=int, nargs="+", default=default_device_ids) + def calibrate(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index a534701f..a3883ea1 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -19,6 +19,14 @@ MRT_CFG.EVALUATE.DEVICE_IDS = default_device_ids MRT_CFG.EVALUATE.ITER_NUM = 10 +parser.add_argument("--batch-evaluate", type=int, default=default_batch) +parser.add_argument( + "--device-type-evaluate", type=str, choices=["cpu", "gpu"], + default=default_device_type) +parser.add_argument( + "--device-ids-evaluate", type=int, nargs="+", default=default_device_ids) +parser.add_argument("--iter-num", type=int, default=10) + def evaluate(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index 698bfe49..c092a980 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -9,14 +9,24 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, get_model_prefix, get_logger, set_batch, load_fname, - load_conf, check_file_existance) + load_conf, check_file_existance, parser) + +default_dump_dir = path.join("data1", "tmp") MRT_CFG.COMPILE = CN() MRT_CFG.COMPILE.BATCH = 1 -MRT_CFG.COMPILE.DUMP_DIR = "/data1/tmp" +MRT_CFG.COMPILE.DUMP_DIR = default_dump_dir MRT_CFG.COMPILE.DEVICE_TYPE = default_device_type MRT_CFG.COMPILE.DEVICE_IDS = default_device_ids +parser.add_argument("--batch-compile", type=int, default=1) +parser.add_argument("--dump-dir", type=str, default=default_dump_dir) +parser.add_argument( + "--device-type-compile", type=str, choices=["cpu", "gpu"], + default=default_device_type) +parser.add_argument( + 
"--device-ids-compile", type=int, nargs="+", default=default_device_ids) + def mrt_compile(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index bfc10266..e0c63680 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -5,14 +5,27 @@ from mrt.transformer import Model from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, - get_model_prefix, get_logger, set_batch, load_fname, save_conf, get_ctx) + get_model_prefix, get_logger, set_batch, load_fname, save_conf, + get_ctx, parser) + +default_input_shape = [-1, 3, 224, 224] MRT_CFG.PREPARE= CN() MRT_CFG.PREPARE.DEVICE_TYPE = default_device_type MRT_CFG.PREPARE.DEVICE_IDS = default_device_ids -MRT_CFG.PREPARE.INPUT_SHAPE = [-1, 3, 224, 224] +MRT_CFG.PREPARE.INPUT_SHAPE = default_input_shape MRT_CFG.PREPARE.SPLIT_KEYS = [] +parser.add_argument( + "--device-type-prepare", type=str, choices=["cpu", "gpu"], + default=default_device_type) +parser.add_argument( + "--device-ids-prepare", type=int, nargs="+", + default=default_device_ids) +parser.add_argument( + "--input-shape", type=int, nargs="+", default=default_input_shape) +parser.add_argument("--split-keys", type=str, nargs="+", default=[]) + def prepare(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index 47d2358d..a392f561 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -6,7 +6,7 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, get_model_prefix, get_logger, load_fname, save_conf, - load_conf, check_file_existance, get_ctx) + load_conf, check_file_existance, get_ctx, parser) MRT_CFG.QUANTIZE = CN() MRT_CFG.QUANTIZE.RESTORE_NAMES = [] @@ -20,6 +20,20 @@ MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = [] MRT_CFG.QUANTIZE.OSCALE_MAPS = [] +parser.add_argument("--restore-names", nargs="+", type=str, 
default=[]) +parser.add_argument("--input-precision", type=int) +parser.add_argument("--output-precision", type=int) +parser.add_argument( + "--device-type-quantize", type=str, choices=["cpu", "gpu"], + default=default_device_type) +parser.add_argument( + "--device-ids-quantize", type=int, nargs="+", default=default_device_ids) +parser.add_argument("--softmax-lambd", type=float) +parser.add_argument("--shift-bits", type=int) +parser.add_argument("--thresholds", type=tuple, default=[]) +parser.add_argument("--attribute-deps", type=tuple, default=[]) +parser.add_argument("--oscale-maps", type=tuple, default=[]) + def quantize(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 0695137e..eabaf46a 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -2,6 +2,7 @@ import logging import json from yacs.config import CfgNode as CN +import argparse import mxnet as mx @@ -29,6 +30,17 @@ MRT_CFG.COMMON.RUN_EVALUATE = True MRT_CFG.COMMON.RUN_COMPILE = True +parser = argparse.ArgumentParser("MRT YAML Interface") +parser.add_argument("--model-dir", type=str, default=conf.MRT_MODEL_ROOT) +parser.add_argument("--model-name", type=str, default="") +parser.add_argument("--verobosity", type=str, default="debug") +parser.add_argument("--start-after", type=str, default=None) +parser.add_argument("--device-type", type=str, default=default_device_type) +parser.add_argument("--device-ids", type=int, default=default_device_ids) +parser.add_argument("--batch", type=int, default=default_batch) +parser.add_argument("--run-evaluate", action="store_true") +parser.add_argument("--run-compile", action="store_true") + def get_model_prefix(model_dir, model_name): if model_dir.startswith("~"): model_dir = path.expanduser(model_dir) From ea113a76f03a459876f5c262c274894a26035f8a Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 29 Oct 2021 14:39:12 +0800 Subject: [PATCH 053/120] upt --- 
python/mrt/V3/__init__.py | 9 +++------ python/mrt/V3/evaluate.py | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/python/mrt/V3/__init__.py b/python/mrt/V3/__init__.py index 0f25c4d0..95baeeef 100644 --- a/python/mrt/V3/__init__.py +++ b/python/mrt/V3/__init__.py @@ -1,6 +1,3 @@ -import utils -import prepare -import calibrate -import quantize -import evaluate -import mrt_compile +from mrt.V3 import ( + utils, prepare, calibrate, evaluate, mrt_compile +) diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index a3883ea1..c0caad2b 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -11,7 +11,7 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, get_model_prefix, get_logger, set_batch, load_fname, - load_conf, check_file_existance, get_ctx, get_batch_axis) + load_conf, check_file_existance, get_ctx, get_batch_axis, parser) MRT_CFG.EVALUATE = CN() MRT_CFG.EVALUATE.BATCH = default_batch From 23bbb95044f0df78c35e65422576f46914e5bcec Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 1 Nov 2021 19:25:53 +0800 Subject: [PATCH 054/120] upt --- main2.py | 16 ++++++++---- python/mrt/V3/calibrate.py | 36 +++++++++++++++++---------- python/mrt/V3/evaluate.py | 24 ++++++++++++------ python/mrt/V3/mrt_compile.py | 28 ++++++++++++++------- python/mrt/V3/prepare.py | 26 ++++++++++++-------- python/mrt/V3/quantize.py | 42 +++++++++++++++++++++----------- python/mrt/V3/utils.py | 47 +++++++++++++++++++++++++++++------- 7 files changed, 151 insertions(+), 68 deletions(-) diff --git a/main2.py b/main2.py index ec126231..947057e7 100644 --- a/main2.py +++ b/main2.py @@ -1,7 +1,7 @@ import sys from os import path -from mrt.V3.utils import get_cfg_defaults +from mrt.V3.utils import get_cfg_defaults, override_cfg_argparse, parser from mrt.V3.prepare import prepare from mrt.V3.calibrate import calibrate from mrt.V3.quantize import quantize @@ -36,16 +36,22 @@ def yaml_main(cfg): if 
cfg.COMMON.RUN_COMPILE: mrt_compile(cfg.COMMON, cfg.COMPILE) +parser.add_argument("yaml_file", type=str) +parser.add_argument( + "--entry-name", type=str, choices=[ + "prepare", "calibrate", "quantize", "evaluate", "compile"]) + if __name__ == "__main__": - assert len(sys.argv) in [2,3], len(sys.argv) - yaml_file = sys.argv[1] + args = parser.parse_args() + yaml_file = args.yaml_file if yaml_file.startswith("~"): yaml_file = path.expanduser(yaml_file) cfg = get_cfg_defaults() cfg.merge_from_file(yaml_file) cfg.freeze() - if len(sys.argv) == 3: - entry_name = sys.argv[2] + cfg = override_cfg_argparse(cfg, args) + entry_name = args.entry_name + if entry_name is not None: if entry_name == "compile": entry_name = "mrt_compile" if not hasattr(thismodule, entry_name): diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index 7ee517b0..18ad83fe 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -6,7 +6,7 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, get_model_prefix, get_logger, set_batch, load_fname, save_conf, - load_conf, check_file_existance, get_ctx, parser) + load_conf, check_file_existance, get_ctx, parser, update_dest2yaml) default_num_calib = 1 @@ -19,18 +19,28 @@ MRT_CFG.CALIBRATE.DEVICE_TYPE = default_device_type MRT_CFG.CALIBRATE.DEVICE_IDS = default_device_ids -parser.add_argument("--batch-calibrate", type=int, default=default_batch) -parser.add_argument("--num-calib", type=int, default=default_num_calib) -parser.add_argument("--lambd", type=float) -parser.add_argument( - "--dataset-name", type=str, default="imagenet", - choices=list(ds.DS_REG.keys())) -parser.add_argument("--dataset-dir", type=str, default=conf.MRT_DATASET_ROOT) -parser.add_argument( - "--device-type-calibrate", type=str, choices=["cpu", "gpu"], - default=default_device_type) -parser.add_argument( - "--device-ids-calibrate", type=int, nargs="+", default=default_device_ids) +_pname = "CALIBRATE" 
+update_dest2yaml({ + parser.add_argument( + "--batch-calibrate", type=int, + default=default_batch).dest: (_pname, "BATCH"), + parser.add_argument( + "--num-calib", type=int, + default=default_num_calib).dest: (_pname, "NUM_CALIB"), + parser.add_argument("--lambd", type=float).dest: (_pname, "LAMBD"), + parser.add_argument( + "--dataset-name", type=str, default="imagenet", + choices=list(ds.DS_REG.keys())).dest: (_pname, "DATASET_NAME"), + parser.add_argument( + "--dataset-dir", type=str, + default=conf.MRT_DATASET_ROOT).dest: (_pname, "DATASET_DIR"), + parser.add_argument( + "--device-type-calibrate", type=str, choices=["cpu", "gpu"], + default=default_device_type).dest: (_pname, "DEVICE_TYPE"), + parser.add_argument( + "--device-ids-calibrate", type=int, nargs="+", + default=default_device_ids).dest: (_pname, "DEVICE_IDS"), +}) def calibrate(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index c0caad2b..6ee4b681 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -11,7 +11,8 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, get_model_prefix, get_logger, set_batch, load_fname, - load_conf, check_file_existance, get_ctx, get_batch_axis, parser) + load_conf, check_file_existance, get_ctx, get_batch_axis, parser, + update_dest2yaml) MRT_CFG.EVALUATE = CN() MRT_CFG.EVALUATE.BATCH = default_batch @@ -19,13 +20,20 @@ MRT_CFG.EVALUATE.DEVICE_IDS = default_device_ids MRT_CFG.EVALUATE.ITER_NUM = 10 -parser.add_argument("--batch-evaluate", type=int, default=default_batch) -parser.add_argument( - "--device-type-evaluate", type=str, choices=["cpu", "gpu"], - default=default_device_type) -parser.add_argument( - "--device-ids-evaluate", type=int, nargs="+", default=default_device_ids) -parser.add_argument("--iter-num", type=int, default=10) +_pname = "EVALUATE" +update_dest2yaml({ + parser.add_argument( + "--batch-evaluate", type=int, + 
default=default_batch).dest: (_pname, "BATCH"), + parser.add_argument( + "--device-type-evaluate", type=str, choices=["cpu", "gpu"], + default=default_device_type).dest: (_pname, "DEVICE_TYPE"), + parser.add_argument( + "--device-ids-evaluate", type=int, nargs="+", + default=default_device_ids).dest: (_pname, "DEVICE_IDS"), + parser.add_argument( + "--iter-num", type=int, default=10).dest: (_pname, "ITER_NUM"), +}) def evaluate(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index c092a980..2c09bea6 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -1,4 +1,5 @@ from os import path +import os from yacs.config import CfgNode as CN import numpy as np @@ -9,9 +10,9 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, get_model_prefix, get_logger, set_batch, load_fname, - load_conf, check_file_existance, parser) + load_conf, check_file_existance, parser, update_dest2yaml) -default_dump_dir = path.join("data1", "tmp") +default_dump_dir = path.expanduser("~/mrt_dump") MRT_CFG.COMPILE = CN() MRT_CFG.COMPILE.BATCH = 1 @@ -19,13 +20,20 @@ MRT_CFG.COMPILE.DEVICE_TYPE = default_device_type MRT_CFG.COMPILE.DEVICE_IDS = default_device_ids -parser.add_argument("--batch-compile", type=int, default=1) -parser.add_argument("--dump-dir", type=str, default=default_dump_dir) -parser.add_argument( - "--device-type-compile", type=str, choices=["cpu", "gpu"], - default=default_device_type) -parser.add_argument( - "--device-ids-compile", type=int, nargs="+", default=default_device_ids) +_cnode = "COMPILE" +update_dest2yaml({ + parser.add_argument( + "--batch-compile", type=int, default=1).dest: (_cnode, "BATCH"), + parser.add_argument( + "--dump-dir", type=str, + default=default_dump_dir).dest: (_cnode, "DUMP_DIR"), + parser.add_argument( + "--device-type-compile", type=str, choices=["cpu", "gpu"], + default=default_device_type).dest: (_cnode, 
"DEVICE_TYPE"), + parser.add_argument( + "--device-ids-compile", type=int, nargs="+", + default=default_device_ids).dest: (_cnode, "DEVICE_IDS"), +}) def mrt_compile(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR @@ -64,6 +72,8 @@ def mrt_compile(cm_cfg, pass_cfg): oscales = mrt.get_output_scales() inputs_ext = mrt.get_inputs_ext() qmodel = mrt.current_model + if not path.exists(dump_dir): + os.makedirs(dump_dir, exist_ok=True) qmodel.to_cvm( model_name_tfm, datadir=dump_dir, input_shape=set_batch(input_shape, batch), target=device_type, diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index e0c63680..9a50f4e0 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -6,7 +6,7 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, get_model_prefix, get_logger, set_batch, load_fname, save_conf, - get_ctx, parser) + get_ctx, parser, update_dest2yaml) default_input_shape = [-1, 3, 224, 224] @@ -16,15 +16,21 @@ MRT_CFG.PREPARE.INPUT_SHAPE = default_input_shape MRT_CFG.PREPARE.SPLIT_KEYS = [] -parser.add_argument( - "--device-type-prepare", type=str, choices=["cpu", "gpu"], - default=default_device_type) -parser.add_argument( - "--device-ids-prepare", type=int, nargs="+", - default=default_device_ids) -parser.add_argument( - "--input-shape", type=int, nargs="+", default=default_input_shape) -parser.add_argument("--split-keys", type=str, nargs="+", default=[]) +_pname = "PREPARE" +update_dest2yaml({ + parser.add_argument( + "--device-type-prepare", type=str, choices=["cpu", "gpu"], + default=default_device_type).dest: (_pname, "DEVICE_TYPE"), + parser.add_argument( + "--device-ids-prepare", type=int, nargs="+", + default=default_device_ids).dest: (_pname, "DEVICE_IDS"), + parser.add_argument( + "--input-shape", type=int, nargs="+", + default=default_input_shape).dest: (_pname, "INPUT_SHAPE"), + parser.add_argument( + "--split-keys", type=str, nargs="+", + default=[]).dest: (_pname, "SPLIT_KEYS"), +}) def 
prepare(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index a392f561..1cc233b9 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -6,7 +6,7 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, get_model_prefix, get_logger, load_fname, save_conf, - load_conf, check_file_existance, get_ctx, parser) + load_conf, check_file_existance, get_ctx, parser, update_dest2yaml) MRT_CFG.QUANTIZE = CN() MRT_CFG.QUANTIZE.RESTORE_NAMES = [] @@ -20,19 +20,33 @@ MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = [] MRT_CFG.QUANTIZE.OSCALE_MAPS = [] -parser.add_argument("--restore-names", nargs="+", type=str, default=[]) -parser.add_argument("--input-precision", type=int) -parser.add_argument("--output-precision", type=int) -parser.add_argument( - "--device-type-quantize", type=str, choices=["cpu", "gpu"], - default=default_device_type) -parser.add_argument( - "--device-ids-quantize", type=int, nargs="+", default=default_device_ids) -parser.add_argument("--softmax-lambd", type=float) -parser.add_argument("--shift-bits", type=int) -parser.add_argument("--thresholds", type=tuple, default=[]) -parser.add_argument("--attribute-deps", type=tuple, default=[]) -parser.add_argument("--oscale-maps", type=tuple, default=[]) +_pname = "QUANTIZE" +update_dest2yaml({ + parser.add_argument( + "--restore-names", nargs="+", type=str, + default=[]).dest: (_pname, "RESTORE_NAMES"), + parser.add_argument( + "--input-precision", type=int).dest: (_pname, "INPUT_PRECISION"), + parser.add_argument( + "--output-precision", type=int).dest: (_pname, "OUTPUT_PRECISION"), + parser.add_argument( + "--device-type-quantize", type=str, choices=["cpu", "gpu"], + default=default_device_type).dest: (_pname, "DEVICE_TYPE"), + parser.add_argument( + "--device-ids-quantize", type=int, nargs="+", + default=default_device_ids).dest: (_pname, "DEVICE_IDS"), + parser.add_argument( + "--softmax-lambd", type=float).dest: 
(_pname, "SOFTMAX_LAMBD"), + parser.add_argument( + "--shift-bits", type=int).dest: (_pname, "SHIFT_BITS"), + parser.add_argument( + "--thresholds", type=tuple, default=[]).dest: (_pname, "THRESHOLDS"), + parser.add_argument( + "--attribute-deps", type=tuple, + default=[]).dest: (_pname, "ATTRIBUTE_DEPS"), + parser.add_argument( + "--oscale-maps", type=tuple, default=[]).dest: (_pname, "OSCALE_MAPS"), +}) def quantize(cm_cfg, pass_cfg): model_dir = cm_cfg.MODEL_DIR diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index eabaf46a..59b936da 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -31,15 +31,32 @@ MRT_CFG.COMMON.RUN_COMPILE = True parser = argparse.ArgumentParser("MRT YAML Interface") -parser.add_argument("--model-dir", type=str, default=conf.MRT_MODEL_ROOT) -parser.add_argument("--model-name", type=str, default="") -parser.add_argument("--verobosity", type=str, default="debug") -parser.add_argument("--start-after", type=str, default=None) -parser.add_argument("--device-type", type=str, default=default_device_type) -parser.add_argument("--device-ids", type=int, default=default_device_ids) -parser.add_argument("--batch", type=int, default=default_batch) -parser.add_argument("--run-evaluate", action="store_true") -parser.add_argument("--run-compile", action="store_true") +_pname = "COMMON" +dest2yaml = { + parser.add_argument( + "--model-dir", type=str, + default=conf.MRT_MODEL_ROOT).dest: (_pname, "MODEL_DIR"), + parser.add_argument( + "--verobosity", type=str, default="debug").dest: (_pname, "VERBOSITY"), + parser.add_argument( + "--start-after", type=str, default=None).dest: (_pname, "START_AFTER"), + parser.add_argument( + "--device-type", type=str, + default=default_device_type).dest: (_pname, "DEVICE_TYPE"), + parser.add_argument( + "--device-ids", type=int, + default=default_device_ids).dest: (_pname, "DEVICE_IDS"), + parser.add_argument( + "--batch", type=int, default=default_batch).dest: (_pname, "BATCH"), +} + +def 
update_dest2yaml(dest2yaml_upt): + for dest, cfg in dest2yaml_upt.items(): + if dest in dest2yaml: + raise RuntimeError( + "dest: {} already in dest2yaml: {}".format( + dest, dest2yaml.keys())) + dest2yaml[dest] = cfg def get_model_prefix(model_dir, model_name): if model_dir.startswith("~"): @@ -146,3 +163,15 @@ def get_cfg_defaults(): # Return a clone so that the defaults will not be altered # This is for the "local variable" use pattern return MRT_CFG.clone() + +def override_cfg_argparse(cfg, args): + if cfg.is_frozen(): + cfg.defrost() + for dest in dir(args): + if dest not in dest2yaml: + continue + pname, attr = dest2yaml[dest] + cnode = getattr(cfg, pname) + setattr(cnode, attr, getattr(args, dest)) + cfg.freeze() + return cfg From 51c17c1b5e65004c61bf45fb642bd9b0ac7ce914 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 2 Nov 2021 13:16:58 +0800 Subject: [PATCH 055/120] upt --- python/mrt/V3/utils.py | 8 ++++++-- python/mrt/frontend/polls/urls.py | 5 ++++- python/mrt/frontend/polls/views.py | 18 ++++++++++++++---- 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 59b936da..26b10d6c 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -21,7 +21,7 @@ MRT_CFG = CN() MRT_CFG.COMMON = CN() MRT_CFG.COMMON.MODEL_DIR = conf.MRT_MODEL_ROOT -MRT_CFG.COMMON.MODEL_NAME = "" +MRT_CFG.COMMON.MODEL_NAME = None MRT_CFG.COMMON.VERBOSITY = "debug" MRT_CFG.COMMON.START_AFTER = None MRT_CFG.COMMON.DEVICE_TYPE = default_device_type @@ -36,6 +36,8 @@ parser.add_argument( "--model-dir", type=str, default=conf.MRT_MODEL_ROOT).dest: (_pname, "MODEL_DIR"), + parser.add_argument( + "--model-name", type=str).dest: (_pname, "MODEL_NAME"), parser.add_argument( "--verobosity", type=str, default="debug").dest: (_pname, "VERBOSITY"), parser.add_argument( @@ -172,6 +174,8 @@ def override_cfg_argparse(cfg, args): continue pname, attr = dest2yaml[dest] cnode = getattr(cfg, pname) - setattr(cnode, attr, getattr(args, 
dest)) + argv = getattr(args, dest) + if argv is not None: + setattr(cnode, attr, argv) cfg.freeze() return cfg diff --git a/python/mrt/frontend/polls/urls.py b/python/mrt/frontend/polls/urls.py index 2fcca0db..14576a92 100644 --- a/python/mrt/frontend/polls/urls.py +++ b/python/mrt/frontend/polls/urls.py @@ -3,5 +3,8 @@ from . import views urlpatterns = [ - path('', views.index, name='index') + path("", views.index, name="index"), + path("/", views.detail, name="detail"), + path("/results/", views.results, name="results"), + path("/vote/", views.vote, name="vote"), ] diff --git a/python/mrt/frontend/polls/views.py b/python/mrt/frontend/polls/views.py index dc608b0b..249d0495 100644 --- a/python/mrt/frontend/polls/views.py +++ b/python/mrt/frontend/polls/views.py @@ -5,8 +5,18 @@ from django.urls import reverse def index(request): - return HttpResponse("Hello, world. You're at the polls index.") + latest_question_list = Question.objects.order_by("-pub_data")[:5] + output = ", ".join([q.question_text for q in latest_question_list]) + return HttpResponse(output) -def redirect_to_year(request): - year = 2006 - return HttpResponseRedirect(reverse('news-year-archieve'), args=(year,)) +def detail(request, question_id): + return HttpResponse( + "You're looking at question {}.".format(question_id)) + +def results(request, question_id): + return HttpResponse( + "You're looking at the results of question {}.".format( + question_id)) + +def vote(request, question_id): + return HttpResponse("You're voting on question: {}".format(question_id)) From a7906130472eb3038a4fea71a62d9fb048115be1 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 2 Nov 2021 13:20:19 +0800 Subject: [PATCH 056/120] upt --- docs/mrt/mrt_user_guide.md | 10 +- python/mrt/frontend/frontend/__init__.py | 0 python/mrt/frontend/frontend/asgi.py | 16 --- python/mrt/frontend/frontend/settings.py | 126 ------------------ python/mrt/frontend/frontend/urls.py | 22 --- python/mrt/frontend/frontend/wsgi.py | 16 --- 
python/mrt/frontend/manage.py | 22 --- python/mrt/frontend/polls/__init__.py | 0 python/mrt/frontend/polls/admin.py | 5 - python/mrt/frontend/polls/apps.py | 6 - .../mrt/frontend/polls/migrations/__init__.py | 0 python/mrt/frontend/polls/models.py | 22 --- python/mrt/frontend/polls/tests.py | 3 - python/mrt/frontend/polls/urls.py | 10 -- python/mrt/frontend/polls/views.py | 22 --- 15 files changed, 5 insertions(+), 275 deletions(-) delete mode 100644 python/mrt/frontend/frontend/__init__.py delete mode 100644 python/mrt/frontend/frontend/asgi.py delete mode 100644 python/mrt/frontend/frontend/settings.py delete mode 100644 python/mrt/frontend/frontend/urls.py delete mode 100644 python/mrt/frontend/frontend/wsgi.py delete mode 100755 python/mrt/frontend/manage.py delete mode 100644 python/mrt/frontend/polls/__init__.py delete mode 100644 python/mrt/frontend/polls/admin.py delete mode 100644 python/mrt/frontend/polls/apps.py delete mode 100644 python/mrt/frontend/polls/migrations/__init__.py delete mode 100644 python/mrt/frontend/polls/models.py delete mode 100644 python/mrt/frontend/polls/tests.py delete mode 100644 python/mrt/frontend/polls/urls.py delete mode 100644 python/mrt/frontend/polls/views.py diff --git a/docs/mrt/mrt_user_guide.md b/docs/mrt/mrt_user_guide.md index f85914fd..7a3ec7f6 100644 --- a/docs/mrt/mrt_user_guide.md +++ b/docs/mrt/mrt_user_guide.md @@ -52,15 +52,15 @@ or run either of the following commands for each pass. 
```bash # preparation -python main2.py ~/mrt_yaml_root/alexnet.yaml prepare +python main2.py ~/mrt_yaml_root/alexnet.yaml --entry-name prepare # calibration -python main2.py ~/mrt_yaml_root/alexnet.yaml calibrate +python main2.py ~/mrt_yaml_root/alexnet.yaml --entry-name calibrate # quantization -python main2.py ~/mrt_yaml_root/alexnet.yaml quantize +python main2.py ~/mrt_yaml_root/alexnet.yaml --entry-name quantize # evaluation -python main2.py ~/mrt_yaml_root/alexnet.yaml evaluate +python main2.py ~/mrt_yaml_root/alexnet.yaml --entry-name evaluate # compilation -python main2.py ~/mrt_yaml_root/alexnet.yaml compile +python main2.py ~/mrt_yaml_root/alexnet.yaml --entry-name compile ``` ## ssd_512_voc_resnet50_v1_voc diff --git a/python/mrt/frontend/frontend/__init__.py b/python/mrt/frontend/frontend/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python/mrt/frontend/frontend/asgi.py b/python/mrt/frontend/frontend/asgi.py deleted file mode 100644 index 7f108ee6..00000000 --- a/python/mrt/frontend/frontend/asgi.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -ASGI config for frontend project. - -It exposes the ASGI callable as a module-level variable named ``application``. - -For more information on this file, see -https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ -""" - -import os - -from django.core.asgi import get_asgi_application - -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') - -application = get_asgi_application() diff --git a/python/mrt/frontend/frontend/settings.py b/python/mrt/frontend/frontend/settings.py deleted file mode 100644 index c13db220..00000000 --- a/python/mrt/frontend/frontend/settings.py +++ /dev/null @@ -1,126 +0,0 @@ -""" -Django settings for frontend project. - -Generated by 'django-admin startproject' using Django 3.2.8. 
- -For more information on this file, see -https://docs.djangoproject.com/en/3.2/topics/settings/ - -For the full list of settings and their values, see -https://docs.djangoproject.com/en/3.2/ref/settings/ -""" - -from pathlib import Path - -# Build paths inside the project like this: BASE_DIR / 'subdir'. -BASE_DIR = Path(__file__).resolve().parent.parent - - -# Quick-start development settings - unsuitable for production -# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ - -# SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = 'django-insecure-)2)3=6b0g%w4-^g675xpd(@+^9l4(*z(3s)m=4)l&td0sc=290' - -# SECURITY WARNING: don't run with debug turned on in production! -DEBUG = True - -ALLOWED_HOSTS = [] - - -# Application definition - -INSTALLED_APPS = [ - 'polls.apps.PollsConfig', - 'django.contrib.admin', - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.staticfiles', -] - -MIDDLEWARE = [ - 'django.middleware.security.SecurityMiddleware', - 'django.contrib.sessions.middleware.SessionMiddleware', - 'django.middleware.common.CommonMiddleware', - 'django.middleware.csrf.CsrfViewMiddleware', - 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.contrib.messages.middleware.MessageMiddleware', - 'django.middleware.clickjacking.XFrameOptionsMiddleware', -] - -ROOT_URLCONF = 'frontend.urls' - -TEMPLATES = [ - { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'DIRS': [], - 'APP_DIRS': True, - 'OPTIONS': { - 'context_processors': [ - 'django.template.context_processors.debug', - 'django.template.context_processors.request', - 'django.contrib.auth.context_processors.auth', - 'django.contrib.messages.context_processors.messages', - ], - }, - }, -] - -WSGI_APPLICATION = 'frontend.wsgi.application' - - -# Database -# https://docs.djangoproject.com/en/3.2/ref/settings/#databases - -DATABASES = { - 'default': { - 
'ENGINE': 'django.db.backends.sqlite3', - 'NAME': BASE_DIR / 'db.sqlite3', - } -} - - -# Password validation -# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators - -AUTH_PASSWORD_VALIDATORS = [ - { - 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', - }, -] - - -# Internationalization -# https://docs.djangoproject.com/en/3.2/topics/i18n/ - -LANGUAGE_CODE = 'en-us' - -TIME_ZONE = 'UTC' - -USE_I18N = True - -USE_L10N = True - -USE_TZ = True - - -# Static files (CSS, JavaScript, Images) -# https://docs.djangoproject.com/en/3.2/howto/static-files/ - -STATIC_URL = '/static/' - -# Default primary key field type -# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field - -DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' diff --git a/python/mrt/frontend/frontend/urls.py b/python/mrt/frontend/frontend/urls.py deleted file mode 100644 index 90ced762..00000000 --- a/python/mrt/frontend/frontend/urls.py +++ /dev/null @@ -1,22 +0,0 @@ -"""frontend URL Configuration - -The `urlpatterns` list routes URLs to views. For more information please see: - https://docs.djangoproject.com/en/3.2/topics/http/urls/ -Examples: -Function views - 1. Add an import: from my_app import views - 2. Add a URL to urlpatterns: path('', views.home, name='home') -Class-based views - 1. Add an import: from other_app.views import Home - 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') -Including another URLconf - 1. Import the include() function: from django.urls import include, path - 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) -""" -from django.contrib import admin -from django.urls import include, path - -urlpatterns = [ - path('polls/', include('polls.urls')), - path('admin/', admin.site.urls), -] diff --git a/python/mrt/frontend/frontend/wsgi.py b/python/mrt/frontend/frontend/wsgi.py deleted file mode 100644 index 0b586f31..00000000 --- a/python/mrt/frontend/frontend/wsgi.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -WSGI config for frontend project. - -It exposes the WSGI callable as a module-level variable named ``application``. - -For more information on this file, see -https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ -""" - -import os - -from django.core.wsgi import get_wsgi_application - -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') - -application = get_wsgi_application() diff --git a/python/mrt/frontend/manage.py b/python/mrt/frontend/manage.py deleted file mode 100755 index c9c8c1fa..00000000 --- a/python/mrt/frontend/manage.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -"""Django's command-line utility for administrative tasks.""" -import os -import sys - - -def main(): - """Run administrative tasks.""" - os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') - try: - from django.core.management import execute_from_command_line - except ImportError as exc: - raise ImportError( - "Couldn't import Django. Are you sure it's installed and " - "available on your PYTHONPATH environment variable? Did you " - "forget to activate a virtual environment?" 
- ) from exc - execute_from_command_line(sys.argv) - - -if __name__ == '__main__': - main() diff --git a/python/mrt/frontend/polls/__init__.py b/python/mrt/frontend/polls/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python/mrt/frontend/polls/admin.py b/python/mrt/frontend/polls/admin.py deleted file mode 100644 index 6af8ff67..00000000 --- a/python/mrt/frontend/polls/admin.py +++ /dev/null @@ -1,5 +0,0 @@ -from django.contrib import admin - -from .models import Question - -admin.site.register(Question) diff --git a/python/mrt/frontend/polls/apps.py b/python/mrt/frontend/polls/apps.py deleted file mode 100644 index 5a5f94ca..00000000 --- a/python/mrt/frontend/polls/apps.py +++ /dev/null @@ -1,6 +0,0 @@ -from django.apps import AppConfig - - -class PollsConfig(AppConfig): - default_auto_field = 'django.db.models.BigAutoField' - name = 'polls' diff --git a/python/mrt/frontend/polls/migrations/__init__.py b/python/mrt/frontend/polls/migrations/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python/mrt/frontend/polls/models.py b/python/mrt/frontend/polls/models.py deleted file mode 100644 index f881ce92..00000000 --- a/python/mrt/frontend/polls/models.py +++ /dev/null @@ -1,22 +0,0 @@ -import datetime - -from django.db import models -from django.utils import timezone - - -class Question(models.Model): - question_text = models.CharField(max_length=200) - pub_date = models.DateTimeField('date published') - def __str__(self): - return self.question_text - def was_published_recently(self): - return self.pub_date >= \ - timezone.now() - datetime.timedelta(days=1) - - -class Choice(models.Model): - question = models.ForeignKey(Question, on_delete=models.CASCADE) - choice_text = models.CharField(max_length=200) - votes = models.IntegerField(default=0) - def __str__(self): - return self.choice_text diff --git a/python/mrt/frontend/polls/tests.py b/python/mrt/frontend/polls/tests.py deleted file mode 100644 index 
7ce503c2..00000000 --- a/python/mrt/frontend/polls/tests.py +++ /dev/null @@ -1,3 +0,0 @@ -from django.test import TestCase - -# Create your tests here. diff --git a/python/mrt/frontend/polls/urls.py b/python/mrt/frontend/polls/urls.py deleted file mode 100644 index 14576a92..00000000 --- a/python/mrt/frontend/polls/urls.py +++ /dev/null @@ -1,10 +0,0 @@ -from django.urls import path - -from . import views - -urlpatterns = [ - path("", views.index, name="index"), - path("/", views.detail, name="detail"), - path("/results/", views.results, name="results"), - path("/vote/", views.vote, name="vote"), -] diff --git a/python/mrt/frontend/polls/views.py b/python/mrt/frontend/polls/views.py deleted file mode 100644 index 249d0495..00000000 --- a/python/mrt/frontend/polls/views.py +++ /dev/null @@ -1,22 +0,0 @@ -from django.shortcuts import render - -# Create your views here. -from django.http import HttpResponse, HttpResponseRedirect -from django.urls import reverse - -def index(request): - latest_question_list = Question.objects.order_by("-pub_data")[:5] - output = ", ".join([q.question_text for q in latest_question_list]) - return HttpResponse(output) - -def detail(request, question_id): - return HttpResponse( - "You're looking at question {}.".format(question_id)) - -def results(request, question_id): - return HttpResponse( - "You're looking at the results of question {}.".format( - question_id)) - -def vote(request, question_id): - return HttpResponse("You're voting on question: {}".format(question_id)) From c68b67b42d9bef54ad90d6dccddec8bc475f266e Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 10 Nov 2021 20:00:46 +0800 Subject: [PATCH 057/120] upt --- python/mrt/V3/__init__.py | 2 +- python/mrt/frontend/frontend/__init__.py | 5 + python/mrt/frontend/frontend/asgi.py | 16 ++ python/mrt/frontend/frontend/celery.py | 22 +++ python/mrt/frontend/frontend/settings.py | 148 ++++++++++++++++++ python/mrt/frontend/frontend/urls.py | 22 +++ python/mrt/frontend/frontend/wsgi.py 
| 16 ++ python/mrt/frontend/manage.py | 22 +++ python/mrt/frontend/polls/__init__.py | 0 python/mrt/frontend/polls/admin.py | 3 + python/mrt/frontend/polls/apps.py | 6 + .../mrt/frontend/polls/migrations/__init__.py | 0 python/mrt/frontend/polls/models.py | 9 ++ python/mrt/frontend/polls/tests.py | 3 + python/mrt/frontend/polls/urls.py | 7 + python/mrt/frontend/polls/views.py | 17 ++ 16 files changed, 297 insertions(+), 1 deletion(-) create mode 100644 python/mrt/frontend/frontend/__init__.py create mode 100644 python/mrt/frontend/frontend/asgi.py create mode 100644 python/mrt/frontend/frontend/celery.py create mode 100644 python/mrt/frontend/frontend/settings.py create mode 100644 python/mrt/frontend/frontend/urls.py create mode 100644 python/mrt/frontend/frontend/wsgi.py create mode 100755 python/mrt/frontend/manage.py create mode 100644 python/mrt/frontend/polls/__init__.py create mode 100644 python/mrt/frontend/polls/admin.py create mode 100644 python/mrt/frontend/polls/apps.py create mode 100644 python/mrt/frontend/polls/migrations/__init__.py create mode 100644 python/mrt/frontend/polls/models.py create mode 100644 python/mrt/frontend/polls/tests.py create mode 100644 python/mrt/frontend/polls/urls.py create mode 100644 python/mrt/frontend/polls/views.py diff --git a/python/mrt/V3/__init__.py b/python/mrt/V3/__init__.py index 95baeeef..b9915ed4 100644 --- a/python/mrt/V3/__init__.py +++ b/python/mrt/V3/__init__.py @@ -1,3 +1,3 @@ from mrt.V3 import ( - utils, prepare, calibrate, evaluate, mrt_compile + utils, prepare, calibrate, quantize, evaluate, mrt_compile ) diff --git a/python/mrt/frontend/frontend/__init__.py b/python/mrt/frontend/frontend/__init__.py new file mode 100644 index 00000000..5c39f774 --- /dev/null +++ b/python/mrt/frontend/frontend/__init__.py @@ -0,0 +1,5 @@ +# This will make sure the app is always imported when +# Django starts so that shared_task will use this app +from .celery import app as celery_app + +__all__ = ('celery_app', ) diff 
--git a/python/mrt/frontend/frontend/asgi.py b/python/mrt/frontend/frontend/asgi.py new file mode 100644 index 00000000..7f108ee6 --- /dev/null +++ b/python/mrt/frontend/frontend/asgi.py @@ -0,0 +1,16 @@ +""" +ASGI config for frontend project. + +It exposes the ASGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ +""" + +import os + +from django.core.asgi import get_asgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') + +application = get_asgi_application() diff --git a/python/mrt/frontend/frontend/celery.py b/python/mrt/frontend/frontend/celery.py new file mode 100644 index 00000000..e0011cf3 --- /dev/null +++ b/python/mrt/frontend/frontend/celery.py @@ -0,0 +1,22 @@ +import os + +from celery import Celery + +# Set the default Django settings module for the 'celery' program. +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') + +app = Celery('frontend') + +# Using a string here means the worker doesn't have to serialize +# the configuration object to child processes. +# - namespace='CELERY' means all celery-related configurtion keys +# should have a 'CELERY_' prefix. +app.config_from_project( + 'django.conf:settings', namespace='CELERY') + +# Load task modules from all registered Django apps. +app.autodiscover_tasks() + +@app.task(bind=True) +def debug_task(self): + print(f'Request: {self.request!r}') diff --git a/python/mrt/frontend/frontend/settings.py b/python/mrt/frontend/frontend/settings.py new file mode 100644 index 00000000..7de4a8b1 --- /dev/null +++ b/python/mrt/frontend/frontend/settings.py @@ -0,0 +1,148 @@ +""" +Django settings for frontend project. + +Generated by 'django-admin startproject' using Django 3.2.9. 
+ +For more information on this file, see +https://docs.djangoproject.com/en/3.2/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/3.2/ref/settings/ +""" + +from pathlib import Path + +# Build paths inside the project like this: BASE_DIR / 'subdir'. +BASE_DIR = Path(__file__).resolve().parent.parent + + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! +SECRET_KEY = 'django-insecure-wmq4%*k#c4eyt#-#9u!oa9g@mgv4(-q-rtpe$sqqh8)*jr1ci9' + +# SECURITY WARNING: don't run with debug turned on in production! +DEBUG = True + +ALLOWED_HOSTS = [] + + +# Application definition + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +ROOT_URLCONF = 'frontend.urls' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +WSGI_APPLICATION = 'frontend.wsgi.application' + + +# Database +# https://docs.djangoproject.com/en/3.2/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 
'django.db.backends.sqlite3', + 'NAME': BASE_DIR / 'db.sqlite3', + } +} + + +# Password validation +# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + }, +] + + +# Internationalization +# https://docs.djangoproject.com/en/3.2/topics/i18n/ + +LANGUAGE_CODE = 'en-us' + +TIME_ZONE = 'UTC' + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + + +# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/3.2/howto/static-files/ + +STATIC_URL = '/static/' + +# Default primary key field type +# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field + +DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' + +# import os + +# LOGGING = { + # 'version': 1, + # 'disable_existing_loggers': False, + # 'handlers': { + # 'console': { + # 'class': 'logging.StreamHandler', + # }, + # }, + # 'root': { + # 'handlers': ['console'], + # 'level': 'WARNING', + # }, + # 'loggers': { + # 'django': { + # 'handlers': ['console'], + # 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), + # 'propagate': False, + # }, + # }, +# } diff --git a/python/mrt/frontend/frontend/urls.py b/python/mrt/frontend/frontend/urls.py new file mode 100644 index 00000000..a2fd576f --- /dev/null +++ b/python/mrt/frontend/frontend/urls.py @@ -0,0 +1,22 @@ +"""frontend URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en/3.2/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. 
Add an import: from other_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. Import the include() function: from django.urls import include, path + 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) +""" +from django.contrib import admin +from django.urls import include, path + +urlpatterns = [ + path("polls/", include("polls.urls")), + path('admin/', admin.site.urls), +] diff --git a/python/mrt/frontend/frontend/wsgi.py b/python/mrt/frontend/frontend/wsgi.py new file mode 100644 index 00000000..0b586f31 --- /dev/null +++ b/python/mrt/frontend/frontend/wsgi.py @@ -0,0 +1,16 @@ +""" +WSGI config for frontend project. + +It exposes the WSGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ +""" + +import os + +from django.core.wsgi import get_wsgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') + +application = get_wsgi_application() diff --git a/python/mrt/frontend/manage.py b/python/mrt/frontend/manage.py new file mode 100755 index 00000000..c9c8c1fa --- /dev/null +++ b/python/mrt/frontend/manage.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +"""Django's command-line utility for administrative tasks.""" +import os +import sys + + +def main(): + """Run administrative tasks.""" + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" 
+ ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/python/mrt/frontend/polls/__init__.py b/python/mrt/frontend/polls/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/frontend/polls/admin.py b/python/mrt/frontend/polls/admin.py new file mode 100644 index 00000000..8c38f3f3 --- /dev/null +++ b/python/mrt/frontend/polls/admin.py @@ -0,0 +1,3 @@ +from django.contrib import admin + +# Register your models here. diff --git a/python/mrt/frontend/polls/apps.py b/python/mrt/frontend/polls/apps.py new file mode 100644 index 00000000..5a5f94ca --- /dev/null +++ b/python/mrt/frontend/polls/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class PollsConfig(AppConfig): + default_auto_field = 'django.db.models.BigAutoField' + name = 'polls' diff --git a/python/mrt/frontend/polls/migrations/__init__.py b/python/mrt/frontend/polls/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/frontend/polls/models.py b/python/mrt/frontend/polls/models.py new file mode 100644 index 00000000..4da9ef0b --- /dev/null +++ b/python/mrt/frontend/polls/models.py @@ -0,0 +1,9 @@ +from django.db import models + +# Create your models here. +class Question(models.Model): + question_text = model.CharField(max_length=200) + pub_date = models.DateTimeField("date published") + +class Choice(models.Model): + question = models.ForeignKey(Question, on_delete=models.CASCADE) diff --git a/python/mrt/frontend/polls/tests.py b/python/mrt/frontend/polls/tests.py new file mode 100644 index 00000000..7ce503c2 --- /dev/null +++ b/python/mrt/frontend/polls/tests.py @@ -0,0 +1,3 @@ +from django.test import TestCase + +# Create your tests here. diff --git a/python/mrt/frontend/polls/urls.py b/python/mrt/frontend/polls/urls.py new file mode 100644 index 00000000..2d07f441 --- /dev/null +++ b/python/mrt/frontend/polls/urls.py @@ -0,0 +1,7 @@ +from django.urls import path + +from . 
import views + +urlpatterns = [ + path("prepare", views.views_prepare, name="prepare"), +] diff --git a/python/mrt/frontend/polls/views.py b/python/mrt/frontend/polls/views.py new file mode 100644 index 00000000..419b05d6 --- /dev/null +++ b/python/mrt/frontend/polls/views.py @@ -0,0 +1,17 @@ +from os import path + +from django.http import HttpResponse + +from . import views +from mrt.V3.utils import get_cfg_defaults +from mrt.V3.prepare import prepare + +def views_prepare(request): + yaml_file = path.expanduser("~/mrt_yaml_root/alexnet.yaml") + cfg = get_cfg_defaults() + cfg.merge_from_file(yaml_file) + cfg.freeze() + cm_cfg = cfg.COMMON + pass_cfg = cfg.PREPARE + prepare(cm_cfg, pass_cfg) + return HttpResponse("Hello, quantize") From 4b185c300fed4ba6230ec9e0cdde5ed1e64563ab Mon Sep 17 00:00:00 2001 From: ryt Date: Thu, 11 Nov 2021 16:05:42 +0800 Subject: [PATCH 058/120] upt --- python/mrt/V3/prepare.py | 1 + python/mrt/frontend/frontend/__init__.py | 5 - python/mrt/frontend/frontend/asgi.py | 16 -- python/mrt/frontend/frontend/celery.py | 22 --- python/mrt/frontend/frontend/settings.py | 148 ------------------ python/mrt/frontend/frontend/urls.py | 22 --- python/mrt/frontend/frontend/wsgi.py | 16 -- python/mrt/frontend/manage.py | 22 --- python/mrt/frontend/polls/__init__.py | 0 python/mrt/frontend/polls/admin.py | 3 - python/mrt/frontend/polls/apps.py | 6 - .../mrt/frontend/polls/migrations/__init__.py | 0 python/mrt/frontend/polls/models.py | 9 -- python/mrt/frontend/polls/tests.py | 3 - python/mrt/frontend/polls/urls.py | 7 - python/mrt/frontend/polls/views.py | 17 -- 16 files changed, 1 insertion(+), 296 deletions(-) delete mode 100644 python/mrt/frontend/frontend/__init__.py delete mode 100644 python/mrt/frontend/frontend/asgi.py delete mode 100644 python/mrt/frontend/frontend/celery.py delete mode 100644 python/mrt/frontend/frontend/settings.py delete mode 100644 python/mrt/frontend/frontend/urls.py delete mode 100644 python/mrt/frontend/frontend/wsgi.py 
delete mode 100755 python/mrt/frontend/manage.py delete mode 100644 python/mrt/frontend/polls/__init__.py delete mode 100644 python/mrt/frontend/polls/admin.py delete mode 100644 python/mrt/frontend/polls/apps.py delete mode 100644 python/mrt/frontend/polls/migrations/__init__.py delete mode 100644 python/mrt/frontend/polls/models.py delete mode 100644 python/mrt/frontend/polls/tests.py delete mode 100644 python/mrt/frontend/polls/urls.py delete mode 100644 python/mrt/frontend/polls/views.py diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index 9a50f4e0..71e863b1 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -33,6 +33,7 @@ }) def prepare(cm_cfg, pass_cfg): + print("hihihisss") model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY diff --git a/python/mrt/frontend/frontend/__init__.py b/python/mrt/frontend/frontend/__init__.py deleted file mode 100644 index 5c39f774..00000000 --- a/python/mrt/frontend/frontend/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# This will make sure the app is always imported when -# Django starts so that shared_task will use this app -from .celery import app as celery_app - -__all__ = ('celery_app', ) diff --git a/python/mrt/frontend/frontend/asgi.py b/python/mrt/frontend/frontend/asgi.py deleted file mode 100644 index 7f108ee6..00000000 --- a/python/mrt/frontend/frontend/asgi.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -ASGI config for frontend project. - -It exposes the ASGI callable as a module-level variable named ``application``. 
- -For more information on this file, see -https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ -""" - -import os - -from django.core.asgi import get_asgi_application - -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') - -application = get_asgi_application() diff --git a/python/mrt/frontend/frontend/celery.py b/python/mrt/frontend/frontend/celery.py deleted file mode 100644 index e0011cf3..00000000 --- a/python/mrt/frontend/frontend/celery.py +++ /dev/null @@ -1,22 +0,0 @@ -import os - -from celery import Celery - -# Set the default Django settings module for the 'celery' program. -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') - -app = Celery('frontend') - -# Using a string here means the worker doesn't have to serialize -# the configuration object to child processes. -# - namespace='CELERY' means all celery-related configurtion keys -# should have a 'CELERY_' prefix. -app.config_from_project( - 'django.conf:settings', namespace='CELERY') - -# Load task modules from all registered Django apps. -app.autodiscover_tasks() - -@app.task(bind=True) -def debug_task(self): - print(f'Request: {self.request!r}') diff --git a/python/mrt/frontend/frontend/settings.py b/python/mrt/frontend/frontend/settings.py deleted file mode 100644 index 7de4a8b1..00000000 --- a/python/mrt/frontend/frontend/settings.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Django settings for frontend project. - -Generated by 'django-admin startproject' using Django 3.2.9. - -For more information on this file, see -https://docs.djangoproject.com/en/3.2/topics/settings/ - -For the full list of settings and their values, see -https://docs.djangoproject.com/en/3.2/ref/settings/ -""" - -from pathlib import Path - -# Build paths inside the project like this: BASE_DIR / 'subdir'. 
-BASE_DIR = Path(__file__).resolve().parent.parent - - -# Quick-start development settings - unsuitable for production -# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ - -# SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = 'django-insecure-wmq4%*k#c4eyt#-#9u!oa9g@mgv4(-q-rtpe$sqqh8)*jr1ci9' - -# SECURITY WARNING: don't run with debug turned on in production! -DEBUG = True - -ALLOWED_HOSTS = [] - - -# Application definition - -INSTALLED_APPS = [ - 'django.contrib.admin', - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.staticfiles', -] - -MIDDLEWARE = [ - 'django.middleware.security.SecurityMiddleware', - 'django.contrib.sessions.middleware.SessionMiddleware', - 'django.middleware.common.CommonMiddleware', - 'django.middleware.csrf.CsrfViewMiddleware', - 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.contrib.messages.middleware.MessageMiddleware', - 'django.middleware.clickjacking.XFrameOptionsMiddleware', -] - -ROOT_URLCONF = 'frontend.urls' - -TEMPLATES = [ - { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'DIRS': [], - 'APP_DIRS': True, - 'OPTIONS': { - 'context_processors': [ - 'django.template.context_processors.debug', - 'django.template.context_processors.request', - 'django.contrib.auth.context_processors.auth', - 'django.contrib.messages.context_processors.messages', - ], - }, - }, -] - -WSGI_APPLICATION = 'frontend.wsgi.application' - - -# Database -# https://docs.djangoproject.com/en/3.2/ref/settings/#databases - -DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': BASE_DIR / 'db.sqlite3', - } -} - - -# Password validation -# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators - -AUTH_PASSWORD_VALIDATORS = [ - { - 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', - }, - { - 'NAME': 
'django.contrib.auth.password_validation.MinimumLengthValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', - }, -] - - -# Internationalization -# https://docs.djangoproject.com/en/3.2/topics/i18n/ - -LANGUAGE_CODE = 'en-us' - -TIME_ZONE = 'UTC' - -USE_I18N = True - -USE_L10N = True - -USE_TZ = True - - -# Static files (CSS, JavaScript, Images) -# https://docs.djangoproject.com/en/3.2/howto/static-files/ - -STATIC_URL = '/static/' - -# Default primary key field type -# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field - -DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' - -# import os - -# LOGGING = { - # 'version': 1, - # 'disable_existing_loggers': False, - # 'handlers': { - # 'console': { - # 'class': 'logging.StreamHandler', - # }, - # }, - # 'root': { - # 'handlers': ['console'], - # 'level': 'WARNING', - # }, - # 'loggers': { - # 'django': { - # 'handlers': ['console'], - # 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), - # 'propagate': False, - # }, - # }, -# } diff --git a/python/mrt/frontend/frontend/urls.py b/python/mrt/frontend/frontend/urls.py deleted file mode 100644 index a2fd576f..00000000 --- a/python/mrt/frontend/frontend/urls.py +++ /dev/null @@ -1,22 +0,0 @@ -"""frontend URL Configuration - -The `urlpatterns` list routes URLs to views. For more information please see: - https://docs.djangoproject.com/en/3.2/topics/http/urls/ -Examples: -Function views - 1. Add an import: from my_app import views - 2. Add a URL to urlpatterns: path('', views.home, name='home') -Class-based views - 1. Add an import: from other_app.views import Home - 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') -Including another URLconf - 1. Import the include() function: from django.urls import include, path - 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) -""" -from django.contrib import admin -from django.urls import include, path - -urlpatterns = [ - path("polls/", include("polls.urls")), - path('admin/', admin.site.urls), -] diff --git a/python/mrt/frontend/frontend/wsgi.py b/python/mrt/frontend/frontend/wsgi.py deleted file mode 100644 index 0b586f31..00000000 --- a/python/mrt/frontend/frontend/wsgi.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -WSGI config for frontend project. - -It exposes the WSGI callable as a module-level variable named ``application``. - -For more information on this file, see -https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ -""" - -import os - -from django.core.wsgi import get_wsgi_application - -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') - -application = get_wsgi_application() diff --git a/python/mrt/frontend/manage.py b/python/mrt/frontend/manage.py deleted file mode 100755 index c9c8c1fa..00000000 --- a/python/mrt/frontend/manage.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -"""Django's command-line utility for administrative tasks.""" -import os -import sys - - -def main(): - """Run administrative tasks.""" - os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend.settings') - try: - from django.core.management import execute_from_command_line - except ImportError as exc: - raise ImportError( - "Couldn't import Django. Are you sure it's installed and " - "available on your PYTHONPATH environment variable? Did you " - "forget to activate a virtual environment?" 
- ) from exc - execute_from_command_line(sys.argv) - - -if __name__ == '__main__': - main() diff --git a/python/mrt/frontend/polls/__init__.py b/python/mrt/frontend/polls/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python/mrt/frontend/polls/admin.py b/python/mrt/frontend/polls/admin.py deleted file mode 100644 index 8c38f3f3..00000000 --- a/python/mrt/frontend/polls/admin.py +++ /dev/null @@ -1,3 +0,0 @@ -from django.contrib import admin - -# Register your models here. diff --git a/python/mrt/frontend/polls/apps.py b/python/mrt/frontend/polls/apps.py deleted file mode 100644 index 5a5f94ca..00000000 --- a/python/mrt/frontend/polls/apps.py +++ /dev/null @@ -1,6 +0,0 @@ -from django.apps import AppConfig - - -class PollsConfig(AppConfig): - default_auto_field = 'django.db.models.BigAutoField' - name = 'polls' diff --git a/python/mrt/frontend/polls/migrations/__init__.py b/python/mrt/frontend/polls/migrations/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python/mrt/frontend/polls/models.py b/python/mrt/frontend/polls/models.py deleted file mode 100644 index 4da9ef0b..00000000 --- a/python/mrt/frontend/polls/models.py +++ /dev/null @@ -1,9 +0,0 @@ -from django.db import models - -# Create your models here. -class Question(models.Model): - question_text = model.CharField(max_length=200) - pub_date = models.DateTimeField("date published") - -class Choice(models.Model): - question = models.ForeignKey(Question, on_delete=models.CASCADE) diff --git a/python/mrt/frontend/polls/tests.py b/python/mrt/frontend/polls/tests.py deleted file mode 100644 index 7ce503c2..00000000 --- a/python/mrt/frontend/polls/tests.py +++ /dev/null @@ -1,3 +0,0 @@ -from django.test import TestCase - -# Create your tests here. 
diff --git a/python/mrt/frontend/polls/urls.py b/python/mrt/frontend/polls/urls.py deleted file mode 100644 index 2d07f441..00000000 --- a/python/mrt/frontend/polls/urls.py +++ /dev/null @@ -1,7 +0,0 @@ -from django.urls import path - -from . import views - -urlpatterns = [ - path("prepare", views.views_prepare, name="prepare"), -] diff --git a/python/mrt/frontend/polls/views.py b/python/mrt/frontend/polls/views.py deleted file mode 100644 index 419b05d6..00000000 --- a/python/mrt/frontend/polls/views.py +++ /dev/null @@ -1,17 +0,0 @@ -from os import path - -from django.http import HttpResponse - -from . import views -from mrt.V3.utils import get_cfg_defaults -from mrt.V3.prepare import prepare - -def views_prepare(request): - yaml_file = path.expanduser("~/mrt_yaml_root/alexnet.yaml") - cfg = get_cfg_defaults() - cfg.merge_from_file(yaml_file) - cfg.freeze() - cm_cfg = cfg.COMMON - pass_cfg = cfg.PREPARE - prepare(cm_cfg, pass_cfg) - return HttpResponse("Hello, quantize") From 09abb3f416ce1e3710769207fe2d59825b17c294 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 12 Nov 2021 09:50:26 +0800 Subject: [PATCH 059/120] upt --- python/mrt/V3/calibrate.py | 5 +- python/mrt/V3/prepare.py | 6 +- python/mrt/web/manage.py | 22 ++++++ python/mrt/web/web/__init__.py | 0 python/mrt/web/web/asgi.py | 16 +++++ python/mrt/web/web/log.py | 20 ++++++ python/mrt/web/web/settings.py | 126 +++++++++++++++++++++++++++++++++ python/mrt/web/web/urls.py | 25 +++++++ python/mrt/web/web/views.py | 98 +++++++++++++++++++++++++ python/mrt/web/web/wsgi.py | 16 +++++ 10 files changed, 329 insertions(+), 5 deletions(-) create mode 100755 python/mrt/web/manage.py create mode 100644 python/mrt/web/web/__init__.py create mode 100644 python/mrt/web/web/asgi.py create mode 100644 python/mrt/web/web/log.py create mode 100644 python/mrt/web/web/settings.py create mode 100644 python/mrt/web/web/urls.py create mode 100644 python/mrt/web/web/views.py create mode 100644 python/mrt/web/web/wsgi.py diff --git 
a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index 18ad83fe..3e4df486 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -42,7 +42,7 @@ default=default_device_ids).dest: (_pname, "DEVICE_IDS"), }) -def calibrate(cm_cfg, pass_cfg): +def calibrate(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY @@ -55,7 +55,8 @@ def calibrate(cm_cfg, pass_cfg): batch=pass_cfg.BATCH model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) + if logger is None: + logger = get_logger(verbosity) conf_prep_file = model_prefix + ".prepare.conf" check_file_existance(conf_prep_file, logger=logger) conf_map = load_conf(conf_prep_file, logger=logger) diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index 71e863b1..98c083bb 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -32,8 +32,7 @@ default=[]).dest: (_pname, "SPLIT_KEYS"), }) -def prepare(cm_cfg, pass_cfg): - print("hihihisss") +def prepare(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY @@ -43,7 +42,8 @@ def prepare(cm_cfg, pass_cfg): split_keys = pass_cfg.SPLIT_KEYS model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) + if logger is None: + logger = get_logger(verbosity) conf_prep_file = model_prefix + ".prepare.conf" conf_map = {} diff --git a/python/mrt/web/manage.py b/python/mrt/web/manage.py new file mode 100755 index 00000000..19be6dd3 --- /dev/null +++ b/python/mrt/web/manage.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +"""Django's command-line utility for administrative tasks.""" +import os +import sys + + +def main(): + """Run administrative tasks.""" + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't 
import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" + ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/python/mrt/web/web/__init__.py b/python/mrt/web/web/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/web/web/asgi.py b/python/mrt/web/web/asgi.py new file mode 100644 index 00000000..9446ae35 --- /dev/null +++ b/python/mrt/web/web/asgi.py @@ -0,0 +1,16 @@ +""" +ASGI config for web project. + +It exposes the ASGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ +""" + +import os + +from django.core.asgi import get_asgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') + +application = get_asgi_application() diff --git a/python/mrt/web/web/log.py b/python/mrt/web/web/log.py new file mode 100644 index 00000000..c55cb690 --- /dev/null +++ b/python/mrt/web/web/log.py @@ -0,0 +1,20 @@ +import logging +from mrt.common.log import ( + LOG_LEVELS, ColorFormatter, FilterList, name2level +) + +def log_init(log_level, streamer): + assert log_level in LOG_LEVELS + logging.basicConfig(level=log_level, stream=streamer) + formatter = ColorFormatter( + fmt="[ %(asctime)s %(name)10s %(levelname)5s ] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S") + log_filter = FilterList(log_level=log_level, default=False) + for handler in logging.root.handlers: + handler.addFilter(log_filter) + handler.setFormatter(formatter) + +def get_logger(verbosity, streamer): + log_init(name2level(verbosity.upper()), streamer) + logger = logging.getLogger("log.main") + return logger diff --git a/python/mrt/web/web/settings.py b/python/mrt/web/web/settings.py new file mode 100644 index 00000000..94524c00 --- /dev/null +++ b/python/mrt/web/web/settings.py @@ -0,0 +1,126 @@ +""" 
+Django settings for web project. + +Generated by 'django-admin startproject' using Django 3.2.9. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/3.2/ref/settings/ +""" + +from pathlib import Path + +# Build paths inside the project like this: BASE_DIR / 'subdir'. +BASE_DIR = Path(__file__).resolve().parent.parent + + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! +SECRET_KEY = 'django-insecure-cpi#8nm8p_dm!)kkn+^ugib_g*=ip224p5s@5_&aj1nz!$p)uh' + +# SECURITY WARNING: don't run with debug turned on in production! +DEBUG = True + +ALLOWED_HOSTS = [] + + +# Application definition + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', + 'web', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +ROOT_URLCONF = 'web.urls' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +WSGI_APPLICATION = 'web.wsgi.application' + + +# Database +# 
https://docs.djangoproject.com/en/3.2/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': BASE_DIR / 'db.sqlite3', + } +} + + +# Password validation +# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + }, +] + + +# Internationalization +# https://docs.djangoproject.com/en/3.2/topics/i18n/ + +LANGUAGE_CODE = 'en-us' + +TIME_ZONE = 'UTC' + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + + +# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/3.2/howto/static-files/ + +STATIC_URL = '/static/' + +# Default primary key field type +# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field + +DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' diff --git a/python/mrt/web/web/urls.py b/python/mrt/web/web/urls.py new file mode 100644 index 00000000..65ed989f --- /dev/null +++ b/python/mrt/web/web/urls.py @@ -0,0 +1,25 @@ +"""web URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en/3.2/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. Add an import: from other_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. Import the include() function: from django.urls import include, path + 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) +""" +from django.urls import path + +from . 
import views + +urlpatterns = [ + path('prepare/', views.mrt_prepare_log), + path('calibrate/', views.mrt_calibrate_log), + path('quantize/', views.mrt_quantize_log), + path('evaluate/', views.mrt_evaluate_log), +] diff --git a/python/mrt/web/web/views.py b/python/mrt/web/web/views.py new file mode 100644 index 00000000..ec2714ba --- /dev/null +++ b/python/mrt/web/web/views.py @@ -0,0 +1,98 @@ +from queue import Queue, Empty +from threading import Thread, current_thread +import time +import sys +import os +import logging + +from django.http.response import StreamingHttpResponse + +from mrt.V3.utils import get_cfg_defaults +from mrt.V3.prepare import prepare +from mrt.V3.calibrate import calibrate +from mrt.V3.quantize import quantize +from mrt.V3.evaluate import evaluate +# from mrt.V3.mrt_compile import mrt_compile +from .log import get_logger + +class Printer: + def __init__(self): + self.queues = {} + + def write(self, value): + queue = self.queues.get(current_thread().name) + if queue: + queue.put(value) + else: + sys.__stdout__.write(value) + + def flush(self): + pass + + def register(self, thread): + queue = Queue() + self.queues[thread.name] = queue + return queue + + def clean(self, thread): + del self.queues[thread.name] + +printer = Printer() +sys.stdout = printer + + +class Streamer: + def __init__(self, target, args): + self.thread = Thread(target=target, args=args) + self.queue = printer.register(self.thread) + + def start(self): + self.thread.start() + # print('This should be stdout') + while self.thread.is_alive(): + try: + item = self.queue.get_nowait() + yield f'{item}
' + except Empty: + pass + yield 'End' + printer.clean(self.thread) + +mrt_web_tmp_dir = os.path.expanduser("~/.mrt_web") +os.makedirs(mrt_web_tmp_dir, exist_ok=True) + +def get_cfg(yaml_file, attr): + cfg = get_cfg_defaults() + cfg.merge_from_file(yaml_file) + cfg.freeze() + cm_cfg = cfg.COMMON + pass_cfg = getattr(cfg, attr) + return cm_cfg, pass_cfg + +def mrt_prepare_log(request): + yaml_file = os.path.expanduser("~/mrt_yaml_root/alexnet.yaml") + cm_cfg, pass_cfg = get_cfg(yaml_file, "PREPARE") + logger = get_logger(cm_cfg.VERBOSITY, printer) + streamer = Streamer(prepare, (cm_cfg, pass_cfg, logger)) + return StreamingHttpResponse(streamer.start()) + +def mrt_calibrate_log(request): + yaml_file = os.path.expanduser("~/mrt_yaml_root/alexnet.yaml") + cm_cfg, pass_cfg = get_cfg(yaml_file, "CALIBRATE") + logger = get_logger(cm_cfg.VERBOSITY, printer) + streamer = Streamer(calibrate, (cm_cfg, pass_cfg, logger)) + return StreamingHttpResponse(streamer.start()) + +def mrt_quantize_log(request): + yaml_file = os.path.expanduser("~/mrt_yaml_root/alexnet.yaml") + cm_cfg, pass_cfg = get_cfg(yaml_file, "QUANTIZE") + logger = get_logger(cm_cfg.VERBOSITY, printer) + streamer = Streamer(quantize, (cm_cfg, pass_cfg, logger)) + return StreamingHttpResponse(streamer.start()) + +def mrt_evaluate_log(request): + yaml_file = os.path.expanduser("~/mrt_yaml_root/alexnet.yaml") + cm_cfg, pass_cfg = get_cfg(yaml_file, "EVALUATE") + logger = get_logger(cm_cfg.VERBOSITY, printer) + streamer = Streamer(evaluate, (cm_cfg, pass_cfg, logger)) + return StreamingHttpResponse(streamer.start()) diff --git a/python/mrt/web/web/wsgi.py b/python/mrt/web/web/wsgi.py new file mode 100644 index 00000000..d092b9ab --- /dev/null +++ b/python/mrt/web/web/wsgi.py @@ -0,0 +1,16 @@ +""" +WSGI config for web project. + +It exposes the WSGI callable as a module-level variable named ``application``. 
+ +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ +""" + +import os + +from django.core.wsgi import get_wsgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') + +application = get_wsgi_application() From d24843317e703d0b04159f2f0f879a659a4c1ec0 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 12 Nov 2021 10:08:16 +0800 Subject: [PATCH 060/120] upt --- .gitignore | 2 +- python/mrt/V3/evaluate.py | 5 +++-- python/mrt/V3/mrt_compile.py | 5 +++-- python/mrt/V3/quantize.py | 5 +++-- python/mrt/web/web/urls.py | 1 + python/mrt/web/web/views.py | 11 +++++++++-- 6 files changed, 20 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 754e0a02..421c2ca5 100644 --- a/.gitignore +++ b/.gitignore @@ -57,4 +57,4 @@ docs/doctrees docs/doxygen_output # django -python/mrt/frontend/db.sqlite3 +python/mrt/web/db.sqlite3 diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index 6ee4b681..b8e067f0 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -35,7 +35,7 @@ "--iter-num", type=int, default=10).dest: (_pname, "ITER_NUM"), }) -def evaluate(cm_cfg, pass_cfg): +def evaluate(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY @@ -45,7 +45,8 @@ def evaluate(cm_cfg, pass_cfg): batch = pass_cfg.BATCH model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) + if logger is None: + logger = get_logger(verbosity) conf_quant_file = model_prefix + ".quantize.conf" check_file_existance(conf_quant_file, logger=logger) conf_map = load_conf(conf_quant_file, logger=logger) diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index 2c09bea6..e156206a 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -35,7 +35,7 @@ default=default_device_ids).dest: (_cnode, "DEVICE_IDS"), }) -def mrt_compile(cm_cfg, pass_cfg): +def 
mrt_compile(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY @@ -45,7 +45,8 @@ def mrt_compile(cm_cfg, pass_cfg): batch = pass_cfg.BATCH model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) + if logger is None: + logger = get_logger(verbosity) conf_quant_file = model_prefix + ".quantize.conf" check_file_existance(conf_quant_file, logger=logger) conf_map = load_conf(conf_quant_file, logger=logger) diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index 1cc233b9..14c32539 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -48,7 +48,7 @@ "--oscale-maps", type=tuple, default=[]).dest: (_pname, "OSCALE_MAPS"), }) -def quantize(cm_cfg, pass_cfg): +def quantize(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY @@ -65,7 +65,8 @@ def quantize(cm_cfg, pass_cfg): oscale_maps = {opn1: opn2 for opn1, opn2 in pass_cfg.OSCALE_MAPS} model_prefix = get_model_prefix(model_dir, model_name) - logger = get_logger(verbosity) + if logger is None: + logger = get_logger(verbosity) conf_calib_file = model_prefix + ".calibrate.conf" check_file_existance(conf_calib_file, logger=logger) conf_map = load_conf(conf_calib_file, logger=logger) diff --git a/python/mrt/web/web/urls.py b/python/mrt/web/web/urls.py index 65ed989f..1fcad321 100644 --- a/python/mrt/web/web/urls.py +++ b/python/mrt/web/web/urls.py @@ -22,4 +22,5 @@ path('calibrate/', views.mrt_calibrate_log), path('quantize/', views.mrt_quantize_log), path('evaluate/', views.mrt_evaluate_log), + path('compile/', views.mrt_compile_log), ] diff --git a/python/mrt/web/web/views.py b/python/mrt/web/web/views.py index ec2714ba..9a9413d5 100644 --- a/python/mrt/web/web/views.py +++ b/python/mrt/web/web/views.py @@ -12,7 +12,7 @@ from mrt.V3.calibrate import calibrate from mrt.V3.quantize import quantize from mrt.V3.evaluate import 
evaluate -# from mrt.V3.mrt_compile import mrt_compile +from mrt.V3.mrt_compile import mrt_compile from .log import get_logger class Printer: @@ -55,7 +55,7 @@ def start(self): yield f'{item}
' except Empty: pass - yield 'End' + yield '
***End***
' printer.clean(self.thread) mrt_web_tmp_dir = os.path.expanduser("~/.mrt_web") @@ -96,3 +96,10 @@ def mrt_evaluate_log(request): logger = get_logger(cm_cfg.VERBOSITY, printer) streamer = Streamer(evaluate, (cm_cfg, pass_cfg, logger)) return StreamingHttpResponse(streamer.start()) + +def mrt_compile_log(request): + yaml_file = os.path.expanduser("~/mrt_yaml_root/alexnet.yaml") + cm_cfg, pass_cfg = get_cfg(yaml_file, "COMPILE") + logger = get_logger(cm_cfg.VERBOSITY, printer) + streamer = Streamer(mrt_compile, (cm_cfg, pass_cfg, logger)) + return StreamingHttpResponse(streamer.start()) From 9591a43c3aca9386dcb9e03701b2f31318f03318 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 12 Nov 2021 11:35:39 +0800 Subject: [PATCH 061/120] upt --- python/mrt/web/web/asgi.py | 9 ++++- python/mrt/web/web/consumers.py | 14 ++++++++ python/mrt/web/web/routing.py | 8 +++++ python/mrt/web/web/settings.py | 2 ++ python/mrt/web/web/templates/room.html | 50 ++++++++++++++++++++++++++ python/mrt/web/web/urls.py | 1 + python/mrt/web/web/views.py | 4 +++ 7 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 python/mrt/web/web/consumers.py create mode 100644 python/mrt/web/web/routing.py create mode 100644 python/mrt/web/web/templates/room.html diff --git a/python/mrt/web/web/asgi.py b/python/mrt/web/web/asgi.py index 9446ae35..90e7fb23 100644 --- a/python/mrt/web/web/asgi.py +++ b/python/mrt/web/web/asgi.py @@ -9,8 +9,15 @@ import os +from channels.auth import AuthMiddlewareStack +from channels.routing import ProtocolTypeRouter, URLRouter from django.core.asgi import get_asgi_application +import web.routing os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') -application = get_asgi_application() +application = ProtocolTypeRouter({ + "http": get_asgi_application(), + "websocket": AuthMiddlewareStack( + URLRouter(web.routing.websocket_urlpatterns)), +}) diff --git a/python/mrt/web/web/consumers.py b/python/mrt/web/web/consumers.py new file mode 100644 index 
00000000..f6cbeeab --- /dev/null +++ b/python/mrt/web/web/consumers.py @@ -0,0 +1,14 @@ +import json +from channels.generic.websocket import WebsocketConsumer + +class ChatConsumer(WebsocketConsumer): + def connect(self): + self.accept() + + def disconnect(self, close_code): + pass + + def receive(self, text_data): + text_data_json = json.loads(text_data) + message = text_data_json['message'] + self.send(text_data=json.dumps({'message': message})) diff --git a/python/mrt/web/web/routing.py b/python/mrt/web/web/routing.py new file mode 100644 index 00000000..7353d483 --- /dev/null +++ b/python/mrt/web/web/routing.py @@ -0,0 +1,8 @@ +from django.urls import re_path + +from . import consumers + +websocket_urlpatterns = [ + re_path(r'ws/web/(?P\w+)/$', + consumers.ChatConsumer.as_asgi()), +] diff --git a/python/mrt/web/web/settings.py b/python/mrt/web/web/settings.py index 94524c00..22b77fde 100644 --- a/python/mrt/web/web/settings.py +++ b/python/mrt/web/web/settings.py @@ -38,6 +38,7 @@ 'django.contrib.messages', 'django.contrib.staticfiles', 'web', + 'channels', ] MIDDLEWARE = [ @@ -69,6 +70,7 @@ ] WSGI_APPLICATION = 'web.wsgi.application' +ASGI_APPLICATION = 'web.asgi.application' # Database diff --git a/python/mrt/web/web/templates/room.html b/python/mrt/web/web/templates/room.html new file mode 100644 index 00000000..e1786394 --- /dev/null +++ b/python/mrt/web/web/templates/room.html @@ -0,0 +1,50 @@ + + + + + + Chat Room + + +
+
+ + {{ room_name|json_script:"room-name" }} + + + diff --git a/python/mrt/web/web/urls.py b/python/mrt/web/web/urls.py index 1fcad321..e6a233bc 100644 --- a/python/mrt/web/web/urls.py +++ b/python/mrt/web/web/urls.py @@ -23,4 +23,5 @@ path('quantize/', views.mrt_quantize_log), path('evaluate/', views.mrt_evaluate_log), path('compile/', views.mrt_compile_log), + path('/', views.room) ] diff --git a/python/mrt/web/web/views.py b/python/mrt/web/web/views.py index 9a9413d5..4f5c0b58 100644 --- a/python/mrt/web/web/views.py +++ b/python/mrt/web/web/views.py @@ -6,6 +6,7 @@ import logging from django.http.response import StreamingHttpResponse +from django.shortcuts import render from mrt.V3.utils import get_cfg_defaults from mrt.V3.prepare import prepare @@ -103,3 +104,6 @@ def mrt_compile_log(request): logger = get_logger(cm_cfg.VERBOSITY, printer) streamer = Streamer(mrt_compile, (cm_cfg, pass_cfg, logger)) return StreamingHttpResponse(streamer.start()) + +def room(request, room_name): + return render(request, "room.html", {"room_name": room_name}) From f0e93d9f87145f1f5df717af8bf18a8f87857b4e Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 12 Nov 2021 14:12:29 +0800 Subject: [PATCH 062/120] upt --- main2.py | 78 +++++++------------------- python/mrt/V3/execute.py | 52 +++++++++++++++++ python/mrt/V3/utils.py | 16 ++---- python/mrt/web/web/consumers.py | 15 ++++- python/mrt/web/web/streamer.py | 55 ++++++++++++++++++ python/mrt/web/web/templates/room.html | 37 ++++++++---- 6 files changed, 170 insertions(+), 83 deletions(-) create mode 100644 python/mrt/V3/execute.py create mode 100644 python/mrt/web/web/streamer.py diff --git a/main2.py b/main2.py index 947057e7..bee84927 100644 --- a/main2.py +++ b/main2.py @@ -1,68 +1,30 @@ -import sys from os import path -from mrt.V3.utils import get_cfg_defaults, override_cfg_argparse, parser -from mrt.V3.prepare import prepare -from mrt.V3.calibrate import calibrate -from mrt.V3.quantize import quantize -from mrt.V3.evaluate import 
evaluate -from mrt.V3.mrt_compile import mrt_compile +from mrt.V3.utils import get_cfg_defaults, parser +from mrt.V3.execute import run -thismodule = sys.modules[__name__] +parser.add_argument("yaml_file", type=str) +parser.add_argument( + "--pass-name", type=str, default="all", choices=[ + "all", "prepare", "calibrate", "quantize", "evaluate", "compile"]) -def yaml_main(cfg): +def override_cfg_argparse(cfg, args): if cfg.is_frozen(): cfg.defrost() - for prefix in ["BATCH", "DEVICE_TYPE", "DEVICE_IDS"]: - for subcfg in [cfg.PREPARE, cfg.CALIBRATE, cfg.QUANTIZE, - cfg.EVALUATE, cfg.COMPILE]: - for attr in dir(subcfg): - if attr == prefix and getattr(subcfg, prefix) is None: - setattr(subcfg, prefix, getattr(cfg.COMMON, prefix)) - if not cfg.is_frozen(): - cfg.freeze() - start_pos = 0 - start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} - if cfg.COMMON.START_AFTER in start_pos_map: - start_pos = start_pos_map[cfg.COMMON.START_AFTER] - if start_pos < 1: - prepare(cfg.COMMON, cfg.PREPARE) - if start_pos < 2: - calibrate(cfg.COMMON, cfg.CALIBRATE) - if start_pos < 3: - quantize(cfg.COMMON, cfg.QUANTIZE) - if cfg.COMMON.RUN_EVALUATE: - evaluate(cfg.COMMON, cfg.EVALUATE) - if cfg.COMMON.RUN_COMPILE: - mrt_compile(cfg.COMMON, cfg.COMPILE) - -parser.add_argument("yaml_file", type=str) -parser.add_argument( - "--entry-name", type=str, choices=[ - "prepare", "calibrate", "quantize", "evalueate", "compile"]) + for dest in dir(args): + if dest not in dest2yaml: + continue + pname, attr = dest2yaml[dest] + cnode = getattr(cfg, pname) + argv = getattr(args, dest) + if argv is not None: + setattr(cnode, attr, argv) + cfg.freeze() + return cfg if __name__ == "__main__": args = parser.parse_args() - yaml_file = args.yaml_file - if yaml_file.startswith("~"): - yaml_file = path.expanduser(yaml_file) - cfg = get_cfg_defaults() - cfg.merge_from_file(yaml_file) - cfg.freeze() + cfg = merge_cfg(yaml_file) cfg = override_cfg_argparse(cfg, args) - entry_name = args.entry_name - 
if entry_name is not None: - if entry_name == "compile": - entry_name = "mrt_compile" - if not hasattr(thismodule, entry_name): - raise RuntimeError("invalid entry_name: {}".format(entry_name)) - yaml_func = getattr(thismodule, entry_name) - cm_cfg = cfg.COMMON - if entry_name == "mrt_compile": - cfg_name = "COMPILE" - else: - cfg_name = entry_name.upper() - pass_cfg = getattr(cfg, cfg_name) - yaml_func(cm_cfg, pass_cfg) - else: - yaml_main(cfg) + pass_name = args.pass_name + run(cfg, pass_name) diff --git a/python/mrt/V3/execute.py b/python/mrt/V3/execute.py new file mode 100644 index 00000000..519f54ce --- /dev/null +++ b/python/mrt/V3/execute.py @@ -0,0 +1,52 @@ +import sys + +from mrt.V3.prepare import prepare +from mrt.V3.calibrate import calibrate +from mrt.V3.quantize import quantize +from mrt.V3.evaluate import evaluate +from mrt.V3.mrt_compile import mrt_compile + +thismodule = sys.modules[__name__] + +def yaml_main(cfg, logger=None): + if cfg.is_frozen(): + cfg.defrost() + for prefix in ["BATCH", "DEVICE_TYPE", "DEVICE_IDS"]: + for subcfg in [cfg.PREPARE, cfg.CALIBRATE, cfg.QUANTIZE, + cfg.EVALUATE, cfg.COMPILE]: + for attr in dir(subcfg): + if attr == prefix and getattr(subcfg, prefix) is None: + setattr(subcfg, prefix, getattr(cfg.COMMON, prefix)) + if not cfg.is_frozen(): + cfg.freeze() + start_pos = 0 + start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} + if cfg.COMMON.START_AFTER in start_pos_map: + start_pos = start_pos_map[cfg.COMMON.START_AFTER] + if start_pos < 1: + prepare(cfg.COMMON, cfg.PREPARE, logger=logger) + if start_pos < 2: + calibrate(cfg.COMMON, cfg.CALIBRATE, logger=logger) + if start_pos < 3: + quantize(cfg.COMMON, cfg.QUANTIZE, logger=logger) + if cfg.COMMON.RUN_EVALUATE: + evaluate(cfg.COMMON, cfg.EVALUATE, logger=logger) + if cfg.COMMON.RUN_COMPILE: + mrt_compile(cfg.COMMON, cfg.COMPILE, logger=logger) + +def run(cfg, pass_name, logger=None): + if pass_name is not None: + if pass_name == "compile": + pass_name = 
"mrt_compile" + if not hasattr(thismodule, pass_name): + raise RuntimeError("invalid pass_name: {}".format(pass_name)) + yaml_func = getattr(thismodule, pass_name) + cm_cfg = cfg.COMMON + if pass_name == "mrt_compile": + cfg_name = "COMPILE" + else: + cfg_name = pass_name.upper() + pass_cfg = getattr(cfg, cfg_name) + yaml_func(cm_cfg, pass_cfg, logger=logger) + else: + yaml_main(cfg, logger=logger) diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 26b10d6c..089e0ccc 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -166,16 +166,10 @@ def get_cfg_defaults(): # This is for the "local variable" use pattern return MRT_CFG.clone() -def override_cfg_argparse(cfg, args): - if cfg.is_frozen(): - cfg.defrost() - for dest in dir(args): - if dest not in dest2yaml: - continue - pname, attr = dest2yaml[dest] - cnode = getattr(cfg, pname) - argv = getattr(args, dest) - if argv is not None: - setattr(cnode, attr, argv) +def merge_cfg(yaml_file): + if yaml_file.startswith("~"): + yaml_file = path.expanduser(yaml_file) + cfg = get_cfg_defaults() + cfg.merge_from_file(yaml_file) cfg.freeze() return cfg diff --git a/python/mrt/web/web/consumers.py b/python/mrt/web/web/consumers.py index f6cbeeab..92725667 100644 --- a/python/mrt/web/web/consumers.py +++ b/python/mrt/web/web/consumers.py @@ -1,6 +1,12 @@ import json +import os from channels.generic.websocket import WebsocketConsumer +from .log import get_logger +from .streamer import Streamer, printer +from mrt.V3.utils import merge_cfg +from mrt.V3.execute import run + class ChatConsumer(WebsocketConsumer): def connect(self): self.accept() @@ -10,5 +16,10 @@ def disconnect(self, close_code): def receive(self, text_data): text_data_json = json.loads(text_data) - message = text_data_json['message'] - self.send(text_data=json.dumps({'message': message})) + yaml_file = text_data_json['yaml_file'] + cfg = merge_cfg(yaml_file) + pass_name = text_data_json['pass_name'] + logger = 
get_logger(cm_cfg.VERBOSITY, printer) + my_streamer = Streamer(run, (cfg, pass_name, logger)) + for message in my_streamer.start(): + self.send(text_data=json.dumps({'message': message})) diff --git a/python/mrt/web/web/streamer.py b/python/mrt/web/web/streamer.py new file mode 100644 index 00000000..b559bbd6 --- /dev/null +++ b/python/mrt/web/web/streamer.py @@ -0,0 +1,55 @@ +from queue import Queue, Empty +from threading import Thread, current_thread +import time +import sys +import os +import logging + +from django.http.response import StreamingHttpResponse +from django.shortcuts import render + +from mrt.V3.utils import get_cfg_defaults + + +class Printer: + def __init__(self): + self.queues = {} + + def write(self, value): + queue = self.queues.get(current_thread().name) + if queue: + queue.put(value) + else: + sys.__stdout__.write(value) + + def flush(self): + pass + + def register(self, thread): + queue = Queue() + self.queues[thread.name] = queue + return queue + + def clean(self, thread): + del self.queues[thread.name] + +printer = Printer() +sys.stdout = printer + + +class Streamer: + def __init__(self, target, args): + self.thread = Thread(target=target, args=args) + self.queue = printer.register(self.thread) + + def start(self): + self.thread.start() + # print('This should be stdout') + while self.thread.is_alive(): + try: + item = self.queue.get_nowait() + yield item.strip() + except Empty: + pass + yield '\n***End***' + printer.clean(self.thread) diff --git a/python/mrt/web/web/templates/room.html b/python/mrt/web/web/templates/room.html index e1786394..2bc3afc4 100644 --- a/python/mrt/web/web/templates/room.html +++ b/python/mrt/web/web/templates/room.html @@ -6,9 +6,19 @@ Chat Room -
-
- + +
+ + +
+ {{ room_name|json_script:"room-name" }} From a7b85ebdaaee1d272521458a60ac815af6d58fb4 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 12 Nov 2021 14:48:07 +0800 Subject: [PATCH 063/120] upt --- main2.py | 4 +++- python/mrt/web/web/consumers.py | 2 +- python/mrt/web/web/templates/room.html | 29 ++++++++++++-------------- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/main2.py b/main2.py index bee84927..f0d250dc 100644 --- a/main2.py +++ b/main2.py @@ -1,6 +1,7 @@ from os import path -from mrt.V3.utils import get_cfg_defaults, parser +from mrt.V3.utils import ( + get_cfg_defaults, parser, merge_cfg, dest2yaml) from mrt.V3.execute import run parser.add_argument("yaml_file", type=str) @@ -24,6 +25,7 @@ def override_cfg_argparse(cfg, args): if __name__ == "__main__": args = parser.parse_args() + yaml_file = args.yaml_file cfg = merge_cfg(yaml_file) cfg = override_cfg_argparse(cfg, args) pass_name = args.pass_name diff --git a/python/mrt/web/web/consumers.py b/python/mrt/web/web/consumers.py index 92725667..04284476 100644 --- a/python/mrt/web/web/consumers.py +++ b/python/mrt/web/web/consumers.py @@ -19,7 +19,7 @@ def receive(self, text_data): yaml_file = text_data_json['yaml_file'] cfg = merge_cfg(yaml_file) pass_name = text_data_json['pass_name'] - logger = get_logger(cm_cfg.VERBOSITY, printer) + logger = get_logger(cfg.COMMON.VERBOSITY, printer) my_streamer = Streamer(run, (cfg, pass_name, logger)) for message in my_streamer.start(): self.send(text_data=json.dumps({'message': message})) diff --git a/python/mrt/web/web/templates/room.html b/python/mrt/web/web/templates/room.html index 2bc3afc4..e68248e6 100644 --- a/python/mrt/web/web/templates/room.html +++ b/python/mrt/web/web/templates/room.html @@ -6,9 +6,8 @@ Chat Room - -
- +
+
- + {{ room_name|json_script:"room-name" }} From b9af5e54080b1288e650bd3850c7adf7726faf3d Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 12 Nov 2021 18:32:59 +0800 Subject: [PATCH 064/120] upt --- python/mrt/V3/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 089e0ccc..0e88517a 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -32,6 +32,7 @@ parser = argparse.ArgumentParser("MRT YAML Interface") _pname = "COMMON" +# TODO: update with yaml dest2yaml = { parser.add_argument( "--model-dir", type=str, From f5e40fb63edd280d3646599b095124ea3158af42 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 15 Nov 2021 15:39:00 +0800 Subject: [PATCH 065/120] pupt --- docs/mrt/mrt_user_guide.md | 7 +++++-- main2.py | 40 +++++++++++++++++------------------- python/mrt/V3/calibrate.py | 25 +--------------------- python/mrt/V3/evaluate.py | 20 ++---------------- python/mrt/V3/execute.py | 3 ++- python/mrt/V3/mrt_compile.py | 19 ++--------------- python/mrt/V3/prepare.py | 21 ++----------------- python/mrt/V3/quantize.py | 34 +++--------------------------- python/mrt/V3/utils.py | 25 +--------------------- 9 files changed, 37 insertions(+), 157 deletions(-) diff --git a/docs/mrt/mrt_user_guide.md b/docs/mrt/mrt_user_guide.md index 7a3ec7f6..b5c0aed2 100644 --- a/docs/mrt/mrt_user_guide.md +++ b/docs/mrt/mrt_user_guide.md @@ -44,8 +44,11 @@ EVALUATE: run command -```python -python main2.py ~/mrt_yaml_root/alexnet.yaml +```bash +python main2.py ~/mrt_yaml_root/alexnet.yaml \ + --calibrate.device_type cpu \ + --calibrate.device_ids [0] \ + --common.pass_name calibrate ``` or run either of the following commands for each pass. 
diff --git a/main2.py b/main2.py index f0d250dc..2210e7a4 100644 --- a/main2.py +++ b/main2.py @@ -1,32 +1,30 @@ from os import path +import sys -from mrt.V3.utils import ( - get_cfg_defaults, parser, merge_cfg, dest2yaml) +from mrt.V3.utils import get_cfg_defaults, merge_cfg from mrt.V3.execute import run -parser.add_argument("yaml_file", type=str) -parser.add_argument( - "--pass-name", type=str, default="all", choices=[ - "all", "prepare", "calibrate", "quantize", "evaluate", "compile"]) - -def override_cfg_argparse(cfg, args): +def override_cfg_args(cfg, argv): if cfg.is_frozen(): cfg.defrost() - for dest in dir(args): - if dest not in dest2yaml: - continue - pname, attr = dest2yaml[dest] - cnode = getattr(cfg, pname) - argv = getattr(args, dest) - if argv is not None: - setattr(cnode, attr, argv) + + for i in range(2, len(argv), 2): + attr, value = argv[i:i+2] + try: + value = eval(value) + except NameError: + pass + pass_name, pass_attr = [s.upper() for s in attr[2:].split(".")] + cnode = getattr(cfg, pass_name) + setattr(cnode, pass_attr, value) cfg.freeze() return cfg if __name__ == "__main__": - args = parser.parse_args() - yaml_file = args.yaml_file + assert len(sys.argv) >= 2 and len(sys.argv)%2 == 0, \ + "invalid length: {} of sys.argv: {}".format(length, sys.argv) + yaml_file = sys.argv[1] + cfg = get_cfg_defaults() cfg = merge_cfg(yaml_file) - cfg = override_cfg_argparse(cfg, args) - pass_name = args.pass_name - run(cfg, pass_name) + cfg = override_cfg_args(cfg, sys.argv) + run(cfg) diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index 3e4df486..7522077a 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -6,7 +6,7 @@ from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, get_model_prefix, get_logger, set_batch, load_fname, save_conf, - load_conf, check_file_existance, get_ctx, parser, update_dest2yaml) + load_conf, check_file_existance, get_ctx) default_num_calib = 1 @@ 
-19,29 +19,6 @@ MRT_CFG.CALIBRATE.DEVICE_TYPE = default_device_type MRT_CFG.CALIBRATE.DEVICE_IDS = default_device_ids -_pname = "CALIBRATE" -update_dest2yaml({ - parser.add_argument( - "--batch-calibrate", type=int, - default=default_batch).dest: (_pname, "BATCH"), - parser.add_argument( - "--num-calib", type=int, - default=default_num_calib).dest: (_pname, "NUM_CALIB"), - parser.add_argument("--lambd", type=float).dest: (_pname, "LAMBD"), - parser.add_argument( - "--dataset-name", type=str, default="imagenet", - choices=list(ds.DS_REG.keys())).dest: (_pname, "DATASET_NAME"), - parser.add_argument( - "--dataset-dir", type=str, - default=conf.MRT_DATASET_ROOT).dest: (_pname, "DATASET_DIR"), - parser.add_argument( - "--device-type-calibrate", type=str, choices=["cpu", "gpu"], - default=default_device_type).dest: (_pname, "DEVICE_TYPE"), - parser.add_argument( - "--device-ids-calibrate", type=int, nargs="+", - default=default_device_ids).dest: (_pname, "DEVICE_IDS"), -}) - def calibrate(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index b8e067f0..a3cc4c59 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -10,9 +10,8 @@ from mrt import sim_quant_helper as sim from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, - get_model_prefix, get_logger, set_batch, load_fname, - load_conf, check_file_existance, get_ctx, get_batch_axis, parser, - update_dest2yaml) + get_model_prefix, get_logger, set_batch, load_fname, load_conf, + check_file_existance, get_ctx, get_batch_axis) MRT_CFG.EVALUATE = CN() MRT_CFG.EVALUATE.BATCH = default_batch @@ -20,21 +19,6 @@ MRT_CFG.EVALUATE.DEVICE_IDS = default_device_ids MRT_CFG.EVALUATE.ITER_NUM = 10 -_pname = "EVALUATE" -update_dest2yaml({ - parser.add_argument( - "--batch-evaluate", type=int, - default=default_batch).dest: (_pname, "BATCH"), - parser.add_argument( - 
"--device-type-evaluate", type=str, choices=["cpu", "gpu"], - default=default_device_type).dest: (_pname, "DEVICE_TYPE"), - parser.add_argument( - "--device-ids-evaluate", type=int, nargs="+", - default=default_device_ids).dest: (_pname, "DEVICE_IDS"), - parser.add_argument( - "--iter-num", type=int, default=10).dest: (_pname, "ITER_NUM"), -}) - def evaluate(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/execute.py b/python/mrt/V3/execute.py index 519f54ce..79e664da 100644 --- a/python/mrt/V3/execute.py +++ b/python/mrt/V3/execute.py @@ -34,7 +34,8 @@ def yaml_main(cfg, logger=None): if cfg.COMMON.RUN_COMPILE: mrt_compile(cfg.COMMON, cfg.COMPILE, logger=logger) -def run(cfg, pass_name, logger=None): +def run(cfg, logger=None): + pass_name = cfg.COMMON.PASS_NAME if pass_name is not None: if pass_name == "compile": pass_name = "mrt_compile" diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index e156206a..7c29bd44 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -9,8 +9,8 @@ from mrt import sim_quant_helper as sim from mrt.V3.utils import ( MRT_CFG, default_device_type, default_device_ids, default_batch, - get_model_prefix, get_logger, set_batch, load_fname, - load_conf, check_file_existance, parser, update_dest2yaml) + get_model_prefix, get_logger, set_batch, load_fname, load_conf, + check_file_existance) default_dump_dir = path.expanduser("~/mrt_dump") @@ -20,21 +20,6 @@ MRT_CFG.COMPILE.DEVICE_TYPE = default_device_type MRT_CFG.COMPILE.DEVICE_IDS = default_device_ids -_cnode = "COMPILE" -update_dest2yaml({ - parser.add_argument( - "--batch-compile", type=int, default=1).dest: (_cnode, "BATCH"), - parser.add_argument( - "--dump-dir", type=str, - default=default_dump_dir).dest: (_cnode, "DUMP_DIR"), - parser.add_argument( - "--device-type-compile", type=str, choices=["cpu", "gpu"], - default=default_device_type).dest: (_cnode, "DEVICE_TYPE"), 
- parser.add_argument( - "--device-ids-compile", type=int, nargs="+", - default=default_device_ids).dest: (_cnode, "DEVICE_IDS"), -}) - def mrt_compile(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index 98c083bb..f64b16ba 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -4,9 +4,8 @@ from mrt.gluon_zoo import save_model from mrt.transformer import Model from mrt.V3.utils import ( - MRT_CFG, default_device_type, default_device_ids, - get_model_prefix, get_logger, set_batch, load_fname, save_conf, - get_ctx, parser, update_dest2yaml) + MRT_CFG, default_device_type, default_device_ids, get_model_prefix, + get_logger, set_batch, load_fname, save_conf, get_ctx) default_input_shape = [-1, 3, 224, 224] @@ -16,22 +15,6 @@ MRT_CFG.PREPARE.INPUT_SHAPE = default_input_shape MRT_CFG.PREPARE.SPLIT_KEYS = [] -_pname = "PREPARE" -update_dest2yaml({ - parser.add_argument( - "--device-type-prepare", type=str, choices=["cpu", "gpu"], - default=default_device_type).dest: (_pname, "DEVICE_TYPE"), - parser.add_argument( - "--device-ids-prepare", type=int, nargs="+", - default=default_device_ids).dest: (_pname, "DEVICE_IDS"), - parser.add_argument( - "--input-shape", type=int, nargs="+", - default=default_input_shape).dest: (_pname, "INPUT_SHAPE"), - parser.add_argument( - "--split-keys", type=str, nargs="+", - default=[]).dest: (_pname, "SPLIT_KEYS"), -}) - def prepare(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index 14c32539..6381aa83 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -4,9 +4,9 @@ from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim from mrt.V3.utils import ( - MRT_CFG, default_device_type, default_device_ids, - get_model_prefix, get_logger, load_fname, save_conf, - load_conf, 
check_file_existance, get_ctx, parser, update_dest2yaml) + MRT_CFG, default_device_type, default_device_ids, get_model_prefix, + get_logger, load_fname, save_conf, load_conf, check_file_existance, + get_ctx) MRT_CFG.QUANTIZE = CN() MRT_CFG.QUANTIZE.RESTORE_NAMES = [] @@ -20,34 +20,6 @@ MRT_CFG.QUANTIZE.ATTRIBUTE_DEPS = [] MRT_CFG.QUANTIZE.OSCALE_MAPS = [] -_pname = "QUANTIZE" -update_dest2yaml({ - parser.add_argument( - "--restore-names", nargs="+", type=str, - default=[]).dest: (_pname, "RESTORE_NAMES"), - parser.add_argument( - "--input-precision", type=int).dest: (_pname, "INPUT_PRECISION"), - parser.add_argument( - "--output-precision", type=int).dest: (_pname, "OUTPUT_PRECISION"), - parser.add_argument( - "--device-type-quantize", type=str, choices=["cpu", "gpu"], - default=default_device_type).dest: (_pname, "DEVICE_TYPE"), - parser.add_argument( - "--device-ids-quantize", type=int, nargs="+", - default=default_device_ids).dest: (_pname, "DEVICE_IDS"), - parser.add_argument( - "--softmax-lambd", type=float).dest: (_pname, "SOFTMAX_LAMBD"), - parser.add_argument( - "--shift-bits", type=int).dest: (_pname, "SHIFT_BITS"), - parser.add_argument( - "--thresholds", type=tuple, default=[]).dest: (_pname, "THRESHOLDS"), - parser.add_argument( - "--attribute-deps", type=tuple, - default=[]).dest: (_pname, "ATTRIBUTE_DEPS"), - parser.add_argument( - "--oscale-maps", type=tuple, default=[]).dest: (_pname, "OSCALE_MAPS"), -}) - def quantize(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 0e88517a..abefdea3 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -2,7 +2,6 @@ import logging import json from yacs.config import CfgNode as CN -import argparse import mxnet as mx @@ -20,6 +19,7 @@ MRT_CFG = CN() MRT_CFG.COMMON = CN() +MRT_CFG.COMMON.PASS_NAME = None MRT_CFG.COMMON.MODEL_DIR = conf.MRT_MODEL_ROOT MRT_CFG.COMMON.MODEL_NAME = None 
MRT_CFG.COMMON.VERBOSITY = "debug" @@ -30,29 +30,6 @@ MRT_CFG.COMMON.RUN_EVALUATE = True MRT_CFG.COMMON.RUN_COMPILE = True -parser = argparse.ArgumentParser("MRT YAML Interface") -_pname = "COMMON" -# TODO: update with yaml -dest2yaml = { - parser.add_argument( - "--model-dir", type=str, - default=conf.MRT_MODEL_ROOT).dest: (_pname, "MODEL_DIR"), - parser.add_argument( - "--model-name", type=str).dest: (_pname, "MODEL_NAME"), - parser.add_argument( - "--verobosity", type=str, default="debug").dest: (_pname, "VERBOSITY"), - parser.add_argument( - "--start-after", type=str, default=None).dest: (_pname, "START_AFTER"), - parser.add_argument( - "--device-type", type=str, - default=default_device_type).dest: (_pname, "DEVICE_TYPE"), - parser.add_argument( - "--device-ids", type=int, - default=default_device_ids).dest: (_pname, "DEVICE_IDS"), - parser.add_argument( - "--batch", type=int, default=default_batch).dest: (_pname, "BATCH"), -} - def update_dest2yaml(dest2yaml_upt): for dest, cfg in dest2yaml_upt.items(): if dest in dest2yaml: From 55947ba9b34d2ba30e4f2538480c224924caea51 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 22 Nov 2021 15:30:07 +0800 Subject: [PATCH 066/120] upt --- docs/mrt/mrt_user_guide.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/mrt/mrt_user_guide.md b/docs/mrt/mrt_user_guide.md index b5c0aed2..790a194c 100644 --- a/docs/mrt/mrt_user_guide.md +++ b/docs/mrt/mrt_user_guide.md @@ -11,6 +11,21 @@ evoke passes by `mrt_entry.py` Currently supported configuration format by MRT: `yaml`, `argparse`, `ini`(not integrated into mrt_entry.py yet) +# Web Configuration + +```bash +# launch http server +cd /path/to/cvm-runtime/python/mrt/web +python manage.py runserver +``` + +```http +# open in browser +http://127.0.0.1:8000/test +``` + + + # YAML Configuration Examples ## alexnet From 714b3de42a0bb9b8c28488737f199a942a0817a9 Mon Sep 17 00:00:00 2001 From: ryt Date: Thu, 25 Nov 2021 13:25:17 +0800 Subject: [PATCH 067/120] upt --- 
python/mrt/V3/execute.py | 16 ++++++++++------ python/mrt/V3/utils.py | 12 +++++++++--- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/python/mrt/V3/execute.py b/python/mrt/V3/execute.py index 79e664da..d437b4cf 100644 --- a/python/mrt/V3/execute.py +++ b/python/mrt/V3/execute.py @@ -20,9 +20,13 @@ def yaml_main(cfg, logger=None): if not cfg.is_frozen(): cfg.freeze() start_pos = 0 - start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} - if cfg.COMMON.START_AFTER in start_pos_map: - start_pos = start_pos_map[cfg.COMMON.START_AFTER] + start_pos_map = { + 'initial': 0, 'prepare': 1, 'calibrate': 2, 'quantize': 3} + start_after = cfg.COMMON.START_AFTER + assert start_after in start_pos_map, \ + "start_after: {}, start_pos_map: {}".format( + start_after, start_pos_map) + start_pos = start_pos_map[start_after] if start_pos < 1: prepare(cfg.COMMON, cfg.PREPARE, logger=logger) if start_pos < 2: @@ -36,7 +40,9 @@ def yaml_main(cfg, logger=None): def run(cfg, logger=None): pass_name = cfg.COMMON.PASS_NAME - if pass_name is not None: + if pass_name == "all": + yaml_main(cfg, logger=logger) + else: if pass_name == "compile": pass_name = "mrt_compile" if not hasattr(thismodule, pass_name): @@ -49,5 +55,3 @@ def run(cfg, logger=None): cfg_name = pass_name.upper() pass_cfg = getattr(cfg, cfg_name) yaml_func(cm_cfg, pass_cfg, logger=logger) - else: - yaml_main(cfg, logger=logger) diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index abefdea3..ab09d48d 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -10,7 +10,6 @@ from mrt.utils import extend_fname # TODO: jiazhen branch code design -# TODO main jungle server # python server, flask default_device_type = "cpu" default_device_ids = [0] @@ -19,11 +18,11 @@ MRT_CFG = CN() MRT_CFG.COMMON = CN() -MRT_CFG.COMMON.PASS_NAME = None +MRT_CFG.COMMON.PASS_NAME = "all" MRT_CFG.COMMON.MODEL_DIR = conf.MRT_MODEL_ROOT MRT_CFG.COMMON.MODEL_NAME = None MRT_CFG.COMMON.VERBOSITY = "debug" 
-MRT_CFG.COMMON.START_AFTER = None +MRT_CFG.COMMON.START_AFTER = "initial" MRT_CFG.COMMON.DEVICE_TYPE = default_device_type MRT_CFG.COMMON.DEVICE_IDS = default_device_ids MRT_CFG.COMMON.BATCH = default_batch @@ -151,3 +150,10 @@ def merge_cfg(yaml_file): cfg.merge_from_file(yaml_file) cfg.freeze() return cfg + +def revise_cfg(cfg, stage, attr, value): + if cfg.is_frozen(): + cfg.defrost() + subcfg = getattr(cfg, stage) + setattr(subcfg, attr, value) + cfg.freeze() From dd0c814785e6ab175cb5845b348c2d96647907c7 Mon Sep 17 00:00:00 2001 From: ryt Date: Thu, 25 Nov 2021 13:29:36 +0800 Subject: [PATCH 068/120] upt --- python/mrt/web/javascript/init.js | 22 +++ python/mrt/web/javascript/mrt_executor.js | 56 +++++++ python/mrt/web/javascript/utils.js | 22 +++ python/mrt/web/javascript/yaml_clearer.js | 24 +++ python/mrt/web/javascript/yaml_file_loader.js | 10 ++ python/mrt/web/web/consumers.py | 73 ++++++++- python/mrt/web/web/protocol.py | 57 +++++++ python/mrt/web/web/routing.py | 17 +- python/mrt/web/web/settings.py | 5 + python/mrt/web/web/templates/room.html | 153 +++++++++++++----- 10 files changed, 389 insertions(+), 50 deletions(-) create mode 100644 python/mrt/web/javascript/init.js create mode 100644 python/mrt/web/javascript/mrt_executor.js create mode 100644 python/mrt/web/javascript/utils.js create mode 100644 python/mrt/web/javascript/yaml_clearer.js create mode 100644 python/mrt/web/javascript/yaml_file_loader.js create mode 100644 python/mrt/web/web/protocol.py diff --git a/python/mrt/web/javascript/init.js b/python/mrt/web/javascript/init.js new file mode 100644 index 00000000..c435d36c --- /dev/null +++ b/python/mrt/web/javascript/init.js @@ -0,0 +1,22 @@ +import { roomName, update_yaml_configurations } from './utils.js'; + +const yamlInitSocket = new WebSocket( + 'ws://' + + window.location.host + + '/ws/web/yaml/init/' + + roomName + + '/' +); + +yamlInitSocket.onopen = function(e) { + yamlInitSocket.send(null); +}; + +yamlInitSocket.addEventListener( 
+ 'message', update_yaml_configurations); + +const yamlResetter = document.querySelector('#yaml-resetter'); + +yamlResetter.onclick = function(e) { + yamlInitSocket.send(null); +}; diff --git a/python/mrt/web/javascript/mrt_executor.js b/python/mrt/web/javascript/mrt_executor.js new file mode 100644 index 00000000..d978059f --- /dev/null +++ b/python/mrt/web/javascript/mrt_executor.js @@ -0,0 +1,56 @@ +import { roomName } from './utils.js'; + +const mrtExecuteSocket = new WebSocket( + 'ws://' + + window.location.host + + '/ws/web/mrt/execute/' + + roomName + + '/' +); + +const activation_flag = "___activate_executor___"; +const mrtExecutor = document.querySelector('#mrt-executor'); + +mrtExecuteSocket.onmessage = function(e) { + const data = JSON.parse(e.data); + if (data.message === activation_flag) { + mrtExecutor.disabled = false; + } else { + document.querySelector('#chat-log').value += (data.message + '\n'); + } +}; + +mrtExecuteSocket.onclose = function(e) { + console.error('Chat socket closed unexpectedly'); +}; + +const yamlCollectSocket = new WebSocket( + 'ws://' + + window.location.host + + '/ws/web/yaml/collect/' + + roomName + + '/' +); + +yamlCollectSocket.onmessage = function(e) { + const data = JSON.parse(e.data); + let dict = new Object(); + for (const [stage, stage_data] of Object.entries(data)) { + let subdict = new Object(); + for (const attr of Object.keys(stage_data)) { + const id = '#' + stage + '_' + attr; + let value = document.querySelector(id).value; + subdict[attr] = value; + } + dict[stage] = subdict; + } + // overide pass_name + const pass_name = document.querySelector('#mrt-stage-selector').value; + dict["COMMON"]["PASS_NAME"] = pass_name; + mrtExecuteSocket.send(JSON.stringify(dict)); +}; + +mrtExecutor.onclick = function(e) { + mrtExecutor.disabled = true; + yamlCollectSocket.send(null); +}; diff --git a/python/mrt/web/javascript/utils.js b/python/mrt/web/javascript/utils.js new file mode 100644 index 00000000..aee51d40 --- /dev/null 
+++ b/python/mrt/web/javascript/utils.js @@ -0,0 +1,22 @@ +export const roomName = JSON.parse(document.getElementById('room-name').textContent); + +export function update_yaml_configurations(e) { + const data = JSON.parse(e.data); + for (const [stage, stage_data] of Object.entries(data)) { + for (const [attr, value] of Object.entries(stage_data)) { + const id = '#' + stage + '_' + attr; + document.querySelector(id).value = value; + } + } +} + +export const yamlUpdateSocket = new WebSocket( + 'ws://' + + window.location.host + + '/ws/web/yaml/update/' + + roomName + + '/' +); + +yamlUpdateSocket.addEventListener( + 'message', update_yaml_configurations); diff --git a/python/mrt/web/javascript/yaml_clearer.js b/python/mrt/web/javascript/yaml_clearer.js new file mode 100644 index 00000000..dadde501 --- /dev/null +++ b/python/mrt/web/javascript/yaml_clearer.js @@ -0,0 +1,24 @@ +import { roomName } from './utils.js'; + +const yamlClearSocket = new WebSocket( + 'ws://' + + window.location.host + + '/ws/web/yaml/clear/' + + roomName + + '/' +); + +yamlClearSocket.onmessage = function(e) { + const data = JSON.parse(e.data); + for (const [stage, stage_data] of Object.entries(data)) { + for (const [attr, value] of Object.entries(stage_data)) { + const id = '#' + stage + '_' + attr; + document.querySelector(id).value = ''; + } + } +}; + +const yamlClearer = document.querySelector('#yaml-clearer'); +yamlClearer.onclick = function(e) { + yamlClearSocket.send(null) +}; diff --git a/python/mrt/web/javascript/yaml_file_loader.js b/python/mrt/web/javascript/yaml_file_loader.js new file mode 100644 index 00000000..3ff3dcbf --- /dev/null +++ b/python/mrt/web/javascript/yaml_file_loader.js @@ -0,0 +1,10 @@ +import { yamlUpdateSocket } from './utils.js'; + +const yamlLoader = document.querySelector('#yaml-loader'); + +yamlLoader.onclick = function(e) { + const yamlFileLocator = document.querySelector('#yaml-file-locator'); + yamlUpdateSocket.send(JSON.stringify({ + 'yaml_file': 
yamlFileLocator.value, + })); +}; diff --git a/python/mrt/web/web/consumers.py b/python/mrt/web/web/consumers.py index 04284476..0830ef31 100644 --- a/python/mrt/web/web/consumers.py +++ b/python/mrt/web/web/consumers.py @@ -1,13 +1,21 @@ import json +import yaml import os +from os import path from channels.generic.websocket import WebsocketConsumer from .log import get_logger from .streamer import Streamer, printer -from mrt.V3.utils import merge_cfg +from mrt.V3.utils import merge_cfg, revise_cfg, get_cfg_defaults from mrt.V3.execute import run +from .protocol import type_cast -class ChatConsumer(WebsocketConsumer): +tmp_dir = path.expanduser("~/mrt_tmp") +os.makedirs(tmp_dir, exist_ok=True) +activation_flag = "___activate_executor___" + + +class MRTExecuteConsumer(WebsocketConsumer): def connect(self): self.accept() @@ -15,11 +23,62 @@ def disconnect(self, close_code): pass def receive(self, text_data): - text_data_json = json.loads(text_data) - yaml_file = text_data_json['yaml_file'] - cfg = merge_cfg(yaml_file) - pass_name = text_data_json['pass_name'] + json_from_js = json.loads(text_data) + json_data = {} + for stage, stage_data in json_from_js.items(): + sub_type_cast = type_cast[stage] + sub_json_data = {} + for attr, data in stage_data.items(): + if data == '': + continue + if attr in sub_type_cast: + cast_func = sub_type_cast[attr] + data = cast_func(data) + sub_json_data[attr] = data + json_data[stage] = sub_json_data + yaml_str = yaml.dump(json_data) + tmp_yaml_file = path.join(tmp_dir, "tmp.yaml") + with open(tmp_yaml_file, "w") as f: + f.write(yaml_str) + cfg = get_cfg_defaults() + cfg.merge_from_file(tmp_yaml_file) logger = get_logger(cfg.COMMON.VERBOSITY, printer) - my_streamer = Streamer(run, (cfg, pass_name, logger)) + # revise_cfg(cfg, "COMMON", "PASS_NAME", pass_name) + my_streamer = Streamer(run, (cfg, logger)) for message in my_streamer.start(): self.send(text_data=json.dumps({'message': message})) + self.send(text_data=json.dumps({'message': 
activation_flag})) + + +class YAMLInitConsumer(WebsocketConsumer): + def connect(self): + self.accept() + + def disconnect(self, close_code): + pass + + def receive(self, text_data): + cfg = get_cfg_defaults() + self.send(text_data=json.dumps(cfg)) + + +class YAMLUpdateConsumer(WebsocketConsumer): + def connect(self): + self.accept() + + def disconnect(self, close_code): + pass + + def receive(self, text_data): + text_data_json = json.loads(text_data) + yaml_file = text_data_json['yaml_file'] + cfg = merge_cfg(yaml_file) + self.send(text_data=json.dumps(cfg)) + + +class YAMLClearConsumer(YAMLInitConsumer): + pass + + +class YAMLCollectConsumer(YAMLInitConsumer): + pass diff --git a/python/mrt/web/web/protocol.py b/python/mrt/web/web/protocol.py new file mode 100644 index 00000000..1f54abb7 --- /dev/null +++ b/python/mrt/web/web/protocol.py @@ -0,0 +1,57 @@ +import json + +str2listofeval = lambda v: [eval(s) for s in v.split(',')] +str2eval = lambda v: eval(v) +str2listofstr = lambda v: [s.strip() for s in v.split(',')] + +def str2bool(v): + if v == "true": + ret = True + elif v == "false": + ret = False + else: + raise RuntimeError("invalid v: {}".format(v)) + return ret + +# def str2attribute_deps(v): + # print(v) + # ret = json.loads(v) + # return ret + +type_cast = { + "COMMON": { + "DEVICE_IDS": str2listofeval, + "BATCH": str2eval, + "RUN_EVALUATE": str2bool, + "RUN_COMPILE": str2bool, + }, + "PREPARE": { + "DEVICE_IDS": str2listofeval, + "INPUT_SHAPE": str2listofeval, + "SPLIT_KEYS": str2listofstr, + }, + "CALIBRATE": { + "BATCH": str2eval, + "NUM_CALIB": str2eval, + "LAMBD": str2eval, + "DEVICE_IDS": str2listofeval, + }, + "QUANTIZE": { + "RESTORE_NAMES": str2listofstr, + "INPUT_PRECISION": str2eval, + "OUTPUT_PRECISION": str2eval, + "DEVICE_IDS": str2listofeval, + "SOFTMAX_LAMBD": str2eval, + "SHIFT_BITS": str2eval, + # TODO ATTRIBUTE_DEPS, OSCALE_MAPS, THRESHOLDS + }, + "EVALUATE": { + "BATCH": str2eval, + "DEVICE_IDS": str2listofeval, + "ITER_NUM": 
str2eval, + }, + "COMPILE": { + "BATCH": str2eval, + "DEVICE_IDS": str2listofeval, + }, +} diff --git a/python/mrt/web/web/routing.py b/python/mrt/web/web/routing.py index 7353d483..1078cad6 100644 --- a/python/mrt/web/web/routing.py +++ b/python/mrt/web/web/routing.py @@ -3,6 +3,19 @@ from . import consumers websocket_urlpatterns = [ - re_path(r'ws/web/(?P\w+)/$', - consumers.ChatConsumer.as_asgi()), + re_path( + r'ws/web/mrt/execute/(?P\w+)/$', + consumers.MRTExecuteConsumer.as_asgi()), + re_path( + r'ws/web/yaml/init/(?P\w+)/$', + consumers.YAMLInitConsumer.as_asgi()), + re_path( + r'ws/web/yaml/update/(?P\w+)/$', + consumers.YAMLUpdateConsumer.as_asgi()), + re_path( + r'ws/web/yaml/clear/(?P\w+)/$', + consumers.YAMLClearConsumer.as_asgi()), + re_path( + r'ws/web/yaml/collect/(?P\w+)/$', + consumers.YAMLCollectConsumer.as_asgi()), ] diff --git a/python/mrt/web/web/settings.py b/python/mrt/web/web/settings.py index 22b77fde..d317947e 100644 --- a/python/mrt/web/web/settings.py +++ b/python/mrt/web/web/settings.py @@ -11,6 +11,7 @@ """ from pathlib import Path +from os import path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent @@ -120,6 +121,10 @@ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ +STATICFILES_DIRS = [ + path.join(BASE_DIR, "javascript"), +] + STATIC_URL = '/static/' # Default primary key field type diff --git a/python/mrt/web/web/templates/room.html b/python/mrt/web/web/templates/room.html index e68248e6..05f2dd58 100644 --- a/python/mrt/web/web/templates/room.html +++ b/python/mrt/web/web/templates/room.html @@ -1,14 +1,22 @@ +{% load static %} Chat Room -
- - +

+ + + +
+ + +
+ + + +

+ + + + + + +

+ +

YAML Configuration Zone

+ + +
+

COMMON

+ PASS_NAME
+ MODEL_DIR
+ MODEL_NAME
+ VERBOSITY
+ START_AFTER
+ DEVICE_TYPE
+ DEVICE_IDS
+ BATCH
+ RUN_EVALUATE
+ RUN_COMPILE
+
+ + +
+ + +
+

PREPARE

+ DEVICE_TYPE
+ DEVICE_IDS
+ INPUT_SHAPE
+ SPLIT_KEYS
+
+ + +
+ + +
+

CALIBRATE

+ BATCH
+ NUM_CALIB
+ LAMBD
+ DATASET_NAME
+ DATASET_DIR
+ DEVICE_TYPE
+ DEVICE_IDS
+
+ + +
+ + +
+

QUANTIZATE

+ RESTORE_NAMES
+ INPUT_PRECISION
+ OUTPUT_PRECISION
+ DEVICE_TYPE
+ DEVICE_IDS
+ SOFTMAX_LAMBD
+ SHIFT_BITS
+ THRESHOLDS
+ ATTRIBUTE_DEPS
+ OSCALE_MAPS
+
+ +
- + + +
+

EVALUATE

+ BATCH
+ DEVICE_TYPE
+ DEVICE_IDS
+ ITER_NUM
+
+ + +
+ + +
+

COMPLIE

+ BATCH
+ DUMP_DIR
+ DEVICE_TYPE
+ DEVICE_IDS
+
+ {{ room_name|json_script:"room-name" }} - + + + + + From 5a02372bfb60337dd32a4f764872f805883a56c0 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 26 Nov 2021 13:22:00 +0800 Subject: [PATCH 069/120] upt --- main2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main2.py b/main2.py index 2210e7a4..e4ef954b 100644 --- a/main2.py +++ b/main2.py @@ -22,7 +22,7 @@ def override_cfg_args(cfg, argv): if __name__ == "__main__": assert len(sys.argv) >= 2 and len(sys.argv)%2 == 0, \ - "invalid length: {} of sys.argv: {}".format(length, sys.argv) + "invalid length: {} of sys.argv: {}".format(len(sys.argv), sys.argv) yaml_file = sys.argv[1] cfg = get_cfg_defaults() cfg = merge_cfg(yaml_file) From 48db21af73d7f335b9ccabcb6b246a1440e6b824 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 17 Dec 2021 11:27:48 +0800 Subject: [PATCH 070/120] upt --- python/mrt/model_zoo/yolov5s.json | 61 +++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 python/mrt/model_zoo/yolov5s.json diff --git a/python/mrt/model_zoo/yolov5s.json b/python/mrt/model_zoo/yolov5s.json new file mode 100644 index 00000000..1da25286 --- /dev/null +++ b/python/mrt/model_zoo/yolov5s.json @@ -0,0 +1,61 @@ +[DEFAULT] +Model_dir= +Model_name=alexnet +Device_type= +Device_ids= +# default: None(0), DEBUG(10), INFO(20) +# WARNING(30), ERROR(40), CRITICAL(50) +Verbosity=20 +Input_shape=(-1, 3, 224, 224) +Start= + +[PREPARE] +Dump=True + +[SPLIT_MODEL] +Keys= +Dump_dir= + +[CALIBRATION] +Batch=16 +Calibrate_num=1 +# lambda=None +# [ 2020-02-10 16:59:59 mrt.validate.INFO ] Iteration: 312 | evalfunc: top1=55.91% top5=78.75% | quantize: top1=51.69% top5=77.99% | Total Sample: 50080 +# lambda=5 +# [ 2020-02-10 17:41:06 mrt.validate.INFO ] Iteration: 312 | evalfunc: top1=55.91% top5=78.75% | quantize: top1=31.94% top5=56.13% | Total Sample: 50080 +# lambda=16 +# [ 2020-02-10 17:36:52 mrt.validate.INFO ] Iteration: 312 | evalfunc: top1=55.91% top5=78.75% | quantize: top1=51.54% 
top5=77.40% | Total Sample: 50080 +Lambda=16 +Dataset=imagenet +Dataset_dir= +Device_type=gpu +Device_ids=2 +Dump=True + +[QUANTIZATION] +Input_precision=8 +Output_precision=8 +Device_type=gpu +Device_ids=2 +Softmax_lambd= +Shift_bits= +Thresholds= +Restore_name= +Dump=True + +[MERGE_MODEL] +Attribute_deps= +Oscale_maps= +Dump= + +[EVALUATION] +Batch=160 +Device_type=gpu +Device_ids=0 +Iter_num=10 + +[COMPILATION] +Batch=1 +Dump_dir=/data/ryt + +# [DUMP] From b000c9ab89b269987f688bc9e370dfc9f0cf9924 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 17 Dec 2021 12:36:27 +0800 Subject: [PATCH 071/120] upt --- python/mrt/model_zoo/yolov5s.json | 61 ------------------------------- 1 file changed, 61 deletions(-) delete mode 100644 python/mrt/model_zoo/yolov5s.json diff --git a/python/mrt/model_zoo/yolov5s.json b/python/mrt/model_zoo/yolov5s.json deleted file mode 100644 index 1da25286..00000000 --- a/python/mrt/model_zoo/yolov5s.json +++ /dev/null @@ -1,61 +0,0 @@ -[DEFAULT] -Model_dir= -Model_name=alexnet -Device_type= -Device_ids= -# default: None(0), DEBUG(10), INFO(20) -# WARNING(30), ERROR(40), CRITICAL(50) -Verbosity=20 -Input_shape=(-1, 3, 224, 224) -Start= - -[PREPARE] -Dump=True - -[SPLIT_MODEL] -Keys= -Dump_dir= - -[CALIBRATION] -Batch=16 -Calibrate_num=1 -# lambda=None -# [ 2020-02-10 16:59:59 mrt.validate.INFO ] Iteration: 312 | evalfunc: top1=55.91% top5=78.75% | quantize: top1=51.69% top5=77.99% | Total Sample: 50080 -# lambda=5 -# [ 2020-02-10 17:41:06 mrt.validate.INFO ] Iteration: 312 | evalfunc: top1=55.91% top5=78.75% | quantize: top1=31.94% top5=56.13% | Total Sample: 50080 -# lambda=16 -# [ 2020-02-10 17:36:52 mrt.validate.INFO ] Iteration: 312 | evalfunc: top1=55.91% top5=78.75% | quantize: top1=51.54% top5=77.40% | Total Sample: 50080 -Lambda=16 -Dataset=imagenet -Dataset_dir= -Device_type=gpu -Device_ids=2 -Dump=True - -[QUANTIZATION] -Input_precision=8 -Output_precision=8 -Device_type=gpu -Device_ids=2 -Softmax_lambd= -Shift_bits= -Thresholds= 
-Restore_name= -Dump=True - -[MERGE_MODEL] -Attribute_deps= -Oscale_maps= -Dump= - -[EVALUATION] -Batch=160 -Device_type=gpu -Device_ids=0 -Iter_num=10 - -[COMPILATION] -Batch=1 -Dump_dir=/data/ryt - -# [DUMP] From d88b64db3022b1d97cd8afe9f432d54798258448 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 17 Dec 2021 12:47:06 +0800 Subject: [PATCH 072/120] upt --- python/mrt/V3/model_zoo/yolov5s.yaml | 49 ++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 python/mrt/V3/model_zoo/yolov5s.yaml diff --git a/python/mrt/V3/model_zoo/yolov5s.yaml b/python/mrt/V3/model_zoo/yolov5s.yaml new file mode 100644 index 00000000..2db64c54 --- /dev/null +++ b/python/mrt/V3/model_zoo/yolov5s.yaml @@ -0,0 +1,49 @@ +COMMON: + MODEL_NAME: yolov5s + VERBOSITY: info + RUN_EVALUATE: True +PREPARE: + INPUT_SHAPE: [-1, 3, 416, 416] + # SPLIT_KEYS: [ + # "ssd0_multiperclassdecoder0_zeros_like0", + # "ssd0_multiperclassdecoder0_slice_axis0", + # "ssd0_normalizedboxcenterdecoder0_concat0" + # ] +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: voc + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + # THRESHOLDS: [ + # ["data", 2.64], + # ["ssd0_multiperclassdecoder0_slice_axis0", 1], + # ] + # ATTRIBUTE_DEPS: [ + # [ + # "_greater_scalar", + # [ + # ["scalar", "ssd0_multiperclassdecoder0_slice_axis0"], + # ] + # ], + # [ + # "_contrib_box_nms", + # [ + # ["valid_thresh", "ssd0_multiperclassdecoder0_slice_axis0"], + # ] + # ], + # ] + # OSCALE_MAPS: [ + # ["ssd0_slice_axis41", "ssd0_multiperclassdecoder0_zeros_like0"], + # ["ssd0_slice_axis42", "ssd0_multiperclassdecoder0_slice_axis0"], + # ["ssd0_slice_axis43", "ssd0_normalizedboxcenterdecoder0_concat0"], + # ] +EVALUATE: + BATCH: 15 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10 From 1c4c106b44e65752152b4855bb0eb602446422bb Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 20 Dec 2021 14:24:05 +0800 Subject: [PATCH 073/120] upt --- 
python/mrt/V3/check_duplicate_name.py | 85 +++++++++++++++++++++++++++ python/mrt/V3/model_zoo/alexnet.yaml | 21 +++++++ python/mrt/tfm_ops.py | 29 +++++++++ 3 files changed, 135 insertions(+) create mode 100644 python/mrt/V3/check_duplicate_name.py create mode 100644 python/mrt/V3/model_zoo/alexnet.yaml diff --git a/python/mrt/V3/check_duplicate_name.py b/python/mrt/V3/check_duplicate_name.py new file mode 100644 index 00000000..fe0efd2e --- /dev/null +++ b/python/mrt/V3/check_duplicate_name.py @@ -0,0 +1,85 @@ +import argparse +import json +from os import path +import logging +from copy import deepcopy + +import mxnet as mx +from mxnet import ndarray as nd + +from mrt.sym_utils import sym_iter +from mrt import utils +from mrt.tfm_base import N +from mrt.conf import MRT_MODEL_ROOT +from mrt.V3.utils import load_fname + +utils.log_init() +parser = argparse.ArgumentParser() +parser.add_argument("model_name", type=str) +parser.add_argument("--model-dir", type=str, default=MRT_MODEL_ROOT) +parser.add_argument("--unify", action="store_true") + +@N.register_nm("unify") +def check_duplicate_name( + sym, params, unify=False, logger=logging.getLogger("unify")): + + # check symbol + sym_json_str = sym.tojson() + sym_json_dict = json.loads(sym_json_str) + nodes = sym_json_dict["nodes"] + + name_cnts = {} + nnodes = [] + for node in nodes: + name = node["name"] + if name in name_cnts: + cur_cnt = name_cnts[name] = N.n(name) + logger.warning("duplicate name: {}".format(name)) + if unify: + nnode = deepcopy(node) + nnode["name"] = "{}_{}".format(name, cur_cnt) + nnodes.append(nnode) + else: + name_cnts[name] = 1 + if unify: + nnodes.append(node) + + if unify: + sym_json_dict["nodes"] = nnodes + sym_json_str = json.dumps(sym_json_dict) + sym = mx.sym.load_json(sym_json_str) + + # check params + param_keys = {} + for k in params: + if k.startswith("arg:") or k.startswith("aux:"): + nk = k[4:] + else: + nk = k + if nk in param_keys: + assert False, nk + param_keys[k] = nk + params 
= {param_keys[k]: v for k, v in params.items()} + + return sym, params + +if __name__ == "__main__": + args = parser.parse_args() + + model_name = args.model_name + model_dir = args.model_dir + if model_dir.startswith("~"): + model_dir = path.expanduser(model_dir) + prefix = path.join(model_dir, model_name) + sym_file, prm_file = load_fname(prefix) + + sym = mx.sym.load(sym_file) + params = nd.load(prm_file) + + sym, params = check_duplicate_name(sym, params, unify=args.unify) + sym_json_str = sym.tojson() + nsym_file = "{}.unify.json".format(path.join(model_dir, model_name)) + nsym_file, nprm_file = load_fname(prefix, suffix="unify") + with open(nsym_file, "w") as f: + f.write(sym_json_str) + nd.save(nprm_file, params) diff --git a/python/mrt/V3/model_zoo/alexnet.yaml b/python/mrt/V3/model_zoo/alexnet.yaml new file mode 100644 index 00000000..b993dfcd --- /dev/null +++ b/python/mrt/V3/model_zoo/alexnet.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: alexnet + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10 diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index a3859b04..9193dfc8 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -1893,6 +1893,35 @@ def quantize(self, op, **kwargs): return _quantize_scale(op, **kwargs) +@register_pass("rewrite") +@register_transformer("elemwise_mul") +class ElemwiseMul(Transformer): + def fuse_transpose(self, op, **kwargs): + return _ft_multi_input(op) + + def quantize(self, op, **kwargs): + precs, scales = kwargs['precs'], kwargs['scales'] + name, op_name = op.attr('name'), kwargs['op_name'] + childs, attr = sym_iter(op.get_children()), op.list_attr() + + oprec = kwargs['op_input_precs'][op_name] + X, xprec, xs = requant(childs[0], oprec, 
oname=name, **kwargs) + W, wprec, ws = requant_parameter( + cns[1], oprec, oname=name, **kwargs) + scales[name] = ws * xs + op = get_mxnet_op(op_name)(X, W, B, **attr, name=name) + + shp = kwargs['params'][childs[1].attr('name')].shape + infer_prec = xprec + wprec + kwargs['precs'][name][OUT_KEY] = infer_prec + + logger = logging.getLogger('log.mrt.realize') + logger.debug( + "operator %-20s name=%-40s oscale=%s, iscale=%s", + op_name, name, scales[name], cns) + return op + + @register_pass("validate") @register_pass("calculate_ops") @register_pass("rewrite") From f7c0b685dde4f185b1cc5aa03f0d7d0b5b64dfb9 Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 20 Dec 2021 19:09:05 +0800 Subject: [PATCH 074/120] upt --- python/mrt/V3/evaluate.py | 7 ++++++ python/mrt/V3/model_zoo/yolov5s.yaml | 35 ++++------------------------ python/mrt/tfm_ops.py | 10 ++++---- python/mrt/transformer.py | 1 + 4 files changed, 17 insertions(+), 36 deletions(-) diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index a3cc4c59..823b3898 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -92,6 +92,13 @@ def evalfunc(data, label): 'data': set_batch(input_shape, split_batch)}) qgraph = rqmodel.to_graph(ctx=ctx) qmetric = dataset.metrics() + try: + data, _ = dataset.iter_func()() + data = sim.load_real_data(data, 'data', inputs_ext) + outs = forward(qgraph, data, ctx) + logger.debug("shape of outs: {}".format([o.shape for o in outs])) + except: + raise RuntimeError("Quantized Graph could not forward") def quantize(data, label): data = sim.load_real_data(data, 'data', inputs_ext) diff --git a/python/mrt/V3/model_zoo/yolov5s.yaml b/python/mrt/V3/model_zoo/yolov5s.yaml index 2db64c54..52501222 100644 --- a/python/mrt/V3/model_zoo/yolov5s.yaml +++ b/python/mrt/V3/model_zoo/yolov5s.yaml @@ -3,47 +3,20 @@ COMMON: VERBOSITY: info RUN_EVALUATE: True PREPARE: - INPUT_SHAPE: [-1, 3, 416, 416] - # SPLIT_KEYS: [ - # "ssd0_multiperclassdecoder0_zeros_like0", - # 
"ssd0_multiperclassdecoder0_slice_axis0", - # "ssd0_normalizedboxcenterdecoder0_concat0" - # ] + INPUT_SHAPE: [-1, 3, 640, 640] CALIBRATE: NUM_CALIB: 1 LAMBD: 16 - DATASET_NAME: voc + DATASET_NAME: coco DEVICE_TYPE: gpu DEVICE_IDS: [0] QUANTIZE: OUTPUT_PRECISION: 30 DEVICE_TYPE: gpu DEVICE_IDS: [0] - # THRESHOLDS: [ - # ["data", 2.64], - # ["ssd0_multiperclassdecoder0_slice_axis0", 1], - # ] - # ATTRIBUTE_DEPS: [ - # [ - # "_greater_scalar", - # [ - # ["scalar", "ssd0_multiperclassdecoder0_slice_axis0"], - # ] - # ], - # [ - # "_contrib_box_nms", - # [ - # ["valid_thresh", "ssd0_multiperclassdecoder0_slice_axis0"], - # ] - # ], - # ] - # OSCALE_MAPS: [ - # ["ssd0_slice_axis41", "ssd0_multiperclassdecoder0_zeros_like0"], - # ["ssd0_slice_axis42", "ssd0_multiperclassdecoder0_slice_axis0"], - # ["ssd0_slice_axis43", "ssd0_normalizedboxcenterdecoder0_concat0"], - # ] EVALUATE: - BATCH: 15 + # in this model, the BATCH should be set as 16 + BATCH: 16 DEVICE_TYPE: gpu DEVICE_IDS: [0] ITER_NUM: 10 diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index 9193dfc8..5e850a31 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -1893,6 +1893,7 @@ def quantize(self, op, **kwargs): return _quantize_scale(op, **kwargs) +@register_pass("prepare_for_compile") @register_pass("rewrite") @register_transformer("elemwise_mul") class ElemwiseMul(Transformer): @@ -1901,17 +1902,16 @@ def fuse_transpose(self, op, **kwargs): def quantize(self, op, **kwargs): precs, scales = kwargs['precs'], kwargs['scales'] - name, op_name = op.attr('name'), kwargs['op_name'] + name, op_name = op.attr('name'), op.attr('op_name') childs, attr = sym_iter(op.get_children()), op.list_attr() + cns = [c.attr('name') for c in childs] if childs else [] oprec = kwargs['op_input_precs'][op_name] X, xprec, xs = requant(childs[0], oprec, oname=name, **kwargs) - W, wprec, ws = requant_parameter( - cns[1], oprec, oname=name, **kwargs) + W, wprec, ws = requant(childs[1], oprec, oname=name, **kwargs) 
scales[name] = ws * xs - op = get_mxnet_op(op_name)(X, W, B, **attr, name=name) + op = get_mxnet_op(op_name)(X, W, **attr, name=name) - shp = kwargs['params'][childs[1].attr('name')].shape infer_prec = xprec + wprec kwargs['precs'][name][OUT_KEY] = infer_prec diff --git a/python/mrt/transformer.py b/python/mrt/transformer.py index c517226c..322411a1 100644 --- a/python/mrt/transformer.py +++ b/python/mrt/transformer.py @@ -224,6 +224,7 @@ def _op_default_input_precs(self): op_precs['Embedding'] = 16 op_precs['slice_like'] = 30 op_precs['batch_dot'] = 8 + op_precs['elemwise_mul'] = 16 def set_input_prec(self, prec): """ Set the input precision before quantization. From 35a4253a49eb2f58ec465f54776cdc5a48668267 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 21 Dec 2021 21:23:28 +0800 Subject: [PATCH 075/120] upt --- main2.py | 2 +- python/mrt/V3/model_zoo/prediction_SCTF.yaml | 22 ++++++++++++++++++++ python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml | 22 ++++++++++++++++++++ python/mrt/tfm_ops.py | 19 ++++++++++++----- 4 files changed, 59 insertions(+), 6 deletions(-) create mode 100644 python/mrt/V3/model_zoo/prediction_SCTF.yaml create mode 100644 python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml diff --git a/main2.py b/main2.py index e4ef954b..5f7a83b8 100644 --- a/main2.py +++ b/main2.py @@ -12,7 +12,7 @@ def override_cfg_args(cfg, argv): attr, value = argv[i:i+2] try: value = eval(value) - except NameError: + except: pass pass_name, pass_attr = [s.upper() for s in attr[2:].split(".")] cnode = getattr(cfg, pass_name) diff --git a/python/mrt/V3/model_zoo/prediction_SCTF.yaml b/python/mrt/V3/model_zoo/prediction_SCTF.yaml new file mode 100644 index 00000000..28fad154 --- /dev/null +++ b/python/mrt/V3/model_zoo/prediction_SCTF.yaml @@ -0,0 +1,22 @@ +COMMON: + MODEL_NAME: prediction_SCTF + VERBOSITY: info + RUN_EVALUATE: True +PREPARE: + INPUT_SHAPE: [-1, 1, 3] +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: mnist + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + 
OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + # in this model, the BATCH should be set as 16 + BATCH: 64 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10 diff --git a/python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml b/python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml new file mode 100644 index 00000000..a45a6a3d --- /dev/null +++ b/python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml @@ -0,0 +1,22 @@ +COMMON: + MODEL_NAME: yolov5s_sigmoid + VERBOSITY: info + RUN_EVALUATE: True +PREPARE: + INPUT_SHAPE: [-1, 3, 640, 640] +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: coco + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + # in this model, the BATCH should be set as 16 + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10 diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index 5e850a31..fc8ef67b 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -229,18 +229,25 @@ def fuse_transpose(self, op, **kwargs): attr = op.list_attr() if attr['act_type'] == Relu.op_name: op = Relu().fuse_transpose(op, **kwargs) + elif attr['act_type'] == Sigmoid.op_name: + op = Sigmoid().fuse_transpose(op, **kwargs) return op def rewrite(self, op, **kwargs): attr = op.list_attr() if attr['act_type'] == Relu.op_name: op = Relu().rewrite(op, **kwargs) + elif attr['act_type'] == Sigmoid.op_name: + childs = sym_iter(op.get_children()) + op = mx.sym.sigmoid(childs[0]) return op def calculate_ops(self, op, **kwargs): attr = op.list_attr() if attr['act_type'] == Relu.op_name: op = Relu().calculate_ops(op, **kwargs) + elif attr['act_type'] == Sigmoid.op_name: + op = Sigmoid().calculate_ops(op, **kwargs) return op def prepare_for_compile(self, op, **kwargs): @@ -252,12 +259,13 @@ def prepare_for_compile(self, op, **kwargs): def compile(self, op, **kwargs): attrs = kwargs['attr'] act_type = attrs['act_type'] + + nkwargs = {k: v for k, v in kwargs.items() if k != 'attr'} + nattrs = 
{k: v for k, v in attrs.items() if k != 'act_type'} + nkwargs['attr'] = nattrs if act_type == Relu.op_name: - nkwargs = {k: v for k, v in kwargs.items() if k != 'attr'} - nattrs = {k: v for k, v in attrs.items() if k != 'act_type'} - nkwargs['attr'] = nattrs - sym = Relu().compile(op, **nkwargs) - return sym + op = Relu().compile(op, **nkwargs) + return op @register_pass("fuse_transpose") @@ -1893,6 +1901,7 @@ def quantize(self, op, **kwargs): return _quantize_scale(op, **kwargs) +@register_pass("compile") @register_pass("prepare_for_compile") @register_pass("rewrite") @register_transformer("elemwise_mul") From e267327e9a6dd73ce52b170ebf82729f526d012e Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 21 Dec 2021 21:25:26 +0800 Subject: [PATCH 076/120] upt --- python/mrt/tfm_ops.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index fc8ef67b..1a7c6a12 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -260,10 +260,10 @@ def compile(self, op, **kwargs): attrs = kwargs['attr'] act_type = attrs['act_type'] - nkwargs = {k: v for k, v in kwargs.items() if k != 'attr'} - nattrs = {k: v for k, v in attrs.items() if k != 'act_type'} - nkwargs['attr'] = nattrs if act_type == Relu.op_name: + nkwargs = {k: v for k, v in kwargs.items() if k != 'attr'} + nattrs = {k: v for k, v in attrs.items() if k != 'act_type'} + nkwargs['attr'] = nattrs op = Relu().compile(op, **nkwargs) return op From 05119b75b053c8d331de23be420922d02a7ff8b9 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Dec 2021 16:01:38 +0800 Subject: [PATCH 077/120] upt --- python/mrt/V3/elem2broadcast_mul.py | 14 +++++++++++++ python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml | 22 -------------------- 2 files changed, 14 insertions(+), 22 deletions(-) create mode 100644 python/mrt/V3/elem2broadcast_mul.py delete mode 100644 python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml diff --git a/python/mrt/V3/elem2broadcast_mul.py 
b/python/mrt/V3/elem2broadcast_mul.py new file mode 100644 index 00000000..6fb2546e --- /dev/null +++ b/python/mrt/V3/elem2broadcast_mul.py @@ -0,0 +1,14 @@ +import argparse + +from mrt import tfm_ops as tops +from mrt.tfm_base import N +from mrt.sym_utils import + +@N.register_nm("broadcastify") +def broadcastify(sym, params): + def callback(op, **kwargs): + pass + +class ElemwiseMul(tops.ElemwiseMul): + def broadcastify(self, op, **kwargs): + pass diff --git a/python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml b/python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml deleted file mode 100644 index a45a6a3d..00000000 --- a/python/mrt/V3/model_zoo/yolov5s_sigmoid.yaml +++ /dev/null @@ -1,22 +0,0 @@ -COMMON: - MODEL_NAME: yolov5s_sigmoid - VERBOSITY: info - RUN_EVALUATE: True -PREPARE: - INPUT_SHAPE: [-1, 3, 640, 640] -CALIBRATE: - NUM_CALIB: 1 - LAMBD: 16 - DATASET_NAME: coco - DEVICE_TYPE: gpu - DEVICE_IDS: [0] -QUANTIZE: - OUTPUT_PRECISION: 30 - DEVICE_TYPE: gpu - DEVICE_IDS: [0] -EVALUATE: - # in this model, the BATCH should be set as 16 - BATCH: 16 - DEVICE_TYPE: gpu - DEVICE_IDS: [0] - ITER_NUM: 10 From 232ccc289033e51fe2773279061e5b443b19510e Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Dec 2021 17:10:57 +0800 Subject: [PATCH 078/120] upt --- python/mrt/V3/elem2broadcast_mul.py | 14 ----- .../mrt/yolov5s/preprocess.py | 53 +++++++++++-------- .../mrt/yolov5s}/yolov5s.yaml | 2 +- 3 files changed, 33 insertions(+), 36 deletions(-) delete mode 100644 python/mrt/V3/elem2broadcast_mul.py rename python/mrt/V3/check_duplicate_name.py => tests/mrt/yolov5s/preprocess.py (54%) rename {python/mrt/V3/model_zoo => tests/mrt/yolov5s}/yolov5s.yaml (90%) diff --git a/python/mrt/V3/elem2broadcast_mul.py b/python/mrt/V3/elem2broadcast_mul.py deleted file mode 100644 index 6fb2546e..00000000 --- a/python/mrt/V3/elem2broadcast_mul.py +++ /dev/null @@ -1,14 +0,0 @@ -import argparse - -from mrt import tfm_ops as tops -from mrt.tfm_base import N -from mrt.sym_utils import - 
-@N.register_nm("broadcastify") -def broadcastify(sym, params): - def callback(op, **kwargs): - pass - -class ElemwiseMul(tops.ElemwiseMul): - def broadcastify(self, op, **kwargs): - pass diff --git a/python/mrt/V3/check_duplicate_name.py b/tests/mrt/yolov5s/preprocess.py similarity index 54% rename from python/mrt/V3/check_duplicate_name.py rename to tests/mrt/yolov5s/preprocess.py index fe0efd2e..f4e0d849 100644 --- a/python/mrt/V3/check_duplicate_name.py +++ b/tests/mrt/yolov5s/preprocess.py @@ -7,7 +7,7 @@ import mxnet as mx from mxnet import ndarray as nd -from mrt.sym_utils import sym_iter +from mrt.sym_utils import topo_visit_transformer, sym_iter from mrt import utils from mrt.tfm_base import N from mrt.conf import MRT_MODEL_ROOT @@ -15,13 +15,12 @@ utils.log_init() parser = argparse.ArgumentParser() -parser.add_argument("model_name", type=str) -parser.add_argument("--model-dir", type=str, default=MRT_MODEL_ROOT) -parser.add_argument("--unify", action="store_true") +parser.add_argument("--model-name", type=str, default="yolov5s") +parser.add_argument( + "--model-dir", type=str, default=MRT_MODEL_ROOT) @N.register_nm("unify") -def check_duplicate_name( - sym, params, unify=False, logger=logging.getLogger("unify")): +def unify(sym, params, logger=logging.getLogger("unify")): # check symbol sym_json_str = sym.tojson() @@ -34,20 +33,17 @@ def check_duplicate_name( name = node["name"] if name in name_cnts: cur_cnt = name_cnts[name] = N.n(name) - logger.warning("duplicate name: {}".format(name)) - if unify: - nnode = deepcopy(node) - nnode["name"] = "{}_{}".format(name, cur_cnt) - nnodes.append(nnode) + logger.info("duplicate name: {}".format(name)) + nnode = deepcopy(node) + nnode["name"] = "{}_{}".format(name, cur_cnt) + nnodes.append(nnode) else: name_cnts[name] = 1 - if unify: - nnodes.append(node) + nnodes.append(node) - if unify: - sym_json_dict["nodes"] = nnodes - sym_json_str = json.dumps(sym_json_dict) - sym = mx.sym.load_json(sym_json_str) + 
sym_json_dict["nodes"] = nnodes + sym_json_str = json.dumps(sym_json_dict) + sym = mx.sym.load_json(sym_json_str) # check params param_keys = {} @@ -63,6 +59,21 @@ def check_duplicate_name( return sym, params +@N.register_nm("broadcastify") +def broadcastify(sym, params, logger=logging.getLogger("broadcastify")): + def callback(op, **kwargs): + name, op_name = op.attr("name"), op.attr("op_name") + if op_name != "elemwise_mul": + return op + childs = sym_iter(op.get_children()) + lhs, rhs = childs + op = mx.sym.broadcast_mul(lhs, rhs) + logger.info("op: {} has been broadcastified".format(name)) + return op + + return topo_visit_transformer( + sym, params, callback, logger=logger) + if __name__ == "__main__": args = parser.parse_args() @@ -72,14 +83,14 @@ def check_duplicate_name( model_dir = path.expanduser(model_dir) prefix = path.join(model_dir, model_name) sym_file, prm_file = load_fname(prefix) - sym = mx.sym.load(sym_file) params = nd.load(prm_file) - sym, params = check_duplicate_name(sym, params, unify=args.unify) + sym, params = unify(sym, params) + sym, params = broadcastify(sym, params) + sym_json_str = sym.tojson() - nsym_file = "{}.unify.json".format(path.join(model_dir, model_name)) - nsym_file, nprm_file = load_fname(prefix, suffix="unify") + nsym_file, nprm_file = load_fname(prefix, suffix="unify.broadcastify") with open(nsym_file, "w") as f: f.write(sym_json_str) nd.save(nprm_file, params) diff --git a/python/mrt/V3/model_zoo/yolov5s.yaml b/tests/mrt/yolov5s/yolov5s.yaml similarity index 90% rename from python/mrt/V3/model_zoo/yolov5s.yaml rename to tests/mrt/yolov5s/yolov5s.yaml index 52501222..4a8cbaf6 100644 --- a/python/mrt/V3/model_zoo/yolov5s.yaml +++ b/tests/mrt/yolov5s/yolov5s.yaml @@ -1,5 +1,5 @@ COMMON: - MODEL_NAME: yolov5s + MODEL_NAME: yolov5s.unify.broadcastify VERBOSITY: info RUN_EVALUATE: True PREPARE: From a2f476cf36906cfa455d82fd2ce1fd9e89462157 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 22 Dec 2021 17:37:12 +0800 Subject: [PATCH 
079/120] upt --- tests/mrt/yolov5s/preprocess.py | 16 ++++++++++++---- tests/mrt/yolov5s/yolov5s.yaml | 6 +++++- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/tests/mrt/yolov5s/preprocess.py b/tests/mrt/yolov5s/preprocess.py index f4e0d849..cf73710a 100644 --- a/tests/mrt/yolov5s/preprocess.py +++ b/tests/mrt/yolov5s/preprocess.py @@ -18,6 +18,8 @@ parser.add_argument("--model-name", type=str, default="yolov5s") parser.add_argument( "--model-dir", type=str, default=MRT_MODEL_ROOT) +parser.add_argument("--no-unify", action="store_true") +parser.add_argument("--no-broadcastify", action="store_true") @N.register_nm("unify") def unify(sym, params, logger=logging.getLogger("unify")): @@ -67,7 +69,7 @@ def callback(op, **kwargs): return op childs = sym_iter(op.get_children()) lhs, rhs = childs - op = mx.sym.broadcast_mul(lhs, rhs) + op = mx.sym.broadcast_mul(lhs, rhs, name=name) logger.info("op: {} has been broadcastified".format(name)) return op @@ -86,11 +88,17 @@ def callback(op, **kwargs): sym = mx.sym.load(sym_file) params = nd.load(prm_file) - sym, params = unify(sym, params) - sym, params = broadcastify(sym, params) + suffixes = ["preprocess"] + if not args.no_unify: + suffixes.append("unify") + sym, params = unify(sym, params) + if not args.no_broadcastify: + suffixes.append("broadcastify") + sym, params = broadcastify(sym, params) + suffix = ".".join(suffixes) sym_json_str = sym.tojson() - nsym_file, nprm_file = load_fname(prefix, suffix="unify.broadcastify") + nsym_file, nprm_file = load_fname(prefix, suffix=suffix) with open(nsym_file, "w") as f: f.write(sym_json_str) nd.save(nprm_file, params) diff --git a/tests/mrt/yolov5s/yolov5s.yaml b/tests/mrt/yolov5s/yolov5s.yaml index 4a8cbaf6..fd6caf8f 100644 --- a/tests/mrt/yolov5s/yolov5s.yaml +++ b/tests/mrt/yolov5s/yolov5s.yaml @@ -1,5 +1,6 @@ COMMON: - MODEL_NAME: yolov5s.unify.broadcastify + MODEL_NAME: yolov5s.preprocess.unify.broadcastify + # MODEL_NAME: yolov5s.preprocess.unify VERBOSITY: 
info RUN_EVALUATE: True PREPARE: @@ -20,3 +21,6 @@ EVALUATE: DEVICE_TYPE: gpu DEVICE_IDS: [0] ITER_NUM: 10 +COMPILE: + # in this model, the BATCH should be set as 16 + BATCH: 16 From 0cd850376c1e92800eafbd9cf6ddc3c455e2721d Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 24 Dec 2021 14:39:27 +0800 Subject: [PATCH 080/120] upt --- python/mrt/frontend/.gitignore | 7 + python/mrt/frontend/Makefile | 9 ++ python/mrt/frontend/README.md | 1 + python/mrt/frontend/javascript/init.js | 20 +++ .../frontend/javascript/model_submitter.js | 36 +++++ .../mrt/frontend/javascript/mrt_executor.js | 50 ++++++ python/mrt/frontend/javascript/utils.js | 42 +++++ .../mrt/frontend/javascript/yaml_clearer.js | 19 +++ .../frontend/javascript/yaml_file_loader.js | 8 + python/mrt/frontend/manage.py | 22 +++ python/mrt/frontend/python/rpc/forwarding.py | 34 +++++ python/mrt/frontend/python/rpc/log.py | 20 +++ python/mrt/frontend/python/rpc/service.proto | 20 +++ python/mrt/frontend/python/rpc/service.py | 107 +++++++++++++ python/mrt/frontend/python/rpc/streamer.py | 47 ++++++ python/mrt/frontend/python/rpc/test_rpc.py | 33 ++++ python/mrt/frontend/python/rpc/utils.py | 25 +++ python/mrt/frontend/requirements.txt | 3 + python/mrt/frontend/web/__init__.py | 0 python/mrt/frontend/web/asgi.py | 23 +++ python/mrt/frontend/web/consumers.py | 104 +++++++++++++ python/mrt/frontend/web/protocol.py | 57 +++++++ python/mrt/frontend/web/routing.py | 24 +++ python/mrt/frontend/web/settings.py | 144 ++++++++++++++++++ python/mrt/frontend/web/templates/room.html | 144 ++++++++++++++++++ python/mrt/frontend/web/urls.py | 22 +++ python/mrt/frontend/web/views.py | 4 + python/mrt/frontend/web/wsgi.py | 16 ++ 28 files changed, 1041 insertions(+) create mode 100644 python/mrt/frontend/.gitignore create mode 100644 python/mrt/frontend/Makefile create mode 100644 python/mrt/frontend/README.md create mode 100644 python/mrt/frontend/javascript/init.js create mode 100644 
python/mrt/frontend/javascript/model_submitter.js create mode 100644 python/mrt/frontend/javascript/mrt_executor.js create mode 100644 python/mrt/frontend/javascript/utils.js create mode 100644 python/mrt/frontend/javascript/yaml_clearer.js create mode 100644 python/mrt/frontend/javascript/yaml_file_loader.js create mode 100755 python/mrt/frontend/manage.py create mode 100644 python/mrt/frontend/python/rpc/forwarding.py create mode 100644 python/mrt/frontend/python/rpc/log.py create mode 100644 python/mrt/frontend/python/rpc/service.proto create mode 100644 python/mrt/frontend/python/rpc/service.py create mode 100644 python/mrt/frontend/python/rpc/streamer.py create mode 100644 python/mrt/frontend/python/rpc/test_rpc.py create mode 100644 python/mrt/frontend/python/rpc/utils.py create mode 100644 python/mrt/frontend/requirements.txt create mode 100644 python/mrt/frontend/web/__init__.py create mode 100644 python/mrt/frontend/web/asgi.py create mode 100644 python/mrt/frontend/web/consumers.py create mode 100644 python/mrt/frontend/web/protocol.py create mode 100644 python/mrt/frontend/web/routing.py create mode 100644 python/mrt/frontend/web/settings.py create mode 100644 python/mrt/frontend/web/templates/room.html create mode 100644 python/mrt/frontend/web/urls.py create mode 100644 python/mrt/frontend/web/views.py create mode 100644 python/mrt/frontend/web/wsgi.py diff --git a/python/mrt/frontend/.gitignore b/python/mrt/frontend/.gitignore new file mode 100644 index 00000000..4ac9261f --- /dev/null +++ b/python/mrt/frontend/.gitignore @@ -0,0 +1,7 @@ +db.sqlite3 +__pycache__ +.DS_Store +mrt_rpc_service_pb2.py +mrt_rpc_service_pb2_grpc.py +service_pb2.py +service_pb2_grpc.py diff --git a/python/mrt/frontend/Makefile b/python/mrt/frontend/Makefile new file mode 100644 index 00000000..f1c0561b --- /dev/null +++ b/python/mrt/frontend/Makefile @@ -0,0 +1,9 @@ +rpc-build: + python -m grpc_tools.protoc \ + -I. --python_out=. --grpc_python_out=. 
\ + ./python/rpc/service.proto + python ./python/rpc/service.py +rpc-test: + python ./python/rpc/test_rpc.py +web-build: + python manage.py runserver 8000 diff --git a/python/mrt/frontend/README.md b/python/mrt/frontend/README.md new file mode 100644 index 00000000..6ee19ee2 --- /dev/null +++ b/python/mrt/frontend/README.md @@ -0,0 +1 @@ +# mrt-web \ No newline at end of file diff --git a/python/mrt/frontend/javascript/init.js b/python/mrt/frontend/javascript/init.js new file mode 100644 index 00000000..5f153931 --- /dev/null +++ b/python/mrt/frontend/javascript/init.js @@ -0,0 +1,20 @@ +import { + roomName, update_yaml_configurations, update_console, + create_socket } from './utils.js'; + +const yamlInitSocket = create_socket("yaml/init/"); + +yamlInitSocket.onopen = function(e) { + yamlInitSocket.send(null); +}; + +yamlInitSocket.onmessage = function(e) { + update_yaml_configurations(e); + update_console("yaml parameters initialized."); +} + +const yamlResetter = document.querySelector('#yaml-resetter'); + +yamlResetter.onclick = function(e) { + yamlInitSocket.send(null); +}; diff --git a/python/mrt/frontend/javascript/model_submitter.js b/python/mrt/frontend/javascript/model_submitter.js new file mode 100644 index 00000000..20337db2 --- /dev/null +++ b/python/mrt/frontend/javascript/model_submitter.js @@ -0,0 +1,36 @@ +import { roomName, create_socket, update_console_v2 } from './utils.js'; + +const modelSubmitSocket = create_socket("model/submit/"); + +const mrtExecutor = document.querySelector('#mrt-executor'); +const modelSubmitter = document.querySelector('#model-submitter'); + +modelSubmitSocket.onmessage = function(e) { + const data = JSON.parse(e.data); + if ('activate' in data) { + mrtExecutor.disabled = false; + modelSubmitter.disabled = false; + } + if ('message' in data) { + if ('first' in data) { + update_console(data.message); + } else { + update_console_v2(data.message); + } + } +}; + +modelSubmitSocket.onclose = function(e) { + 
console.error('model submit socket closed unexpectedly'); +}; + +modelSubmitter.onclick = function(e) { + mrtExecutor.disabled = true; + modelSubmitter.disabled = true; + let text_data = new Object(); + text_data['symbol'] = document.querySelector('#symbol-locator').value; + text_data['params'] = document.querySelector('#params-locator').value; + text_data['dst'] = document.querySelector('#COMMON_MODEL_DIR').value; + text_data['host'] = document.querySelector('#host-locator').value; + modelSubmitSocket.send(JSON.stringify(text_data)); +}; diff --git a/python/mrt/frontend/javascript/mrt_executor.js b/python/mrt/frontend/javascript/mrt_executor.js new file mode 100644 index 00000000..f5b4f155 --- /dev/null +++ b/python/mrt/frontend/javascript/mrt_executor.js @@ -0,0 +1,50 @@ +import { roomName, create_socket, update_console } from './utils.js'; + +const mrtExecuteSocket = create_socket("mrt/execute/"); + +const mrtExecutor = document.querySelector('#mrt-executor'); +const modelSubmitter = document.querySelector('#model-submitter'); + +mrtExecuteSocket.onmessage = function(e) { + const data = JSON.parse(e.data); + if ('activate' in data) { + mrtExecutor.disabled = false; + modelSubmitter.disabled = false; + } + if ('message' in data) { + update_console(data.message); + } +}; + +mrtExecuteSocket.onclose = function(e) { + console.error('mrt execute socket closed unexpectedly'); +}; + +const ConfigWrapperSocket = create_socket("config/wrapper/"); + +ConfigWrapperSocket.onmessage = function(e) { + const data = JSON.parse(e.data); + let dict = new Object(); + for (const [stage, stage_data] of Object.entries(data)) { + let subdict = new Object(); + for (const attr of Object.keys(stage_data)) { + const id = '#' + stage + '_' + attr; + let value = document.querySelector(id).value; + subdict[attr] = value; + } + dict[stage] = subdict; + } + // overide pass_name + const pass_name = document.querySelector('#mrt-stage-selector').value; + dict["COMMON"]["PASS_NAME"] = pass_name; + 
let text_data = new Object(); + text_data['yaml'] = dict; + text_data['host'] = document.querySelector('#host-locator').value; + mrtExecuteSocket.send(JSON.stringify(text_data)); +}; + +mrtExecutor.onclick = function(e) { + mrtExecutor.disabled = true; + modelSubmitter.disabled = true; + ConfigWrapperSocket.send(null); +}; diff --git a/python/mrt/frontend/javascript/utils.js b/python/mrt/frontend/javascript/utils.js new file mode 100644 index 00000000..b3c0b97e --- /dev/null +++ b/python/mrt/frontend/javascript/utils.js @@ -0,0 +1,42 @@ +export const roomName = JSON.parse(document.getElementById('room-name').textContent); + +export function update_yaml_configurations(e) { + const data = JSON.parse(e.data); + for (const [stage, stage_data] of Object.entries(data)) { + for (const [attr, value] of Object.entries(stage_data)) { + const id = '#' + stage + '_' + attr; + document.querySelector(id).value = value; + } + } +} + +export function create_socket(sub_path) { + const newSocket = new WebSocket( + 'ws://' + + window.location.host + + '/ws/web/' + + sub_path + + roomName + + '/' + ); + return newSocket; +} + +export function update_console(str) { + document.querySelector('#chat-log').value += (str + '\n'); +} + +export function update_console_v2(str) { + document.querySelector('#chat-log').value += (str + '\n'); + let s = document.querySelector('#chat-log').value; + let ind = s.slice(0,-1).lastIndexOf('\n'); + document.querySelector('#chat-log').value = s.slice(0,ind+1) + str + '\n'; + +} + +export const yamlUpdateSocket = create_socket("yaml/update/"); + +yamlUpdateSocket.onmessage = function(e) { + update_yaml_configurations(e); + update_console("yaml parameters updated."); +}; diff --git a/python/mrt/frontend/javascript/yaml_clearer.js b/python/mrt/frontend/javascript/yaml_clearer.js new file mode 100644 index 00000000..4eb864d5 --- /dev/null +++ b/python/mrt/frontend/javascript/yaml_clearer.js @@ -0,0 +1,19 @@ +import { roomName, create_socket, update_console } 
from './utils.js'; + +const yamlClearSocket = create_socket("yaml/clear/") + +yamlClearSocket.onmessage = function(e) { + const data = JSON.parse(e.data); + for (const [stage, stage_data] of Object.entries(data)) { + for (const [attr, value] of Object.entries(stage_data)) { + const id = '#' + stage + '_' + attr; + document.querySelector(id).value = ''; + } + } + update_console("yaml parameters cleared."); +}; + +const yamlClearer = document.querySelector('#yaml-clearer'); +yamlClearer.onclick = function(e) { + yamlClearSocket.send(null) +}; diff --git a/python/mrt/frontend/javascript/yaml_file_loader.js b/python/mrt/frontend/javascript/yaml_file_loader.js new file mode 100644 index 00000000..cc5f5f5f --- /dev/null +++ b/python/mrt/frontend/javascript/yaml_file_loader.js @@ -0,0 +1,8 @@ +import { yamlUpdateSocket } from './utils.js'; + +document.querySelector('#yaml-loader').onclick = function(e) { + const yamlFileLocator = document.querySelector('#yaml-file-locator'); + yamlUpdateSocket.send(JSON.stringify({ + 'yaml_file': yamlFileLocator.value, + })); +}; diff --git a/python/mrt/frontend/manage.py b/python/mrt/frontend/manage.py new file mode 100755 index 00000000..19be6dd3 --- /dev/null +++ b/python/mrt/frontend/manage.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +"""Django's command-line utility for administrative tasks.""" +import os +import sys + + +def main(): + """Run administrative tasks.""" + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" 
+ ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/python/mrt/frontend/python/rpc/forwarding.py b/python/mrt/frontend/python/rpc/forwarding.py new file mode 100644 index 00000000..048161b5 --- /dev/null +++ b/python/mrt/frontend/python/rpc/forwarding.py @@ -0,0 +1,34 @@ +import os +import argparse + +default_local_port = 5001 +default_remote_port = 5000 +default_remote_user = None +default_remote_host = None + +parser = argparse.ArgumentParser() +parser.add_argument( + "--local-port", type=int, default=default_local_port) +parser.add_argument( + "--remote-port", type=int, default=default_remote_port) +parser.add_argument( + "--remote-user", type=str, default=default_remote_user) +parser.add_argument( + "--remote-host", type=str, default=default_remote_host) + +def forward( + local_port=default_local_port, remote_port=default_remote_port, + remote_user=default_remote_user, remote_host=default_remote_host): + if remote_user is None: + raise RuntimeError("remote_user should be specified") + if remote_host is None: + raise RuntimeError("remote_host should be specified") + cmd = "ssh -N -L {}:localhost:{} {}@{}".format( + local_port, remote_port, remote_user, remote_host) + os.system(cmd) + +if __name__ == "__main__": + args = parser.parse_args() + forward( + local_port=args.local_port, remote_port=args.remote_port, + remote_user=args.remote_user, remote_host=args.remote_host) diff --git a/python/mrt/frontend/python/rpc/log.py b/python/mrt/frontend/python/rpc/log.py new file mode 100644 index 00000000..c55cb690 --- /dev/null +++ b/python/mrt/frontend/python/rpc/log.py @@ -0,0 +1,20 @@ +import logging +from mrt.common.log import ( + LOG_LEVELS, ColorFormatter, FilterList, name2level +) + +def log_init(log_level, streamer): + assert log_level in LOG_LEVELS + logging.basicConfig(level=log_level, stream=streamer) + formatter = ColorFormatter( + fmt="[ %(asctime)s %(name)10s %(levelname)5s ] %(message)s", + 
datefmt="%Y-%m-%d %H:%M:%S") + log_filter = FilterList(log_level=log_level, default=False) + for handler in logging.root.handlers: + handler.addFilter(log_filter) + handler.setFormatter(formatter) + +def get_logger(verbosity, streamer): + log_init(name2level(verbosity.upper()), streamer) + logger = logging.getLogger("log.main") + return logger diff --git a/python/mrt/frontend/python/rpc/service.proto b/python/mrt/frontend/python/rpc/service.proto new file mode 100644 index 00000000..63913f02 --- /dev/null +++ b/python/mrt/frontend/python/rpc/service.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package test; + +service MRTRpcSrv { + rpc execute(MRTClientReq) returns(stream MRTServerResp) {} + rpc submit(stream MRTClientReqStream) returns (stream MRTServerResp) {} +} + +message MRTClientReq { + string content = 1; +} + +message MRTClientReqStream { + bytes chunck = 1; +} + +message MRTServerResp { + string logging_str = 1; +} diff --git a/python/mrt/frontend/python/rpc/service.py b/python/mrt/frontend/python/rpc/service.py new file mode 100644 index 00000000..b09242d4 --- /dev/null +++ b/python/mrt/frontend/python/rpc/service.py @@ -0,0 +1,107 @@ +from concurrent import futures +import os +from os import path +from shutil import copyfile + +import grpc + +import rpc.service_pb2 as pb2 +import rpc.service_pb2_grpc as pb2_grpc +from rpc.utils import get_streamer + +# TODO(ryt): load balancer for maxinum_workers +maximum_workers = 4 +local_addr = "127.0.0.1:5000" +chunk_size = 1024 * 1024 # 1MB + +def mrt_submit( + src_sym_file, src_prm_file, dst_model_dir, host_addr=None): + model_name = path.splitext(path.basename(src_sym_file))[0] + model_name_2 = path.splitext(path.basename(src_prm_file))[0] + assert model_name == model_name_2, "not compatible, " + \ + "src_sym_file: {}, src_prm_file: {}".format( + src_sym_file, src_prm_file) + if host_addr is None: + dst_sym_file = path.join(dst_model_dir, model_name+".json") + dst_prm_file = path.join(dst_model_dir, 
model_name+".params") + copyfile(src_sym_file, dst_sym_file) + copyfile(src_prm_file, dst_prm_file) + yield "src files copied" + else: + def iterator_func(src_file, file_name): + yield pb2.MRTClientReqStream(chunck=bytes(dst_model_dir, 'utf-8')) + yield pb2.MRTClientReqStream(chunck=bytes(file_name, 'utf-8')) + yield pb2.MRTClientReqStream( + chunck=bytes(str(path.getsize(src_file)), 'utf-8')) + with open(src_file, 'rb') as f: + while True: + piece = f.read(chunk_size); + if len(piece) == 0: + return + yield pb2.MRTClientReqStream(chunck=piece) + conn = grpc.insecure_channel(host_addr) + client = pb2_grpc.MRTRpcSrvStub(channel=conn) + response = client.submit( + iterator_func(src_sym_file, model_name+".json")) + next(response) + for message in response: + yield message.logging_str + response = client.submit( + iterator_func(src_prm_file, model_name+".params")) + for message in response: + yield message.logging_str + +def mrt_execute(yaml_file_str, host_addr=None): + if host_addr is None: + my_streamer = get_streamer(yaml_file_str) + for logging_str in my_streamer.start(): + yield logging_str + else: + conn = grpc.insecure_channel(host_addr) + client = pb2_grpc.MRTRpcSrvStub(channel=conn) + response = client.execute( + pb2.MRTClientReq(content=yaml_file_str)) + for message in response: + yield message.logging_str + + +class MRTRpcSrv(pb2_grpc.MRTRpcSrvServicer): + def execute(self, request, context): + yaml_file_str = request.content + my_streamer = get_streamer(yaml_file_str) + for message in my_streamer.start(): + if not context.is_active(): + raise RuntimeError("client connection lost") + yield pb2.MRTServerResp(logging_str=message) + # if context.is_active(): + # context.cancel() + + def submit(self, request_iterator, context): + dst_model_dir = str(next(request_iterator).chunck, 'utf-8') + os.makedirs(dst_model_dir, exist_ok=True) + file_name = str(next(request_iterator).chunck, 'utf-8') + size = eval(str(next(request_iterator).chunck, 'utf-8')) + dst_file = 
path.join(dst_model_dir, file_name) + with open(dst_file, 'wb') as f: + cur_size = 0 + for piece in request_iterator: + f.write(piece.chunck) + cur_size += chunk_size + cur_size = min(cur_size, size) + message = "Current: {} Bytes / Total: {} Bytes, ".format( + cur_size, size) + \ + "{} % Completed".format(round(cur_size/size*100.0, 2)) + yield pb2.MRTServerResp(logging_str=message) + +def main(): + grpc_server = grpc.server( + futures.ThreadPoolExecutor(max_workers=maximum_workers)) + pb2_grpc.add_MRTRpcSrvServicer_to_server( + MRTRpcSrv(), grpc_server) + grpc_server.add_insecure_port(local_addr) + grpc_server.start() + print("server will start at {}".format(local_addr)) + grpc_server.wait_for_termination() + +if __name__ == '__main__': + main() diff --git a/python/mrt/frontend/python/rpc/streamer.py b/python/mrt/frontend/python/rpc/streamer.py new file mode 100644 index 00000000..3c43f288 --- /dev/null +++ b/python/mrt/frontend/python/rpc/streamer.py @@ -0,0 +1,47 @@ +from queue import Queue, Empty +from threading import Thread, current_thread +import sys + + +class Printer: + def __init__(self): + self.queues = {} + + def write(self, value): + queue = self.queues.get(current_thread().name) + if queue: + queue.put(value) + else: + sys.__stdout__.write(value) + + def flush(self): + pass + + def register(self, thread): + queue = Queue() + self.queues[thread.name] = queue + return queue + + def clean(self, thread): + del self.queues[thread.name] + +printer = Printer() +sys.stdout = printer + + +class Streamer: + def __init__(self, target, args): + self.thread = Thread(target=target, args=args) + self.queue = printer.register(self.thread) + + def start(self): + self.thread.start() + # print('This should be stdout') + while self.thread.is_alive(): + try: + item = self.queue.get_nowait() + yield item.strip() + except Empty: + pass + yield '\n***End***' + printer.clean(self.thread) diff --git a/python/mrt/frontend/python/rpc/test_rpc.py 
b/python/mrt/frontend/python/rpc/test_rpc.py new file mode 100644 index 00000000..bb248791 --- /dev/null +++ b/python/mrt/frontend/python/rpc/test_rpc.py @@ -0,0 +1,33 @@ +from os import path +import argparse + +from mrt.V3.utils import get_cfg_defaults +from rpc.service import local_addr, mrt_execute, mrt_submit +from rpc.utils import stringify_cfg + +parser = argparse.ArgumentParser() +parser.add_argument("--host-addr", type=str, default=local_addr) + +def test_execute(host_addr): + cfg = get_cfg_defaults() + tmp_yaml_file = path.expanduser("~/mrt_yaml_root/alexnet.yaml") + cfg.merge_from_file(tmp_yaml_file) + yaml_file_str = stringify_cfg(cfg) + for message in mrt_execute(yaml_file_str, host_addr=host_addr): + print(message) + +def test_submit(host_addr): + src_sym_file = path.expanduser("~/mrt_model/alexnet.json") + src_prm_file = path.expanduser("~/mrt_model/alexnet.params") + # dst_model_dir = path.expanduser("~/mrt_model_2") + dst_model_dir = "/home/ycmtrivial/mrt_model" + for message in mrt_submit( + src_sym_file, src_prm_file, dst_model_dir, + host_addr=host_addr): + print(message) + +if __name__ == "__main__": + args = parser.parse_args() + host_addr = args.host_addr + test_execute(host_addr=host_addr) + test_submit(host_addr=host_addr) diff --git a/python/mrt/frontend/python/rpc/utils.py b/python/mrt/frontend/python/rpc/utils.py new file mode 100644 index 00000000..a55445ec --- /dev/null +++ b/python/mrt/frontend/python/rpc/utils.py @@ -0,0 +1,25 @@ +import sys +import io + +from yacs.config import CfgNode as CN + +from mrt.V3.execute import run +from rpc import streamer +from rpc.log import get_logger + +def get_streamer(yaml_file_str): + cfg = CN().load_cfg(yaml_file_str) + cfg.freeze() + logger = get_logger(cfg.COMMON.VERBOSITY, streamer.printer) + my_streamer = streamer.Streamer(run, (cfg, logger)) + return my_streamer + +def stringify_cfg(cfg): + # TODO(ryt): replace by appropriately + # configured yacs interface cfg.dump(**kwargs) + old_stdout = 
sys.stdout + sys.stdout = new_stdout = io.StringIO() + print(cfg) + yaml_file_str = new_stdout.getvalue() + sys.stdout = old_stdout + return yaml_file_str diff --git a/python/mrt/frontend/requirements.txt b/python/mrt/frontend/requirements.txt new file mode 100644 index 00000000..e2b6ba4c --- /dev/null +++ b/python/mrt/frontend/requirements.txt @@ -0,0 +1,3 @@ +grpcio +grpcio-tools +protobuf diff --git a/python/mrt/frontend/web/__init__.py b/python/mrt/frontend/web/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/frontend/web/asgi.py b/python/mrt/frontend/web/asgi.py new file mode 100644 index 00000000..90e7fb23 --- /dev/null +++ b/python/mrt/frontend/web/asgi.py @@ -0,0 +1,23 @@ +""" +ASGI config for web project. + +It exposes the ASGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ +""" + +import os + +from channels.auth import AuthMiddlewareStack +from channels.routing import ProtocolTypeRouter, URLRouter +from django.core.asgi import get_asgi_application +import web.routing + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') + +application = ProtocolTypeRouter({ + "http": get_asgi_application(), + "websocket": AuthMiddlewareStack( + URLRouter(web.routing.websocket_urlpatterns)), +}) diff --git a/python/mrt/frontend/web/consumers.py b/python/mrt/frontend/web/consumers.py new file mode 100644 index 00000000..2793257f --- /dev/null +++ b/python/mrt/frontend/web/consumers.py @@ -0,0 +1,104 @@ +import json +import yaml +import os +from os import path + +from channels.generic.websocket import WebsocketConsumer + +from mrt.V3.utils import merge_cfg, get_cfg_defaults +from mrt.V3.execute import run +from rpc.service import mrt_execute, mrt_submit +from rpc.utils import stringify_cfg +from .protocol import type_cast + + +class MRTExecuteConsumer(WebsocketConsumer): + def connect(self): + self.accept() + + def 
disconnect(self, close_code): + pass + + def receive(self, text_data): + json_from_js = json.loads(text_data) + json_data = {} + ref_cfg = get_cfg_defaults() + for stage, stage_data in json_from_js['yaml'].items(): + sub_type_cast = type_cast[stage] + sub_json_data = {} + stage_ref_data = getattr(ref_cfg, stage) + for attr, data in stage_data.items(): + if data == '': + data = getattr(stage_ref_data, attr) + elif attr in sub_type_cast: + cast_func = sub_type_cast[attr] + data = cast_func(data) + sub_json_data[attr] = data + json_data[stage] = sub_json_data + yaml_file_str = yaml.dump(json_data) + host_addr = json_from_js['host'] + for message in mrt_execute(yaml_file_str, host_addr=host_addr): + self.send(text_data=json.dumps({'message': message})) + self.send( + text_data=json.dumps({'activate': None})) + + +class ModelSubmitConsumer(WebsocketConsumer): + def connect(self): + self.accept() + + def disconnect(self, close_code): + pass + + def receive(self, text_data): + json_from_js = json.loads(text_data) + json_data = {} + host_addr = json_from_js['host'] + src_sym_file = json_from_js['symbol'] + src_prm_file = json_from_js['params'] + dst_model_dir = json_from_js['dst'] + cnt = 0 + for message in mrt_submit( + src_sym_file, src_prm_file, dst_model_dir, + host_addr=host_addr): + cnt += 1 + dct = {'message': message} + if cnt == 1: + dct['first'] = 1 + self.send(text_data=json.dumps(dct)) + self.send( + text_data=json.dumps({'activate': None})) + + +class YAMLInitConsumer(WebsocketConsumer): + def connect(self): + self.accept() + + def disconnect(self, close_code): + pass + + def receive(self, text_data): + cfg = get_cfg_defaults() + self.send(text_data=json.dumps(cfg)) + + +class YAMLUpdateConsumer(WebsocketConsumer): + def connect(self): + self.accept() + + def disconnect(self, close_code): + pass + + def receive(self, text_data): + text_data_json = json.loads(text_data) + yaml_file = text_data_json['yaml_file'] + cfg = merge_cfg(yaml_file) + 
self.send(text_data=json.dumps(cfg)) + + +class YAMLClearConsumer(YAMLInitConsumer): + pass + + +class ConfigWrapperConsumer(YAMLInitConsumer): + pass diff --git a/python/mrt/frontend/web/protocol.py b/python/mrt/frontend/web/protocol.py new file mode 100644 index 00000000..1f54abb7 --- /dev/null +++ b/python/mrt/frontend/web/protocol.py @@ -0,0 +1,57 @@ +import json + +str2listofeval = lambda v: [eval(s) for s in v.split(',')] +str2eval = lambda v: eval(v) +str2listofstr = lambda v: [s.strip() for s in v.split(',')] + +def str2bool(v): + if v == "true": + ret = True + elif v == "false": + ret = False + else: + raise RuntimeError("invalid v: {}".format(v)) + return ret + +# def str2attribute_deps(v): + # print(v) + # ret = json.loads(v) + # return ret + +type_cast = { + "COMMON": { + "DEVICE_IDS": str2listofeval, + "BATCH": str2eval, + "RUN_EVALUATE": str2bool, + "RUN_COMPILE": str2bool, + }, + "PREPARE": { + "DEVICE_IDS": str2listofeval, + "INPUT_SHAPE": str2listofeval, + "SPLIT_KEYS": str2listofstr, + }, + "CALIBRATE": { + "BATCH": str2eval, + "NUM_CALIB": str2eval, + "LAMBD": str2eval, + "DEVICE_IDS": str2listofeval, + }, + "QUANTIZE": { + "RESTORE_NAMES": str2listofstr, + "INPUT_PRECISION": str2eval, + "OUTPUT_PRECISION": str2eval, + "DEVICE_IDS": str2listofeval, + "SOFTMAX_LAMBD": str2eval, + "SHIFT_BITS": str2eval, + # TODO ATTRIBUTE_DEPS, OSCALE_MAPS, THRESHOLDS + }, + "EVALUATE": { + "BATCH": str2eval, + "DEVICE_IDS": str2listofeval, + "ITER_NUM": str2eval, + }, + "COMPILE": { + "BATCH": str2eval, + "DEVICE_IDS": str2listofeval, + }, +} diff --git a/python/mrt/frontend/web/routing.py b/python/mrt/frontend/web/routing.py new file mode 100644 index 00000000..72643818 --- /dev/null +++ b/python/mrt/frontend/web/routing.py @@ -0,0 +1,24 @@ +from django.urls import re_path + +from . 
import consumers + +websocket_urlpatterns = [ + re_path( + r'ws/web/mrt/execute/(?P\w+)/$', + consumers.MRTExecuteConsumer.as_asgi()), + re_path( + r'ws/web/yaml/init/(?P\w+)/$', + consumers.YAMLInitConsumer.as_asgi()), + re_path( + r'ws/web/yaml/update/(?P\w+)/$', + consumers.YAMLUpdateConsumer.as_asgi()), + re_path( + r'ws/web/yaml/clear/(?P\w+)/$', + consumers.YAMLClearConsumer.as_asgi()), + re_path( + r'ws/web/config/wrapper/(?P\w+)/$', + consumers.ConfigWrapperConsumer.as_asgi()), + re_path( + r'ws/web/model/submit/(?P\w+)/$', + consumers.ModelSubmitConsumer.as_asgi()), +] diff --git a/python/mrt/frontend/web/settings.py b/python/mrt/frontend/web/settings.py new file mode 100644 index 00000000..f95f5bb5 --- /dev/null +++ b/python/mrt/frontend/web/settings.py @@ -0,0 +1,144 @@ +""" +Django settings for web project. + +Generated by 'django-admin startproject' using Django 3.2.9. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/3.2/ref/settings/ +""" + +from pathlib import Path +from os import path + +# Build paths inside the project like this: BASE_DIR / 'subdir'. +BASE_DIR = Path(__file__).resolve().parent.parent + + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! +SECRET_KEY = 'django-insecure-cpi#8nm8p_dm!)kkn+^ugib_g*=ip224p5s@5_&aj1nz!$p)uh' + +# SECURITY WARNING: don't run with debug turned on in production! 
+DEBUG = True + +# ALLOWED_HOSTS = [] +ALLOWED_HOSTS = ['*'] + + +# Application definition + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', + 'web', + 'channels', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'whitenoise.middleware.WhiteNoiseMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +ROOT_URLCONF = 'web.urls' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +WSGI_APPLICATION = 'web.wsgi.application' +ASGI_APPLICATION = 'web.asgi.application' + + +# Database +# https://docs.djangoproject.com/en/3.2/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': BASE_DIR / 'db.sqlite3', + } +} + +import dj_database_url + +db_from_env = dj_database_url.config(conn_max_age=500) +DATABASES['default'].update(db_from_env) + + +# Password validation +# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + }, + { + 
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + }, +] + + +# Internationalization +# https://docs.djangoproject.com/en/3.2/topics/i18n/ + +LANGUAGE_CODE = 'en-us' + +TIME_ZONE = 'UTC' + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + + +# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/3.2/howto/static-files/ + +STATICFILES_DIRS = [ + path.join(BASE_DIR, "javascript"), +] + +STATIC_ROOT = BASE_DIR / 'staticfiles' + +STATIC_URL = '/static/' + +STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' + +# Default primary key field type +# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field + +DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' diff --git a/python/mrt/frontend/web/templates/room.html b/python/mrt/frontend/web/templates/room.html new file mode 100644 index 00000000..07f385b0 --- /dev/null +++ b/python/mrt/frontend/web/templates/room.html @@ -0,0 +1,144 @@ + + +{% load static %} + + + + Chat Room + + + Destination Host Address
+ +
+ +
+ +
+ +
+ +
+
+ +
+ +
+ Local Symbol File Path
+ Local Params File Path
+
+
+ +
+ +
+ Local YAML File Path + + +
+
+ +
+ +

YAML Configuration Zone

+ + +
+

COMMON

+ PASS_NAME
+ MODEL_DIR
+ MODEL_NAME
+ VERBOSITY
+ START_AFTER
+ DEVICE_TYPE
+ DEVICE_IDS
+ BATCH
+ RUN_EVALUATE
+ RUN_COMPILE
+
+ + +
+ + +
+

PREPARE

+ DEVICE_TYPE
+ DEVICE_IDS
+ INPUT_SHAPE
+ SPLIT_KEYS
+
+ + +
+ + +
+

CALIBRATE

+ BATCH
+ NUM_CALIB
+ LAMBD
+ DATASET_NAME
+ DATASET_DIR
+ DEVICE_TYPE
+ DEVICE_IDS
+
+ + +
+ + +
+

QUANTIZATE

+ RESTORE_NAMES
+ INPUT_PRECISION
+ OUTPUT_PRECISION
+ DEVICE_TYPE
+ DEVICE_IDS
+ SOFTMAX_LAMBD
+ SHIFT_BITS
+ THRESHOLDS
+ ATTRIBUTE_DEPS
+ OSCALE_MAPS
+
+ + +
+ + +
+

EVALUATE

+ BATCH
+ DEVICE_TYPE
+ DEVICE_IDS
+ ITER_NUM
+
+ + +
+ + +
+

COMPLIE

+ BATCH
+ DUMP_DIR
+ DEVICE_TYPE
+ DEVICE_IDS
+
+ + {{ room_name|json_script:"room-name" }} + + + + + + + + + diff --git a/python/mrt/frontend/web/urls.py b/python/mrt/frontend/web/urls.py new file mode 100644 index 00000000..ac730d5d --- /dev/null +++ b/python/mrt/frontend/web/urls.py @@ -0,0 +1,22 @@ +"""web URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en/3.2/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. Add an import: from other_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. Import the include() function: from django.urls import include, path + 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) +""" +from django.urls import path + +from . import views + +urlpatterns = [ + path('/', views.room) +] diff --git a/python/mrt/frontend/web/views.py b/python/mrt/frontend/web/views.py new file mode 100644 index 00000000..70d876df --- /dev/null +++ b/python/mrt/frontend/web/views.py @@ -0,0 +1,4 @@ +from django.shortcuts import render + +def room(request, room_name): + return render(request, "room.html", {"room_name": room_name}) diff --git a/python/mrt/frontend/web/wsgi.py b/python/mrt/frontend/web/wsgi.py new file mode 100644 index 00000000..d092b9ab --- /dev/null +++ b/python/mrt/frontend/web/wsgi.py @@ -0,0 +1,16 @@ +""" +WSGI config for web project. + +It exposes the WSGI callable as a module-level variable named ``application``. 
+ +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ +""" + +import os + +from django.core.wsgi import get_wsgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') + +application = get_wsgi_application() From 22e3aa65503dd3d0cc8c317600dac2cf09de9b98 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 28 Dec 2021 12:15:24 +0800 Subject: [PATCH 081/120] upt --- python/mrt/V3/evaluate.py | 1 + python/mrt/frontend/python/rpc/service.py | 4 +++- .../mrt/prediction_SCTF}/prediction_SCTF.yaml | 6 +++--- tests/mrt/prediction_SCTF/preprocess.py | 0 tests/mrt/yolov5s/preprocess.py | 2 ++ 5 files changed, 9 insertions(+), 4 deletions(-) rename {python/mrt/V3/model_zoo => tests/mrt/prediction_SCTF}/prediction_SCTF.yaml (85%) create mode 100644 tests/mrt/prediction_SCTF/preprocess.py diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index 823b3898..cda9bbac 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -92,6 +92,7 @@ def evalfunc(data, label): 'data': set_batch(input_shape, split_batch)}) qgraph = rqmodel.to_graph(ctx=ctx) qmetric = dataset.metrics() + # move to test try: data, _ = dataset.iter_func()() data = sim.load_real_data(data, 'data', inputs_ext) diff --git a/python/mrt/frontend/python/rpc/service.py b/python/mrt/frontend/python/rpc/service.py index b09242d4..f66bbc0d 100644 --- a/python/mrt/frontend/python/rpc/service.py +++ b/python/mrt/frontend/python/rpc/service.py @@ -11,7 +11,9 @@ # TODO(ryt): load balancer for maxinum_workers maximum_workers = 4 -local_addr = "127.0.0.1:5000" +# local_addr = "127.0.0.1:5000" +# socket host difference +local_addr = "0.0.0.0:5000" chunk_size = 1024 * 1024 # 1MB def mrt_submit( diff --git a/python/mrt/V3/model_zoo/prediction_SCTF.yaml b/tests/mrt/prediction_SCTF/prediction_SCTF.yaml similarity index 85% rename from python/mrt/V3/model_zoo/prediction_SCTF.yaml rename to tests/mrt/prediction_SCTF/prediction_SCTF.yaml 
index 28fad154..5300c9f6 100644 --- a/python/mrt/V3/model_zoo/prediction_SCTF.yaml +++ b/tests/mrt/prediction_SCTF/prediction_SCTF.yaml @@ -1,17 +1,17 @@ COMMON: MODEL_NAME: prediction_SCTF VERBOSITY: info - RUN_EVALUATE: True + RUN_EVALUATE: False PREPARE: INPUT_SHAPE: [-1, 1, 3] CALIBRATE: NUM_CALIB: 1 - LAMBD: 16 + # LAMBD: 16 DATASET_NAME: mnist DEVICE_TYPE: gpu DEVICE_IDS: [0] QUANTIZE: - OUTPUT_PRECISION: 30 + # OUTPUT_PRECISION: 30 DEVICE_TYPE: gpu DEVICE_IDS: [0] EVALUATE: diff --git a/tests/mrt/prediction_SCTF/preprocess.py b/tests/mrt/prediction_SCTF/preprocess.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/mrt/yolov5s/preprocess.py b/tests/mrt/yolov5s/preprocess.py index cf73710a..a0cc1574 100644 --- a/tests/mrt/yolov5s/preprocess.py +++ b/tests/mrt/yolov5s/preprocess.py @@ -48,6 +48,8 @@ def unify(sym, params, logger=logging.getLogger("unify")): sym = mx.sym.load_json(sym_json_str) # check params + # model exported from mxnet hybrid block compatibility, + # remove the unnecessary prefix, hack param_keys = {} for k in params: if k.startswith("arg:") or k.startswith("aux:"): From 3cb7812a6119ac80aa5c724666ddac14809a57a2 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 28 Dec 2021 13:06:22 +0800 Subject: [PATCH 082/120] elemwisemul rewrite --- python/mrt/tfm_ops.py | 55 ++++++++++++++++++++++------------ tests/mrt/yolov5s/yolov5s.yaml | 4 +-- 2 files changed, 38 insertions(+), 21 deletions(-) diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index 1a7c6a12..16912032 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -1901,35 +1901,52 @@ def quantize(self, op, **kwargs): return _quantize_scale(op, **kwargs) -@register_pass("compile") -@register_pass("prepare_for_compile") -@register_pass("rewrite") +# @register_pass("compile") +# @register_pass("prepare_for_compile") +# @register_pass("rewrite") @register_transformer("elemwise_mul") class ElemwiseMul(Transformer): def fuse_transpose(self, op, **kwargs): return 
_ft_multi_input(op) - def quantize(self, op, **kwargs): - precs, scales = kwargs['precs'], kwargs['scales'] + def rewrite(self, op, **kwargs): name, op_name = op.attr('name'), op.attr('op_name') - childs, attr = sym_iter(op.get_children()), op.list_attr() - cns = [c.attr('name') for c in childs] if childs else [] - - oprec = kwargs['op_input_precs'][op_name] - X, xprec, xs = requant(childs[0], oprec, oname=name, **kwargs) - W, wprec, ws = requant(childs[1], oprec, oname=name, **kwargs) - scales[name] = ws * xs - op = get_mxnet_op(op_name)(X, W, **attr, name=name) + childs = sym_iter(op.get_children()) - infer_prec = xprec + wprec - kwargs['precs'][name][OUT_KEY] = infer_prec + # validate the infer_shapes of lhs and rhs must be the same + # thus this op could be rewrite into broadcast_mul + # corresponding cvm op would be optimized at compile time + ln, rn = [c.attr('name') for c in childs] + infer_shapes = kwargs['infer_shapes'] + lshp, rshp = infer_shapes[ln], infer_shapes[rn] + assert lshp == rshp, \ + "lhs infer_shape: {}, rhs infer_shape: {}".format(lshp, rshp) - logger = logging.getLogger('log.mrt.realize') - logger.debug( - "operator %-20s name=%-40s oscale=%s, iscale=%s", - op_name, name, scales[name], cns) + lhs, rhs = childs + op = mx.sym.broadcast_mul(lhs, rhs, name=name) return op + # def quantize(self, op, **kwargs): + # precs, scales = kwargs['precs'], kwargs['scales'] + # name, op_name = op.attr('name'), op.attr('op_name') + # childs, attr = sym_iter(op.get_children()), op.list_attr() + # cns = [c.attr('name') for c in childs] if childs else [] + + # oprec = kwargs['op_input_precs'][op_name] + # X, xprec, xs = requant(childs[0], oprec, oname=name, **kwargs) + # W, wprec, ws = requant(childs[1], oprec, oname=name, **kwargs) + # scales[name] = ws * xs + # op = get_mxnet_op(op_name)(X, W, **attr, name=name) + + # infer_prec = xprec + wprec + # kwargs['precs'][name][OUT_KEY] = infer_prec + + # logger = logging.getLogger('log.mrt.realize') + # 
logger.debug( + # "operator %-20s name=%-40s oscale=%s, iscale=%s", + # op_name, name, scales[name], cns) + # return op + @register_pass("validate") @register_pass("calculate_ops") diff --git a/tests/mrt/yolov5s/yolov5s.yaml b/tests/mrt/yolov5s/yolov5s.yaml index fd6caf8f..21dd8e0e 100644 --- a/tests/mrt/yolov5s/yolov5s.yaml +++ b/tests/mrt/yolov5s/yolov5s.yaml @@ -1,8 +1,8 @@ COMMON: - MODEL_NAME: yolov5s.preprocess.unify.broadcastify + MODEL_NAME: yolov5s.preprocess.unify # MODEL_NAME: yolov5s.preprocess.unify VERBOSITY: info - RUN_EVALUATE: True + RUN_EVALUATE: False PREPARE: INPUT_SHAPE: [-1, 3, 640, 640] CALIBRATE: From 846be3ebcf48e0dd4ed3c7d65163f4576e125f24 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 28 Dec 2021 13:46:47 +0800 Subject: [PATCH 083/120] unify name in json, remove params key prefix --- python/mrt/tfm_pass.py | 46 ++++++++++++++++++++++++++++++++++ python/mrt/transformer.py | 5 ++++ tests/mrt/yolov5s/yolov5s.yaml | 3 ++- 3 files changed, 53 insertions(+), 1 deletion(-) diff --git a/python/mrt/tfm_pass.py b/python/mrt/tfm_pass.py index 3a6fdd55..b195f729 100644 --- a/python/mrt/tfm_pass.py +++ b/python/mrt/tfm_pass.py @@ -7,6 +7,7 @@ import math import numpy as np import time +from copy import deepcopy from .tfm_utils import get_bit, scale, requant from .sym_utils import is_var, is_params, is_inputs @@ -345,6 +346,34 @@ def name_duplicate_check(symbol, params): assert name not in names, "duplicated name in graph: %s" % name names.add(name) +@N.register_nm("unify") +def unify_name_json( + symbol, params, logger=logging.getLogger("unify_name_json")): + + # check symbol + sym_json_str = symbol.tojson() + sym_json_dict = json.loads(sym_json_str) + nodes = sym_json_dict["nodes"] + + name_cnts = {} + nnodes = [] + for node in nodes: + name = node["name"] + if name in name_cnts: + cur_cnt = name_cnts[name] = N.n(name) + logger.info("duplicate name: {}".format(name)) + nnode = deepcopy(node) + nnode["name"] = "{}_{}".format(name, cur_cnt) + 
nnodes.append(nnode) + else: + name_cnts[name] = 1 + nnodes.append(node) + + sym_json_dict["nodes"] = nnodes + sym_json_str = json.dumps(sym_json_dict) + sym = mx.sym.load_json(sym_json_str) + return sym, params + def params_unique(symbol, params): """ Remove duplicate keys params dict. """ @@ -352,6 +381,23 @@ def params_unique(symbol, params): for s in topo_sort(symbol) if is_params(s, params)} return symbol, new_params +def remove_params_prefix( + symbol, params, logger=logging.getLogger("remove_params_prefix")): + # check params + # model exported from mxnet hybrid block compatibility, + # remove the unnecessary prefix, hack + param_keys = {} + for k in params: + if k.startswith("arg:") or k.startswith("aux:"): + nk = k[4:] + else: + nk = k + if nk in param_keys: + assert False, nk + param_keys[k] = nk + params = {param_keys[k]: v for k, v in params.items()} + return symbol, params + def input_name_replace(symbol, params): """ Customized graph-level topo pass definition. diff --git a/python/mrt/transformer.py b/python/mrt/transformer.py index 322411a1..1a9bb02b 100644 --- a/python/mrt/transformer.py +++ b/python/mrt/transformer.py @@ -113,6 +113,11 @@ def init(model, input_shape=None): logger.info("Model initializing...") _sym, _prm = model.symbol, model.params + + # unify graph names and check graph params + _sym, _prm = tpass.unify_name_json(_sym, _prm) + _sym, _prm = tpass.remove_params_prefix(_sym, _prm) + tpass.name_duplicate_check(_sym, _prm) if isinstance(input_shape, dict): diff --git a/tests/mrt/yolov5s/yolov5s.yaml b/tests/mrt/yolov5s/yolov5s.yaml index 21dd8e0e..f20b919f 100644 --- a/tests/mrt/yolov5s/yolov5s.yaml +++ b/tests/mrt/yolov5s/yolov5s.yaml @@ -1,6 +1,7 @@ COMMON: - MODEL_NAME: yolov5s.preprocess.unify + # MODEL_NAME: yolov5s.preprocess.unify.broadcastify # MODEL_NAME: yolov5s.preprocess.unify + MODEL_NAME: yolov5s VERBOSITY: info RUN_EVALUATE: False PREPARE: From 10ed36b96cf6726651be035881d744ca6a828133 Mon Sep 17 00:00:00 2001 From: ryt 
Date: Thu, 30 Dec 2021 09:05:51 +0800 Subject: [PATCH 084/120] std random dataset --- tests/mrt/prediction_SCTF/preprocess.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tests/mrt/prediction_SCTF/preprocess.py b/tests/mrt/prediction_SCTF/preprocess.py index e69de29b..73a178b4 100644 --- a/tests/mrt/prediction_SCTF/preprocess.py +++ b/tests/mrt/prediction_SCTF/preprocess.py @@ -0,0 +1,22 @@ +from mrt import dataset as ds + +from mxnet import ndarray as nd + + +@register_dataset("stdrandom") +class StdRandomDataset(ds.Dataset): + def _load_data(self): + def data_loader(): + N, I, C = self.ishape + assert I == 1 and C == 3 + data, label = [], [] + while True: + if len(data) < N: + x = nd.random.uniform(low=0.0,high=1.0,shape=(I,C)) + y = nd.random.uniform(low=0.0,high=1.0,shape=(I)) + data.append(x) + label.append(y) + else: + yield nd.array(data), nd.array(label) + data, label = [], [] + self.data = data_loader() From 4ad2108d72d73f5d7c899d87bea00c0aef051de7 Mon Sep 17 00:00:00 2001 From: ryt Date: Thu, 30 Dec 2021 10:39:36 +0800 Subject: [PATCH 085/120] test_prediction_SCTF.py --- main2.py | 20 ++--------------- python/mrt/V3/evaluate.py | 8 ------- python/mrt/V3/utils.py | 18 +++++++++++++++ .../mrt/prediction_SCTF/prediction_SCTF.yaml | 22 ------------------- tests/mrt/prediction_SCTF/preprocess.py | 22 ------------------- 5 files changed, 20 insertions(+), 70 deletions(-) delete mode 100644 tests/mrt/prediction_SCTF/prediction_SCTF.yaml delete mode 100644 tests/mrt/prediction_SCTF/preprocess.py diff --git a/main2.py b/main2.py index 5f7a83b8..df1648ba 100644 --- a/main2.py +++ b/main2.py @@ -1,30 +1,14 @@ from os import path import sys -from mrt.V3.utils import get_cfg_defaults, merge_cfg +from mrt.V3.utils import get_cfg_defaults, merge_cfg, override_cfg_args from mrt.V3.execute import run -def override_cfg_args(cfg, argv): - if cfg.is_frozen(): - cfg.defrost() - - for i in range(2, len(argv), 2): - attr, value = argv[i:i+2] - try: 
- value = eval(value) - except: - pass - pass_name, pass_attr = [s.upper() for s in attr[2:].split(".")] - cnode = getattr(cfg, pass_name) - setattr(cnode, pass_attr, value) - cfg.freeze() - return cfg - if __name__ == "__main__": assert len(sys.argv) >= 2 and len(sys.argv)%2 == 0, \ "invalid length: {} of sys.argv: {}".format(len(sys.argv), sys.argv) yaml_file = sys.argv[1] cfg = get_cfg_defaults() cfg = merge_cfg(yaml_file) - cfg = override_cfg_args(cfg, sys.argv) + cfg = override_cfg_args(cfg, sys.argv[2:]) run(cfg) diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index cda9bbac..a3cc4c59 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -92,14 +92,6 @@ def evalfunc(data, label): 'data': set_batch(input_shape, split_batch)}) qgraph = rqmodel.to_graph(ctx=ctx) qmetric = dataset.metrics() - # move to test - try: - data, _ = dataset.iter_func()() - data = sim.load_real_data(data, 'data', inputs_ext) - outs = forward(qgraph, data, ctx) - logger.debug("shape of outs: {}".format([o.shape for o in outs])) - except: - raise RuntimeError("Quantized Graph could not forward") def quantize(data, label): data = sim.load_real_data(data, 'data', inputs_ext) diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index ab09d48d..c1c17e8d 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -157,3 +157,21 @@ def revise_cfg(cfg, stage, attr, value): subcfg = getattr(cfg, stage) setattr(subcfg, attr, value) cfg.freeze() + +def override_cfg_args(cfg, mrt_argv): + if not mrt_argv: + return cfg + if cfg.is_frozen(): + cfg.defrost() + + for i in range(len(mrt_argv), 2): + attr, value = mrt_argv[i:i+2] + try: + value = eval(value) + except: + pass + pass_name, pass_attr = [s.upper() for s in attr[2:].split(".")] + cnode = getattr(cfg, pass_name) + setattr(cnode, pass_attr, value) + cfg.freeze() + return cfg diff --git a/tests/mrt/prediction_SCTF/prediction_SCTF.yaml b/tests/mrt/prediction_SCTF/prediction_SCTF.yaml deleted 
file mode 100644 index 5300c9f6..00000000 --- a/tests/mrt/prediction_SCTF/prediction_SCTF.yaml +++ /dev/null @@ -1,22 +0,0 @@ -COMMON: - MODEL_NAME: prediction_SCTF - VERBOSITY: info - RUN_EVALUATE: False -PREPARE: - INPUT_SHAPE: [-1, 1, 3] -CALIBRATE: - NUM_CALIB: 1 - # LAMBD: 16 - DATASET_NAME: mnist - DEVICE_TYPE: gpu - DEVICE_IDS: [0] -QUANTIZE: - # OUTPUT_PRECISION: 30 - DEVICE_TYPE: gpu - DEVICE_IDS: [0] -EVALUATE: - # in this model, the BATCH should be set as 16 - BATCH: 64 - DEVICE_TYPE: gpu - DEVICE_IDS: [0] - ITER_NUM: 10 diff --git a/tests/mrt/prediction_SCTF/preprocess.py b/tests/mrt/prediction_SCTF/preprocess.py deleted file mode 100644 index 73a178b4..00000000 --- a/tests/mrt/prediction_SCTF/preprocess.py +++ /dev/null @@ -1,22 +0,0 @@ -from mrt import dataset as ds - -from mxnet import ndarray as nd - - -@register_dataset("stdrandom") -class StdRandomDataset(ds.Dataset): - def _load_data(self): - def data_loader(): - N, I, C = self.ishape - assert I == 1 and C == 3 - data, label = [], [] - while True: - if len(data) < N: - x = nd.random.uniform(low=0.0,high=1.0,shape=(I,C)) - y = nd.random.uniform(low=0.0,high=1.0,shape=(I)) - data.append(x) - label.append(y) - else: - yield nd.array(data), nd.array(label) - data, label = [], [] - self.data = data_loader() From ddbba70a5d9bd39d6ac101562129af823e0b24a8 Mon Sep 17 00:00:00 2001 From: ryt Date: Thu, 30 Dec 2021 10:47:17 +0800 Subject: [PATCH 086/120] upt --- tests/mrt/model_zoo/prediction_SCTF.yaml | 22 +++++++++++++ tests/mrt/test_prediction_SCTF.py | 39 ++++++++++++++++++++++++ 2 files changed, 61 insertions(+) create mode 100644 tests/mrt/model_zoo/prediction_SCTF.yaml create mode 100644 tests/mrt/test_prediction_SCTF.py diff --git a/tests/mrt/model_zoo/prediction_SCTF.yaml b/tests/mrt/model_zoo/prediction_SCTF.yaml new file mode 100644 index 00000000..3623cc0a --- /dev/null +++ b/tests/mrt/model_zoo/prediction_SCTF.yaml @@ -0,0 +1,22 @@ +COMMON: + MODEL_NAME: prediction_SCTF + VERBOSITY: info + 
RUN_EVALUATE: False +PREPARE: + INPUT_SHAPE: [-1, 1, 3] +CALIBRATE: + NUM_CALIB: 1 + # LAMBD: 16 + DATASET_NAME: stdrandom + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + # OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + # in this model, the BATCH should be set as 16 + BATCH: 64 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10 diff --git a/tests/mrt/test_prediction_SCTF.py b/tests/mrt/test_prediction_SCTF.py new file mode 100644 index 00000000..d9e4897d --- /dev/null +++ b/tests/mrt/test_prediction_SCTF.py @@ -0,0 +1,39 @@ +from os import path +import sys + +from mxnet import ndarray as nd +import numpy as np + +from mrt.V3.utils import get_cfg_defaults, merge_cfg, override_cfg_args +from mrt.V3.execute import run +from mrt import dataset as ds + + +@ds.register_dataset("stdrandom") +class StdRandomDataset(ds.Dataset): + def _load_data(self): + def data_loader(): + N, I, C = self.ishape + assert I == 1 and C == 3 + data, label = [], [] + while True: + if len(data) < N: + x = np.random.uniform(low=0.0, high=1.0, size=(I,C)) + y = np.random.uniform(low=0.0, high=1.0, size=(I)) + data.append(x) + label.append(y) + else: + batch_data, batch_label = nd.array(data), nd.array(label) + yield batch_data, batch_label + data, label = [], [] + self.data = data_loader() + +if __name__ == "__main__": + assert len(sys.argv) >= 1 and len(sys.argv)%2 == 1, \ + "invalid length: {} of sys.argv: {}".format(len(sys.argv), sys.argv) + yaml_file = path.join( + path.dirname(path.realpath(__file__)), "model_zoo", "prediction_SCTF.yaml") + cfg = get_cfg_defaults() + cfg = merge_cfg(yaml_file) + cfg = override_cfg_args(cfg, sys.argv[1:]) + run(cfg) From 1333f4918e41b9696149991b40387057ad87a7ed Mon Sep 17 00:00:00 2001 From: ryt Date: Thu, 30 Dec 2021 13:41:07 +0800 Subject: [PATCH 087/120] forward_utils.py --- tests/mrt/forward_utils.py | 86 +++++++++++++++++++++++++++++++ tests/mrt/test_prediction_SCTF.py | 2 + 2 files changed, 88 insertions(+) create mode 100644 
tests/mrt/forward_utils.py diff --git a/tests/mrt/forward_utils.py b/tests/mrt/forward_utils.py new file mode 100644 index 00000000..0b873108 --- /dev/null +++ b/tests/mrt/forward_utils.py @@ -0,0 +1,86 @@ +import mxnet as mx +from mxnet import gluon, ndarray as nd + +from mrt.transformer import Model, MRT, reduce_graph +from mrt import dataset as ds +from mrt import sim_quant_helper as sim +from mrt.V3.utils import ( + get_model_prefix, get_logger, set_batch, load_fname, load_conf, + check_file_existance, get_ctx, get_batch_axis) + +def test_quantized_forward(cm_cfg, pass_cfg, logger=None): + model_dir = cm_cfg.MODEL_DIR + model_name = cm_cfg.MODEL_NAME + verbosity = cm_cfg.VERBOSITY + device_type = pass_cfg.DEVICE_TYPE + device_ids = pass_cfg.DEVICE_IDS + batch = pass_cfg.BATCH + + model_prefix = get_model_prefix(model_dir, model_name) + if logger is None: + logger = get_logger(verbosity) + conf_quant_file = model_prefix + ".quantize.conf" + check_file_existance(conf_quant_file, logger=logger) + conf_map = load_conf(conf_quant_file, logger=logger) + ctx = get_ctx(device_type, device_ids) + if isinstance(ctx, mx.Context): + ctx = [ctx] + + # forward function for the orginal model + omodel = Model.load(*load_fname(model_prefix)) + dataset_name = conf_map["dataset_name"] + input_shape = conf_map["input_shape"] + dataset = ds.DS_REG[dataset_name](set_batch(input_shape, batch)) + baxis = get_batch_axis(input_shape) + olen = len(omodel.symbol) + + def forward(net, data, ctx): + """ Multiple xpu run support. 
+ """ + data = gluon.utils.split_and_load( + data, ctx_list=ctx, batch_axis=baxis, even_split=False) + outs = [net(d) for d in data] + if olen == 1: + outs = nd.concatenate(outs) + else: + outs = [nd.concatenate([outs[i][j] \ + for i in range(len(outs))]) for j in range(olen)] + return outs + + + # forward function for the quantized model + num_xpus = len(ctx) + if batch % num_xpus: + raise RuntimeError("Batch must be divisible by the number of xpus") + split_batch = batch // num_xpus + if conf_map.get("split_keys", "") != "": + sym_all_file, prm_all_file, ext_all_file = load_fname( + model_prefix, suffix="all.quantize", with_ext=True) + check_file_existance( + sym_all_file, prm_all_file, ext_all_file, logger=logger) + qmodel = Model.load(sym_all_file, prm_all_file) + oscale, inputs_ext = sim.load_ext(ext_all_file) + else: + sym_quant_file, prm_quant_file, ext_quant_file = load_fname( + model_prefix, suffix="mrt.quantize", with_ext=True) + check_file_existance( + sym_quant_file, prm_quant_file, ext_quant_file, logger=logger) + mrt = MRT.load(model_name+".mrt.quantize", datadir=model_dir) + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + qmodel = mrt.current_model + + rqmodel = reduce_graph(qmodel, {'data': set_batch(input_shape, split_batch)}) + qgraph = rqmodel.to_graph(ctx=ctx) + + try: + data, _ = dataset.iter_func()() + data = sim.load_real_data(data, 'data', inputs_ext) + outs = forward(qgraph, data, ctx) + outs = outs / oscales[0] if olen == 1 \ + else [(t / oscales[i]) for i, t in enumerate(outs)] + logger.info( + "shape of outs: {}".format( + outs.shape if olen == 1 else [o.shape for o in outs])) + except: + raise RuntimeError("Quantized Graph could not forward") diff --git a/tests/mrt/test_prediction_SCTF.py b/tests/mrt/test_prediction_SCTF.py index d9e4897d..06dfc529 100644 --- a/tests/mrt/test_prediction_SCTF.py +++ b/tests/mrt/test_prediction_SCTF.py @@ -7,6 +7,7 @@ from mrt.V3.utils import get_cfg_defaults, merge_cfg, 
override_cfg_args from mrt.V3.execute import run from mrt import dataset as ds +import forward_utils as futils @ds.register_dataset("stdrandom") @@ -37,3 +38,4 @@ def data_loader(): cfg = merge_cfg(yaml_file) cfg = override_cfg_args(cfg, sys.argv[1:]) run(cfg) + # futils.test_quantized_forward(cfg.COMMON, cfg.EVALUATE) From 2ad69cbc06cdda0dffc4b272eea0df1eab9ef2d3 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 31 Dec 2021 14:45:16 +0800 Subject: [PATCH 088/120] test_op_equiv --- tests/mrt/test_op_equiv.py | 256 +++++++++++++++++++++++++++++++++++++ 1 file changed, 256 insertions(+) create mode 100644 tests/mrt/test_op_equiv.py diff --git a/tests/mrt/test_op_equiv.py b/tests/mrt/test_op_equiv.py new file mode 100644 index 00000000..5dea2214 --- /dev/null +++ b/tests/mrt/test_op_equiv.py @@ -0,0 +1,256 @@ +import unittest + +from mxnet import ndarray as nd +import numpy as np + + +class TestOpEquiv(unittest.TestCase): + def assert_equal(self, a, b, places=10): + self.assertEqual(a.shape, b.shape) + nentry = int(nd.prod(nd.array(a.shape)).asscalar()) + res = (a-b).reshape(shape=(nentry,)).asnumpy() + norm = np.linalg.norm(res) + self.assertAlmostEqual(norm, 0.0, places=places) + + def test_elemmul_to_broadcast_mul(self): + """ + ElemwiseMul --> BroadcastMul + """ + def get_data(*x, low=-1000.0, high=1000.0): + lhs = nd.random.uniform(low=low, high=high, shape=x) + rhs = nd.random.uniform(low=low, high=high, shape=x) + return lhs, rhs + inputs = [ + { + "x": [3,2,5], + "low": -55000.0, + "high": 55000.0, + }, + { + "x": [11,5,2], + "low": -7767.0, + "high": 9989.0, + }, + { + "x": [11,5,2,23], + "low": -77672.0, + "high": 99892.0, + }, + ] + for inp in inputs: + x = inp["x"] + low = inp["low"] + high = inp["high"] + lhs, rhs = get_data(*x, low=low, high=high) + out = nd.elemwise_mul(lhs=lhs, rhs=rhs) + out1 = nd.broadcast_mul(lhs=lhs, rhs=rhs) + self.assert_equal(out, out1) + + def test_activation_to_sigmoid(self): + """ + Activation(act_type=sigmoid) --> sigmoid + """ 
+ def get_data(*x, low=-1000.0, high=1000.0): + data = nd.random.uniform(low=low, high=high, shape=x) + return data + inputs = [ + { + "x": [3,2,5], + "low": -55000.0, + "high": 55000.0, + }, + { + "x": [11,5,2], + "low": -7767.0, + "high": 9989.0, + }, + { + "x": [11,5,2,23], + "low": -77672.0, + "high": 99892.0, + }, + ] + for inp in inputs: + x = inp["x"] + low = inp["low"] + high = inp["high"] + data = get_data(*x, low=low, high=high) + out = nd.Activation(data=data, act_type="sigmoid") + out1 = nd.sigmoid(data=data) + self.assert_equal(out, out1, places=4) + + def test_dense_to_dense2d_flatten(self): + """ + FullyConnected(flatten=True) + --> + reshape.FullyConnected(flatten=True) + """ + def get_data( + batch_size, num_hidden, *x, + low=-1000.0, high=1000.0, no_bias=False): + xshp = (batch_size,) + x + product = int(nd.prod(nd.array(x)).asscalar()) + wshp = (num_hidden, product) + data = nd.random.uniform(low=low, high=high, shape=xshp) + weight = nd.random.uniform(low=low, high=high, shape=wshp) + if no_bias: + return data, weight, None, xshp + bshp = (num_hidden,) + bias = nd.random.uniform(low=low, high=high, shape=bshp) + return data, weight, bias + inputs = [ + { + "batch_size": 16, + "num_hidden": 8, + "x": [3,2,5], + "no_bias": True, + "low": -55000.0, + "high": 55000.0, + }, + { + "batch_size": 64, + "num_hidden": 5, + "x": [11,5,2], + "no_bias": False, + "low": -7767.0, + "high": 9989.0, + }, + { + "batch_size": 13, + "num_hidden": 3, + "x": [11,5,2,23], + "no_bias": True, + "low": -77672.0, + "high": 99892.0, + }, + ] + for inp in inputs: + batch_size = inp["batch_size"] + num_hidden = inp["num_hidden"] + x = inp["x"] + no_bias = inp["no_bias"] + low = inp["low"] + high = inp["high"] + # for reference + data, weight, bias = get_data( + batch_size, num_hidden, *x, low=low, high=high) + if no_bias: + out = nd.FullyConnected( + data=data, weight=weight, + no_bias=no_bias, flatten=True, num_hidden=num_hidden) + else: + out = nd.FullyConnected( + 
data=data, weight=weight, bias=bias, + no_bias=no_bias, flatten=True, num_hidden=num_hidden) + # for comparison + xshp = data.shape + shape = (-1,) + xshp[1:] + data1 = nd.reshape(data=data, shape=shape) + if no_bias: + out1 = nd.FullyConnected( + data=data, weight=weight, + no_bias=no_bias, flatten=True, num_hidden=num_hidden) + else: + out1 = nd.FullyConnected( + data=data, weight=weight, bias=bias, + no_bias=no_bias, flatten=True, num_hidden=num_hidden) + # validate + self.assert_equal(out, out1) + + def test_dense_to_dense2d(self): + """ + FullyConnected(flatten=False) + --> + reshape.FullyConnected(flatten=False).reshape + """ + def get_data( + input_dim, num_hidden, *x, + low=-1000.0, high=1000.0, no_bias=False): + xshp = x + (input_dim,) + wshp = (num_hidden, input_dim) + data = nd.random.uniform(low=low, high=high, shape=xshp) + weight = nd.random.uniform(low=low, high=high, shape=wshp) + if no_bias: + return data, weight, None, xshp + bshp = (num_hidden,) + bias = nd.random.uniform(low=low, high=high, shape=bshp) + return data, weight, bias + inputs = [ + { + "input_dim": 16, + "num_hidden": 8, + "x": [3,2,5], + "no_bias": True, + "low": -55000.0, + "high": 55000.0, + "batch_axis": 3, + }, + { + "input_dim": 64, + "num_hidden": 5, + "x": [11,5,2], + "no_bias": False, + "low": -7767.0, + "high": 9989.0, + }, + { + "input_dim": 13, + "num_hidden": 3, + "x": [11,5,2,23], + "no_bias": True, + "low": -77672.0, + "high": 99892.0, + "batch_axis": 3, + }, + ] + for inp in inputs: + input_dim = inp["input_dim"] + num_hidden = inp["num_hidden"] + x = inp["x"] + no_bias = inp["no_bias"] + low = inp["low"] + high = inp["high"] + # for reference + data, weight, bias = get_data( + input_dim, num_hidden, *x, low=low, high=high) + if no_bias: + out = nd.FullyConnected( + data=data, weight=weight, + no_bias=no_bias, flatten=False, num_hidden=num_hidden) + else: + out = nd.FullyConnected( + data=data, weight=weight, bias=bias, + no_bias=no_bias, flatten=False, 
num_hidden=num_hidden) + # for comparison + xshp = data.shape + batch_axis = inp.get("batch_axis", 0) + assert batch_axis < len(xshp), \ + "invalid batch_axis: {}, length of xshp: {}".format( + batch_axis, len(xshp)) + if batch_axis == len(xshp)-1: + product = int(nd.prod(nd.array(xshp)).asscalar()) + res_shp = int(product/xshp[batch_axis]) + shape = (res_shp, -1) + else: + shape = (-1, xshp[-1]) + data1 = nd.reshape(data=data, shape=shape) + if no_bias: + fc = nd.FullyConnected( + data=data1, weight=weight, + no_bias=no_bias, flatten=False, num_hidden=num_hidden) + else: + fc = nd.FullyConnected( + data=data1, weight=weight, bias=bias, + no_bias=no_bias, flatten=False, num_hidden=num_hidden) + if batch_axis == len(xshp)-1: + shape = xshp[:-1] + (num_hidden,) + else: + shape = \ + xshp[:batch_axis] + (-1,) + \ + xshp[batch_axis+1:-1] + (num_hidden,) + out1 = nd.reshape(data=fc, shape=shape) + # validate + self.assert_equal(out, out1) + +if __name__ == "__main__": + unittest.main() From 0405bca153d54471850259412cd32f60f6698086 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 31 Dec 2021 15:59:19 +0800 Subject: [PATCH 089/120] preprocess_prediction_SCTF.py --- python/mrt/V3/utils.py | 2 +- tests/mrt/model_zoo/prediction_SCTF.yaml | 3 +- tests/mrt/preprocess_prediction_SCTF.py | 114 +++++++++++++++++++++++ tests/mrt/test_prediction_SCTF.py | 6 +- 4 files changed, 121 insertions(+), 4 deletions(-) create mode 100644 tests/mrt/preprocess_prediction_SCTF.py diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index c1c17e8d..123408bf 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -164,7 +164,7 @@ def override_cfg_args(cfg, mrt_argv): if cfg.is_frozen(): cfg.defrost() - for i in range(len(mrt_argv), 2): + for i in range(0, len(mrt_argv), 2): attr, value = mrt_argv[i:i+2] try: value = eval(value) diff --git a/tests/mrt/model_zoo/prediction_SCTF.yaml b/tests/mrt/model_zoo/prediction_SCTF.yaml index 3623cc0a..721bce45 100644 --- 
a/tests/mrt/model_zoo/prediction_SCTF.yaml +++ b/tests/mrt/model_zoo/prediction_SCTF.yaml @@ -1,5 +1,6 @@ COMMON: - MODEL_NAME: prediction_SCTF + MODEL_NAME: prediction_SCTF.preprocess.reduce_dense + # MODEL_NAME: prediction_SCTF VERBOSITY: info RUN_EVALUATE: False PREPARE: diff --git a/tests/mrt/preprocess_prediction_SCTF.py b/tests/mrt/preprocess_prediction_SCTF.py new file mode 100644 index 00000000..ed917abc --- /dev/null +++ b/tests/mrt/preprocess_prediction_SCTF.py @@ -0,0 +1,114 @@ +import argparse +from os import path +import logging + +import mxnet as mx +from mxnet import ndarray as nd + +from mrt.sym_utils import ( + topo_visit_transformer, sym_iter, get_entry_id, get_attr) +from mrt import utils +from mrt.tfm_base import N +from mrt.conf import MRT_MODEL_ROOT +from mrt.V3.utils import load_fname +from mrt import tfm_pass as tpass + +utils.log_init() +parser = argparse.ArgumentParser() +parser.add_argument("--model-name", type=str, default="prediction_SCTF") +parser.add_argument( + "--model-dir", type=str, default=MRT_MODEL_ROOT) +parser.add_argument("--no-reduce-dense", action="store_true") + +@N.register_nm("reduce_dense") +def reduce_dense(sym, params, logger=logging.getLogger("reduce_dense")): + def callback(op, **kwargs): + name, op_name = op.attr('name'), op.attr('op_name') + if op_name != 'FullyConnected': + return op + + attr, childs = op.list_attr(), sym_iter(op.get_children()) + cns = [c.attr('name') for c in childs] + X, W = childs[:2] + infer_shapes = kwargs['infer_shapes'] + xshp = infer_shapes[cns[0]][get_entry_id(X)] + if len(xshp) == 2: + return op + + no_bias = get_attr(attr, 'no_bias') + flatten = get_attr(attr, "flatten") + num_hidden = get_attr(attr, "num_hidden") + + if flatten: + shape = (-1,) + xshp[1:] + rshp = mx.sym.reshape(X, shape=shape) + if no_bias: + op = mx.sym.FullyConnected( + rshp, W, no_bias=no_bias, flatten=flatten, + num_hidden=num_hidden) + else: + op = mx.sym.FullyConnected( + rshp, W, childs[2], no_bias=no_bias, 
flatten=flatten, + num_hidden=num_hidden) + else: + default_batch_axis = 0 + batch_axis = \ + kwargs.get("batch_axes", {}).get(name, default_batch_axis) + assert batch_axis < len(xshp), \ + "invalid batch_axis: {}, length of xshp: {}".format( + batch_axis, len(xshp)) + if batch_axis == len(xshp)-1: + product = int(nd.prod(nd.array(xshp)).asscalar()) + res_shp = int(product/xshp[batch_axis]) + shape = (res_shp, -1) + else: + shape = (-1, xshp[-1]) + rshp = mx.sym.reshape(X, shape=shape) + if no_bias: + fc = mx.sym.FullyConnected( + rshp, W, no_bias=no_bias, flatten=flatten, + num_hidden=num_hidden) + else: + fc = mx.sym.FullyConnected( + rshp, W, childs[2], no_bias=no_bias, flatten=flatten, + num_hidden=num_hidden) + if batch_axis == len(xshp)-1: + shape = xshp[:-1] + (num_hidden,) + else: + shape = \ + xshp[:batch_axis] + (-1,) + \ + xshp[batch_axis+1:-1] + (num_hidden,) + op = mx.sym.reshape(fc, shape=shape) + + logger.info( + "{}-d dense name: {} has been reduced.".format( + len(xshp), name)) + return op + + infer_shapes = tpass.infer_shape(sym, params, input_shape=(64,1,3)) + return topo_visit_transformer( + sym, params, callback, logger=logger, infer_shapes=infer_shapes) + +if __name__ == "__main__": + args = parser.parse_args() + + model_name = args.model_name + model_dir = args.model_dir + if model_dir.startswith("~"): + model_dir = path.expanduser(model_dir) + prefix = path.join(model_dir, model_name) + sym_file, prm_file = load_fname(prefix) + sym = mx.sym.load(sym_file) + params = nd.load(prm_file) + + suffixes = ["preprocess"] + if not args.no_reduce_dense: + suffixes.append("reduce_dense") + sym, params = reduce_dense(sym, params) + suffix = ".".join(suffixes) + + sym_json_str = sym.tojson() + nsym_file, nprm_file = load_fname(prefix, suffix=suffix) + with open(nsym_file, "w") as f: + f.write(sym_json_str) + nd.save(nprm_file, params) diff --git a/tests/mrt/test_prediction_SCTF.py b/tests/mrt/test_prediction_SCTF.py index 06dfc529..b7b56d30 100644 --- 
a/tests/mrt/test_prediction_SCTF.py +++ b/tests/mrt/test_prediction_SCTF.py @@ -31,9 +31,11 @@ def data_loader(): if __name__ == "__main__": assert len(sys.argv) >= 1 and len(sys.argv)%2 == 1, \ - "invalid length: {} of sys.argv: {}".format(len(sys.argv), sys.argv) + "invalid length: {} of sys.argv: {}".format( + len(sys.argv), sys.argv) yaml_file = path.join( - path.dirname(path.realpath(__file__)), "model_zoo", "prediction_SCTF.yaml") + path.dirname(path.realpath(__file__)), + "model_zoo", "prediction_SCTF.yaml") cfg = get_cfg_defaults() cfg = merge_cfg(yaml_file) cfg = override_cfg_args(cfg, sys.argv[1:]) From 92b4aa4bb0592fd99bfddec9a1dfc1b981b73e21 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 4 Jan 2022 17:38:42 +0800 Subject: [PATCH 090/120] upt --help prompt for main2.py --- main2.py | 38 +++++++++++++++++++++++++++++------- python/mrt/V3/calibrate.py | 11 +++++++++++ python/mrt/V3/evaluate.py | 8 ++++++++ python/mrt/V3/mrt_compile.py | 8 ++++++++ python/mrt/V3/prepare.py | 8 ++++++++ python/mrt/V3/quantize.py | 14 +++++++++++++ python/mrt/V3/utils.py | 15 ++++++++++++++ 7 files changed, 95 insertions(+), 7 deletions(-) diff --git a/main2.py b/main2.py index df1648ba..6e9ef3b2 100644 --- a/main2.py +++ b/main2.py @@ -3,12 +3,36 @@ from mrt.V3.utils import get_cfg_defaults, merge_cfg, override_cfg_args from mrt.V3.execute import run +from mrt.V3.utils import DOC as utils_doc +from mrt.V3.prepare import DOC as prepare_doc +from mrt.V3.calibrate import DOC as calibrate_doc +from mrt.V3.quantize import DOC as quantize_doc +from mrt.V3.evaluate import DOC as evaluate_doc +from mrt.V3.mrt_compile import DOC as compile_doc +import mrt.V3.calibrate as calirbat + +DOC = """ +Usage: python main2.py --help + python main2.py [YAML_FILE_PATH] [OPTIONS] +""" + +def complete_docs(): + return docs if __name__ == "__main__": - assert len(sys.argv) >= 2 and len(sys.argv)%2 == 0, \ - "invalid length: {} of sys.argv: {}".format(len(sys.argv), sys.argv) - yaml_file = 
sys.argv[1] - cfg = get_cfg_defaults() - cfg = merge_cfg(yaml_file) - cfg = override_cfg_args(cfg, sys.argv[2:]) - run(cfg) + if len(sys.argv) == 2: + assert sys.argv[1] == "--help", \ + "invalid optional argument: {}".format(sys.argv[2]) + docs = "\n".join([ + DOC, utils_doc, prepare_doc, calibrate_doc, + quantize_doc, evaluate_doc, compile_doc]) + print(docs) + else: + assert len(sys.argv) >= 2 and len(sys.argv)%2 == 0, \ + "invalid length: {} of sys.argv: {}".format( + len(sys.argv), sys.argv) + yaml_file = sys.argv[1] + cfg = get_cfg_defaults() + cfg = merge_cfg(yaml_file) + cfg = override_cfg_args(cfg, sys.argv[2:]) + run(cfg) diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index 7522077a..a80e47b3 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -8,6 +8,17 @@ get_model_prefix, get_logger, set_batch, load_fname, save_conf, load_conf, check_file_existance, get_ctx) +DOC = """ +CALIBRATE Stage Options: + --calibrate.batch Batch size for calibration. + --calibrate.num_calib Number of iterations for calibration. + --calibrate.lambd Hyperparameter for the threshold of model internal data. + --calibrate.dataset_name Name of the dataset chosen from "voc", "imagenet", "trec", "mnist", "coco", "quickdraw" and "cifar10". + --calibrate.dataset_dir Dataset root directory for specific dataset out of list above. + --calibrate.device_type Context type for calibration stage chosen from "cpu" or "gpu". + --calibrate.device_ids A comma list within square brackets specifying the context ids, eg.[0,1,2]. +""" + default_num_calib = 1 MRT_CFG.CALIBRATE = CN() diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index a3cc4c59..6986277f 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -13,6 +13,14 @@ get_model_prefix, get_logger, set_batch, load_fname, load_conf, check_file_existance, get_ctx, get_batch_axis) +DOC = """ +EVALUATE Stage Options: + --evaluate.batch Batch size for evaluation. 
+ --evaluate.device_type Context type for evaluation stage chosen from "cpu" or "gpu". + --evaluate.device_ids A comma list within square brackets specifying the context ids, eg.[0,1,2]. + --evaluate.iter_num Number of evaluating iteration steps. +""" + MRT_CFG.EVALUATE = CN() MRT_CFG.EVALUATE.BATCH = default_batch MRT_CFG.EVALUATE.DEVICE_TYPE = default_device_type diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index 7c29bd44..f29052bc 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -12,6 +12,14 @@ get_model_prefix, get_logger, set_batch, load_fname, load_conf, check_file_existance) +DOC = """ +COMPILE Stage Options: + --compile.batch Batch size for compilation. + --compile.dump_dir Directory for saving compilation results. + --compile.device_type Context type for compilation stage chosen from "cpu" or "gpu". + --compile.device_ids A comma list within square brackets specifying the context ids, eg.[0,1,2]. +""" + default_dump_dir = path.expanduser("~/mrt_dump") MRT_CFG.COMPILE = CN() diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index f64b16ba..9992a263 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -7,6 +7,14 @@ MRT_CFG, default_device_type, default_device_ids, get_model_prefix, get_logger, set_batch, load_fname, save_conf, get_ctx) +DOC = """ +PREPARE Stage Options: + --prepare.device_type Context type for preparation stage chosen from "cpu" or "gpu". + --prepare.device_ids A comma list within square brackets specifying the context ids, eg.[0,1,2]. + --prepare.input_shape Shape of the input data. + --prepare.split_keys Node names in the computation graph specifying the split points. 
+""" + default_input_shape = [-1, 3, 224, 224] MRT_CFG.PREPARE= CN() diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index 6381aa83..877b4e8d 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -8,6 +8,20 @@ get_logger, load_fname, save_conf, load_conf, check_file_existance, get_ctx) +DOC = """ +QUANTIZE Stage Options: + --quantize.restore_names Names of graph nodes restored from quantization. + --quantize.input_precision Input precision for quantization. + --quantize.output_precision Output precision for quantization. + --quantize.device_type Context type for quantization stage chosen from "cpu" or "gpu". + --quantize.device_ids A comma list within square brackets specifying the context ids, eg.[0,1,2]. + --quantize.softmax_lambd Hyperparameter for requant function. + --quantize.shift_bits Hyperparameter for operator requantization. + --quantize.thresholds Initial threshold for base symbol given model split. + --quantize.attribute_deps Adjust the top attributes with respect to the base oscales. + --quantize.oscale_maps Model merger output scales name map. +""" + MRT_CFG.QUANTIZE = CN() MRT_CFG.QUANTIZE.RESTORE_NAMES = [] MRT_CFG.QUANTIZE.INPUT_PRECISION = None diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 123408bf..3026b87c 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -9,6 +9,21 @@ from mrt.common import log from mrt.utils import extend_fname +DOC = """ +COMMON Stage Options: + --common.pass_name Stage to be executed, chosen from "all", "prepare", "calibrate", "quantize", "evaluate", "compile". + --common.model_dir Model root directory. + --common.model_name Name of the model file without file extension. + --common.verbosity Control the logger hiearchy, chosen from "debug", "info", "warning", "error", "critical". + --common.start_after Name of the stage to start the execution from, chosen from "initial", "prepare", "calibrate", "quantize". 
+ --common.device_type Default context type for all stages chosen from "cpu" or "gpu". + --common.device_ids A comma list within square brackets specifying the context ids, eg.[0,1,2]. + --common.input_shape Shape of the input data. + --common.batch Default batch size for all stages. + --common.run_evaluate Flag for determining whether to execute evaluation stage, "True" for execution, otherwise "False". + --common.run_compile Flag for determining whether to execute compilation stage, "True" for execution, otherwise "False". +""" + # TODO: jiazhen branch code design default_device_type = "cpu" From 32dcca3b9a4b8b5780f9755e8cc2febb53510535 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 7 Jan 2022 11:23:55 +0800 Subject: [PATCH 091/120] upt test_op_equiv --- tests/mrt/test_op_equiv.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/tests/mrt/test_op_equiv.py b/tests/mrt/test_op_equiv.py index 5dea2214..c3d653d6 100644 --- a/tests/mrt/test_op_equiv.py +++ b/tests/mrt/test_op_equiv.py @@ -1,8 +1,13 @@ import unittest +import logging from mxnet import ndarray as nd import numpy as np +from mrt.utils import log_init + +log_init() + class TestOpEquiv(unittest.TestCase): def assert_equal(self, a, b, places=10): @@ -12,7 +17,8 @@ def assert_equal(self, a, b, places=10): norm = np.linalg.norm(res) self.assertAlmostEqual(norm, 0.0, places=places) - def test_elemmul_to_broadcast_mul(self): + def test_elemwisemul_to_broadcast_mul( + self, logger=logging.getLogger("elemwisemul_to_broadcastmul")): """ ElemwiseMul --> BroadcastMul """ @@ -45,8 +51,10 @@ def get_data(*x, low=-1000.0, high=1000.0): out = nd.elemwise_mul(lhs=lhs, rhs=rhs) out1 = nd.broadcast_mul(lhs=lhs, rhs=rhs) self.assert_equal(out, out1) + logger.info("test succeeded, inp: {}".format(inp)) - def test_activation_to_sigmoid(self): + def test_activation_to_sigmoid( + self, logger=logging.getLogger("activation_to_sigmoid")): """ Activation(act_type=sigmoid) --> sigmoid """ @@ 
-78,8 +86,10 @@ def get_data(*x, low=-1000.0, high=1000.0): out = nd.Activation(data=data, act_type="sigmoid") out1 = nd.sigmoid(data=data) self.assert_equal(out, out1, places=4) + logger.info("test succeeded, inp: {}".format(inp)) - def test_dense_to_dense2d_flatten(self): + def test_dense_to_dense2d_flatten( + self, logger=logging.getLogger("test_dense_to_dense2d_flatten")): """ FullyConnected(flatten=True) --> @@ -156,8 +166,10 @@ def get_data( no_bias=no_bias, flatten=True, num_hidden=num_hidden) # validate self.assert_equal(out, out1) + logger.info("test succeeded, inp: {}".format(inp)) - def test_dense_to_dense2d(self): + def test_dense_to_dense2d( + self, logger=logging.getLogger("dense_to_dense2d")): """ FullyConnected(flatten=False) --> @@ -251,6 +263,7 @@ def get_data( out1 = nd.reshape(data=fc, shape=shape) # validate self.assert_equal(out, out1) + logger.info("test succeeded, inp: {}".format(inp)) if __name__ == "__main__": unittest.main() From 884060c4c8accc85633857317ac202a5faac00a2 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 7 Jan 2022 14:31:17 +0800 Subject: [PATCH 092/120] upt --- main2.py | 5 +---- python/mrt/V3/calibrate.py | 2 +- tests/mrt/model_zoo/yolov5n-train.yaml | 27 ++++++++++++++++++++++++++ 3 files changed, 29 insertions(+), 5 deletions(-) create mode 100644 tests/mrt/model_zoo/yolov5n-train.yaml diff --git a/main2.py b/main2.py index 6e9ef3b2..3c5f5a8e 100644 --- a/main2.py +++ b/main2.py @@ -9,7 +9,6 @@ from mrt.V3.quantize import DOC as quantize_doc from mrt.V3.evaluate import DOC as evaluate_doc from mrt.V3.mrt_compile import DOC as compile_doc -import mrt.V3.calibrate as calirbat DOC = """ Usage: python main2.py --help @@ -20,9 +19,7 @@ def complete_docs(): return docs if __name__ == "__main__": - if len(sys.argv) == 2: - assert sys.argv[1] == "--help", \ - "invalid optional argument: {}".format(sys.argv[2]) + if len(sys.argv) == 2 and sys.argv[1] in ["--help", "-h"]: docs = "\n".join([ DOC, utils_doc, prepare_doc, calibrate_doc, 
quantize_doc, evaluate_doc, compile_doc]) diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index a80e47b3..f99dd397 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -40,7 +40,7 @@ def calibrate(cm_cfg, pass_cfg, logger=None): device_ids = pass_cfg.DEVICE_IDS calibrate_num = pass_cfg.NUM_CALIB lambd = pass_cfg.LAMBD - batch=pass_cfg.BATCH + batch = pass_cfg.BATCH model_prefix = get_model_prefix(model_dir, model_name) if logger is None: diff --git a/tests/mrt/model_zoo/yolov5n-train.yaml b/tests/mrt/model_zoo/yolov5n-train.yaml new file mode 100644 index 00000000..277097f9 --- /dev/null +++ b/tests/mrt/model_zoo/yolov5n-train.yaml @@ -0,0 +1,27 @@ +COMMON: + MODEL_NAME: yolov5n-train + VERBOSITY: info + RUN_EVALUATE: False +PREPARE: + INPUT_SHAPE: [-1, 3, 640, 640] +CALIBRATE: + # in this model, the BATCH should be set as 32 + BATCH: 32 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: coco + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + # in this model, the BATCH should be set as 32 + BATCH: 32 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10 +COMPILE: + # in this model, the BATCH should be set as 32 + BATCH: 32 From f00437d7d79c78f7ad563b4162b221a8b25d8c9f Mon Sep 17 00:00:00 2001 From: Longtao Wang Date: Fri, 7 Jan 2022 15:42:02 +0800 Subject: [PATCH 093/120] [doc]: add math formalization doc --- docs/deep_dive/math_formalization.rst | 93 +++++++++++++++++---------- 1 file changed, 59 insertions(+), 34 deletions(-) diff --git a/docs/deep_dive/math_formalization.rst b/docs/deep_dive/math_formalization.rst index 5f0e320c..62687e4d 100644 --- a/docs/deep_dive/math_formalization.rst +++ b/docs/deep_dive/math_formalization.rst @@ -9,13 +9,16 @@ Operator Math Formalization Write this section document refer to the doc: :ref:`Math Format ` please. -This will give a full exhaustive explanation to CVM operators. 
-The FORMAL version source code has a strong correlation +This will give a full exhaustive explanation to CVM-Runtime operators. +The source code of FORMAL version has a strong correlation with this mathematical description, while other versions like CPU, CUDA, will only promise the consistent inference result, with arbitrary process logic. -All the operators' formalization obeys the unify format: +.. note:: + All numbers refered to by the symbol are integers by default. + +All the operators' formalization obeys the unified format: .. math:: @@ -28,7 +31,7 @@ All the operators' formalization obeys the unify format: which means that for given value range, the forluma in the first line is always true, subjecting to the constraints listed as the -condition variable. +condition statements. .. _op_list: @@ -46,6 +49,8 @@ Reduction is performed on the given axes, other dimensions remains the same and We abstract the common reduce logic as formalization here and specify the reduce function for each operators respectively. +*Math Formalization* + - Input: :math:`X`, a tensor of :math:`N` dimensions, namely :math:`(n_0, n_1, \cdots, n_{N-1})` - Output: :math:`Y` - Attribute: @@ -172,29 +177,46 @@ Broadcast Operators A broadcast operator performs the broadcast function to input data, and the process logic over all kinds of operators are the same. +*Math Formalization* + - Input: There are 2 inputs. + :math:`A`, a tensor of :math:`M` dimensions, namely :math:`(m_0, m_1, \cdots, m_{M-1})` + :math:`B`, a tensor of :math:`N` dimensions, namely :math:`(n_0, n_1, \cdots, n_{N-1})` -- Output: :math:`Y`, a tensor with :math:`max(M, N)` dimensions, the higher dimension of the two inputs, and it's shape is identical to the input with higher dimension. +The two input shapes of tensor must satisfy the assertions as below: + +.. math:: + P = \min(M, N) \\ + Q = \max(M, N) -The lower :math:`min(M, N)` dimensions of the two inputs must be the same. 
The input with lower dimension is expanded with 1 so that the two inputs can have the same dimension. +.. math:: + m_i = n_i \text{ or } m_i = 1 \text{ or } n_i = 1, + \forall i \in [0, P) -Then the elementwise opertaion is performed to the inputs with broadcast. +- Output: :math:`Y`, a tensor with :math:`Q` dimensions, the higher dimension of the two inputs, and it's shape is identical to the input with higher dimension. + We abstract the formalization here and introduce the details as below: .. math:: - Y[d_0, d_1, \cdots, d_{K-1}] = \begin{cases} - A[d_{N-M}, d_1, \cdots, d_{N-1}] \text{ OP } B[d_0, d_1, \cdots, d_{N-1}], & M \leq N \\ - A[d_0, d_1, \cdots, d_{M-1}] \text{ OP } B[d_{M-N}, d_1, \cdots, d_{M-1}], & M > N - \end{cases}, \\ - - \forall d_i \in [0, n_i) \text{ if } N \geq M \text{ or } d_i \in [0, m_i) \text{ otherwise} \\ - - \text{where } i \in [0, max(M, N))\\ + Y[d_0, d_1, \cdots, d_{K-1}] = + A[a_0, a_1, \cdots, a_{M-1}] \text{ OP } B[b_0, a_1, \cdots, a_{N-1}], \\ + + \forall i \in [0, Q) \wedge d_i \in [0, \max(em_i, en_i)), \\ + + \text{where } + a_j = d_{Q-M+j} \text{ if } d_{Q-M+j} < m_j \text{ else } 0, \forall j \in [0, M) \text{ and} \\ + b_j = d_{Q-N+j} \text{ if } d_{Q-N+j} < n_j \text{ else } 0, \forall j \in [0, N) \text{ and} \\ + em_i = \begin{cases} + 1, & i < Q - M \\ + m_{Q-M+i}, & \text{otherwise} + \end{cases}, \forall i \in [0, Q) \text{ and} \\ + en_i = \begin{cases} + 1, & i < Q - N \\ + n_{Q-N+i}, & \text{otherwise} + \end{cases}, \forall i \in [0, Q) @@ -221,7 +243,6 @@ set :math:`\text{OP}` to :math:`\text{add}`. broadcast_sub ~~~~~~~~~~~~~ set :math:`\text{OP}` to :math:`\text{sub}`. -Note that there's no need to make sure that the dimension of the minuend :math:`A` is higher than subtractor :math:`B` broadcast_mul ~~~~~~~~~~~~~ @@ -272,11 +293,15 @@ We only supported 2-D convolution operator. Also alias *Group-wise Convolution*. 
p \in \left[0, \text{Y_HMAX} \right) \wedge q \in \left[0, \text{Y_WMAX} \right), - \text{where } \text{Y_HMAX} = \left\lfloor{H+2 \cdot \text{PH}-\text{DH} \cdot (\text{KH}-1)-1\over\text{SH}}\right\rfloor+1\wedge\\ - \text{Y_WMAX} = \left\lfloor{W+2 \cdot \text{PW}-\text{DW} \cdot (\text{KW}-1)-1 \over \text{SW}}\right\rfloor+1 \wedge\\ - OPG = OC / \text{groups, } OPG \in \mathbb N^+ \text{ since } OC \text{ mod } \text{groups} = 0\\ + \text{where } \text{Y_HMAX} = \left\lfloor{ + H+2 \cdot \text{PH}-\text{DH} \cdot (\text{KH}-1)-1 \over \text{SH} + }\right\rfloor + 1 \text{ and} \\ + \text{Y_WMAX} = \left\lfloor{ + W+2 \cdot \text{PW}-\text{DW} \cdot (\text{KW}-1)-1 \over \text{SW} + }\right\rfloor + 1 \text{ and} \\ + OPG = OC / \text{groups, } OPG \in \mathbb N^+ \text{ since } OC \text{ mod } \text{groups} = 0\\ -where :math:`\text{kernel}` function does the 2D image convolution calculation, and the formulation is +where :math:`\text{kernel}` function does the 2D image convolution calculation, and the formulation is: .. math:: @@ -322,8 +347,8 @@ Relu performs elementwise rectified linear unit function. - Output: :math:`Y`, the same shape as :math:`X` .. math:: - Y[d_0, d_1, \cdots, d_{n-1}] = max(0, X[d_0, d_1, \cdots, d_{n-1}]) \\ - \forall i \in [0, N), d_i \in [0, n_i) + Y[d_0, d_1, \cdots, d_{n-1}] = max(0, X[d_0, d_1, \cdots, d_{n-1}]), \\ + \forall i \in [0, N) \wedge d_i \in [0, n_i) max_pool2d ~~~~~~~~~~ @@ -361,7 +386,7 @@ Max_pool2d performs max pooling over every plane for each batch and channel. \end{cases} \text{ and} \\ \text{pad}(n, i, p, q) = \begin{cases} X[n, i, p, q], & \text{ if } p \in [0, H) \wedge q \in [0, W) \\ - INT32_MIN, & \text{otherwise} + INT32\_MIN, & \text{otherwise} \end{cases} @@ -402,9 +427,9 @@ This operator calculates absolute value of input data. 
-x, & x < 0 \end{cases},\\ - \forall i \in [0, N), d_i \in [0, n_i),\\ + \forall i \in [0, N) \wedge d_i \in [0, n_i),\\ - \text{, where }x \text{ denotes } X[d_0, d_1, \cdots, d_{N-1}] + \text{where } x \text{ denotes } X[d_0, d_1, \cdots, d_{N-1}] cvm_precision ~~~~~~~~~~~~~ @@ -423,9 +448,9 @@ The precision operator gives how many bits the absolute value of a number takes. 1, & x = 0 \end{cases},\\ - \forall i \in [0, N), d_i \in [0, n_i),\\ + \forall i \in [0, N) \wedge d_i \in [0, n_i),\\ - \text{ where } x \text{ denotes } X[d_0, d_1, \cdots, d_{N-1}] + \text{where } x \text{ denotes } X[d_0, d_1, \cdots, d_{N-1}] elemwise_add @@ -492,9 +517,9 @@ This operator performs clip, cutting the data into a range, to the input tensor. \text{a_min}, & x \leqslant \text{a_min} \end{cases},\\ - \forall i \in [0, N) \wedge d_i \in [0, n_i), + \forall i \in [0, N) \wedge d_i \in [0, n_i), \\ - \text{ where } x \text{ denotes } X[d_0, d_1, \cdots, d_{N-1}] + \text{where } x \text{ denotes } X[d_0, d_1, \cdots, d_{N-1}] cvm_cilp ~~~~~~~~ @@ -764,6 +789,8 @@ slice_like This operator slices the input :math:`X` to a shape that looks like the other given input ``shape_like``. +TODO: need more consideration. + *Math Formalization* - Input: there are 2 inputs @@ -786,13 +813,11 @@ This operator slices the input :math:`X` to a shape that looks like the other gi .. math:: Y[d_0, d_1, \cdots, d_{N-1}] = X[d_0, d_1, \cdots, d_{N-1}], \\ - \forall d_j \in \begin{cases} + \forall j \in [0, N) \wedge d_j \in \begin{cases} [0, m_j), & j \in \text{sliced_axes} \\ [0, n_j), & j \notin \text{sliced_axes} \end{cases},\\ - \text{where } j \in [0, N) - take ~~~~ @@ -1051,7 +1076,7 @@ This operator implements the nms algorithm, finding valid bounding boxes. \text{where } T = \text{max}\{ \text{min}(N, \text{valid_count}[b]), 0\} \text{ and} \\ I: \{ i \mid i \in [0, T) \} \to \{ i \mid i \in [0, T) \}, \\ - \text {s.t. } X[b, I(i), 1] > X[b, I(j), 1] \vee \\ + \text {s.t. 
} X[b, I(i), 1] > X[b, I(j), 1] \text{ or } \\ (X[b, I(i), 1] = X[b, I(j), 1] \wedge I(i) < I(j)), \forall 0 \leqslant i < j < T @@ -1074,7 +1099,7 @@ This operator implements the nms algorithm, finding valid bounding boxes. \text{OLR}(R[b, p, :], R[b, q, :]), & \begin{array}{} \text{force_suppress is true}\\ - \vee R[b, p, 0] = R[b, q, 0] + \text{ or } R[b, p, 0] = R[b, q, 0] \end{array} \\[1ex] 0, & \text{otherwise} \end{cases} \text{ and} \\ From ca3e7df9e4fa6346ace1f7942138870757fcee38 Mon Sep 17 00:00:00 2001 From: Longtao Wang Date: Fri, 7 Jan 2022 16:14:03 +0800 Subject: [PATCH 094/120] [fix]: remove main.py and add pip requirement --- install/requirements.txt | 1 + main.py | 193 ++++++--------------------------------- main2.py | 35 ------- 3 files changed, 31 insertions(+), 198 deletions(-) delete mode 100644 main2.py diff --git a/install/requirements.txt b/install/requirements.txt index c7379abd..7b5fef2a 100644 --- a/install/requirements.txt +++ b/install/requirements.txt @@ -1,3 +1,4 @@ numpy cython decorator +yacs diff --git a/main.py b/main.py index 19b3bfea..43f4c4d1 100644 --- a/main.py +++ b/main.py @@ -1,170 +1,37 @@ -import sys from os import path -import argparse -from typing import Tuple, List, Union -import logging -import json - -import mxnet as mx -from mxnet import gluon, ndarray as nd -import numpy as np - -from mrt.conf import MRT_MODEL_ROOT, MRT_DATASET_ROOT -from mrt.common import cmd, log, thread -from mrt.transformer import Model, MRT, reduce_graph -from mrt.sym_utils import topo_sort -from mrt import utils -from mrt.gluon_zoo import save_model -from mrt import dataset as ds -from mrt import sym_utils as sutils -from mrt import sim_quant_helper as sim -import mrt.V3.mrt_entry as mentry - -# set up dependencies -__ROOT__ = path.dirname(path.realpath(__file__)) -sys.path.insert(0, path.join(__ROOT__, "python")) - -LOG_MSG = ",".join(["{}:{}".format(l, n) \ - for l, n in zip(log.LOG_LEVELS, log.LOG_NAMES)]) - -# @cmd.option("-v", 
"--verbosity", metavar="LEVEL", - # choices=log.LOG_NAMES, default=log.level2name(log.DEBUG), - # help="log verbosity to pring information, " + \ - # "available options: {}".format(log.LOG_NAMES) + \ - # " by default {}".format(log.level2name(log.DEBUG))) -# @cmd.global_options() -# def global_func(args): - # log.Init(log.name2level(args.verbosity)) - -@cmd.option("model_name", type=str) -@cmd.option("--model-dir", type=str, default=MRT_MODEL_ROOT) -@cmd.module("modelprefix") -def get_model_prefix(args): - pass - -@cmd.option("--verbosity", type=str, default="debug", - choices=["none", "debug", "info", "warning", "error", "critical"]) -@cmd.module("logger") -def get_logger(args): - pass - -@cmd.option("--device-type-prepare", type=str, choices=["cpu", "gpu"]) -@cmd.option("--device-ids-prepare", nargs="+", type=int) -@cmd.option("--input-shape", nargs="+", type=int, default=[-1, 3, 224, 224]) -@cmd.option("--split-keys", nargs="+", type=str, default="") -@cmd.module("prepare", as_main=True, refs=["modelprefix", "logger"], - description=""" -MRT Python Tool: preparation stage -""") -def cmd_prepare(args): - mentry.mrt_prepare( - args.model_dir, args.model_name, args.verbosity, - args.device_type_prepare, args.device_ids_prepare, - args.input_shape, args.split_keys) - -@cmd.option("--batch-calibrate", type=int, default=mentry.default_batch) -@cmd.option("--calibrate-num", type=int, default=1) -@cmd.option("--lambd", type=int) -@cmd.option("--dataset-name", type=str, default="imagenet", - choices=list(ds.DS_REG.keys())) -@cmd.option("--dataset-dir", type=str, default=MRT_DATASET_ROOT) -@cmd.option("--device-type-calibrate", type=str, choices=["cpu", "gpu"]) -@cmd.option("--device-ids-calibrate", nargs="+", type=int) -@cmd.module("calibrate", as_main=True, refs=["modelprefix", "logger"], - description=""" -MRT Python Tool: calibration stage -""") -def cmd_calibrate(args): - mentry.mrt_calibrate( - args.model_dir, args.model_name, args.verbosity, args.dataset_name, - 
args.dataset_dir, args.device_type_calibrate, args.device_ids_calibrate, - args.calibrate_num, args.lambd, batch=args.batch_calibrate) +import sys -@cmd.option("--restore-names", nargs="+", type=str, default=[]) -@cmd.option("--input-precision", type=int) -@cmd.option("--output-precision", type=int) -@cmd.option("--device-type-quantize", type=str, choices=["cpu", "gpu"]) -@cmd.option("--device-ids-quantize", nargs="+", type=int) -@cmd.option("--softmax-lambd", type=float) -@cmd.option("--shift-bits", type=int) -@cmd.option("--thresholds", type=str) -@cmd.option("--attribute-deps", type=str) -@cmd.option("--oscale-maps", type=str) -@cmd.module("quantize", as_main=True, refs=["modelprefix", "logger"], - description=""" -MRT Python Tool: quantization stage -""") -def cmd_quantize(args): - mentry.mrt_quantize( - args.model_dir, args.model_name, args.verbosity, args.restore_names, - args.input_precision, args.output_precision, args.device_type_quantize, - args.device_ids_quantize, args.softmax_lambd, args.shift_bits, - args.thresholds, args.attribute_deps, args.oscale_maps) +sys.path.insert(0, "./python") -@cmd.option("--batch-evaluate", type=int, default=mentry.default_batch) -@cmd.option("--device-type-evaluate", type=str, choices=["cpu", "gpu"]) -@cmd.option("--device-ids-evaluate", nargs="+", type=int) -@cmd.option("--iter-num", type=int, default=0) -@cmd.module("evaluate", as_main=True, refs=["modelprefix", "logger"], - description=""" -MRT Python Tool: evaluation stage -""") -def cmd_evaluate(args): - mentry.mrt_evaluate( - args.model_dir, args.model_name, args.verbosity, - args.device_type_evaluate, args.device_ids_evaluate, args.iter_num, - batch=args.batch_evaluate) +from mrt.V3.utils import get_cfg_defaults, merge_cfg, override_cfg_args +from mrt.V3.execute import run +from mrt.V3.utils import DOC as utils_doc +from mrt.V3.prepare import DOC as prepare_doc +from mrt.V3.calibrate import DOC as calibrate_doc +from mrt.V3.quantize import DOC as quantize_doc +from 
mrt.V3.evaluate import DOC as evaluate_doc +from mrt.V3.mrt_compile import DOC as compile_doc -@cmd.option("--batch-compile", type=int, default=mentry.default_batch) -@cmd.option("--dump-dir", type=str, default="/data1/tmp") -@cmd.option("--device-type-compile", type=str, default="cpu", - choices=["cpu", "gpu"]) -@cmd.option("--device-ids-compile", nargs="+", type=int, default=[0]) -@cmd.module("compile", as_main=True, refs=["modelprefix", "logger"], - description=""" -MRT Python Tool: compilation stage -""") -def cmd_compile(args): - mentry.mrt_compile( - args.model_dir, args.model_name, args.verbosity, args.dump_dir, - device_type=args.device_type_compile, - device_ids=args.device_ids_compile, batch=args.batch_compile) +DOC = """ +Usage: python main2.py --help + python main2.py [YAML_FILE_PATH] [OPTIONS] +""" -@cmd.option("--start-after", type=str, - choices=["prepare", "calibrate", "quantize"]) -@cmd.option("--device-type", type=str, default=mentry.default_device_type, - choices=["cpu", "gpu"]) -@cmd.option("--device-ids", nargs="+", type=int, - default=mentry.default_device_ids) -@cmd.option("--batch", type=int, default=mentry.default_batch) -@cmd.option("--run-evaluate", action="store_true") -@cmd.option("--run-compile", action="store_true") -@cmd.module("cmd", as_main=True, - refs=["prepare", "calibrate", "quantize", "evaluate", "compile"], - description=""" -MRT CMD Tool -""") -def cmd_main(args): - # setting up attributes for all passes - for prefix in ["batch", "device_type", "device_ids"]: - for attr in dir(args): - if attr.startswith(prefix+"_") and getattr(args, attr) is None: - setattr(args, attr, getattr(args, prefix)) - start_pos = 0 - start_pos_map = {'prepare': 1, 'calibrate': 2, 'quantize': 3} - if args.start_after in start_pos_map: - start_pos = start_pos_map[args.start_after] - if start_pos < 1: - cmd_prepare(args) - if start_pos < 2: - cmd_calibrate(args) - if start_pos < 3: - cmd_quantize(args) - if args.run_evaluate: - cmd_evaluate(args) - if 
args.run_compile: - cmd_compile(args) +def complete_docs(): + return docs if __name__ == "__main__": - logger = logging.getLogger("main") - cmd.Run() + if len(sys.argv) == 2 and sys.argv[1] in ["--help", "-h"]: + docs = "\n".join([ + DOC, utils_doc, prepare_doc, calibrate_doc, + quantize_doc, evaluate_doc, compile_doc]) + print(docs) + else: + assert len(sys.argv) >= 2 and len(sys.argv)%2 == 0, \ + "invalid length: {} of sys.argv: {}".format( + len(sys.argv), sys.argv) + yaml_file = sys.argv[1] + cfg = get_cfg_defaults() + cfg = merge_cfg(yaml_file) + cfg = override_cfg_args(cfg, sys.argv[2:]) + run(cfg) diff --git a/main2.py b/main2.py deleted file mode 100644 index 3c5f5a8e..00000000 --- a/main2.py +++ /dev/null @@ -1,35 +0,0 @@ -from os import path -import sys - -from mrt.V3.utils import get_cfg_defaults, merge_cfg, override_cfg_args -from mrt.V3.execute import run -from mrt.V3.utils import DOC as utils_doc -from mrt.V3.prepare import DOC as prepare_doc -from mrt.V3.calibrate import DOC as calibrate_doc -from mrt.V3.quantize import DOC as quantize_doc -from mrt.V3.evaluate import DOC as evaluate_doc -from mrt.V3.mrt_compile import DOC as compile_doc - -DOC = """ -Usage: python main2.py --help - python main2.py [YAML_FILE_PATH] [OPTIONS] -""" - -def complete_docs(): - return docs - -if __name__ == "__main__": - if len(sys.argv) == 2 and sys.argv[1] in ["--help", "-h"]: - docs = "\n".join([ - DOC, utils_doc, prepare_doc, calibrate_doc, - quantize_doc, evaluate_doc, compile_doc]) - print(docs) - else: - assert len(sys.argv) >= 2 and len(sys.argv)%2 == 0, \ - "invalid length: {} of sys.argv: {}".format( - len(sys.argv), sys.argv) - yaml_file = sys.argv[1] - cfg = get_cfg_defaults() - cfg = merge_cfg(yaml_file) - cfg = override_cfg_args(cfg, sys.argv[2:]) - run(cfg) From 385b53a0b407158fe0658f730760175d7e30851c Mon Sep 17 00:00:00 2001 From: Longtao Wang Date: Fri, 7 Jan 2022 16:15:49 +0800 Subject: [PATCH 095/120] [stash]: move install to conf --- {install => 
conf}/deps.py | 0 {install => conf}/env.sh | 0 {install => conf}/install.sh | 0 {install => conf}/requirements.txt | 0 main.py | 6 +++--- 5 files changed, 3 insertions(+), 3 deletions(-) rename {install => conf}/deps.py (100%) rename {install => conf}/env.sh (100%) rename {install => conf}/install.sh (100%) rename {install => conf}/requirements.txt (100%) diff --git a/install/deps.py b/conf/deps.py similarity index 100% rename from install/deps.py rename to conf/deps.py diff --git a/install/env.sh b/conf/env.sh similarity index 100% rename from install/env.sh rename to conf/env.sh diff --git a/install/install.sh b/conf/install.sh similarity index 100% rename from install/install.sh rename to conf/install.sh diff --git a/install/requirements.txt b/conf/requirements.txt similarity index 100% rename from install/requirements.txt rename to conf/requirements.txt diff --git a/main.py b/main.py index 43f4c4d1..a87922ca 100644 --- a/main.py +++ b/main.py @@ -13,9 +13,9 @@ from mrt.V3.mrt_compile import DOC as compile_doc DOC = """ -Usage: python main2.py --help - python main2.py [YAML_FILE_PATH] [OPTIONS] -""" +Usage: python {0} --help + python {0} [YAML_FILE_PATH] [OPTIONS] +""".format(sys.argv[0]) def complete_docs(): return docs From 8151b816dfba4f528ba367d4954d374ecdf927df Mon Sep 17 00:00:00 2001 From: Longtao Wang Date: Fri, 7 Jan 2022 16:22:39 +0800 Subject: [PATCH 096/120] [fix]: move unuseful code into deprecated --- python/mrt/{ => deprecated}/_mrt.py | 0 python/mrt/{ => deprecated}/main.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename python/mrt/{ => deprecated}/_mrt.py (100%) rename python/mrt/{ => deprecated}/main.py (100%) diff --git a/python/mrt/_mrt.py b/python/mrt/deprecated/_mrt.py similarity index 100% rename from python/mrt/_mrt.py rename to python/mrt/deprecated/_mrt.py diff --git a/python/mrt/main.py b/python/mrt/deprecated/main.py similarity index 100% rename from python/mrt/main.py rename to python/mrt/deprecated/main.py From 
93ae341ce389e1c434736276abfb216caa3ce42b Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 10 Jan 2022 09:51:16 +0800 Subject: [PATCH 097/120] [test] V3 mrt models --- docs/mrt/V3.rst | 105 ++++++++++++++++++ docs/mrt/index.rst | 8 ++ python/mrt/V3/calibrate.py | 15 ++- python/mrt/V3/evaluate.py | 15 ++- python/mrt/V3/mrt_compile.py | 11 +- python/mrt/V3/prepare.py | 8 +- python/mrt/V3/quantize.py | 13 ++- .../V3 => tests/mrt}/model_zoo/alexnet.yaml | 6 +- tests/mrt/model_zoo/cifar_resnet20_v1.yaml | 19 ++++ tests/mrt/model_zoo/densenet161.yaml | 17 +++ tests/mrt/model_zoo/mnist.yaml | 19 ++++ tests/mrt/model_zoo/mobilenet1_0.yaml | 18 +++ tests/mrt/model_zoo/mobilenetv2_1.0.yaml | 18 +++ tests/mrt/model_zoo/qd10_resnetv1_20.yaml | 19 ++++ tests/mrt/model_zoo/quickdraw.yaml | 19 ++++ tests/mrt/model_zoo/resnet18_v1.yaml | 18 +++ tests/mrt/model_zoo/resnet18_v1b_0.89.yaml | 18 +++ tests/mrt/model_zoo/resnet50_v1.yaml | 18 +++ tests/mrt/model_zoo/resnet50_v2.yaml | 18 +++ tests/mrt/model_zoo/shufflenet_v1.yaml | 18 +++ tests/mrt/model_zoo/squeezenet1.0.yaml | 18 +++ tests/mrt/model_zoo/ssd.yaml | 49 ++++++++ .../model_zoo/ssd_512_mobilenet1.0_voc.yaml | 48 ++++++++ tests/mrt/model_zoo/tf_inception_v3.yaml | 18 +++ .../tf_mobilenet_v1_0.25_224_lite.yaml | 17 +++ tests/mrt/model_zoo/trec.yaml | 20 ++++ tests/mrt/model_zoo/vgg19.yaml | 18 +++ tests/mrt/model_zoo/yolo3_darknet53_voc.yaml | 50 +++++++++ .../mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml | 47 ++++++++ 29 files changed, 661 insertions(+), 24 deletions(-) create mode 100644 docs/mrt/V3.rst rename {python/mrt/V3 => tests/mrt}/model_zoo/alexnet.yaml (85%) create mode 100644 tests/mrt/model_zoo/cifar_resnet20_v1.yaml create mode 100644 tests/mrt/model_zoo/densenet161.yaml create mode 100644 tests/mrt/model_zoo/mnist.yaml create mode 100644 tests/mrt/model_zoo/mobilenet1_0.yaml create mode 100644 tests/mrt/model_zoo/mobilenetv2_1.0.yaml create mode 100644 tests/mrt/model_zoo/qd10_resnetv1_20.yaml create mode 100644 
tests/mrt/model_zoo/quickdraw.yaml create mode 100644 tests/mrt/model_zoo/resnet18_v1.yaml create mode 100644 tests/mrt/model_zoo/resnet18_v1b_0.89.yaml create mode 100644 tests/mrt/model_zoo/resnet50_v1.yaml create mode 100644 tests/mrt/model_zoo/resnet50_v2.yaml create mode 100644 tests/mrt/model_zoo/shufflenet_v1.yaml create mode 100644 tests/mrt/model_zoo/squeezenet1.0.yaml create mode 100644 tests/mrt/model_zoo/ssd.yaml create mode 100644 tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml create mode 100644 tests/mrt/model_zoo/tf_inception_v3.yaml create mode 100644 tests/mrt/model_zoo/tf_mobilenet_v1_0.25_224_lite.yaml create mode 100644 tests/mrt/model_zoo/trec.yaml create mode 100644 tests/mrt/model_zoo/vgg19.yaml create mode 100644 tests/mrt/model_zoo/yolo3_darknet53_voc.yaml create mode 100644 tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml diff --git a/docs/mrt/V3.rst b/docs/mrt/V3.rst new file mode 100644 index 00000000..9343433b --- /dev/null +++ b/docs/mrt/V3.rst @@ -0,0 +1,105 @@ +V3 Documentation +================ + +[TOC] + +Model Test +~~~~~~~~~~ + +The comparison between the original float model and quantized model +is listed as below. 
+ + +Top 1 Accuracy: + ++-------------------------------------------+----------------------+------------------------+ +| Model Name | Original Float Model | MRT V3 Quantized Model | ++===========================================+======================+========================+ +| resnet_v1 | 77.39% | 76.46% | ++-------------------------------------------+----------------------+------------------------+ +| resnet_v2 | 77.15% | 74.16% | ++-------------------------------------------+----------------------+------------------------+ +| resnet18_v1 | 70.96% | 70.11% | ++-------------------------------------------+----------------------+------------------------+ +| resnet18_v1b_0.89 | 67.21% | 63.79% | ++-------------------------------------------+----------------------+------------------------+ +| quickdraw | 81.66% | 81.57% | ++-------------------------------------------+----------------------+------------------------+ +| qd10_resnetv1_20 | 85.72% | 85.73% | ++-------------------------------------------+----------------------+------------------------+ +| densenet161 | 77.62% | 77.25% | ++-------------------------------------------+----------------------+------------------------+ +| alenxet | 55.91% | 51.54% | ++-------------------------------------------+----------------------+------------------------+ +| cifar_resnet20_v1 | 92.88% | 92.82% | ++-------------------------------------------+----------------------+------------------------+ +| mobilenet1_0 | 70.77% | 66.11% | ++-------------------------------------------+----------------------+------------------------+ +| mobilenetv2_1.0 | 71.51% | 69.39% | ++-------------------------------------------+----------------------+------------------------+ +| shufflenet_v1 | 63.48% | 60.45% | ++-------------------------------------------+----------------------+------------------------+ +| squeezenet1.0 | 57.20% | 54.92% | ++-------------------------------------------+----------------------+------------------------+ +| 
tf_inception_v3 | 45.16% | 49.62% | ++-------------------------------------------+----------------------+------------------------+ +| vgg19 | 74.13% | 73.29% | ++-------------------------------------------+----------------------+------------------------+ +| mnist | 99.00% | 98.96% | ++-------------------------------------------+----------------------+------------------------+ + + +Top 5 Accuracy: + ++-------------------------------------------+----------------------+------------------------+ +| Model Name | Original Float Model | MRT V3 Quantized Model | ++===========================================+======================+========================+ +| resnet_v1 | 93.59% | 93.29% | ++-------------------------------------------+----------------------+------------------------+ +| resnet_v2 | 93.44% | 91.74% | ++-------------------------------------------+----------------------+------------------------+ +| resnet18_v1 | 89.93% | 89.62% | ++-------------------------------------------+----------------------+------------------------+ +| resnet18_v1b_0.89 | 87.45% | 85.62% | ++-------------------------------------------+----------------------+------------------------+ +| quickdraw | 98.22% | 98.20% | ++-------------------------------------------+----------------------+------------------------+ +| qd10_resnetv1_20 | 98.71% | 98.70% | ++-------------------------------------------+----------------------+------------------------+ +| densenet161 | 93.82% | 93.60% | ++-------------------------------------------+----------------------+------------------------+ +| alenxet | 78.75% | 77.40% | ++-------------------------------------------+----------------------+------------------------+ +| cifar_resnet20_v1 | 99.78% | 99.75% | ++-------------------------------------------+----------------------+------------------------+ +| mobilenet1_0 | 89.97% | 87.35% | ++-------------------------------------------+----------------------+------------------------+ +| mobilenetv2_1.0 | 90.10% | 
89.30% | ++-------------------------------------------+----------------------+------------------------+ +| shufflenet_v1 | 85.12% | 82.95% | ++-------------------------------------------+----------------------+------------------------+ +| squeezenet1.0 | 80.04% | 78.64% | ++-------------------------------------------+----------------------+------------------------+ +| tf_inception_v3 | 67.93% | 74.71% | ++-------------------------------------------+----------------------+------------------------+ +| vgg19 | 91.77% | 91.52% | ++-------------------------------------------+----------------------+------------------------+ +| mnist | 100.00% | 100.00% | ++-------------------------------------------+----------------------+------------------------+ + + +Accuracy: + ++-------------------------------------------+----------------------+------------------------+ +| Model Name | Original Float Model | MRT V3 Quantized Model | ++===========================================+======================+========================+ +| trec | 98.19% | 97.99% | ++-------------------------------------------+----------------------+------------------------+ +| yolo3_darknet53_voc | 81.51% | 81.51% | ++-------------------------------------------+----------------------+------------------------+ +| yolo3_mobilenet1.0_voc | 76.03% | 71.56% | ++-------------------------------------------+----------------------+------------------------+ +| ssd_512_resnet50_v1_voc | 80.30% | 80.05% | ++-------------------------------------------+----------------------+------------------------+ +| ssd_512_mobilenet1.0_voc | 75.58% | 71.32% | ++-------------------------------------------+----------------------+------------------------+ diff --git a/docs/mrt/index.rst b/docs/mrt/index.rst index 6f5ce2f4..0b307c82 100644 --- a/docs/mrt/index.rst +++ b/docs/mrt/index.rst @@ -54,6 +54,14 @@ V2 Documentation V2.rst +V3 Documentation +================ + +.. 
toctree:: + :maxdepth: 3 + + V3.rst + API Documentation ================= diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index f99dd397..2f0c3555 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -4,8 +4,7 @@ from mrt import dataset as ds from mrt import conf from mrt.V3.utils import ( - MRT_CFG, default_device_type, default_device_ids, default_batch, - get_model_prefix, get_logger, set_batch, load_fname, save_conf, + MRT_CFG, get_model_prefix, get_logger, set_batch, load_fname, save_conf, load_conf, check_file_existance, get_ctx) DOC = """ @@ -22,13 +21,13 @@ default_num_calib = 1 MRT_CFG.CALIBRATE = CN() -MRT_CFG.CALIBRATE.BATCH = default_batch +MRT_CFG.CALIBRATE.BATCH = None MRT_CFG.CALIBRATE.NUM_CALIB = default_num_calib MRT_CFG.CALIBRATE.LAMBD = None MRT_CFG.CALIBRATE.DATASET_NAME = "imagenet" MRT_CFG.CALIBRATE.DATASET_DIR = conf.MRT_DATASET_ROOT -MRT_CFG.CALIBRATE.DEVICE_TYPE = default_device_type -MRT_CFG.CALIBRATE.DEVICE_IDS = default_device_ids +MRT_CFG.CALIBRATE.DEVICE_TYPE = None +MRT_CFG.CALIBRATE.DEVICE_IDS = None def calibrate(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR @@ -41,6 +40,12 @@ def calibrate(cm_cfg, pass_cfg, logger=None): calibrate_num = pass_cfg.NUM_CALIB lambd = pass_cfg.LAMBD batch = pass_cfg.BATCH + if batch is None: + batch = cm_cfg.BATCH + if device_type is None: + device_type = cm_cfg.DEVICE_TYPE + if device_ids is None: + device_ids = cm_cfg.DEVICE_IDS model_prefix = get_model_prefix(model_dir, model_name) if logger is None: diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index 6986277f..9b678377 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -9,8 +9,7 @@ from mrt import utils from mrt import sim_quant_helper as sim from mrt.V3.utils import ( - MRT_CFG, default_device_type, default_device_ids, default_batch, - get_model_prefix, get_logger, set_batch, load_fname, load_conf, + MRT_CFG, get_model_prefix, get_logger, 
set_batch, load_fname, load_conf, check_file_existance, get_ctx, get_batch_axis) DOC = """ @@ -22,9 +21,9 @@ """ MRT_CFG.EVALUATE = CN() -MRT_CFG.EVALUATE.BATCH = default_batch -MRT_CFG.EVALUATE.DEVICE_TYPE = default_device_type -MRT_CFG.EVALUATE.DEVICE_IDS = default_device_ids +MRT_CFG.EVALUATE.BATCH = None +MRT_CFG.EVALUATE.DEVICE_TYPE = None +MRT_CFG.EVALUATE.DEVICE_IDS = None MRT_CFG.EVALUATE.ITER_NUM = 10 def evaluate(cm_cfg, pass_cfg, logger=None): @@ -35,6 +34,12 @@ def evaluate(cm_cfg, pass_cfg, logger=None): device_ids = pass_cfg.DEVICE_IDS iter_num = pass_cfg.ITER_NUM batch = pass_cfg.BATCH + if batch is None: + batch = cm_cfg.BATCH + if device_type is None: + device_type = cm_cfg.DEVICE_TYPE + if device_ids is None: + device_ids = cm_cfg.DEVICE_IDS model_prefix = get_model_prefix(model_dir, model_name) if logger is None: diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index f29052bc..2ddb1fe4 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -8,8 +8,7 @@ from mrt import dataset as ds from mrt import sim_quant_helper as sim from mrt.V3.utils import ( - MRT_CFG, default_device_type, default_device_ids, default_batch, - get_model_prefix, get_logger, set_batch, load_fname, load_conf, + MRT_CFG, get_model_prefix, get_logger, set_batch, load_fname, load_conf, check_file_existance) DOC = """ @@ -25,8 +24,8 @@ MRT_CFG.COMPILE = CN() MRT_CFG.COMPILE.BATCH = 1 MRT_CFG.COMPILE.DUMP_DIR = default_dump_dir -MRT_CFG.COMPILE.DEVICE_TYPE = default_device_type -MRT_CFG.COMPILE.DEVICE_IDS = default_device_ids +MRT_CFG.COMPILE.DEVICE_TYPE = None +MRT_CFG.COMPILE.DEVICE_IDS = None def mrt_compile(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR @@ -36,6 +35,10 @@ def mrt_compile(cm_cfg, pass_cfg, logger=None): device_type = pass_cfg.DEVICE_TYPE device_ids = pass_cfg.DEVICE_IDS batch = pass_cfg.BATCH + if device_type is None: + device_type = cm_cfg.DEVICE_TYPE + if device_ids is None: + device_ids = 
cm_cfg.DEVICE_IDS model_prefix = get_model_prefix(model_dir, model_name) if logger is None: diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index 9992a263..c44a0cf1 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -18,8 +18,8 @@ default_input_shape = [-1, 3, 224, 224] MRT_CFG.PREPARE= CN() -MRT_CFG.PREPARE.DEVICE_TYPE = default_device_type -MRT_CFG.PREPARE.DEVICE_IDS = default_device_ids +MRT_CFG.PREPARE.DEVICE_TYPE = None +MRT_CFG.PREPARE.DEVICE_IDS = None MRT_CFG.PREPARE.INPUT_SHAPE = default_input_shape MRT_CFG.PREPARE.SPLIT_KEYS = [] @@ -31,6 +31,10 @@ def prepare(cm_cfg, pass_cfg, logger=None): device_ids = pass_cfg.DEVICE_IDS input_shape = pass_cfg.INPUT_SHAPE split_keys = pass_cfg.SPLIT_KEYS + if device_type is None: + device_type = cm_cfg.DEVICE_TYPE + if device_ids is None: + device_ids = cm_cfg.DEVICE_IDS model_prefix = get_model_prefix(model_dir, model_name) if logger is None: diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index 877b4e8d..f2761ac9 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -4,9 +4,8 @@ from mrt import sym_utils as sutils from mrt import sim_quant_helper as sim from mrt.V3.utils import ( - MRT_CFG, default_device_type, default_device_ids, get_model_prefix, - get_logger, load_fname, save_conf, load_conf, check_file_existance, - get_ctx) + MRT_CFG, get_model_prefix, get_logger, load_fname, save_conf, load_conf, + check_file_existance, get_ctx) DOC = """ QUANTIZE Stage Options: @@ -26,8 +25,8 @@ MRT_CFG.QUANTIZE.RESTORE_NAMES = [] MRT_CFG.QUANTIZE.INPUT_PRECISION = None MRT_CFG.QUANTIZE.OUTPUT_PRECISION = None -MRT_CFG.QUANTIZE.DEVICE_TYPE = default_device_type -MRT_CFG.QUANTIZE.DEVICE_IDS = default_device_ids +MRT_CFG.QUANTIZE.DEVICE_TYPE = None +MRT_CFG.QUANTIZE.DEVICE_IDS = None MRT_CFG.QUANTIZE.SOFTMAX_LAMBD = None MRT_CFG.QUANTIZE.SHIFT_BITS = None MRT_CFG.QUANTIZE.THRESHOLDS = [] @@ -49,6 +48,10 @@ def quantize(cm_cfg, pass_cfg, logger=None): 
attribute_deps = {attr: {sattr: opn for sattr, opn in attr_map} \ for attr, attr_map in pass_cfg.ATTRIBUTE_DEPS} oscale_maps = {opn1: opn2 for opn1, opn2 in pass_cfg.OSCALE_MAPS} + if device_type is None: + device_type = cm_cfg.DEVICE_TYPE + if device_ids is None: + device_ids = cm_cfg.DEVICE_IDS model_prefix = get_model_prefix(model_dir, model_name) if logger is None: diff --git a/python/mrt/V3/model_zoo/alexnet.yaml b/tests/mrt/model_zoo/alexnet.yaml similarity index 85% rename from python/mrt/V3/model_zoo/alexnet.yaml rename to tests/mrt/model_zoo/alexnet.yaml index b993dfcd..59ee4e13 100644 --- a/python/mrt/V3/model_zoo/alexnet.yaml +++ b/tests/mrt/model_zoo/alexnet.yaml @@ -8,14 +8,14 @@ CALIBRATE: LAMBD: 16 DATASET_NAME: imagenet DEVICE_TYPE: gpu - DEVICE_IDS: [0] + DEVICE_IDS: [2] QUANTIZE: INPUT_PRECISION: 8 OUTPUT_PRECISION: 8 DEVICE_TYPE: gpu - DEVICE_IDS: [0] + DEVICE_IDS: [2] EVALUATE: BATCH: 160 DEVICE_TYPE: gpu DEVICE_IDS: [0] - ITER_NUM: 10 + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar_resnet20_v1.yaml b/tests/mrt/model_zoo/cifar_resnet20_v1.yaml new file mode 100644 index 00000000..b2eddb4a --- /dev/null +++ b/tests/mrt/model_zoo/cifar_resnet20_v1.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_resnet20_v1 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/densenet161.yaml b/tests/mrt/model_zoo/densenet161.yaml new file mode 100644 index 00000000..4f97ebd0 --- /dev/null +++ b/tests/mrt/model_zoo/densenet161.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: densenet161 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + 
DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/mnist.yaml b/tests/mrt/model_zoo/mnist.yaml new file mode 100644 index 00000000..36ea196f --- /dev/null +++ b/tests/mrt/model_zoo/mnist.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: mnist_dapp +PREPARE: + INPUT_SHAPE: [-1,1,28,28] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: mnist + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/mobilenet1_0.yaml b/tests/mrt/model_zoo/mobilenet1_0.yaml new file mode 100644 index 00000000..f746e989 --- /dev/null +++ b/tests/mrt/model_zoo/mobilenet1_0.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: mobilenet1_0 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/mobilenetv2_1.0.yaml b/tests/mrt/model_zoo/mobilenetv2_1.0.yaml new file mode 100644 index 00000000..421684a4 --- /dev/null +++ b/tests/mrt/model_zoo/mobilenetv2_1.0.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: mobilenetv2_1.0 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/qd10_resnetv1_20.yaml b/tests/mrt/model_zoo/qd10_resnetv1_20.yaml new file mode 100644 index 00000000..29b6442e --- /dev/null +++ b/tests/mrt/model_zoo/qd10_resnetv1_20.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: quick_raw_qd_animal10_2_cifar_resnet20_v2 +PREPARE: + INPUT_SHAPE: [-1,1,28,28] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: quickdraw + 
DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 2000 diff --git a/tests/mrt/model_zoo/quickdraw.yaml b/tests/mrt/model_zoo/quickdraw.yaml new file mode 100644 index 00000000..b85fa6b3 --- /dev/null +++ b/tests/mrt/model_zoo/quickdraw.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: quickdraw_wlt_augmentation_epoch-4-0.8164531394275162 +PREPARE: + INPUT_SHAPE: [-1,1,28,28] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: quickdraw + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 2000 diff --git a/tests/mrt/model_zoo/resnet18_v1.yaml b/tests/mrt/model_zoo/resnet18_v1.yaml new file mode 100644 index 00000000..11ec49ff --- /dev/null +++ b/tests/mrt/model_zoo/resnet18_v1.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet18_v1 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/resnet18_v1b_0.89.yaml b/tests/mrt/model_zoo/resnet18_v1b_0.89.yaml new file mode 100644 index 00000000..49fa7b5c --- /dev/null +++ b/tests/mrt/model_zoo/resnet18_v1b_0.89.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet18_v1b_0.89 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 18 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/resnet50_v1.yaml b/tests/mrt/model_zoo/resnet50_v1.yaml new file mode 100644 index 00000000..16f5d519 --- /dev/null +++ 
b/tests/mrt/model_zoo/resnet50_v1.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 25 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/resnet50_v2.yaml b/tests/mrt/model_zoo/resnet50_v2.yaml new file mode 100644 index 00000000..a594e766 --- /dev/null +++ b/tests/mrt/model_zoo/resnet50_v2.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v2 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/shufflenet_v1.yaml b/tests/mrt/model_zoo/shufflenet_v1.yaml new file mode 100644 index 00000000..d76877fb --- /dev/null +++ b/tests/mrt/model_zoo/shufflenet_v1.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: shufflenet_v1 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 20 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/squeezenet1.0.yaml b/tests/mrt/model_zoo/squeezenet1.0.yaml new file mode 100644 index 00000000..c573ff63 --- /dev/null +++ b/tests/mrt/model_zoo/squeezenet1.0.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: squeezenet1.0 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 13 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/ssd.yaml b/tests/mrt/model_zoo/ssd.yaml new file 
mode 100644 index 00000000..b9486b05 --- /dev/null +++ b/tests/mrt/model_zoo/ssd.yaml @@ -0,0 +1,49 @@ +COMMON: + MODEL_NAME: ssd_512_resnet50_v1_voc + VERBOSITY: info + RUN_EVALUATE: True +PREPARE: + INPUT_SHAPE: [-1, 3, 512, 512] + SPLIT_KEYS: [ + "ssd0_multiperclassdecoder0_zeros_like0", + "ssd0_multiperclassdecoder0_slice_axis0", + "ssd0_normalizedboxcenterdecoder0_concat0" + ] +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: voc + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + THRESHOLDS: [ + ["data", 2.64], + ["ssd0_multiperclassdecoder0_slice_axis0", 1], + ] + ATTRIBUTE_DEPS: [ + [ + "_greater_scalar", + [ + ["scalar", "ssd0_multiperclassdecoder0_slice_axis0"], + ] + ], + [ + "_contrib_box_nms", + [ + ["valid_thresh", "ssd0_multiperclassdecoder0_slice_axis0"], + ] + ], + ] + OSCALE_MAPS: [ + ["ssd0_slice_axis41", "ssd0_multiperclassdecoder0_zeros_like0"], + ["ssd0_slice_axis42", "ssd0_multiperclassdecoder0_slice_axis0"], + ["ssd0_slice_axis43", "ssd0_normalizedboxcenterdecoder0_concat0"], + ] +EVALUATE: + BATCH: 64 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 2000 diff --git a/tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml b/tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml new file mode 100644 index 00000000..4676e3ad --- /dev/null +++ b/tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml @@ -0,0 +1,48 @@ +COMMON: + MODEL_NAME: ssd_512_mobilenet1.0_voc + VERBOSITY: info + RUN_EVALUATE: True +PREPARE: + INPUT_SHAPE: [-1, 3, 512, 512] + SPLIT_KEYS: [ + "ssd0_softmax0", + "ssd0_normalizedboxcenterdecoder0_concat0" + ] +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 25 + DATASET_NAME: voc + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + THRESHOLDS: [ + ["mrt_rewrite_ssd0_mobilenet0_conv1_weight_0", 29.09], + ["mrt_rewrite_ssd0_mobilenet0_conv2_weight_0", 1.891], + ] + ATTRIBUTE_DEPS: [ + [ + "_greater_scalar", + [ + ["scalar", 
"ssd0_softmax0"], + ] + ], + [ + "_contrib_box_nms", + [ + ["valid_thresh", "ssd0_softmax0"], + ] + ], + ] + OSCALE_MAPS: [ + ["ssd0_slice_axis41", None], + ["ssd0_slice_axis42", "ssd0_softmax0"], + ["ssd0_slice_axis43", "ssd0_normalizedboxcenterdecoder0_concat0"], + ] +EVALUATE: + BATCH: 64 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 2000 diff --git a/tests/mrt/model_zoo/tf_inception_v3.yaml b/tests/mrt/model_zoo/tf_inception_v3.yaml new file mode 100644 index 00000000..438037de --- /dev/null +++ b/tests/mrt/model_zoo/tf_inception_v3.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: tf_inception_v3 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 12 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tf_mobilenet_v1_0.25_224_lite.yaml b/tests/mrt/model_zoo/tf_mobilenet_v1_0.25_224_lite.yaml new file mode 100644 index 00000000..0d9c6d17 --- /dev/null +++ b/tests/mrt/model_zoo/tf_mobilenet_v1_0.25_224_lite.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: tf_mobilenet_v1_0.25_224_lite +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/trec.yaml b/tests/mrt/model_zoo/trec.yaml new file mode 100644 index 00000000..76992541 --- /dev/null +++ b/tests/mrt/model_zoo/trec.yaml @@ -0,0 +1,20 @@ +COMMON: + MODEL_NAME: trec +PREPARE: + INPUT_SHAPE: [38,-1] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: trec + LAMBD: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 16 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git 
a/tests/mrt/model_zoo/vgg19.yaml b/tests/mrt/model_zoo/vgg19.yaml new file mode 100644 index 00000000..5237da62 --- /dev/null +++ b/tests/mrt/model_zoo/vgg19.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: vgg19 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 64 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 2000 diff --git a/tests/mrt/model_zoo/yolo3_darknet53_voc.yaml b/tests/mrt/model_zoo/yolo3_darknet53_voc.yaml new file mode 100644 index 00000000..e738b3e4 --- /dev/null +++ b/tests/mrt/model_zoo/yolo3_darknet53_voc.yaml @@ -0,0 +1,50 @@ +COMMON: + MODEL_NAME: yolo3_darknet53_voc + VERBOSITY: info + RUN_EVALUATE: True +PREPARE: + INPUT_SHAPE: [-1, 3, 416, 416] + SPLIT_KEYS: [ + "yolov30_yolooutputv30_expand_dims0", + "yolov30_yolooutputv31_expand_dims0", + "yolov30_yolooutputv32_expand_dims0", + "yolov30_yolooutputv30_tile0", + "yolov30_yolooutputv31_tile0", + "yolov30_yolooutputv32_tile0" + ] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: voc + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + THRESHOLDS: [ + ["data", 2.64], + ["yolov30_yolooutputv30_expand_dims0", 1], + ["yolov30_yolooutputv31_expand_dims0", 1], + ["yolov30_yolooutputv32_expand_dims0", 1], + ["yolov30_yolooutputv30_tile0", 416], + ["yolov30_yolooutputv31_tile0", 416], + ["yolov30_yolooutputv32_tile0", 416] + ] + ATTRIBUTE_DEPS: [ + [ + "_contrib_box_nms", + [ + ["valid_thresh", "yolov30_yolooutputv30_expand_dims0"], + ] + ], + ] + OSCALE_MAPS: [ + ["yolov30_slice_axis1", None], + ["yolov30_slice_axis2", "yolov30_yolooutputv30_expand_dims0"], + ["yolov30_slice_axis3", "yolov30_yolooutputv30_tile0"] + ] +EVALUATE: + BATCH: 48 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 2000 diff --git a/tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml 
b/tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml new file mode 100644 index 00000000..33d8fa4f --- /dev/null +++ b/tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml @@ -0,0 +1,47 @@ +COMMON: + MODEL_NAME: yolo3_mobilenet1.0_voc + VERBOSITY: info + RUN_EVALUATE: True +PREPARE: + INPUT_SHAPE: [-1, 3, 416, 416] + SPLIT_KEYS: [ + "yolov30_yolooutputv30_expand_dims0", + "yolov30_yolooutputv31_expand_dims0", + "yolov30_yolooutputv32_expand_dims0", + "yolov30_yolooutputv30_tile0", + "yolov30_yolooutputv31_tile0", + "yolov30_yolooutputv32_tile0" + ] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: voc + DEVICE_TYPE: gpu + DEVICE_IDS: [2] +QUANTIZE: + OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [2] + THRESHOLDS: [ + ["mrt_rewrite_mobilenet0_conv23_weight_0", 11], + ["yolov30_yolooutputv30_tile0", 416], + ["yolov30_yolooutputv31_tile0", 416], + ["yolov30_yolooutputv32_tile0", 416] + ] + ATTRIBUTE_DEPS: [ + [ + "_contrib_box_nms", + [ + ["valid_thresh", "yolov30_yolooutputv30_expand_dims0"], + ] + ], + ] + OSCALE_MAPS: [ + ["yolov30_slice_axis1", None], + ["yolov30_slice_axis2", "yolov30_yolooutputv30_expand_dims0"], + ["yolov30_slice_axis3", "yolov30_yolooutputv30_tile0"] + ] +EVALUATE: + BATCH: 64 + DEVICE_TYPE: gpu + DEVICE_IDS: [2] + ITER_NUM: 2000 From b66758a07158fd739d7d51dc650af702b0ff1c86 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 11 Jan 2022 17:56:30 +0800 Subject: [PATCH 098/120] [doc] V3 architecture part 1 --- docs/mrt/V3.rst | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/docs/mrt/V3.rst b/docs/mrt/V3.rst index 9343433b..8b9551f1 100644 --- a/docs/mrt/V3.rst +++ b/docs/mrt/V3.rst @@ -3,6 +3,47 @@ V3 Documentation [TOC] +V3 Architecture +~~~~~~~~~~~~~~~ + +MRT V3 is the functional entry point of MRT that supports customized user-defined attributes +for different quantization stages. 
As listed, there are three required positional stages, which +need to be executed sequentially: + +- preparation: initializing the pre-trained model, supported graph-level passes including: + + - standardizing the specified model + + - duplicate symbol name hacking and validation + - parameter prefix removal + - constant operator deduction + - input shape attachment + - input name replacement + - multiple outputs fusion + - parameter unification + + - equivalent operator substitution for the availability of quantization and cvm compilation + + - transpose operator fusion + - operator rewriting + + - model splitting into top and base to be preprocessed respectively + +- calibration: calibrate the prepared model to acquire the operator-level thresholds that are +further exploited in the stage of quantization. + +- quantization: perform the operator-level quantizing procedure that includes: + + - operator restoration for tuning purposes + + - operator quantization + + - quantizing with respect to the acquired thresholds and input precisions + - operator clipping on condition that the output precision exceeds the tight precision + - model output operator quantization with respect to the pre-defined output precision + + - graph-level model merging + Model Test ~~~~~~~~~~ From be1b50890dbf65b62a5f40aff9d39c8573635c48 Mon Sep 17 00:00:00 2001 From: ryt Date: Thu, 20 Jan 2022 16:16:02 +0800 Subject: [PATCH 099/120] [enhancement] inference_original_model inference_quantized_model get_ctx_eval forward --- python/mrt/V3/evaluate.py | 65 +++++++++++++++++++++++++++++++ tests/mrt/forward_utils.py | 3 +- tests/mrt/test_forward.py | 33 ++++++++++++++++ tests/mrt/test_prediction_SCTF.py | 2 +- 4 files changed, 100 insertions(+), 3 deletions(-) create mode 100644 tests/mrt/test_forward.py diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index 9b678377..4c506503 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -81,6 +81,7 @@ def evalfunc(data, 
label): return acc # forward function for the quantized model + # TODO(ryt.dev) [bug fix] remove split batch check num_xpus = len(ctx) if batch % num_xpus: raise RuntimeError("Batch must be divisible by the number of xpus") @@ -123,3 +124,67 @@ def quantize(data, label): logger.info("evaluatation stage finished") else: logger.info("evaluatation stage skipped") + +def forward(net, data, ctx, baxis, olen): + """ Multiple xpu run support. + """ + data = gluon.utils.split_and_load( + data, ctx_list=ctx, batch_axis=baxis, even_split=False) + outs = [net(d) for d in data] + if olen == 1: + outs = nd.concatenate(outs) + else: + outs = [nd.concatenate([outs[i][j] \ + for i in range(len(outs))]) for j in range(olen)] + return outs + +def get_ctx_eval(ctx): + if isinstance(ctx, mx.Context): + ctx = [ctx] + elif isinstance(ctx, list): + assert all([isinstance(c, mx.Context) for c in ctx]), \ + "invalid value of ctx: {}".format(ctx) + else: + assert False, "invalid type of ctx: {}".format(type(ctx)) + return ctx + +def inference_original_model( + symbol_file, params_file, data, batch_axis=0, + device_type=MRT_CFG.EVALUATE.DEVICE_TYPE, + device_ids=MRT_CFG.EVALUATE.DEVICE_IDS): + + ctx = get_ctx_eval(get_ctx(device_type, device_ids)) + omodel = Model.load(symbol_file, params_file) + graph = omodel.to_graph(ctx=ctx) + olen = len(omodel.symbol) + + outs = forward(graph, data, ctx, batch_axis, olen) + return outs + +def inference_quantized_model( + qsymbol_file, qparams_file, qext_file, data, batch_axis=0, split=False, + device_type=MRT_CFG.EVALUATE.DEVICE_TYPE, + device_ids=MRT_CFG.EVALUATE.DEVICE_IDS): + + ctx = get_ctx_eval(get_ctx(device_type, device_ids)) + + if split: + qmodel = Model.load(qsymbol_file, qparams_file) + oscales, inputs_ext = sim.load_ext(qext_file) + else: + mrt = MRT(Model.load(qsymbol_file, qparams_file)) + mrt.old_names, mrt.th_dict, mrt.precs, mrt.scales = \ + sim.load_ext(qext_file) + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() 
+ qmodel = mrt.current_model + + rqmodel = reduce_graph(qmodel, {'data': data.shape}) + qgraph = rqmodel.to_graph(ctx=ctx) + data = sim.load_real_data(data, 'data', inputs_ext) + olen = len(rqmodel.symbol) + + outs = forward(qgraph, data, ctx, batch_axis, olen) + outs = outs / oscales[0] if olen == 1 \ + else [(t / oscales[i]) for i, t in enumerate(outs)] + return outs diff --git a/tests/mrt/forward_utils.py b/tests/mrt/forward_utils.py index 0b873108..8755b987 100644 --- a/tests/mrt/forward_utils.py +++ b/tests/mrt/forward_utils.py @@ -8,7 +8,7 @@ get_model_prefix, get_logger, set_batch, load_fname, load_conf, check_file_existance, get_ctx, get_batch_axis) -def test_quantized_forward(cm_cfg, pass_cfg, logger=None): +def evaluate_forward(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY @@ -47,7 +47,6 @@ def forward(net, data, ctx): for i in range(len(outs))]) for j in range(olen)] return outs - # forward function for the quantized model num_xpus = len(ctx) if batch % num_xpus: diff --git a/tests/mrt/test_forward.py b/tests/mrt/test_forward.py new file mode 100644 index 00000000..a2cfb8aa --- /dev/null +++ b/tests/mrt/test_forward.py @@ -0,0 +1,33 @@ +from os import path + +from mrt import dataset as ds +from mrt.V3.evaluate import inference_original_model, inference_quantized_model + +def main(): + dataset_name = "coco" + batch_size = 50 + input_shape = [batch_size,3,640,640] + dataset = ds.DS_REG[dataset_name](input_shape) + data_iter_func = dataset.iter_func() + data, _ = data_iter_func() + + # test forward of original model + symbol_file = path.expanduser("~/mrt_model/yolov5s.preprocess.unify.broadcastify.json") + params_file = path.expanduser("~/mrt_model/yolov5s.preprocess.unify.broadcastify.params") + outs = inference_original_model( + symbol_file, params_file, data, + batch_axis=0, device_type="gpu", device_ids=[0,1,2]) + print([o.shape for o in outs]) + + # test forward of quantized 
model + qsymbol_file = path.expanduser("~/mrt_model/yolov5s.mrt.quantize.json") + qparams_file = path.expanduser("~/mrt_model/yolov5s.mrt.quantize.params") + qext_file = path.expanduser("~/mrt_model/yolov5s.mrt.quantize.ext") + outs = inference_quantized_model( + qsymbol_file, qparams_file, qext_file, data, + batch_axis=0, split=False, device_type="gpu", device_ids=[0,1,2]) + print([o.shape for o in outs]) + + +if __name__ == "__main__": + main() diff --git a/tests/mrt/test_prediction_SCTF.py b/tests/mrt/test_prediction_SCTF.py index b7b56d30..ad6077e5 100644 --- a/tests/mrt/test_prediction_SCTF.py +++ b/tests/mrt/test_prediction_SCTF.py @@ -40,4 +40,4 @@ def data_loader(): cfg = merge_cfg(yaml_file) cfg = override_cfg_args(cfg, sys.argv[1:]) run(cfg) - # futils.test_quantized_forward(cfg.COMMON, cfg.EVALUATE) + # futils.evaluate_forward(cfg.COMMON, cfg.EVALUATE) From 843e53ada32ed66bf7787285f001e8374851d9c5 Mon Sep 17 00:00:00 2001 From: ryt Date: Thu, 20 Jan 2022 21:05:22 +0800 Subject: [PATCH 100/120] [fix bug] set batch_size as 16 times len(device_ids) for yolov5s --- tests/mrt/test_forward.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/tests/mrt/test_forward.py b/tests/mrt/test_forward.py index a2cfb8aa..634e9493 100644 --- a/tests/mrt/test_forward.py +++ b/tests/mrt/test_forward.py @@ -4,9 +4,14 @@ from mrt.V3.evaluate import inference_original_model, inference_quantized_model def main(): + device_ids = [0,1,2] + device_type = "gpu" + dataset_name = "coco" - batch_size = 50 + # dataset_name = "voc" + batch_size = 16 * len(device_ids) input_shape = [batch_size,3,640,640] + # input_shape = [batch_size,3,416,416] dataset = ds.DS_REG[dataset_name](input_shape) data_iter_func = dataset.iter_func() data, _ = data_iter_func() @@ -14,20 +19,26 @@ def main(): # test forward of original model symbol_file = path.expanduser("~/mrt_model/yolov5s.preprocess.unify.broadcastify.json") params_file = 
path.expanduser("~/mrt_model/yolov5s.preprocess.unify.broadcastify.params") + # symbol_file = path.expanduser("~/mrt_model/yolo3_darknet53_voc.json") + # params_file = path.expanduser("~/mrt_model/yolo3_darknet53_voc.params") outs = inference_original_model( symbol_file, params_file, data, - batch_axis=0, device_type="gpu", device_ids=[0,1,2]) - print([o.shape for o in outs]) + batch_axis=0, device_type=device_type, device_ids=device_ids) + # print([o.shape for o in outs]) + print(outs[1]) # test forward of quantized model qsymbol_file = path.expanduser("~/mrt_model/yolov5s.mrt.quantize.json") qparams_file = path.expanduser("~/mrt_model/yolov5s.mrt.quantize.params") qext_file = path.expanduser("~/mrt_model/yolov5s.mrt.quantize.ext") + # qsymbol_file = path.expanduser("~/mrt_model/yolo3_darknet53_voc.all.quantize.json") + # qparams_file = path.expanduser("~/mrt_model/yolo3_darknet53_voc.all.quantize.params") + # qext_file = path.expanduser("~/mrt_model/yolo3_darknet53_voc.all.quantize.ext") outs = inference_quantized_model( qsymbol_file, qparams_file, qext_file, data, - batch_axis=0, split=False, device_type="gpu", device_ids=[0,1,2]) - print([o.shape for o in outs]) - + batch_axis=0, split=False, device_type=device_type, device_ids=device_ids) + # print([o.shape for o in outs]) + print(outs[1]) if __name__ == "__main__": main() From 222b27a7c09e5a53c2330d76000249207730cfb3 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 21 Jan 2022 10:50:31 +0800 Subject: [PATCH 101/120] add todo --- python/mrt/V3/evaluate.py | 5 +++++ tests/mrt/yolov5s/yolov5s.yaml | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index 4c506503..e73a6570 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -53,6 +53,7 @@ def evaluate(cm_cfg, pass_cfg, logger=None): # forward function for the orginal model omodel = Model.load(*load_fname(model_prefix)) + #TODO(ryt.dev) [bug fix] load revised model graph = 
omodel.to_graph(ctx=ctx) dataset_name = conf_map["dataset_name"] input_shape = conf_map["input_shape"] @@ -126,6 +127,7 @@ def quantize(data, label): logger.info("evaluatation stage skipped") def forward(net, data, ctx, baxis, olen): + #TODO(ryt.dev) documentation """ Multiple xpu run support. """ data = gluon.utils.split_and_load( @@ -139,6 +141,7 @@ def forward(net, data, ctx, baxis, olen): return outs def get_ctx_eval(ctx): + #TODO(ryt.dev) documentation if isinstance(ctx, mx.Context): ctx = [ctx] elif isinstance(ctx, list): @@ -152,6 +155,7 @@ def inference_original_model( symbol_file, params_file, data, batch_axis=0, device_type=MRT_CFG.EVALUATE.DEVICE_TYPE, device_ids=MRT_CFG.EVALUATE.DEVICE_IDS): + #TODO(ryt.dev) documentation ctx = get_ctx_eval(get_ctx(device_type, device_ids)) omodel = Model.load(symbol_file, params_file) @@ -165,6 +169,7 @@ def inference_quantized_model( qsymbol_file, qparams_file, qext_file, data, batch_axis=0, split=False, device_type=MRT_CFG.EVALUATE.DEVICE_TYPE, device_ids=MRT_CFG.EVALUATE.DEVICE_IDS): + #TODO(ryt.dev) documentation ctx = get_ctx_eval(get_ctx(device_type, device_ids)) diff --git a/tests/mrt/yolov5s/yolov5s.yaml b/tests/mrt/yolov5s/yolov5s.yaml index f20b919f..9aed2508 100644 --- a/tests/mrt/yolov5s/yolov5s.yaml +++ b/tests/mrt/yolov5s/yolov5s.yaml @@ -1,7 +1,7 @@ COMMON: - # MODEL_NAME: yolov5s.preprocess.unify.broadcastify + MODEL_NAME: yolov5s.preprocess.unify.broadcastify # MODEL_NAME: yolov5s.preprocess.unify - MODEL_NAME: yolov5s + # MODEL_NAME: yolov5s VERBOSITY: info RUN_EVALUATE: False PREPARE: From 0719f6d6c1ea82c0c33c2d2471a4b9a0596d5b1e Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 25 Jan 2022 17:54:49 +0800 Subject: [PATCH 102/120] [enhancement] Yolov5sDataset Yolov5Metric --- tests/mrt/yolov5s/test_yolov5s.py | 249 ++++++++++++++ tests/mrt/yolov5s/utils.py | 519 ++++++++++++++++++++++++++++++ 2 files changed, 768 insertions(+) create mode 100644 tests/mrt/yolov5s/test_yolov5s.py create mode 100644 
tests/mrt/yolov5s/utils.py diff --git a/tests/mrt/yolov5s/test_yolov5s.py b/tests/mrt/yolov5s/test_yolov5s.py new file mode 100644 index 00000000..e8d68a2a --- /dev/null +++ b/tests/mrt/yolov5s/test_yolov5s.py @@ -0,0 +1,249 @@ +from os import path +import os +import sys + +from mxnet import ndarray as nd +import numpy as np +import cv2 + +from mrt.V3.utils import get_cfg_defaults, merge_cfg, override_cfg_args +from mrt.V3.execute import run +from mrt import dataset as ds +from utils import ( + non_max_suppression, scale_coords, xywh2xyxy, process_batch, ap_per_class) + + +class Yolov5Metric: + def __init__( + self, conf_thres=0.001, iou_thres=0.6, names=None, + iouv=np.linspace(0.5,0.95,10)): + # attributes + self.conf_thres = conf_thres + self.iou_thres = iou_thres + self.names = names + self.iouv = iouv + self.niou = iouv.shape[0] + # status variable + self.stats = [] + self.seen = 0 + + def reset(self): + self.stats.clear() + self.seen = 0 + + def update(self, labels, out): + nl = labels.shape[0] + out = non_max_suppression( + out.asnumpy(), self.conf_thres, self.iou_thres, labels=[] + multi_label=True, agnostic=False) + pred = out[0] + tcls = labels[:,0] if nl else [] + self.seen += 1 + if pred.shape[0] == 0: + if nl: + self.stats.append( + (np.zeros((0)), np.zeros((0)), np.zeros((0)), tcls)) + continue + predn = pred.copy() + # native-space pred + _, _, H, W = self.ishape + scale_coords((H,W), predn[:,:4], [H,W], [[1.0,1.0],[0.0,0.0]]) + if nl: + # target boxes + tbox = xywh2xyxy(labels[:,1:5]) + # native-space labels + scale_coords((H,W), tbox, [H,W], [[1.0,1.0],[0.0,0.0]]) + # native-space labels + labelsn = np.concatenate((labels[:,0:1],tbox), axis=1) + correct = process_batch(predn, labelsn, self.iouv) + else: + correct = np.zeros((pred.shape[0], self.niou), dtype=np.bool) + # (correct, conf, pcls, tcls) + self.stats.append((correct, pred[:, 4], pred[:, 5], tcls)) + # compute metrics + # to numpy + cur_stats = [np.concatenate(x, 0) for x in 
zip(*self.stats)] + if len(cur_stats) and cur_stats[0].any(): + tp, fp, p, r, f1, ap, ap_class = ap_per_class( + *cur_stats, plot=False, save_dir=None, names=names) + # AP@0.5, AP@0.5:0.95 + ap50, ap = ap[:, 0], ap.mean(1) + mp, mr, map50, map_ = p.mean(), r.mean(), ap50.mean(), ap.mean() + # number of targets per class + nt = np.bincount(cur_stats[3].astype(np.int64), minlength=80) + else: + nt = np.zeros(1) + return self.seen, nt, mp, mr, map50, map_ + + +@ds.register_dataset("yolov5s_dataset") +class Yolov5sDataset(ds.Dataset): + def __init__(self, input_shape, imgsz=640, **kwargs): + super().__init__(input_shape, **kwargs) + self.image_dir = path.join(self.root_dir, "images") + self.label_dir = path.join(self.root_dir, "labels") + self.imgsz = imgsz + + def _load_data(self): + assert len(self.ishape) == 4, self.ishape + assert self.ishape[0] == 1, self.ishape + + def data_loader(): + for f in os.listdir(self.image_dir): + _, ext = os.path.splitext(f) + if ext != ".jpg" and ext != ".JPG" and ext != ".png" and ext != ".PNG": + continue + l = f.replace(f.split(".")[1], "txt") + file_name = os.path.join(self.root_dir, f) + label_name = os.path.join(self.label_dir, l) + img = cv2.imread(file_name) + try: + labels = np.loadtxt(label_name) + except: + labels = np.array([]) + labels = labels.reshape((-1, 5)) + height, width = img.shape[0:2] + scale = min(self.imgsz/height, self.imgsz/width) + h0, w0 = height*scale, width*scale + img0 = cv2.resize(img, (round(w0/32.)*32, round(h0/32.)*32)) + img = img0.astype("float32")/255. 
+ img = nd.array(img.transpose((2,0,1))[None]) + labels[:,1:] = labels[:,1:] * np.array([img.shape[3], img.shape[2]]*2) + yield img, labels + + self.data = data_loader() + + def metrics( + self, conf_thres=0.001, iou_thres=0.6, names=None, + iouv=np.linspace(0.5,0.95,10)): + if names is None: + names = { + 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', + 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', + 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', + 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', + 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', + 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', + 25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', + 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', + 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', + 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', + 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', + 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', + 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', + 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', + 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', + 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', + 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', + 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', + 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', + 78: 'hair drier', 79: 'toothbrush', + } + return Yolov5Metric( + conf_thres=conf_thres, iou_thres=iou_thres, names=names, iouv=iouv) + + def validate(self, metrics, out, labels): + metrics.update(labels, out) + seen, nt, mp, mr, map50, map_ = metrics.get() + return "{}: #images={}, #objects={}, ".fomrat( + self.root_dir, seen, nt.sum()) + \ + "mp={02.2f}%, mr={02.2f}%, ".format(mp*100, mr*100) + \ + "map50={02.2f}%, map={02.2f}%".format(map50*100, map_*100) + +def main(opt): + print(opt) + conf_thres = 0.001 + 
iou_thres = 0.6 + + args = parse_opt() + ctx = mx.cpu() if args.cpu else mx.gpu(args.gpu) + + gw = {"n":1, "s":2, "m":3, "l":4, "x":5} + gd = {"n":1, "s":1, "m":2, "l":3, "x":4} + postfix = args.model[-1] + model = yolov5(batch_size=args.batch_size, mode="val", ctx=ctx, act=args.silu, gd=gd[postfix], gw=gw[postfix]) + model.collect_params().initialize(init=mx.init.Xavier(), ctx=ctx) + #model.hybridize() + + try: + EPOCH = [] + start_epoch = 0 + for f in os.listdir(args.model_dir): + if f.endswith("params") and args.model in f: + name_epoch = f.strip().split(".")[0].split("-") + if len(name_epoch) == 2 and name_epoch[0] == args.model: + EPOCH.append(name_epoch[1]) + tmp = [int(_) for _ in EPOCH] + ind = tmp.index(max(tmp)) + params_file = os.path.join(args.model_dir, args.model+"-"+EPOCH[ind]+".params") + model.collect_params().load(params_file,ignore_extra=False) + print(f'load weight {params_file} successfully') + except: + print("failed to load weight") + + iouv = np.linspace(0.5, 0.95, 10) + niou = iouv.shape[0] + seen = 0 + jdict, stats, ap, ap_class = [], [], [], [] + + for f in os.listdir(args.dataset): + _, ext = os.path.splitext(f) + if ext != ".jpg" and ext != ".JPG" and ext != ".png" and ext != ".PNG": + continue + print(f) + l = f.replace(f.split(".")[1], "txt") + file_name = os.path.join(args.dataset, f) + label_name = os.path.join(args.dataset.replace("images","labels"), l) + img = cv2.imread(file_name) + try: + labels = np.loadtxt(label_name) + except: + labels = np.array([]) + labels = labels.reshape((-1, 5)) + + height, width = img.shape[0:2] + scale = min(args.imgsz/height, args.imgsz/width) + h0, w0 = height*scale, width*scale + img0 = cv2.resize(img, (round(w0/32.)*32, round(h0/32.)*32)) + + img = img0.astype("float32")/255. 
+ img = nd.array(img.transpose((2,0,1))[None], ctx = ctx) + labels[:,1:] = labels[:,1:]*np.array([img.shape[3], img.shape[2]]*2) + + nl = labels.shape[0] + out = model(img).asnumpy() + out = non_max_suppression(out, conf_thres, iou_thres, labels=[], multi_label=True, agnostic=False) + pred = out[0] + + tcls = labels[:,0] if nl else [] + seen += 1 + + if pred.shape[0] == 0: + if nl: + stats.append((np.zeros((0)), np.zeros((0)), np.zeros((0)), tcls)) + continue + + predn = pred.copy() + scale_coords(img[0].shape[1:], predn[:, :4], [img.shape[2], img.shape[3]], [[1.0,1.0],[0.0,0.0]]) # native-space pred + + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_coords(img[0].shape[1:], tbox, [img.shape[2], img.shape[3]], [[1.0,1.0],[0.0,0.0]]) # native-space labels + labelsn = np.concatenate((labels[:, 0:1], tbox), axis=1) # native-space labels + correct = process_batch(predn, labelsn, iouv) + else: + correct = np.zeros((pred.shape[0], niou), dtype=np.bool) + stats.append((correct, pred[:, 4], pred[:, 5], tcls)) # (correct, conf, pcls, tcls) + + +if __name__ == "__main__": + assert len(sys.argv) >= 1 and len(sys.argv)%2 == 1, \ + "invalid length: {} of sys.argv: {}".format( + len(sys.argv), sys.argv) + yaml_file = path.join( + path.dirname(path.realpath(__file__)), + "model_zoo", "prediction_SCTF.yaml") + cfg = get_cfg_defaults() + cfg = merge_cfg(yaml_file) + cfg = override_cfg_args(cfg, sys.argv[1:]) + run(cfg) diff --git a/tests/mrt/yolov5s/utils.py b/tests/mrt/yolov5s/utils.py new file mode 100644 index 00000000..438b2c2c --- /dev/null +++ b/tests/mrt/yolov5s/utils.py @@ -0,0 +1,519 @@ +import numpy as np +import os +from pathlib import Path +import platform +import re +from PIL import Image, ImageDraw, ImageFont +import cv2 +import random +import math + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.copy() + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - 
x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + return y + +def clip_coords(boxes, shape): + # Clip bounding xyxy bounding boxes to image shape (height, width) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + return boxes + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + x = clip_coords(x, (h - eps, w - eps)) # warning: inplace clip + y = np.copy(x) + y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center + y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center + y[:, 2] = (x[:, 2] - x[:, 0]) / w # width + y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + return y + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + +def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return 
intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (np.minimum(box1[:, None, 2:], box2[:, 2:]) - np.maximum(box1[:, None, :2], box2[:, :2])).clip(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = 
np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes, nt = np.unique(target_cls, return_counts=True) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = nt[ci] # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + eps) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # 
precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = {i: v for i, v in enumerate(names)} # to dict + + i = f1.mean(0).argmax() # max F1 index + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype('int32') + +def process_batch(detections, labels, iouv): + """ + Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (Array[N, 10]), for 10 IoU levels + """ + correct = np.zeros((detections.shape[0], iouv.shape[0]), dtype=np.bool) + iou = box_iou(labels[:, 1:], detections[:, :4]) + x = np.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match + if x[0].shape[0]: + matches = np.concatenate((np.concatenate((x[0][:,np.newaxis],x[1][:,np.newaxis]), axis=1), iou[x[0], x[1]][:, None]), axis=1) # [label, detection, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + matches = np.array(matches) + correct[matches[:, 1].astype("int32")] = matches[:, 2:3] >= iouv + return correct + +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = 
[self.hex2rgb('#' + c) for c in hex] + self.n = len(self.palette) + + def __call__(self, i, bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if test: # method 1 + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + else: # method 2 + return os.access(dir, os.R_OK) # possible issues on Windows + +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + +CONFIG_DIR = user_config_dir() + +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? 
+ return re.search('[\u4e00-\u9fff]', s) + +def check_font(font='Arial.ttf', size=10): + # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary + font = Path(font) + font = font if font.exists() else (CONFIG_DIR / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except: + pass + +class Annotator: + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + self.pil = pil or not is_ascii(example) or is_chinese(example) + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle([box[0], + box[1] - h if outside else box[1], + box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1], fill=color) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if 
label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h - 3 >= 0 # label fits outside box + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, + thickness=tf, lineType=cv2.LINE_AA) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255)): + # Add text to image (PIL-only) + w, h = self.font.getsize(text) # text width, height + self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) + +def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = 
math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + return im, targets + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) 
% 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + +def NMS(dets, scores, thresh): + # x1, y1, x2, y2, score + # (x1、y1)(x2、y2)left-top, and right-bottom + x1 = dets[:, 0] + y1 = dets[:, 1] + x2 = dets[:, 2] + y2 = dets[:, 3] + + #area for each candidate + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + #descending reorder + order = scores.argsort()[::-1] + + temp = [] + while order.size > 0: + i = order[0] + temp.append(i) + # calc the interact coordinate between current largest prob. candidate and others + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.minimum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.maximum(y2[i], y2[order[1:]]) + + # calc the area of interact condidate, note the area should be nonzero + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + # IoU + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + # find the candidate with interactness larger than threshold + inds = np.where(ovr <= thresh)[0] + #re-order + order = order[inds + 1] + return np.array(temp) + +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=(), max_det=300): + """Runs Non-Maximum Suppression (NMS) on inference results + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + nc = prediction.shape[2] - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + + # Settings + min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width 
and height + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + output = [np.zeros((0, 6))] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = np.zeros((len(l), nc + 5)) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + x = np.concatenate((x, v), axis=0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = np.nonzero(x[:, 5:] > conf_thres) + x = np.concatenate((box[i], x[i, j + 5, None], j[:, None].astype("float32")), axis=1) + else: # best class only + j = np.argmax(x[:, 5:], axis=1) + conf = np.array([x[i,5+j[i]] for i in range(x.shape[0])]) + x = np.concatenate((box, conf[:, None], j[:, None].astype("float32")), axis=1)[conf.reshape((-1)) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == np.array(classes)).any(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort()[::-1][:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = NMS(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + output[xi] = x[i] + return output + + 
+ + + From 03e45b95f2427aaa5764460a974b9d0db4768be2 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 25 Jan 2022 22:53:33 +0800 Subject: [PATCH 103/120] [fix bug] tests/mrt/yolov5s/test_yolov5s.py --- tests/mrt/yolov5s/test_yolov5s.py | 201 ++++++++++-------------------- 1 file changed, 67 insertions(+), 134 deletions(-) diff --git a/tests/mrt/yolov5s/test_yolov5s.py b/tests/mrt/yolov5s/test_yolov5s.py index e8d68a2a..98b68f47 100644 --- a/tests/mrt/yolov5s/test_yolov5s.py +++ b/tests/mrt/yolov5s/test_yolov5s.py @@ -25,47 +25,54 @@ def __init__( self.niou = iouv.shape[0] # status variable self.stats = [] - self.seen = 0 def reset(self): self.stats.clear() - self.seen = 0 - def update(self, labels, out): - nl = labels.shape[0] - out = non_max_suppression( - out.asnumpy(), self.conf_thres, self.iou_thres, labels=[] - multi_label=True, agnostic=False) - pred = out[0] - tcls = labels[:,0] if nl else [] - self.seen += 1 - if pred.shape[0] == 0: + def update(self, labels, out, input_shape): + batch_size, _, H, W = input_shape + outs = [] + for i in range(batch_size): + concat_out = nd.concatenate( + [o[i].reshape((-1, o[i].shape[-1])) for o in out]) + expand_dims_out = concat_out.expand_dims(axis=0) + outs.append(expand_dims_out) + for i in range(batch_size): + label = labels[i] + nl = label.shape[0] + out = non_max_suppression( + outs[i].asnumpy(), self.conf_thres, self.iou_thres, labels=[], + multi_label=True, agnostic=False) + pred = out[0] + tcls = label[:,0] if nl else [] + if pred.shape[0] == 0: + if nl: + self.stats.append( + (np.zeros((0,self.niou)), np.zeros((0)), np.zeros((0)), tcls)) + continue + predn = pred.copy() + # native-space pred + scale_coords((H,W), predn[:,:4], [H,W], [[1.0,1.0],[0.0,0.0]]) if nl: - self.stats.append( - (np.zeros((0)), np.zeros((0)), np.zeros((0)), tcls)) - continue - predn = pred.copy() - # native-space pred - _, _, H, W = self.ishape - scale_coords((H,W), predn[:,:4], [H,W], [[1.0,1.0],[0.0,0.0]]) - if nl: - # target boxes - tbox = 
xywh2xyxy(labels[:,1:5]) - # native-space labels - scale_coords((H,W), tbox, [H,W], [[1.0,1.0],[0.0,0.0]]) - # native-space labels - labelsn = np.concatenate((labels[:,0:1],tbox), axis=1) - correct = process_batch(predn, labelsn, self.iouv) - else: - correct = np.zeros((pred.shape[0], self.niou), dtype=np.bool) - # (correct, conf, pcls, tcls) - self.stats.append((correct, pred[:, 4], pred[:, 5], tcls)) + # target boxes + tbox = xywh2xyxy(label[:,1:5]) + # native-space label + scale_coords((H,W), tbox, [H,W], [[1.0,1.0],[0.0,0.0]]) + # native-space label + labelsn = np.concatenate((label[:,0:1],tbox), axis=1) + correct = process_batch(predn, labelsn, self.iouv) + else: + correct = np.zeros((pred.shape[0], self.niou), dtype=np.bool) + # (correct, conf, pcls, tcls) + self.stats.append((correct, pred[:, 4], pred[:, 5], tcls)) + + def get(self): # compute metrics # to numpy cur_stats = [np.concatenate(x, 0) for x in zip(*self.stats)] if len(cur_stats) and cur_stats[0].any(): tp, fp, p, r, f1, ap, ap_class = ap_per_class( - *cur_stats, plot=False, save_dir=None, names=names) + *cur_stats, plot=False, save_dir=None, names=self.names) # AP@0.5, AP@0.5:0.95 ap50, ap = ap[:, 0], ap.mean(1) mp, mr, map50, map_ = p.mean(), r.mean(), ap50.mean(), ap.mean() @@ -73,11 +80,12 @@ def update(self, labels, out): nt = np.bincount(cur_stats[3].astype(np.int64), minlength=80) else: nt = np.zeros(1) - return self.seen, nt, mp, mr, map50, map_ + mp = mr = map50 = map_ = 0. 
+ return nt, mp, mr, map50, map_ -@ds.register_dataset("yolov5s_dataset") -class Yolov5sDataset(ds.Dataset): +@ds.register_dataset("yolov5_dataset") +class Yolov5Dataset(ds.Dataset): def __init__(self, input_shape, imgsz=640, **kwargs): super().__init__(input_shape, **kwargs) self.image_dir = path.join(self.root_dir, "images") @@ -86,17 +94,20 @@ def __init__(self, input_shape, imgsz=640, **kwargs): def _load_data(self): assert len(self.ishape) == 4, self.ishape - assert self.ishape[0] == 1, self.ishape def data_loader(): - for f in os.listdir(self.image_dir): + data, label = [], [] + for f in sorted(os.listdir(self.image_dir)): _, ext = os.path.splitext(f) - if ext != ".jpg" and ext != ".JPG" and ext != ".png" and ext != ".PNG": + if ext != ".jpg" and ext != ".JPG" \ + and ext != ".png" and ext != ".PNG": continue l = f.replace(f.split(".")[1], "txt") - file_name = os.path.join(self.root_dir, f) + file_name = os.path.join(self.image_dir, f) label_name = os.path.join(self.label_dir, l) img = cv2.imread(file_name) + # hack size + img = cv2.resize(img, tuple(self.ishape[2:])) try: labels = np.loadtxt(label_name) except: @@ -109,7 +120,15 @@ def data_loader(): img = img0.astype("float32")/255. 
img = nd.array(img.transpose((2,0,1))[None]) labels[:,1:] = labels[:,1:] * np.array([img.shape[3], img.shape[2]]*2) - yield img, labels + # if img.shape[2] != self.ishape[2] or img.shape[3] != self.ishape[3]: + # continue + if len(data) < self.ishape[0]: + data.append(img) + label.append(labels) + else: + batch_data = nd.concatenate(data) + yield batch_data, label + data, label = [], [] self.data = data_loader() @@ -139,110 +158,24 @@ def metrics( 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush', } - return Yolov5Metric( + metric = Yolov5Metric( conf_thres=conf_thres, iou_thres=iou_thres, names=names, iouv=iouv) + metric.reset() + return metric def validate(self, metrics, out, labels): - metrics.update(labels, out) - seen, nt, mp, mr, map50, map_ = metrics.get() - return "{}: #images={}, #objects={}, ".fomrat( - self.root_dir, seen, nt.sum()) + \ - "mp={02.2f}%, mr={02.2f}%, ".format(mp*100, mr*100) + \ - "map50={02.2f}%, map={02.2f}%".format(map50*100, map_*100) - -def main(opt): - print(opt) - conf_thres = 0.001 - iou_thres = 0.6 - - args = parse_opt() - ctx = mx.cpu() if args.cpu else mx.gpu(args.gpu) - - gw = {"n":1, "s":2, "m":3, "l":4, "x":5} - gd = {"n":1, "s":1, "m":2, "l":3, "x":4} - postfix = args.model[-1] - model = yolov5(batch_size=args.batch_size, mode="val", ctx=ctx, act=args.silu, gd=gd[postfix], gw=gw[postfix]) - model.collect_params().initialize(init=mx.init.Xavier(), ctx=ctx) - #model.hybridize() - - try: - EPOCH = [] - start_epoch = 0 - for f in os.listdir(args.model_dir): - if f.endswith("params") and args.model in f: - name_epoch = f.strip().split(".")[0].split("-") - if len(name_epoch) == 2 and name_epoch[0] == args.model: - EPOCH.append(name_epoch[1]) - tmp = [int(_) for _ in EPOCH] - ind = tmp.index(max(tmp)) - params_file = os.path.join(args.model_dir, args.model+"-"+EPOCH[ind]+".params") - model.collect_params().load(params_file,ignore_extra=False) - print(f'load weight {params_file} 
successfully') - except: - print("failed to load weight") - - iouv = np.linspace(0.5, 0.95, 10) - niou = iouv.shape[0] - seen = 0 - jdict, stats, ap, ap_class = [], [], [], [] - - for f in os.listdir(args.dataset): - _, ext = os.path.splitext(f) - if ext != ".jpg" and ext != ".JPG" and ext != ".png" and ext != ".PNG": - continue - print(f) - l = f.replace(f.split(".")[1], "txt") - file_name = os.path.join(args.dataset, f) - label_name = os.path.join(args.dataset.replace("images","labels"), l) - img = cv2.imread(file_name) - try: - labels = np.loadtxt(label_name) - except: - labels = np.array([]) - labels = labels.reshape((-1, 5)) - - height, width = img.shape[0:2] - scale = min(args.imgsz/height, args.imgsz/width) - h0, w0 = height*scale, width*scale - img0 = cv2.resize(img, (round(w0/32.)*32, round(h0/32.)*32)) - - img = img0.astype("float32")/255. - img = nd.array(img.transpose((2,0,1))[None], ctx = ctx) - labels[:,1:] = labels[:,1:]*np.array([img.shape[3], img.shape[2]]*2) - - nl = labels.shape[0] - out = model(img).asnumpy() - out = non_max_suppression(out, conf_thres, iou_thres, labels=[], multi_label=True, agnostic=False) - pred = out[0] - - tcls = labels[:,0] if nl else [] - seen += 1 - - if pred.shape[0] == 0: - if nl: - stats.append((np.zeros((0)), np.zeros((0)), np.zeros((0)), tcls)) - continue - - predn = pred.copy() - scale_coords(img[0].shape[1:], predn[:, :4], [img.shape[2], img.shape[3]], [[1.0,1.0],[0.0,0.0]]) # native-space pred - - if nl: - tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(img[0].shape[1:], tbox, [img.shape[2], img.shape[3]], [[1.0,1.0],[0.0,0.0]]) # native-space labels - labelsn = np.concatenate((labels[:, 0:1], tbox), axis=1) # native-space labels - correct = process_batch(predn, labelsn, iouv) - else: - correct = np.zeros((pred.shape[0], niou), dtype=np.bool) - stats.append((correct, pred[:, 4], pred[:, 5], tcls)) # (correct, conf, pcls, tcls) - + metrics.update(labels, out, self.ishape) + nt, mp, mr, map50, map_ = 
metrics.get() + return "#objects={}, ".format(nt.sum()) + \ + "mp={:6.2%}, mr={:6.2%}, ".format(mp*100, mr*100) + \ + "map50={:6.2%}, map={:6.2%}".format(map50*100, map_*100) if __name__ == "__main__": assert len(sys.argv) >= 1 and len(sys.argv)%2 == 1, \ "invalid length: {} of sys.argv: {}".format( len(sys.argv), sys.argv) yaml_file = path.join( - path.dirname(path.realpath(__file__)), - "model_zoo", "prediction_SCTF.yaml") + path.dirname(path.realpath(__file__)), "yolov5s-0040.yaml") cfg = get_cfg_defaults() cfg = merge_cfg(yaml_file) cfg = override_cfg_args(cfg, sys.argv[1:]) From f0ff44a1357a56d5f5e563fb9606284d56777324 Mon Sep 17 00:00:00 2001 From: ryt Date: Tue, 25 Jan 2022 22:54:21 +0800 Subject: [PATCH 104/120] [config] tests/mrt/yolov5s/yolov5s-0040.yaml --- tests/mrt/yolov5s/yolov5s-0040.yaml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 tests/mrt/yolov5s/yolov5s-0040.yaml diff --git a/tests/mrt/yolov5s/yolov5s-0040.yaml b/tests/mrt/yolov5s/yolov5s-0040.yaml new file mode 100644 index 00000000..ed0e3ce8 --- /dev/null +++ b/tests/mrt/yolov5s/yolov5s-0040.yaml @@ -0,0 +1,24 @@ +COMMON: + MODEL_NAME: yolov5s-0040.preprocess.unify.broadcastify + VERBOSITY: info + # RUN_EVALUATE: False + BATCH: 16 +PREPARE: + INPUT_SHAPE: [-1, 3, 640, 640] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: yolov5_dataset + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + # OUTPUT_PRECISION: 30 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + # in this model, the BATCH should be set as 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10 +COMPILE: + # in this model, the BATCH should be set as 16 + BATCH: 16 From 5d339a263ecf34732f95a7f2440658d134ec39b1 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 26 Jan 2022 13:59:42 +0800 Subject: [PATCH 105/120] [enhancement] _make_grid --- tests/mrt/yolov5s/test_yolov5s.py | 152 ++++++++++++++++++++++-------- 1 file changed, 115 insertions(+), 37 deletions(-) diff --git a/tests/mrt/yolov5s/test_yolov5s.py 
b/tests/mrt/yolov5s/test_yolov5s.py index 98b68f47..15e41a4d 100644 --- a/tests/mrt/yolov5s/test_yolov5s.py +++ b/tests/mrt/yolov5s/test_yolov5s.py @@ -2,6 +2,7 @@ import os import sys +import mxnet as mx from mxnet import ndarray as nd import numpy as np import cv2 @@ -15,28 +16,123 @@ class Yolov5Metric: def __init__( - self, conf_thres=0.001, iou_thres=0.6, names=None, - iouv=np.linspace(0.5,0.95,10)): - # attributes + self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10), + nc=80, anchors=()): + + # metric parameters self.conf_thres = conf_thres self.iou_thres = iou_thres - self.names = names self.iouv = iouv self.niou = iouv.shape[0] + self.names = { + 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', + 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', + 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', + 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', + 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', + 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', + 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', + 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', + 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', + 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', + 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', + 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', + 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', + 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', + 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', + 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', + 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', + 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', + 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', + 79: 'toothbrush', + } + + # detect parameters + self.no = nc + 5 + self.na = len(anchors[0]) // 2 + self.stride = nd.array([8., 
16., 32.]) + self.anchors = nd.array( + [ + [ + [ 1.25000, 1.62500], + [ 2.00000, 3.75000], + [ 4.12500, 2.87500] + ], + [ + [ 1.87500, 3.81250], + [ 3.87500, 2.81250], + [ 3.68750, 7.43750] + ], + [ + [ 3.62500, 2.81250], + [ 4.87500, 6.18750], + [11.65625, 10.18750] + ] + ] + ) + # status variable self.stats = [] def reset(self): self.stats.clear() - def update(self, labels, out, input_shape): + def _make_grid(self, nx=20, ny=20, i=0, ctx=mx.cpu(0)): + yv = nd.array(range(ny))[:,None].repeat(nx,axis=1) + xv = nd.array(range(nx))[None,:].repeat(ny,axis=0) + grid = nd.concat( + xv[...,None], yv[...,None], dim=2)[None,None,...].repeat( + self.na, axis=1) + grid = nd.Cast(grid, dtype="float32") + + anchor_grid = (self.anchors[i].copy()*self.stride[i]) + anchor_grid = anchor_grid[None,:, None, None,:] + anchor_grid = anchor_grid.repeat(ny, axis=-3) + anchor_grid = anchor_grid.repeat(nx, axis=-2) + return grid.as_in_context(ctx), anchor_grid.as_in_context(ctx) + + def update(self, labels, predict, input_shape): batch_size, _, H, W = input_shape outs = [] for i in range(batch_size): - concat_out = nd.concatenate( - [o[i].reshape((-1, o[i].shape[-1])) for o in out]) - expand_dims_out = concat_out.expand_dims(axis=0) - outs.append(expand_dims_out) + x, y, z = [o.slice_axis(axis=0, begin=i, end=i+1) for o in predict] + out = [] + + bs, _, ny, nx, _ = x.shape + grid, anchor_grid = self._make_grid(nx, ny, 0, ctx=x.ctx) + tmp = x.sigmoid() + # xy + xy = (tmp[..., 0:2]*2-0.5+grid) * \ + self.stride[0].as_in_context(x.ctx) + # wh + wh = (tmp[..., 2:4]*2)**2 * anchor_grid + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(bs, -1, self.no)) + + bs, _, ny, nx, _ = y.shape + grid, anchor_grid = self._make_grid(nx, ny, 1, ctx=y.ctx) + tmp = y.sigmoid() + # xy + xy = (tmp[..., 0:2]*2-0.5+grid) * \ + self.stride[1].as_in_context(y.ctx) + # wh + wh = (tmp[..., 2:4]*2)*2 * anchor_grid + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(bs, -1, 
self.no)) + + bs, _, ny, nx, _ = z.shape + grid, anchor_grid = self._make_grid(nx, ny, 2, ctx=z.ctx) + tmp = z.sigmoid() + # xy + xy = (tmp[..., 0:2]*2-0.5+grid) * \ + self.stride[2].as_in_context(z.ctx) + # wh + wh = (tmp[..., 2:4]*2)**2 * anchor_grid + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(bs, -1, self.no)) + + out = nd.concat(*out, dim=1) + outs.append(out) for i in range(batch_size): label = labels[i] nl = label.shape[0] @@ -133,33 +229,15 @@ def data_loader(): self.data = data_loader() def metrics( - self, conf_thres=0.001, iou_thres=0.6, names=None, - iouv=np.linspace(0.5,0.95,10)): - if names is None: - names = { - 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', - 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', - 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', - 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', - 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', - 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', - 25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', - 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', - 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', - 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', - 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', - 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', - 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', - 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', - 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', - 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', - 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', - 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', - 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', - 78: 'hair drier', 79: 'toothbrush', - } + self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10)): + anchors = [ + [10, 13, 16, 30, 33, 23], + [30 
,61, 62 ,45, 59, 119], + [116, 90, 156, 198, 373, 326] + ] metric = Yolov5Metric( - conf_thres=conf_thres, iou_thres=iou_thres, names=names, iouv=iouv) + conf_thres=conf_thres, iou_thres=iou_thres, iouv=iouv, + anchors=anchors, nc=80) metric.reset() return metric @@ -167,8 +245,8 @@ def validate(self, metrics, out, labels): metrics.update(labels, out, self.ishape) nt, mp, mr, map50, map_ = metrics.get() return "#objects={}, ".format(nt.sum()) + \ - "mp={:6.2%}, mr={:6.2%}, ".format(mp*100, mr*100) + \ - "map50={:6.2%}, map={:6.2%}".format(map50*100, map_*100) + "mp={:6.2%}, mr={:6.2%}, ".format(mp, mr) + \ + "map50={:6.2%}, map={:6.2%}".format(map50, map_) if __name__ == "__main__": assert len(sys.argv) >= 1 and len(sys.argv)%2 == 1, \ From 5ec972a16bab36d818a80d0f94cd75aeac5ca447 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 26 Jan 2022 14:10:17 +0800 Subject: [PATCH 106/120] [fix bug] Yolov5Dataset data iter --- tests/mrt/yolov5s/test_yolov5s.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/mrt/yolov5s/test_yolov5s.py b/tests/mrt/yolov5s/test_yolov5s.py index 15e41a4d..afd564ea 100644 --- a/tests/mrt/yolov5s/test_yolov5s.py +++ b/tests/mrt/yolov5s/test_yolov5s.py @@ -190,6 +190,8 @@ def __init__(self, input_shape, imgsz=640, **kwargs): def _load_data(self): assert len(self.ishape) == 4, self.ishape + batch_size = self.ishape[0] + assert batch_size == 16, batch_size def data_loader(): data, label = [], [] @@ -218,13 +220,15 @@ def data_loader(): labels[:,1:] = labels[:,1:] * np.array([img.shape[3], img.shape[2]]*2) # if img.shape[2] != self.ishape[2] or img.shape[3] != self.ishape[3]: # continue - if len(data) < self.ishape[0]: - data.append(img) - label.append(labels) - else: + if len(data) == batch_size: batch_data = nd.concatenate(data) yield batch_data, label data, label = [], [] + data.append(img) + label.append(labels) + if len(data) == batch_size: + batch_data = nd.concatenate(data) + yield batch_data, label self.data 
= data_loader() From 1b760174763797f5e6d52680ad04db439aabfc83 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 9 Feb 2022 16:33:40 +0800 Subject: [PATCH 107/120] [docs,enhancement,tests] V3 docs, V3 test module --- docs/mrt/V3.rst | 49 ++++- docs/mrt/V3_results.rst | 11 + docs/mrt/api/V3.rst | 55 +++++ docs/mrt/index.rst | 1 + python/mrt/V3/calibrate.py | 19 ++ python/mrt/V3/evaluate.py | 25 ++- python/mrt/V3/execute.py | 28 +++ python/mrt/V3/mrt_compile.py | 19 ++ python/mrt/V3/prepare.py | 22 ++ python/mrt/V3/quantize.py | 19 ++ python/mrt/V3/utils.py | 176 ++++++++++++++-- python/mrt/dataset.py | 20 ++ python/mrt/tfm_ops.py | 69 +++++++ python/mrt/transformer.py | 13 +- tests/mrt/model_zoo/prediction_SCTF.yaml | 4 +- tests/mrt/test_V3.py | 109 ++++++++++ tests/mrt/yolov5s/metric.py | 250 ++++++++++++++++++++++ tests/mrt/yolov5s/metric_v2.py | 251 +++++++++++++++++++++++ tests/mrt/yolov5s/test_yolov5s.py | 250 +--------------------- tests/mrt/yolov5s/utils.py | 102 +++++++++ 20 files changed, 1214 insertions(+), 278 deletions(-) create mode 100644 docs/mrt/V3_results.rst create mode 100644 docs/mrt/api/V3.rst create mode 100644 tests/mrt/test_V3.py create mode 100644 tests/mrt/yolov5s/metric.py create mode 100644 tests/mrt/yolov5s/metric_v2.py diff --git a/docs/mrt/V3.rst b/docs/mrt/V3.rst index 8b9551f1..a34ead4a 100644 --- a/docs/mrt/V3.rst +++ b/docs/mrt/V3.rst @@ -8,9 +8,13 @@ V3 Architecture MRT V3 is the functional entry point of MRT taht supports customized user-defined attributes for different quantization stages. As listed, there are three required postional stages , which -need to be executed sequentially: +need to be executed sequentially. V3 architecture can be shown as: -- preparation: initializing the pre-trained model, supported graph-level passes including: +- |avatar| + +.. 
|avatar| image:: ../assets/V3.png + +- **Preparation**: initializing the pre-trained model, supported graph-level passes including: - standardizing the specified model @@ -29,10 +33,9 @@ need to be executed sequentially: - model splitting into top and base to be precocessed respectively -- calibration: calibrate the prepared model to acquire the operator-level thresholds that is -futher exploited in the stage of quantization. +- **Calibration**: calibrate the prepared model to acquire the operator-level thresholds that is futher exploited in the stage of quantization -- quantization: perform the operator-level quantizing procedure that inludes: +- **Quantization**: perform the operator-level quantizing procedure that inludes: - operator restoration for tuning purposes @@ -44,10 +47,37 @@ futher exploited in the stage of quantization. - graph-level model merging -Model Test -~~~~~~~~~~ +- **Evaluation**: multiple model validating with specified metric definition + + - pre-compilation stage graph-level reduction + + - input shape attaching + - operator attribute revising for the stage of compilation + - constant operator deduction + + - precision comparison of quantized and unquantized models with provided metric function + +- **Compilation**: compile mxnet model into cvm accepted json&bin format + + - pre-compilation stage graph-level reduction -The comparison between the original float model and quantized model + - input shape attaching + - operator attribute revising for the stage of compilation + - constant operator deduction + + - CVM graph compilation + - op shape inference + - graph compilation + + - CVM parameter precision reduction + + - CVM deployed graph and parameter complilation + + +Benchmark Quantization Results +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The comparison between the original float model and quantized model is listed as below. 
@@ -144,3 +174,6 @@ Accuracy: +-------------------------------------------+----------------------+------------------------+ | ssd_512_mobilenet1.0_voc | 75.58% | 71.32% | +-------------------------------------------+----------------------+------------------------+ + +For most recent model quantization results, +please refer to :ref:`MRT Quantization Results `. diff --git a/docs/mrt/V3_results.rst b/docs/mrt/V3_results.rst new file mode 100644 index 00000000..3549ca70 --- /dev/null +++ b/docs/mrt/V3_results.rst @@ -0,0 +1,11 @@ + +************************ +MRT Quantization Results +************************ + +.. _mrt_quantization_results: + +**alexnet**: +Iteration: 312 | evalfunc: top1=55.91% top5=78.75% | quantize: top1=51.54% top5=77.40% | Total Sample: 50080 + + diff --git a/docs/mrt/api/V3.rst b/docs/mrt/api/V3.rst new file mode 100644 index 00000000..54709b9b --- /dev/null +++ b/docs/mrt/api/V3.rst @@ -0,0 +1,55 @@ +********** +MRT V3 API +********** + +.. _mrt_v3_api: + +.. contents:: + +mrt.V3.utils +____________ +.. automodule:: mrt.V3.utils +.. autofunction:: mrt.V3.utils.get_model_prefix +.. autofunction:: mrt.V3.utils.get_logger +.. autofunction:: mrt.V3.utils.set_batch +.. autofunction:: mrt.V3.utils.load_fname +.. autofunction:: mrt.V3.utils.save_conf +.. autofunction:: mrt.V3.utils.load_conf +.. autofunction:: mrt.V3.utils.check_file_existance +.. autofunction:: mrt.V3.utils.get_ctx +.. autofunction:: mrt.V3.utils.get_batch_axis +.. autofunction:: mrt.V3.utils.get_cfg_defaults +.. autofunction:: mrt.V3.utils.merge_cfg +.. autofunction:: mrt.V3.utils.revise_cfg +.. autofunction:: mrt.V3.utils.override_cfg_args + +mrt.V3.prepare +______________ +.. automodule:: mrt.V3.prepare +.. autofunction:: mrt.V3.prepare.prepare + +mrt.V3.calibrate +________________ +.. automodule:: mrt.V3.calibrate +.. autofunction:: mrt.V3.calibrate.calibrate + +mrt.V3.quantize +_______________ +.. automodule:: mrt.V3.quantize +.. 
autofunction:: mrt.V3.quantize.quantize + +mrt.V3.evaluate +_______________ +.. automodule:: mrt.V3.evaluate +.. autofunction:: mrt.V3.evaluate.evaluate + +mrt.V3.mrt_compile +__________________ +.. automodule:: mrt.V3.mrt_compile +.. autofunction:: mrt.V3.mrt_compile.mrt_compile + +mrt.V3.execute +______________ +.. automodule:: mrt.V3.execute +.. autofunction:: mrt.V3.execute.yaml_main +.. autofunction:: mrt.V3.execute.run diff --git a/docs/mrt/index.rst b/docs/mrt/index.rst index 0b307c82..50e69962 100644 --- a/docs/mrt/index.rst +++ b/docs/mrt/index.rst @@ -78,6 +78,7 @@ For other detailed classes and methods, please refer to the API links. Operator List Graph API Helper Utils + V3 API Deprecated Docs =============== diff --git a/python/mrt/V3/calibrate.py b/python/mrt/V3/calibrate.py index 2f0c3555..3d114a0f 100644 --- a/python/mrt/V3/calibrate.py +++ b/python/mrt/V3/calibrate.py @@ -1,3 +1,10 @@ +""" +Calibration Module for MRT V3. + +Calibration function definition, default YAML configurations for MRT calibration +Stage options and Command line help prompt are also included. +""" + from yacs.config import CfgNode as CN from mrt.transformer import Model @@ -30,6 +37,18 @@ MRT_CFG.CALIBRATE.DEVICE_IDS = None def calibrate(cm_cfg, pass_cfg, logger=None): + """ + YAML configuration API of MRT calibration stage. + + Parameters + ---------- + cm_cfg : yacs.config.CfgNode + CfgNode of common stage. + pass_cfg : yacs.config.CfgNode + CfgNode of calibration stage. + logger : logging.RootLogger + Console logger. + """ model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index e73a6570..44ab76b8 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -1,3 +1,10 @@ +""" +Evaluation Module for MRT V3. + +Evaluate function definition, default YAML configurations for MRT evaluation +Stage options and Command line help prompt are also included. 
+""" + from yacs.config import CfgNode as CN import logging @@ -26,13 +33,12 @@ MRT_CFG.EVALUATE.DEVICE_IDS = None MRT_CFG.EVALUATE.ITER_NUM = 10 -def evaluate(cm_cfg, pass_cfg, logger=None): +def get_evaluation_info(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY device_type = pass_cfg.DEVICE_TYPE device_ids = pass_cfg.DEVICE_IDS - iter_num = pass_cfg.ITER_NUM batch = pass_cfg.BATCH if batch is None: batch = cm_cfg.BATCH @@ -52,7 +58,8 @@ def evaluate(cm_cfg, pass_cfg, logger=None): ctx = [ctx] # forward function for the orginal model - omodel = Model.load(*load_fname(model_prefix)) + model_prefix_fixed = model_prefix + ".fixed" + omodel = Model.load(*load_fname(model_prefix_fixed)) #TODO(ryt.dev) [bug fix] load revised model graph = omodel.to_graph(ctx=ctx) dataset_name = conf_map["dataset_name"] @@ -116,7 +123,17 @@ def quantize(data, label): acc = dataset.validate(qmetric, outs, label) return acc - # evaluate + return evalfunc, data_iter_func, quantize + +def evaluate(cm_cfg, pass_cfg, logger=None): + evalfunc, data_iter_func, quantize = get_evaluation_info( + cm_cfg, pass_cfg, logger=logger) + + iter_num = pass_cfg.ITER_NUM + batch = pass_cfg.BATCH + if batch is None: + batch = cm_cfg.BATCH + if iter_num > 0: logger.info("Validating...") utils.multi_validate( diff --git a/python/mrt/V3/execute.py b/python/mrt/V3/execute.py index d437b4cf..b0a2d5b4 100644 --- a/python/mrt/V3/execute.py +++ b/python/mrt/V3/execute.py @@ -1,3 +1,10 @@ +""" +Execution Module for MRT V3. + +Definition of execution functions for either MRT complete process execution or +specified stages execution. +""" + import sys from mrt.V3.prepare import prepare @@ -9,6 +16,16 @@ thismodule = sys.modules[__name__] def yaml_main(cfg, logger=None): + """ + Execution function to launch the complete MRT process. + + Parameters + ---------- + cfg : yacs.config.CfgNode + CfgNode of MRT. 
+ logger : logging.RootLogger + Console logger. + """ if cfg.is_frozen(): cfg.defrost() for prefix in ["BATCH", "DEVICE_TYPE", "DEVICE_IDS"]: @@ -39,6 +56,17 @@ def yaml_main(cfg, logger=None): mrt_compile(cfg.COMMON, cfg.COMPILE, logger=logger) def run(cfg, logger=None): + """ + Execution function to launch either the complete MRT process + or a specified MRT stage. + + Parameters + ---------- + cfg : yacs.config.CfgNode + CfgNode of MRT. + logger : logging.RootLogger + Console logger. + """ pass_name = cfg.COMMON.PASS_NAME if pass_name == "all": yaml_main(cfg, logger=logger) diff --git a/python/mrt/V3/mrt_compile.py b/python/mrt/V3/mrt_compile.py index 2ddb1fe4..b6747d38 100644 --- a/python/mrt/V3/mrt_compile.py +++ b/python/mrt/V3/mrt_compile.py @@ -1,3 +1,10 @@ +""" +Compilation Module for MRT V3. + +Compile function definition, default YAML configurations for MRT compilation +Stage options and Command line help prompt are also included. +""" + from os import path import os from yacs.config import CfgNode as CN @@ -28,6 +35,18 @@ MRT_CFG.COMPILE.DEVICE_IDS = None def mrt_compile(cm_cfg, pass_cfg, logger=None): + """ + YAML configuration API of MRT compilation stage. + + Parameters + ---------- + cm_cfg : yacs.config.CfgNode + CfgNode of common stage. + pass_cfg : yacs.config.CfgNode + CfgNode of compilation stage. + logger : logging.RootLogger + Console logger. + """ model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY diff --git a/python/mrt/V3/prepare.py b/python/mrt/V3/prepare.py index c44a0cf1..dedb8857 100644 --- a/python/mrt/V3/prepare.py +++ b/python/mrt/V3/prepare.py @@ -1,3 +1,10 @@ +""" +Preparation Module for MRT V3. + +Prepare function definition, default YAML configurations for MRT preparation +Stage options and Command line help prompt are also included. 
+""" + from os import path from yacs.config import CfgNode as CN @@ -24,7 +31,21 @@ MRT_CFG.PREPARE.SPLIT_KEYS = [] def prepare(cm_cfg, pass_cfg, logger=None): + """ + YAML configuration API of MRT preparation stage. + + Parameters + ---------- + cm_cfg : yacs.config.CfgNode + CfgNode of common stage. + pass_cfg : yacs.config.CfgNode + CfgNode of preparation stage. + logger : logging.RootLogger + Console logger. + """ model_dir = cm_cfg.MODEL_DIR + if model_dir.startswith("~"): + model_dir = path.expanduser(model_dir) model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY device_type = pass_cfg.DEVICE_TYPE @@ -49,6 +70,7 @@ def prepare(cm_cfg, pass_cfg, logger=None): model_name, data_dir=model_dir, ctx=get_ctx(device_type, device_ids)) model = Model.load(sym_path, prm_path) + model.fix_original_model(model_dir, model_name) model.prepare(set_batch(input_shape, 1)) sym_prep_file, prm_prep_file = load_fname( model_prefix, suffix="prepare") diff --git a/python/mrt/V3/quantize.py b/python/mrt/V3/quantize.py index f2761ac9..7086b41c 100644 --- a/python/mrt/V3/quantize.py +++ b/python/mrt/V3/quantize.py @@ -1,3 +1,10 @@ +""" +Quantization Module for MRT V3. + +Quantize function definition, default YAML configurations for MRT quantization +Stage options and Command line help prompt are also included. +""" + from yacs.config import CfgNode as CN from mrt.transformer import Model, MRT @@ -34,6 +41,18 @@ MRT_CFG.QUANTIZE.OSCALE_MAPS = [] def quantize(cm_cfg, pass_cfg, logger=None): + """ + YAML configuration API of MRT quantization stage. + + Parameters + ---------- + cm_cfg : yacs.config.CfgNode + CfgNode of common stage. + pass_cfg : yacs.config.CfgNode + CfgNode of evaluation stage. + logger : logging.RootLogger + Console logger. 
+ """ model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY diff --git a/python/mrt/V3/utils.py b/python/mrt/V3/utils.py index 3026b87c..bfdb2a69 100644 --- a/python/mrt/V3/utils.py +++ b/python/mrt/V3/utils.py @@ -1,3 +1,11 @@ +""" +Utility Module for MRT V3. + +Collector of utility functions including YAML configuration node manipulating +functions, MRT stage helper functions. Default YAML configurations for MRT +Common Stage options and Command line help prompt are also included. +""" + from os import path import logging import json @@ -44,15 +52,22 @@ MRT_CFG.COMMON.RUN_EVALUATE = True MRT_CFG.COMMON.RUN_COMPILE = True -def update_dest2yaml(dest2yaml_upt): - for dest, cfg in dest2yaml_upt.items(): - if dest in dest2yaml: - raise RuntimeError( - "dest: {} already in dest2yaml: {}".format( - dest, dest2yaml.keys())) - dest2yaml[dest] = cfg - def get_model_prefix(model_dir, model_name): + """ + Get the prefix of the pre-trained MRT model name. + + Parameters + ---------- + model_dir : str + Directory of the model file. + model_name : str + Name of the MRT pre-trained model. + + Returns + ------- + model_prefix : str + Prefix of the pre-trained MRT model name. + """ if model_dir.startswith("~"): model_dir = path.expanduser(model_dir) assert path.exists(model_dir), \ @@ -61,12 +76,27 @@ def get_model_prefix(model_dir, model_name): return model_prefix def get_logger(verbosity): + """ + Get the console logger + + Parameters + ---------- + verbosity : str + verbosity level chosen from `debug`, `info`, `warning`, `error`, `fatal`. + + Returns + ------- + logger : logging.RootLogger + Console logger. + """ log.Init(log.name2level(verbosity.upper())) logger = logging.getLogger("log.main") return logger def set_batch(input_shape, batch): - """Get the input shape with respect to a specified batch value and an original input shape. + """ + Get the input shape with respect to a specified batch value + and an original input shape. 
Parameters ---------- @@ -102,7 +132,19 @@ def load_fname(prefix, suffix=None, with_ext=False): suffix = "."+suffix if suffix is not None else "" return extend_fname(prefix+suffix, with_ext) -def save_conf(fname, logger=logging, **conf_map): +def save_conf(fname, logger=logging.getLogger(""), **conf_map): + """ + Save the JSON-formatted MRT configuration from configuration checkpoint file. + + Parameters + ---------- + fname : str + Path of the JSON-formatted MRT configuration checkpoint file. + logger : logging.RootLogger + Console logger. + conf_map : dict + Dictionary of the attribute-value pairs. + """ try: info_s = json.dumps(conf_map, indent=4) except: @@ -111,7 +153,22 @@ def save_conf(fname, logger=logging, **conf_map): with open(fname, "w") as f: f.write(info_s) -def load_conf(fname, logger=logging): +def load_conf(fname, logger=logging.getLogger("")): + """ + Load the JSON-formatted MRT configuration from configuration checkpoint file. + + Parameters + ---------- + fname : str + Path of the JSON-formatted MRT configuration checkpoint file. + logger : logging.RootLogger + Console logger. + + Returns + ------- + conf_map : dict + Dictionary of the attribute-value pairs. + """ with open(fname, "r") as f: try: conf_map = json.load(f) @@ -119,12 +176,39 @@ def load_conf(fname, logger=logging): logger.error("Json deserialize invalid, fname: {}".format(fname)) return conf_map -def check_file_existance(*fpaths, logger=logging): +def check_file_existance(*fpaths, logger=logging.getLogger("")): + """ + Check the existance of the listed file paths. + + Parameters + ---------- + fpaths : list of str + List of paths to be checked. + logger : logging.RootLogger + Console logger. 
+ """ for fpath in fpaths: if not path.exists(fpath): raise FileNotFoundError("fpath: {} does not exist".format(fpath)) def get_ctx(device_type, device_ids, dctx=default_ctx): + """ + Get the context instance of mxnet + + Parameters + ---------- + device_type : str + context type string chosen from `cpu` or `gpu`. + device_ids : list + list of context ids + dctx: mx.context.Context + default context + + Returns + ------- + context : mx.context.Context + The created context with respect to the device_type and device_ids. + """ if device_type is None: device_type = default_device_type if device_ids is None: @@ -136,7 +220,8 @@ def get_ctx(device_type, device_ids, dctx=default_ctx): return contex def get_batch_axis(input_shape): - """Get the batch axis entry of an input shape. + """ + Get the batch axis entry of an input shape. Parameters ---------- @@ -153,12 +238,37 @@ def get_batch_axis(input_shape): return idx[0] def get_cfg_defaults(): - """Get a yacs CfgNode object with default values for mrt.""" + """ + Get a yacs CfgNode object with default values for MRT. + + Returns + ------- + cfg : yacs.config.CfgNode + CfgNode represents an internal node in the configuration tree. + It's a simple dict-like container that allows for + attribute-based access to keys. + """ # Return a clone so that the defaults will not be altered # This is for the "local variable" use pattern return MRT_CFG.clone() def merge_cfg(yaml_file): + """ + Override the default YAML configuration node with the + provided YAML-formatted configuration file. + + Parameters + ---------- + yaml_file : str + Path of the YAML-formatted configuration file. + + Returns + ------- + cfg : yacs.config.CfgNode + CfgNode represents an internal node in the configuration tree. + It's a simple dict-like container that allows for + attribute-based access to keys. 
+ """ if yaml_file.startswith("~"): yaml_file = path.expanduser(yaml_file) cfg = get_cfg_defaults() @@ -167,6 +277,25 @@ def merge_cfg(yaml_file): return cfg def revise_cfg(cfg, stage, attr, value): + """ + Revise MRT YAML configuration node with respect to the specified stage + and attribute name into the provided value. + + Parameters + ---------- + cfg : yacs.config.CfgNode + CfgNode represents an internal node in the configuration tree. + It's a simple dict-like container that allows for + attribute-based access to keys. + stage : str + Stage name chosen from `common`, `prepare`, `calibrate`, `quantize`, + `evaluate` or `compile`. + attr : str + Attribute name attribute to the provided stage. + value : int, float, str, list, tuple, bool or NoneType + The revision value to be applied, type of value + should be supported by yacs.config.CfgNode + """ if cfg.is_frozen(): cfg.defrost() subcfg = getattr(cfg, stage) @@ -174,6 +303,25 @@ def revise_cfg(cfg, stage, attr, value): cfg.freeze() def override_cfg_args(cfg, mrt_argv): + """ + Override YAML configuration node with command line optional arguments + for the simplicity of MRT configuration revision. + + Parameters + ---------- + cfg : yacs.config.CfgNode + CfgNode represents an internal node in the configuration tree. + It's a simple dict-like container that allows for + attribute-based access to keys. + mrt_argv : list + list of even length which can be resoluted as key value pairs, + the key could be split into stage name and attribute name. + + Returns + ------- + cfg : yacs.config.CfgNode + Overridden YAML configuration node. + """ if not mrt_argv: return cfg if cfg.is_frozen(): diff --git a/python/mrt/dataset.py b/python/mrt/dataset.py index b41933c8..75006f6b 100644 --- a/python/mrt/dataset.py +++ b/python/mrt/dataset.py @@ -636,3 +636,23 @@ def validate(self, metrics, predict, label): acc = 1. 
* metrics["acc"] / metrics["total"] return "{:6.2%}".format(acc) + + +@register_dataset("stdrandom") +class StdRandomDataset(Dataset): + def _load_data(self): + def data_loader(): + N, I, C = self.ishape + assert I == 1 and C == 3 + data, label = [], [] + while True: + if len(data) < N: + x = np.random.uniform(low=0.0, high=1.0, size=(I,C)) + y = np.random.uniform(low=0.0, high=1.0, size=(I)) + data.append(x) + label.append(y) + else: + batch_data, batch_label = nd.array(data), nd.array(label) + yield batch_data, batch_label + data, label = [], [] + self.data = data_loader() diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index 16912032..54670322 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -657,10 +657,79 @@ def rewrite(self, op, **kwargs): .. math:: Xi.shape = (batchSize, step), X = [X1, X2, ...] """ + name = op.attr('name') + attr, childs = op.list_attr(), sym_iter(op.get_children()) + cns = [c.attr('name') for c in childs] infer_shapes, params = kwargs['infer_shapes'], kwargs['params'] + xshp = infer_shapes[cns[0]][get_entry_id(childs[0])] + + if len(xshp) > 2: + op = self.reduce(op, **kwargs) + return op + op = self._matrix_decomposition(op, params, infer_shapes) return op + def reduce(self, op, **kwargs): + # TODO(ryt.dev) documentation + name = op.attr('name') + attr, childs = op.list_attr(), sym_iter(op.get_children()) + cns = [c.attr('name') for c in childs] + X, W = childs[:2] + infer_shapes, params = kwargs['infer_shapes'], kwargs['params'] + xshp = infer_shapes[cns[0]][get_entry_id(X)] + + no_bias = get_attr(attr, 'no_bias') + flatten = get_attr(attr, "flatten") + num_hidden = get_attr(attr, "num_hidden") + + rshp_name = N.n("pre_reshape") + if flatten: + shape = (-1,) + xshp[1:] + rshp = mx.sym.reshape(X, shape=shape, name=rshp_name) + if no_bias: + op = mx.sym.FullyConnected( + rshp, W, no_bias=no_bias, flatten=flatten, + num_hidden=num_hidden, name=name) + else: + op = mx.sym.FullyConnected( + rshp, W, childs[2], 
no_bias=no_bias, flatten=flatten, + num_hidden=num_hidden, name=name) + op = self._matrix_decomposition(op, params, infer_shapes) + else: + fc_name = N.n("reduced_fc") + default_batch_axis = 0 + batch_axis = \ + kwargs.get("batch_axes", {}).get(name, default_batch_axis) + assert batch_axis < len(xshp), \ + "invalid batch_axis: {}, length of xshp: {}".format( + batch_axis, len(xshp)) + if batch_axis == len(xshp)-1: + product = int(nd.prod(nd.array(xshp)).asscalar()) + res_shp = int(product/xshp[batch_axis]) + shape = (res_shp, -1) + else: + shape = (-1, xshp[-1]) + rshp = mx.sym.reshape(X, shape=shape, name=rshp_name) + if no_bias: + fc = mx.sym.FullyConnected( + rshp, W, no_bias=no_bias, flatten=flatten, + num_hidden=num_hidden, name=fc_name) + else: + fc = mx.sym.FullyConnected( + rshp, W, childs[2], no_bias=no_bias, flatten=flatten, + num_hidden=num_hidden, name=fc_name) + fc = self._matrix_decomposition(fc, params, infer_shapes) + if batch_axis == len(xshp)-1: + shape = xshp[:-1] + (num_hidden,) + else: + shape = \ + xshp[:batch_axis] + (-1,) + \ + xshp[batch_axis+1:-1] + (num_hidden,) + op = mx.sym.reshape(fc, shape=shape, name=name) + + return op + def quantize(self, op, **kwargs): """ Customized quantize pass Introduction. 
diff --git a/python/mrt/transformer.py b/python/mrt/transformer.py index 1a9bb02b..61c3a7c5 100644 --- a/python/mrt/transformer.py +++ b/python/mrt/transformer.py @@ -108,6 +108,15 @@ def to_cvm(self, model_name, datadir="/data/stdout", input_shape, target, device_ids=device_ids) + def fix_original_model(self, model_dir, model_name): + _sym, _prm = tpass.unify_name_json(self.symbol, self.params) + self.symbol, self.params = tpass.remove_params_prefix(_sym, _prm) + model_prefix = path.join(model_dir, model_name+".fixed") + sym_file, prm_file = utils.extend_fname(model_prefix) + with open(sym_file, "w") as f: + f.write(self.symbol.tojson()) + nd.save(prm_file, self.params) + def init(model, input_shape=None): logger = logging.getLogger("mrt.prepare") logger.info("Model initializing...") @@ -115,8 +124,8 @@ def init(model, input_shape=None): _sym, _prm = model.symbol, model.params # unify graph names and check graph params - _sym, _prm = tpass.unify_name_json(_sym, _prm) - _sym, _prm = tpass.remove_params_prefix(_sym, _prm) + # TODO(ryt.dev) [bug fix, reconstruct] write fixed model in conf_map, move to fix_orginal_model + # _sym, _prm = fix_original_model(sym, parmas) tpass.name_duplicate_check(_sym, _prm) diff --git a/tests/mrt/model_zoo/prediction_SCTF.yaml b/tests/mrt/model_zoo/prediction_SCTF.yaml index 721bce45..44a61e6b 100644 --- a/tests/mrt/model_zoo/prediction_SCTF.yaml +++ b/tests/mrt/model_zoo/prediction_SCTF.yaml @@ -1,6 +1,6 @@ COMMON: - MODEL_NAME: prediction_SCTF.preprocess.reduce_dense - # MODEL_NAME: prediction_SCTF + # MODEL_NAME: prediction_SCTF.preprocess.reduce_dense + MODEL_NAME: prediction_SCTF VERBOSITY: info RUN_EVALUATE: False PREPARE: diff --git a/tests/mrt/test_V3.py b/tests/mrt/test_V3.py new file mode 100644 index 00000000..02a66994 --- /dev/null +++ b/tests/mrt/test_V3.py @@ -0,0 +1,109 @@ +import unittest +import logging +import os +from os import path +from io import StringIO +import sys + +from mrt.utils import log_init +from 
mrt.V3.execute import run +from mrt.V3.utils import merge_cfg, override_cfg_args +from mrt.V3.evaluate import get_evaluation_info + +# old_stdout = sys.stdout +# sys.stdout = StringIO() +# sys.stdout = old_stdout + +log_init() +yaml_files = set() +results = {} +base_dir = path.join(path.dirname(path.realpath(__file__)), "..", "..") + +def _multi_validate( + messages, base_func, data_iter, *comp_funcs, iter_num=10, batch_size=16): + log_str = "Iteration: {:3d} | " + base_func.__name__ + ": {} | " + for func in comp_funcs: + log_str += func.__name__ + ": {} | " + log_str += "Total Sample: {:5d}" + total = 0 + for i in range(iter_num): + data, label = data_iter() + base_acc = base_func(data, label) + comp_acc = [func(data, label) for func in comp_funcs] + total += batch_size + + msg = log_str.format(i, base_acc, *comp_acc, total) + messages.append(msg) + +def register_test_case(yaml_file_name): + yaml_dir = path.join(base_dir, "tests", "mrt", "model_zoo") + yaml_file_name_ext = "{}.yaml".format(yaml_file_name) + yaml_file = path.join(yaml_dir, yaml_file_name_ext) + + if yaml_file in yaml_files: + raise RuntimeError( + "test case: {} already registered.".format(yaml_file)) + yaml_files.add(yaml_file) + + def test_func(self): + # test preparation, calibration, quantization, compilation + base_cfg = merge_cfg(yaml_file) + argv = [ + "--common.run_evaluate", "False", + "--common.run_compile", "True", + "--common.verbosity", "error", + ] + cfg = override_cfg_args(base_cfg, argv) + run(cfg) + + # test evaluation + argv = [ + "--common.run_evaluate", "True", + "--common.run_compile", "False", + "--common.verbosity", "error", + "--common.start_after", "quantize", + ] + cfg = override_cfg_args(base_cfg, argv) + evalfunc, data_iter_func, quantfunc = get_evaluation_info( + cfg.COMMON, cfg.EVALUATE) + messages = [] + with self.assertRaises(StopIteration): + _multi_validate( + messages, evalfunc, data_iter_func, quantfunc, + iter_num=cfg.EVALUATE.ITER_NUM, 
batch_size=cfg.EVALUATE.BATCH) + results[yaml_file_name] = messages[-1] + + def wrapper(cls): + func_name = "test_case_{}".format(yaml_file_name) + setattr(cls, func_name, test_func) + return cls + + return wrapper + + +@register_test_case("alexnet") +class TestV3(unittest.TestCase): + def test_output_results(self): + lines = [ + "", + "************************", + "MRT Quantization Results", + "************************", + "", + ".. _mrt_quantization_results:", + "", + ] + for k, v in results.items(): + line = "**{}**:".format(k) + lines.append(line) + line = "{}".format(v) + lines.append(line) + lines.append("") + lines.append("") + lines = [line+"\n" for line in lines] + rfile_path = path.join(base_dir, "docs", "mrt", "V3_results.rst") + with open(rfile_path, "w") as f: + f.writelines(lines) + +if __name__ == "__main__": + unittest.main() diff --git a/tests/mrt/yolov5s/metric.py b/tests/mrt/yolov5s/metric.py new file mode 100644 index 00000000..72b9c75e --- /dev/null +++ b/tests/mrt/yolov5s/metric.py @@ -0,0 +1,250 @@ +from os import path +import os + +import mxnet as mx +from mxnet import ndarray as nd +import numpy as np +import cv2 + +from mrt import dataset as ds +from utils import ( + non_max_suppression, scale_coords, xywh2xyxy, process_batch, ap_per_class) + + +class Yolov5Metric: + def __init__( + self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10), + nc=80, anchors=()): + + # metric parameters + self.conf_thres = conf_thres + self.iou_thres = iou_thres + self.iouv = iouv + self.niou = iouv.shape[0] + self.names = { + 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', + 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', + 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', + 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', + 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', + 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', + 27: 'tie', 28: 'suitcase', 29: 
'frisbee', 30: 'skis', + 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', + 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', + 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', + 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', + 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', + 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', + 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', + 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', + 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', + 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', + 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', + 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', + 79: 'toothbrush', + } + + # detect parameters + self.no = nc + 5 + self.na = len(anchors[0]) // 2 + self.stride = nd.array([8., 16., 32.]) + self.anchors = nd.array( + [ + [ + [ 1.25000, 1.62500], + [ 2.00000, 3.75000], + [ 4.12500, 2.87500] + ], + [ + [ 1.87500, 3.81250], + [ 3.87500, 2.81250], + [ 3.68750, 7.43750] + ], + [ + [ 3.62500, 2.81250], + [ 4.87500, 6.18750], + [11.65625, 10.18750] + ] + ] + ) + + # status variable + self.stats = [] + + def reset(self): + self.stats.clear() + + def _make_grid(self, nx=20, ny=20, i=0, ctx=mx.cpu(0)): + yv = nd.array(range(ny))[:,None].repeat(nx,axis=1) + xv = nd.array(range(nx))[None,:].repeat(ny,axis=0) + grid = nd.concat( + xv[...,None], yv[...,None], dim=2)[None,None,...].repeat( + self.na, axis=1) + grid = nd.Cast(grid, dtype="float32") + + anchor_grid = (self.anchors[i].copy()*self.stride[i]) + anchor_grid = anchor_grid[None,:, None, None,:] + anchor_grid = anchor_grid.repeat(ny, axis=-3) + anchor_grid = anchor_grid.repeat(nx, axis=-2) + return grid.as_in_context(ctx), anchor_grid.as_in_context(ctx) + + def update(self, labels, predict, input_shape): + batch_size, _, H, W = input_shape + outs = [] + for i in range(batch_size): + x, y, z = [o.slice_axis(axis=0, 
begin=i, end=i+1) for o in predict] + out = [] + + bs, _, ny, nx, _ = x.shape + grid, anchor_grid = self._make_grid(nx, ny, 0, ctx=x.ctx) + tmp = x.sigmoid() + # xy + xy = (tmp[..., 0:2]*2-0.5+grid) * \ + self.stride[0].as_in_context(x.ctx) + # wh + wh = (tmp[..., 2:4]*2)**2 * anchor_grid + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(bs, -1, self.no)) + + bs, _, ny, nx, _ = y.shape + grid, anchor_grid = self._make_grid(nx, ny, 1, ctx=y.ctx) + tmp = y.sigmoid() + # xy + xy = (tmp[..., 0:2]*2-0.5+grid) * \ + self.stride[1].as_in_context(y.ctx) + # wh + wh = (tmp[..., 2:4]*2)*2 * anchor_grid + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(bs, -1, self.no)) + + bs, _, ny, nx, _ = z.shape + grid, anchor_grid = self._make_grid(nx, ny, 2, ctx=z.ctx) + tmp = z.sigmoid() + # xy + xy = (tmp[..., 0:2]*2-0.5+grid) * \ + self.stride[2].as_in_context(z.ctx) + # wh + wh = (tmp[..., 2:4]*2)**2 * anchor_grid + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(bs, -1, self.no)) + + out = nd.concat(*out, dim=1) + outs.append(out) + for i in range(batch_size): + label = labels[i] + nl = label.shape[0] + out = non_max_suppression( + outs[i].asnumpy(), self.conf_thres, self.iou_thres, labels=[], + multi_label=True, agnostic=False) + pred = out[0] + tcls = label[:,0] if nl else [] + if pred.shape[0] == 0: + if nl: + self.stats.append( + (np.zeros((0,self.niou)), np.zeros((0)), np.zeros((0)), tcls)) + continue + predn = pred.copy() + # native-space pred + scale_coords((H,W), predn[:,:4], [H,W], [[1.0,1.0],[0.0,0.0]]) + if nl: + # target boxes + tbox = xywh2xyxy(label[:,1:5]) + # native-space label + scale_coords((H,W), tbox, [H,W], [[1.0,1.0],[0.0,0.0]]) + # native-space label + labelsn = np.concatenate((label[:,0:1],tbox), axis=1) + correct = process_batch(predn, labelsn, self.iouv) + else: + correct = np.zeros((pred.shape[0], self.niou), dtype=np.bool) + # (correct, conf, pcls, tcls) + self.stats.append((correct, 
pred[:, 4], pred[:, 5], tcls)) + + def get(self): + # compute metrics + # to numpy + cur_stats = [np.concatenate(x, 0) for x in zip(*self.stats)] + if len(cur_stats) and cur_stats[0].any(): + tp, fp, p, r, f1, ap, ap_class = ap_per_class( + *cur_stats, plot=False, save_dir=None, names=self.names) + # AP@0.5, AP@0.5:0.95 + ap50, ap = ap[:, 0], ap.mean(1) + mp, mr, map50, map_ = p.mean(), r.mean(), ap50.mean(), ap.mean() + # number of targets per class + nt = np.bincount(cur_stats[3].astype(np.int64), minlength=80) + else: + nt = np.zeros(1) + mp = mr = map50 = map_ = 0. + return nt, mp, mr, map50, map_ + + +@ds.register_dataset("yolov5_dataset") +class Yolov5Dataset(ds.Dataset): + def __init__(self, input_shape, imgsz=640, **kwargs): + super().__init__(input_shape, **kwargs) + self.image_dir = path.join(self.root_dir, "images") + self.label_dir = path.join(self.root_dir, "labels") + self.imgsz = imgsz + + def _load_data(self): + assert len(self.ishape) == 4, self.ishape + batch_size = self.ishape[0] + assert batch_size == 16, batch_size + + def data_loader(): + data, label = [], [] + for f in sorted(os.listdir(self.image_dir)): + _, ext = os.path.splitext(f) + if ext != ".jpg" and ext != ".JPG" \ + and ext != ".png" and ext != ".PNG": + continue + l = f.replace(f.split(".")[1], "txt") + file_name = os.path.join(self.image_dir, f) + label_name = os.path.join(self.label_dir, l) + img = cv2.imread(file_name) + # hack size + img = cv2.resize(img, tuple(self.ishape[2:])) + try: + labels = np.loadtxt(label_name) + except: + labels = np.array([]) + labels = labels.reshape((-1, 5)) + height, width = img.shape[0:2] + scale = min(self.imgsz/height, self.imgsz/width) + h0, w0 = height*scale, width*scale + img0 = cv2.resize(img, (round(w0/32.)*32, round(h0/32.)*32)) + img = img0.astype("float32")/255. 
+ img = nd.array(img.transpose((2,0,1))[None]) + labels[:,1:] = labels[:,1:] * np.array([img.shape[3], img.shape[2]]*2) + # if img.shape[2] != self.ishape[2] or img.shape[3] != self.ishape[3]: + # continue + if len(data) == batch_size: + batch_data = nd.concatenate(data) + yield batch_data, label + data, label = [], [] + data.append(img) + label.append(labels) + if len(data) == batch_size: + batch_data = nd.concatenate(data) + yield batch_data, label + + self.data = data_loader() + + def metrics( + self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10)): + anchors = [ + [10, 13, 16, 30, 33, 23], + [30 ,61, 62 ,45, 59, 119], + [116, 90, 156, 198, 373, 326] + ] + metric = Yolov5Metric( + conf_thres=conf_thres, iou_thres=iou_thres, iouv=iouv, + anchors=anchors, nc=80) + metric.reset() + return metric + + def validate(self, metrics, out, labels): + metrics.update(labels, out, self.ishape) + nt, mp, mr, map50, map_ = metrics.get() + return "#objects={}, ".format(nt.sum()) + \ + "mp={:6.2%}, mr={:6.2%}, ".format(mp, mr) + \ + "map50={:6.2%}, map={:6.2%}".format(map50, map_) diff --git a/tests/mrt/yolov5s/metric_v2.py b/tests/mrt/yolov5s/metric_v2.py new file mode 100644 index 00000000..993c1206 --- /dev/null +++ b/tests/mrt/yolov5s/metric_v2.py @@ -0,0 +1,251 @@ +from os import path +import os + +import mxnet as mx +from mxnet import ndarray as nd +import numpy as np +import cv2 + +from mrt import dataset as ds +from utils import ( + non_max_suppression, scale_coords, xywh2xyxy, process_batch, ap_per_class, + Annotator, concat_out) + + +class Yolov5MetricV2: + def __init__( + self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10), + nc=80, anchors=()): + + # metric parameters + self.conf_thres = conf_thres + self.iou_thres = iou_thres + self.iouv = iouv + self.niou = iouv.shape[0] + self.names = { + 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', + 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', + 10: 
'fire hydrant', 11: 'stop sign', 12: 'parking meter', + 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', + 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', + 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', + 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', + 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', + 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', + 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', + 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', + 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', + 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', + 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', + 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', + 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', + 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', + 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', + 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', + 79: 'toothbrush', + } + + # detect parameters + self.no = nc + 5 + self.na = len(anchors[0]) // 2 + self.stride = nd.array([8., 16., 32.]) + self.anchors = nd.array( + [ + [ + [ 1.25000, 1.62500], + [ 2.00000, 3.75000], + [ 4.12500, 2.87500] + ], + [ + [ 1.87500, 3.81250], + [ 3.87500, 2.81250], + [ 3.68750, 7.43750] + ], + [ + [ 3.62500, 2.81250], + [ 4.87500, 6.18750], + [11.65625, 10.18750] + ] + ] + ) + + # status variable + self.stats = [] + + def reset(self): + self.stats.clear() + + def _make_grid(self, nx=20, ny=20, i=0, ctx=mx.cpu(0)): + yv = nd.array(range(ny))[:,None].repeat(nx,axis=1) + xv = nd.array(range(nx))[None,:].repeat(ny,axis=0) + grid = nd.concat( + xv[...,None], yv[...,None], dim=2)[None,None,...].repeat( + self.na, axis=1) + grid = nd.Cast(grid, dtype="float32") + + anchor_grid = (self.anchors[i].copy()*self.stride[i]) + anchor_grid = anchor_grid[None,:, None, None,:] + anchor_grid = anchor_grid.repeat(ny, 
axis=-3) + anchor_grid = anchor_grid.repeat(nx, axis=-2) + return grid.as_in_context(ctx), anchor_grid.as_in_context(ctx) + + def update(self, labels, predict, input_shape): + batch_size, _, H, W = input_shape + outs = [] + for i in range(batch_size): + x, y, z = [o.slice_axis(axis=0, begin=i, end=i+1) for o in predict] + out = [] + + bs, _, ny, nx, _ = x.shape + grid, anchor_grid = self._make_grid(nx, ny, 0, ctx=x.ctx) + tmp = x.sigmoid() + # xy + xy = (tmp[..., 0:2]*2-0.5+grid) * \ + self.stride[0].as_in_context(x.ctx) + # wh + wh = (tmp[..., 2:4]*2)**2 * anchor_grid + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(bs, -1, self.no)) + + bs, _, ny, nx, _ = y.shape + grid, anchor_grid = self._make_grid(nx, ny, 1, ctx=y.ctx) + tmp = y.sigmoid() + # xy + xy = (tmp[..., 0:2]*2-0.5+grid) * \ + self.stride[1].as_in_context(y.ctx) + # wh + wh = (tmp[..., 2:4]*2)*2 * anchor_grid + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(bs, -1, self.no)) + + bs, _, ny, nx, _ = z.shape + grid, anchor_grid = self._make_grid(nx, ny, 2, ctx=z.ctx) + tmp = z.sigmoid() + # xy + xy = (tmp[..., 0:2]*2-0.5+grid) * \ + self.stride[2].as_in_context(z.ctx) + # wh + wh = (tmp[..., 2:4]*2)**2 * anchor_grid + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(bs, -1, self.no)) + + out = nd.concat(*out, dim=1) + outs.append(out) + for i in range(batch_size): + label = labels[i] + nl = label.shape[0] + out = non_max_suppression( + outs[i].asnumpy(), self.conf_thres, self.iou_thres, labels=[], + multi_label=True, agnostic=False) + pred = out[0] + tcls = label[:,0] if nl else [] + if pred.shape[0] == 0: + if nl: + self.stats.append( + (np.zeros((0,self.niou)), np.zeros((0)), np.zeros((0)), tcls)) + continue + predn = pred.copy() + # native-space pred + scale_coords((H,W), predn[:,:4], [H,W], [[1.0,1.0],[0.0,0.0]]) + if nl: + # target boxes + tbox = xywh2xyxy(label[:,1:5]) + # native-space label + scale_coords((H,W), tbox, [H,W], 
[[1.0,1.0],[0.0,0.0]]) + # native-space label + labelsn = np.concatenate((label[:,0:1],tbox), axis=1) + correct = process_batch(predn, labelsn, self.iouv) + else: + correct = np.zeros((pred.shape[0], self.niou), dtype=np.bool) + # (correct, conf, pcls, tcls) + self.stats.append((correct, pred[:, 4], pred[:, 5], tcls)) + + def get(self): + # compute metrics + # to numpy + cur_stats = [np.concatenate(x, 0) for x in zip(*self.stats)] + if len(cur_stats) and cur_stats[0].any(): + tp, fp, p, r, f1, ap, ap_class = ap_per_class( + *cur_stats, plot=False, save_dir=None, names=self.names) + # AP@0.5, AP@0.5:0.95 + ap50, ap = ap[:, 0], ap.mean(1) + mp, mr, map50, map_ = p.mean(), r.mean(), ap50.mean(), ap.mean() + # number of targets per class + nt = np.bincount(cur_stats[3].astype(np.int64), minlength=80) + else: + nt = np.zeros(1) + mp = mr = map50 = map_ = 0. + return nt, mp, mr, map50, map_ + + +@ds.register_dataset("yolov5_dataset_v2") +class Yolov5DatasetV2(ds.Dataset): + def __init__(self, input_shape, imgsz=640, **kwargs): + super().__init__(input_shape, **kwargs) + self.image_dir = path.join(self.root_dir, "images") + self.label_dir = path.join(self.root_dir, "labels") + self.imgsz = imgsz + + def _load_data(self): + assert len(self.ishape) == 4, self.ishape + batch_size = self.ishape[0] + assert batch_size == 16, batch_size + + def data_loader(): + data, label = [], [] + for f in sorted(os.listdir(self.image_dir)): + _, ext = os.path.splitext(f) + if ext != ".jpg" and ext != ".JPG" \ + and ext != ".png" and ext != ".PNG": + continue + l = f.replace(f.split(".")[1], "txt") + file_name = os.path.join(self.image_dir, f) + label_name = os.path.join(self.label_dir, l) + img = cv2.imread(file_name) + # hack size + img = cv2.resize(img, tuple(self.ishape[2:])) + try: + labels = np.loadtxt(label_name) + except: + labels = np.array([]) + labels = labels.reshape((-1, 5)) + height, width = img.shape[0:2] + scale = min(self.imgsz/height, self.imgsz/width) + h0, w0 = 
height*scale, width*scale + img0 = cv2.resize(img, (round(w0/32.)*32, round(h0/32.)*32)) + img = img0.astype("float32")/255. + img = nd.array(img.transpose((2,0,1))[None]) + labels[:,1:] = labels[:,1:] * np.array([img.shape[3], img.shape[2]]*2) + # if img.shape[2] != self.ishape[2] or img.shape[3] != self.ishape[3]: + # continue + if len(data) == batch_size: + batch_data = nd.concatenate(data) + yield batch_data, label + data, label = [], [] + data.append(img) + label.append(labels) + if len(data) == batch_size: + batch_data = nd.concatenate(data) + yield batch_data, label + + self.data = data_loader() + + def metrics( + self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10)): + anchors = [ + [10, 13, 16, 30, 33, 23], + [30 ,61, 62 ,45, 59, 119], + [116, 90, 156, 198, 373, 326] + ] + metric = Yolov5Metric( + conf_thres=conf_thres, iou_thres=iou_thres, iouv=iouv, + anchors=anchors, nc=80) + metric.reset() + return metric + + def validate(self, metrics, out, labels): + metrics.update(labels, out, self.ishape) + nt, mp, mr, map50, map_ = metrics.get() + return "#objects={}, ".format(nt.sum()) + \ + "mp={:6.2%}, mr={:6.2%}, ".format(mp, mr) + \ + "map50={:6.2%}, map={:6.2%}".format(map50, map_) diff --git a/tests/mrt/yolov5s/test_yolov5s.py b/tests/mrt/yolov5s/test_yolov5s.py index afd564ea..5bd7d049 100644 --- a/tests/mrt/yolov5s/test_yolov5s.py +++ b/tests/mrt/yolov5s/test_yolov5s.py @@ -1,256 +1,10 @@ from os import path -import os import sys -import mxnet as mx -from mxnet import ndarray as nd -import numpy as np -import cv2 - from mrt.V3.utils import get_cfg_defaults, merge_cfg, override_cfg_args from mrt.V3.execute import run -from mrt import dataset as ds -from utils import ( - non_max_suppression, scale_coords, xywh2xyxy, process_batch, ap_per_class) - - -class Yolov5Metric: - def __init__( - self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10), - nc=80, anchors=()): - - # metric parameters - self.conf_thres = conf_thres - 
self.iou_thres = iou_thres - self.iouv = iouv - self.niou = iouv.shape[0] - self.names = { - 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', - 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', - 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', - 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', - 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', - 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', - 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', - 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', - 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', - 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', - 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', - 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', - 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', - 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', - 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', - 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', - 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', - 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', - 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', - 79: 'toothbrush', - } - - # detect parameters - self.no = nc + 5 - self.na = len(anchors[0]) // 2 - self.stride = nd.array([8., 16., 32.]) - self.anchors = nd.array( - [ - [ - [ 1.25000, 1.62500], - [ 2.00000, 3.75000], - [ 4.12500, 2.87500] - ], - [ - [ 1.87500, 3.81250], - [ 3.87500, 2.81250], - [ 3.68750, 7.43750] - ], - [ - [ 3.62500, 2.81250], - [ 4.87500, 6.18750], - [11.65625, 10.18750] - ] - ] - ) - - # status variable - self.stats = [] - - def reset(self): - self.stats.clear() - - def _make_grid(self, nx=20, ny=20, i=0, ctx=mx.cpu(0)): - yv = nd.array(range(ny))[:,None].repeat(nx,axis=1) - xv = nd.array(range(nx))[None,:].repeat(ny,axis=0) - grid = nd.concat( - xv[...,None], yv[...,None], 
dim=2)[None,None,...].repeat( - self.na, axis=1) - grid = nd.Cast(grid, dtype="float32") - - anchor_grid = (self.anchors[i].copy()*self.stride[i]) - anchor_grid = anchor_grid[None,:, None, None,:] - anchor_grid = anchor_grid.repeat(ny, axis=-3) - anchor_grid = anchor_grid.repeat(nx, axis=-2) - return grid.as_in_context(ctx), anchor_grid.as_in_context(ctx) - - def update(self, labels, predict, input_shape): - batch_size, _, H, W = input_shape - outs = [] - for i in range(batch_size): - x, y, z = [o.slice_axis(axis=0, begin=i, end=i+1) for o in predict] - out = [] - - bs, _, ny, nx, _ = x.shape - grid, anchor_grid = self._make_grid(nx, ny, 0, ctx=x.ctx) - tmp = x.sigmoid() - # xy - xy = (tmp[..., 0:2]*2-0.5+grid) * \ - self.stride[0].as_in_context(x.ctx) - # wh - wh = (tmp[..., 2:4]*2)**2 * anchor_grid - tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) - out.append(tmp.reshape(bs, -1, self.no)) - - bs, _, ny, nx, _ = y.shape - grid, anchor_grid = self._make_grid(nx, ny, 1, ctx=y.ctx) - tmp = y.sigmoid() - # xy - xy = (tmp[..., 0:2]*2-0.5+grid) * \ - self.stride[1].as_in_context(y.ctx) - # wh - wh = (tmp[..., 2:4]*2)*2 * anchor_grid - tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) - out.append(tmp.reshape(bs, -1, self.no)) - - bs, _, ny, nx, _ = z.shape - grid, anchor_grid = self._make_grid(nx, ny, 2, ctx=z.ctx) - tmp = z.sigmoid() - # xy - xy = (tmp[..., 0:2]*2-0.5+grid) * \ - self.stride[2].as_in_context(z.ctx) - # wh - wh = (tmp[..., 2:4]*2)**2 * anchor_grid - tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) - out.append(tmp.reshape(bs, -1, self.no)) - - out = nd.concat(*out, dim=1) - outs.append(out) - for i in range(batch_size): - label = labels[i] - nl = label.shape[0] - out = non_max_suppression( - outs[i].asnumpy(), self.conf_thres, self.iou_thres, labels=[], - multi_label=True, agnostic=False) - pred = out[0] - tcls = label[:,0] if nl else [] - if pred.shape[0] == 0: - if nl: - self.stats.append( - (np.zeros((0,self.niou)), np.zeros((0)), np.zeros((0)), tcls)) - 
continue - predn = pred.copy() - # native-space pred - scale_coords((H,W), predn[:,:4], [H,W], [[1.0,1.0],[0.0,0.0]]) - if nl: - # target boxes - tbox = xywh2xyxy(label[:,1:5]) - # native-space label - scale_coords((H,W), tbox, [H,W], [[1.0,1.0],[0.0,0.0]]) - # native-space label - labelsn = np.concatenate((label[:,0:1],tbox), axis=1) - correct = process_batch(predn, labelsn, self.iouv) - else: - correct = np.zeros((pred.shape[0], self.niou), dtype=np.bool) - # (correct, conf, pcls, tcls) - self.stats.append((correct, pred[:, 4], pred[:, 5], tcls)) - - def get(self): - # compute metrics - # to numpy - cur_stats = [np.concatenate(x, 0) for x in zip(*self.stats)] - if len(cur_stats) and cur_stats[0].any(): - tp, fp, p, r, f1, ap, ap_class = ap_per_class( - *cur_stats, plot=False, save_dir=None, names=self.names) - # AP@0.5, AP@0.5:0.95 - ap50, ap = ap[:, 0], ap.mean(1) - mp, mr, map50, map_ = p.mean(), r.mean(), ap50.mean(), ap.mean() - # number of targets per class - nt = np.bincount(cur_stats[3].astype(np.int64), minlength=80) - else: - nt = np.zeros(1) - mp = mr = map50 = map_ = 0. 
- return nt, mp, mr, map50, map_ - - -@ds.register_dataset("yolov5_dataset") -class Yolov5Dataset(ds.Dataset): - def __init__(self, input_shape, imgsz=640, **kwargs): - super().__init__(input_shape, **kwargs) - self.image_dir = path.join(self.root_dir, "images") - self.label_dir = path.join(self.root_dir, "labels") - self.imgsz = imgsz - - def _load_data(self): - assert len(self.ishape) == 4, self.ishape - batch_size = self.ishape[0] - assert batch_size == 16, batch_size - - def data_loader(): - data, label = [], [] - for f in sorted(os.listdir(self.image_dir)): - _, ext = os.path.splitext(f) - if ext != ".jpg" and ext != ".JPG" \ - and ext != ".png" and ext != ".PNG": - continue - l = f.replace(f.split(".")[1], "txt") - file_name = os.path.join(self.image_dir, f) - label_name = os.path.join(self.label_dir, l) - img = cv2.imread(file_name) - # hack size - img = cv2.resize(img, tuple(self.ishape[2:])) - try: - labels = np.loadtxt(label_name) - except: - labels = np.array([]) - labels = labels.reshape((-1, 5)) - height, width = img.shape[0:2] - scale = min(self.imgsz/height, self.imgsz/width) - h0, w0 = height*scale, width*scale - img0 = cv2.resize(img, (round(w0/32.)*32, round(h0/32.)*32)) - img = img0.astype("float32")/255. 
- img = nd.array(img.transpose((2,0,1))[None]) - labels[:,1:] = labels[:,1:] * np.array([img.shape[3], img.shape[2]]*2) - # if img.shape[2] != self.ishape[2] or img.shape[3] != self.ishape[3]: - # continue - if len(data) == batch_size: - batch_data = nd.concatenate(data) - yield batch_data, label - data, label = [], [] - data.append(img) - label.append(labels) - if len(data) == batch_size: - batch_data = nd.concatenate(data) - yield batch_data, label - - self.data = data_loader() - - def metrics( - self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10)): - anchors = [ - [10, 13, 16, 30, 33, 23], - [30 ,61, 62 ,45, 59, 119], - [116, 90, 156, 198, 373, 326] - ] - metric = Yolov5Metric( - conf_thres=conf_thres, iou_thres=iou_thres, iouv=iouv, - anchors=anchors, nc=80) - metric.reset() - return metric - - def validate(self, metrics, out, labels): - metrics.update(labels, out, self.ishape) - nt, mp, mr, map50, map_ = metrics.get() - return "#objects={}, ".format(nt.sum()) + \ - "mp={:6.2%}, mr={:6.2%}, ".format(mp, mr) + \ - "map50={:6.2%}, map={:6.2%}".format(map50, map_) +import metric +import metric_v2 if __name__ == "__main__": assert len(sys.argv) >= 1 and len(sys.argv)%2 == 1, \ diff --git a/tests/mrt/yolov5s/utils.py b/tests/mrt/yolov5s/utils.py index 438b2c2c..39c67991 100644 --- a/tests/mrt/yolov5s/utils.py +++ b/tests/mrt/yolov5s/utils.py @@ -7,6 +7,12 @@ import cv2 import random import math +import mxnet +import mxnet.ndarray as nd + +from mrt.V3.utils import get_model_prefix, load_fname, check_file_existance, set_batch +from mrt.transformer import MRT, reduce_graph + def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right @@ -514,6 +520,102 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non return output +def from_torch_model(w_file, model, ctx): + weight_from_troch = np.load(w_file, allow_pickle=True) + net_params = model.collect_params() + for i, k in 
enumerate(net_params.keys()): + if i == 0: + net_params[k].set_data(weight_from_troch[i][:,[2,1,0],:,:]) + else: + net_params[k].set_data(weight_from_troch[i]) +def str2bool(v): + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + pass +def get_quantized_model(model_dir, model_name, ctx): + model_dir = '/tmp/yolov5s' + model_name = 'yolov5s-0040.preprocess.unify.broadcastify' + model_prefix = get_model_prefix(model_dir, model_name) + sym_quant_file, prm_quant_file, ext_quant_file = load_fname( + model_prefix, suffix="mrt.quantize", with_ext=True) + check_file_existance(sym_quant_file, prm_quant_file, ext_quant_file) + mrt = MRT.load(model_name+".mrt.quantize", datadir=model_dir) + oscales = mrt.get_output_scales() + inputs_ext = mrt.get_inputs_ext() + qmodel = mrt.current_model + rqmodel = reduce_graph(qmodel, {'data': set_batch([1,3,640,640], 1)}) + qgraph = rqmodel.to_graph(ctx=ctx) + return qgraph, inputs_ext, oscales + +def concat_out(x, y, z): + stride = nd.array([8., 16., 32.], ctx=x.context) + anchors = nd.array([[[ 1.25000, 1.62500], + [ 2.00000, 3.75000], + [ 4.12500, 2.87500]], + [[ 1.87500, 3.81250], + [ 3.87500, 2.81250], + [ 3.68750, 7.43750]], + [[ 3.62500, 2.81250], + [ 4.87500, 6.18750], + [11.65625, 10.18750]] + ], ctx=x.context) + + def _make_grid(nx=20, ny=20, i=0): + yv = nd.array(range(ny), ctx=x.context)[:,None].repeat(nx,axis=1) + xv = nd.array(range(nx), ctx=x.context)[None,:].repeat(ny,axis=0) + grid = nd.concat(xv[...,None], yv[...,None], dim=2)[None,None,...].repeat(3, axis=1) + grid = nd.Cast(grid, dtype="float32") + anchor_grid = (anchors[i].copy() * stride[i]) + anchor_grid = anchor_grid[None,:, None, None,:] + anchor_grid = anchor_grid.repeat(ny, axis=-3) + anchor_grid = anchor_grid.repeat(nx, axis=-2) + return grid, anchor_grid + + out = [] + ny, nx = x.shape[2:4] + grid, anchor_grid = _make_grid(nx, ny, 0) + tmp = x.sigmoid() + xy = (tmp[..., 
0:2] * 2 - 0.5 + grid) * stride[0] # xy + wh = (tmp[..., 2:4] * 2) ** 2 * anchor_grid # wh + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(x.shape[0], -1, x.shape[4])) + + ny, nx = y.shape[2:4] + grid, anchor_grid = _make_grid(nx, ny, 1) + tmp = y.sigmoid() + xy = (tmp[..., 0:2] * 2 - 0.5 + grid) * stride[1] # xy + wh = (tmp[..., 2:4] * 2) ** 2 * anchor_grid # wh + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(y.shape[0], -1, y.shape[4])) + + ny, nx = z.shape[2:4] + grid, anchor_grid = _make_grid(nx, ny, 2) + tmp = z.sigmoid() + xy = (tmp[..., 0:2] * 2 - 0.5 + grid) * stride[2] # xy + wh = (tmp[..., 2:4] * 2) ** 2 * anchor_grid # wh + tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) + out.append(tmp.reshape(z.shape[0], -1, z.shape[4])) + + return nd.concat(*out, dim=1) + +def make_squre(x): + h, w = x.shape[0:2] + if h > w: + padh_up, padw_left = 0, (h-w)//2 + padh_down, padw_right = 0, (h-w) - padw_left + y_l = np.ones((h,padw_left,x.shape[2]), dtype=x.dtype)*114 + y_r = np.ones((h,padw_right,x.shape[2]), dtype=x.dtype)*114 + return padh_up, padw_left, padh_down, padw_right, np.concatenate((y_l, x, y_r),axis=1) + else: + padh_up, padw_left = (w-h)//2, 0 + padh_down, padw_right = (w-h) - padh_up, 0 + y_u = np.ones((padh_up,w,x.shape[2]), dtype=x.dtype)*114 + y_d = np.ones((padh_down,w,x.shape[2]), dtype=x.dtype)*114 + return padh_up, padw_left, padh_down, padw_right, np.concatenate((y_u, x, y_d),axis=0) + \ No newline at end of file From 5e49d503a9e571391a097f67b6059412b3208cc0 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 9 Feb 2022 16:51:09 +0800 Subject: [PATCH 108/120] [prune] remove redundancy for main.py --- main.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/main.py b/main.py index a87922ca..d2603015 100644 --- a/main.py +++ b/main.py @@ -3,7 +3,7 @@ sys.path.insert(0, "./python") -from mrt.V3.utils import get_cfg_defaults, merge_cfg, override_cfg_args +from mrt.V3.utils import merge_cfg, 
override_cfg_args from mrt.V3.execute import run from mrt.V3.utils import DOC as utils_doc from mrt.V3.prepare import DOC as prepare_doc @@ -31,7 +31,6 @@ def complete_docs(): "invalid length: {} of sys.argv: {}".format( len(sys.argv), sys.argv) yaml_file = sys.argv[1] - cfg = get_cfg_defaults() cfg = merge_cfg(yaml_file) cfg = override_cfg_args(cfg, sys.argv[2:]) run(cfg) From fd0356c311635597f332185a334060a78d4fc4cf Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 9 Feb 2022 16:53:11 +0800 Subject: [PATCH 109/120] [docs] add V3.png architecture --- docs/assets/V3.png | Bin 0 -> 179172 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 docs/assets/V3.png diff --git a/docs/assets/V3.png b/docs/assets/V3.png new file mode 100644 index 0000000000000000000000000000000000000000..cf1be323288ad397bcd909cd7e02023cff4237f6 GIT binary patch literal 179172 zcmeGEby$>J_dkvUI-rDL00Ih_goJd%AOfNcAq<_u&@)Jv#Gqo3N`r(VjdTue5JM`> zFo1N2bPRkq>Uqw2pYvSL`RDiF@8fkLH*@cM-#gY`>$P5Mt$CrQqDV!~NKQmVM5T1+ z_I)BEGDjk!<1Qyi!6&LYPvAsE#7fq3a%xI)a;$2O_7>K52qGf(S7s(AXOy_un@mkj zOq$wmaF9E?-GBZ(=)Q?>L*3`P=DII+ZLHxj1_q<&X-1D}ek4j!uP?Ku3}W3a!6mN8SaOC-3T)sP||y{p8nLi)|uPxMMfa>f%hnh(;X#ly=%R3PDJ_v&(T zir<15tE>2vYU0l-{`sMPJx`8{9UBnwvqMh=-A-l*;`OsjxO#Qf`KoZY5zXit)u!eO z)k6_sKG#!}mq$(R%PQH(pM37x&YQu7RqkL{e|S9LewM$bQDI_;|10#X)5ngnw_eJU zAwz$9t!~G1i_7=qvBzITNJ*z&oG5tDK}m_joj-pSb?LL_6yMF2vYR*UBs9ZxB2Ksw zjf+H$37TCw5#N}IMsF+~9wrH$Ki|*Be{(-!`|xo2%i-anA*JA<@Uc(myF@ZgCV6p) z&uw6BwGcW=7BCnQC%8UAMEuN}=s36{20x78hluFd>i{A$@cRt-xt&b%$6YeV=eLOv@Dy>swWf}<4oq3x%pS>OYHt4k!Q+m6c=QO7 zq`Nq{L?WC`S>2I#C?|1usmniGh=c2+k9jY%{%qoGD|J~1rp78~?}%U(=DEpp^RhHK zD=Vv{qq&9n{o4w^9|wO)UAA&|ekjh%>*nUh<0imk?`X-(CnhGwdy}7+pPw7F;CAvr zIh(q3qnxh%>g11pZX=w`9IYQZTic^pkNP!zVDI8Ab@}qqi~jTP*EkXG)_=YU<@9@6 zV1m3ycX;`DZu0)8Z}6z((Wl~S*6s*9-P_hkusq-$(tLb^e3CyO_{Xh3U-_3ub^h|G z*iGTTKKhqie}5G2gm9FzM}n6+OaHkyzaReV&EF47@*d6oFKh8@p?`h~R$7`|lJ`Hm zCQW`$cw~Wy2uh@MTUOJZcmYrLMzd$2aW&;KBZd4NkNu@6_MbP~59b)#hxie07Mty@%a5d6 
zT8BKBw%XgdwkkTDdbjz$Qc@8gr-2gv&;MA3No*U^*qo35&sIPCs5{FV*1|;muMMBa z5}}7;5@;}wiAYYc`u(r}XqqOa{%3=qkAGJ7gQ8khPT*z#_gui;6B9p}|7FIY)r0F$ zRuK!fCrkf(bVqlOJ9<$6dn-P$G)zTjZ@K;3auX4gR8*1uYb$ijNt&kMfs=0~{$&Bd z(9j0M$Nseyv=uB(!@`r!i~lMSeo&s}f8Ap!s|8pZGuK-)r~Yk^S)r`aTmMT4el0JJ z09YD6iA&C8|N3$uhFFWMjM z@5tkMCFHCdSpyUh0v_u@*Cwiwq%8;r}m#fcwuG;d&6{$>cm6wyLH%EgkXP}rqE zZWWx)ug5jA_`XR-+O?BklqEt3(5XEvb6Xxh?!vTtJKy*3qvJn8)6_=v+3xq}>{7&# zhh)^ub{6e#bUhQ&8-Wl&PeaaiR}(gIuxF<(zAV>vefke&HE`66!VAQ2y`5}+JPj6a zp(4FA@p_i`qC8k1M!h~Z>Uky9HaNl^&qcN=n0>?QkJ zh`5)MB#az9FA-wid!vXI=u;XwVu{VIu`hvb-%{^YC)rhc3^_D1_*V?r6kPbbx_wH| z8m8}Vw)w{r$$%GF!{T3 zP+2@%(xY!fzK4WY7q1J5ybfUa`|&yl2S!}IVu0cI1Sa%Z@iFE4_SGc}LUVDCmR~O( z?eaC=FrMV>oFUUs0kLs$aj#`mPW@feqwmv!8DlL&Yko_N-|W|vX9Z@rFc*$H7*Zu3 zeJYDqt~a>LMb;MM|F^w>@h~C+QV=6w!bTlk(`Tx%n@1Sw`MLc1q3X@S z8~MupmUbqWuKxo?K3Qag1Qs~H$XV9De=JAnA`F)sH$0q&+3@GAp-6@F$ zwNuaP`To9#YII=vTgC((X?}N^KOQ3C|Fd5;B4Sh`2N7rd?f*MhC@TdRsxE86iQf|V z`JT!5k55OtB@%$4)(`UJpKa3AE)N;Bl}Owr=Vq_Xz+#O|Jw5Z_4)tf^@OH4ik}aG{GXHc z9|-7pHb%lN*JEcgy3HR#QAKNOYuonmDTNd6#Jwwi(1DYh+S*=?;EW;$OKdHLhuzJ9 zxz})QY$iU^Ar3&}GEEoBjr`@z{-VxLSSWT1oc9kvO8OHS#RLA3o4+0`=)rM2qOA~#rKS;ufiom0 zG~d*{zILzmy;67;3&iCa6_WzAY-|{E;U+*6_rSR=ct1e>U!{%e>P?@0cY#OSE2M)G zmGVSrM_M(tt%-0|VMB~F-*`jtjlNG4p_dZ#V3}5F)NZ#hMV6BDf;C( z_nEiPIDeFjSpikgngnLKmf2kTUnR)Ogt*XwY5RD-*KwMa~TXuzhB-4;gw-}U^Zx1bk} zA6eRGs7^94wz{)1d}gF)e$HqEJf=@Asda?u{xMYWJ_8j)D(gBaCu@T^v~geVT@f*UJyCzjD`)pvo_tBDY1T}57;@tL)Tm|GhF}m zJ%8CtO@C<0UC}!sr~jD#^Xxi-4rM;4>2@mT<+@|kg4}w=>uHDEBKYvG+Px)x z0R;Bk+K^L6?3X}#w%zWJ1};54%{%I_>*zXA0~ccW7@!#RGATjW+UZ<}$=|KRVc%w)jLP zvYvCK)jT&BtV{8(l@mWIq`&TLk3Obu1rXU{sx5wcQO>(7R4@EGOt~u!23wnI`>C@w zhn!Tq3K7|FT%ES?fn?EAgCmI*Gcrxh>6@*^99W~W}TC9dpVntpMXEl#M<;0A)> zkRqYWGSQ=9?W;Kd?p$_Ws{#{pA|VOb^rAi^uh;~);enh2h40IITvHx|vA_=RBnuJnSTKE4g241yC!qVih}94I$(d&@ch_g9 z4)&I}NFo|9I0YR|iV#R@#A|#I%OKu-c>^wehHV9n%(jDN%cDGuyOLQQWy7AfU3tbbMTAX>^1xK@gWY+7)^Js6>)}Uhup-Av zY!iaExHoh#h(Vxr=4(purrYL#rKQUoz2$l`mUaJZPZW-n$u5j?qPLevFxtTJMXZ!p 
zX+pxPI`L5Dat%&`6jDZj$r=)>_LeFWU!l%)Qjw5dbXJo+hWR3y2a6LF;;mZo*Dbbs zb+}R;e>H%*R`8}2{x~GSzG_t=Wvdj|t7psIRkpm0SCagteUjNpOhr!6AsQF{BukT7 zNuJhzsbR%i{DFP_pgR;*+het@v6hJpjGs4H2z|!BNM`p@Vy9t`X$DC29FFG~io&1AmzkYMiw#Ozw3TslHea&h0j))3JybNGl?bb)#- z;6sMr(rEqTv$Ab$A1$7#?5%vFZVTkCi4lIzRQirzkoH1PMJvMB6!)#TY{aJpr^O>X z#9M1Hn-K4Nu$H8oqydqzuinrSFmIw+t3BMS%~(3;?0Knu9{7p$gVQ3z?pLNqE+P{4 z9+wZ6qtw&_1pW(!oDq6t1ZE0&{!k`rtZ3aLi`eYE8gVrvk*+1_-I*)AYinJq(dK%B zjt5Nk`;GhKEQdGAp%%DAr)WRCX6#L(wX2t+#5M;1rlbN2HSI+;E%Nchnu`(u7$U@wb;t>;9F`^ zoItziCyMKAY*Q-}J=(@X=}8Wrm;DIxr)(!JzutP&ckGmCouJJ*F6H-3MAual-?%jT zZh!aUtrW9^Ao#kDV1U7K!6)@22b&_x)74wUEeLqjU+n;z|H$!dV2hUf?Y^MD(U#Aq z#Rrl2qHMd;f(KBQi>*=msMzz=Zc=Vq*tNkyJ>HBse!doZMzSsJjl5Ql=Rg*h-LC}~ zHGd4A3-He(XZGD+Ra6hWt)k6f8QJ23!_4Q}io492yY`!s+1m{X^-&%cX?tslSs*Gb zY#95ruQ}l@8VNmO>bf?OYQ3T#XXE@UTu#39*}}USiB0AgHqCB|*mP%f-v2n;yV}te zqt$2N6zuCLYfL{^1vrM*{WZ(O?&m5}LRkgno=U2GWX_=QpxFec#<*Un`OA0!@#?lo}WkN1A%Gj{kwt023e5ZtRWtB9e`?b)5r zv){eePy^o?Y6$LA%1tW>u&C&lLK<@snjp{xyPG3Gc(HguufVZ4(vU9Jyh>1qiv3@>O(#g$+bRT|{~nBE(z?7BscqCO7G_)a zK{3)pdT*f!wzrvyc5!2fJb*_z>FiDN`dahU?mHH>i_c%Tv@hxDo$@>88J}0P`;C%O zu!%im)}9_*UGJ*l+lNdZQ?=IY z(Xn+Rx0>=a($QFe z==D9O-&{Fjw*Yw6A^~^?wrL3nQR^gJatpojIp3Ui^TwCkw{>$KlToWMi4Hk;sZ8bh z60&!I^j(;V<(Dz`S}eJ}sR}bON;~Pjw<*+ku}`M!!PL%l5;fOZ+RrZZn#ry`S#YDx z54Bf|8cwzfg=ORjtTb3jE?2Du z?d@eEx|hNE}fX7$nFhy;Z2NpEWjufbpcDasU} zNzJIb_1UiKW2oUOkIOjU@Xp8fL*))*3USA7P4oQ+DINh@PJQ56Ad-ATn12c=+W%xf zQ72H*3yX5}{IpOF);n#|p1j*^tSf=W`yVMQ-0x@&6yxk`A7^G}p5&&@c93+Un$QBS z=;VuI2|r*un01Y{uZs12NI3am(u;Zh9Osmp?7ugq=x`?GjM z*OM$;-G*A|ehpQro;NU{vdILivf4Q3(c4+5%J(7|QFZ%@mpDggVZ;If60eJS5)G_i^_L<6q0*x-D zb}CCzvm{}w_7J&0P&|m@q0f%II7dCf=>?yBUg?4pG8744I|psK7Ote*inZ|`4;Fci z>pXigki_{J%diN-vy^vYAHZowM994QWNxI?L7T|P6VwKi7v^y)59Zi zg4VBlw)6B<&6u*Q(ho1#<}^MizLnGmIPYu2X82pa_G!woug`mlN`~!ikB2e^YcJSK zEaX4V%o@8~J@T|~<80@2j`Ugm5*x(2M`oH0X_mnXa1xk;uYHKO6x1qh5Dc|pyoV2o z{=nCfot0H3zoDCoCEp$TM#W=_1fc>2f?5wHUisbYgI)E3wG6tFTG|_0=kb*9YFRXI 
z>3kxy;@Ysgd(u|Y34bny>3O%Ycg>tdEa3S(MSc|9Mjvdcjz4J029C&!c=ggvOzsE* zZWbe88S-_zm&v}$0efkdlx0za6w@p>o^R`0yFV%XVi|+fh@$d(TQm5UdY$2Pk2A_V zZPfu0pF1ldKLY3P-oZI{!P?qim{N!%dpS~4gu<$8$a>6^;}*0py8|0s zi_i6Kr66zBtHB06GJVT%ICa0vZ7=G$^W)pff5xuwTmjK%s#8V3{mW4{VneQOhykHI zw&b`i#nvfx8t&+{z;Cd4VXBC4Jx@&yXWFi{XcD9C;`nnu>0ttU4km?qvq{Vj(kP|| zgdw7OrFM~vW5bT*%-hE*;=WUJrsHO!jdKNgYqsBc0AO%-GGJt@fm|#35{^!5Q?v`S z`OzzFvSw$>7ro`*TYyehp0}+jbfjHaO`66WPc9lr@{N7)rf;~~>lvw|vFB{+?fiv! z=+s)M%mK}=4}7G#VIxV_s81W3gin9t__B;4SUY!Q1R$FZ1E6e-Xu42`r8v|*^MO3G z2&8Yfy5c}HM<}D$P)NOJ3wN12ac3Y8rb%+6Tf@kO1mZ?O8+bDtd2jR>(exb-r1>qe z9BwPxOD;xSB<$a+*9%_Gb?t(Q#P)vBi@(QP*==Ru5=>@`%TQ0L@>@EBgWtT}-uNEu z7|EMi1Bo&Nu1~SHgS{SoP7t&Y|^xofI zHL4TMFGym`sHHb{E*P*|Yr9Xkb5Ob5h7zju5Lqo3`8adlWvN?Gp`w~tsZ_k4tJF?qftWNM&&D;{O{&tnO zZZY>gei3PD6CCwmrAL@l>|Dc3wS_pZ2QJiPqTj^kl&RUbK>dAMQf)5wdo zz1`Dx@%)93NJd}L9Qljkl?!AnRrc3a-pmj}%85_9#o8s_5a8}B1=z^41Y4N;-N(LW z$KtY%gSi&2^Ootj`VFEFeip*IINfD-^ZEyGv=MmD%Jz_2WaV2*Aw<>2OhI14gcRou z^;nOCO$~)no`YuEt=&(P-v_4s``_(|(@v|89CIz2qaD&v+h~_qUiS~dKET@*wFUX4 z({t&JLa6dEb6KBZ+}yB^M5$wyS{!ad5J?Hw1*_eS`Dm`4?1GFfzzjsVTvv`U0RZAv zt8|~GCkW3~ZA=4i9b(oP%rp%_e`>HQ+tcuK22v%+mgxLW#G;a4nwAKzFaunXIsC(S z9UqTe0;VFpfmBnN?x)>q%~{`If$VtYh|C-&Y=lx)$JkNWDw{RgXD|LBu>dA=gNlLw zv*b$MvEi#1b?^gATKm;49cf=p z(E8SO;1t(aeMLQom)*Ckh3lFAJ&n>gCCXn5qw7 zu||v?k$y&6vtJci77@mShMX<%0kB{9=~D0J-;n4+y1qd{u9H!oAB|k?amQgY7P84ns8wUSqWlx^H)y|JC8rLR8^^8$=Uo7C3OI3$EHQ0(fvYa~AVKD^2=n!!z{SG?aYrOL?>#N)1Ez4zK^<4d$40&~QmdcOPIbyr!*X+LO>--z7?(@h z14-Uv-K7_cyQ-`?(ThHP7~)<)9Enw2+`D}}+R!zY{=HV|>PfaYqq^)(S!oCConFn? zqTTC}kVmu&T9YTsDoC+oS|*;YF?>cV{mdy`Jss52+jV=n?(1I_t63{6FD6c_nvBJ_ zRCVxTxu-iKxPK`efKDda0grj1gR}7$nGGdH-y%aH{j_4Q3A1OL*uh}1fJB6dK7)L5 zvftsYV#CYvTpE>jJoQBx1;&e|b88ut1-FDDmo}vDt9?VN6PrUEI#`@*M1)f+tM&|H zErsRo($7ZXQv;XC-$-xvn=QR_@yw-IWK80V*NSF!5XP0Xyk`pR;_0FZtw_|~P@jY? z1JG6HC5vXqN0``$#YlN+@jw15B@B`C+?bnG8S`L19#Ieli!U2v3b!6mUL))&(LYa! 
zJTXxt&Q?{GhgV74aTVKgSPscVYqx2Xen4U$R*${B(F(XPd$wyV@ax(GZ^`R81vHio zAx=n*S(D)O?6j?7Y2(NNrYJ&|OJq-G+G=}V^`;(gOJDrTnhZ7ouC@ty(DgzY+UL&4 zDmZ;}T2+K1oBIxy7`?QUj~7UHooj-7>{83@ckTnQw&DL8rM5F|7nAGMoOu-Fqo$uP#9{gUJWnroXJ?_bQ#^rj5=dJ+&& zv6SMrK_(@#8gy}DN$D0t&+v~-%AD4MQX7(<+Dsz{CD(#!6D~(7X}{DF(fus6Hezqd zy`Is**3jW1{}(+oO3%JP%YQiR{s+BHb$OH&<-#Y*Vur)i>s^c}yz_%02T7B0X2e=^ zg(rH{uD{=3or_$LE{j7{t7#MOHbP3`HfY@K!;9H|*U6eoavFOE2k1l?P$TSE@aDWAjPK|f(4A9=Pmop75d~(!}qKPi({iBMUaEV5r&+V4%mq1 zH2aYeuP!7*Z^Y9xyqd3F2^-qc<{KT;LLki9NyW+nNMuJiM7s;Z?6p4g9pYH>5u_VL zo@s(#PaE#AAs@nfO!q^RyW(TF6$i2~_40^_#_Lh~+3MTD8oM=p{N(+E3;LB*gap&A z^bXBP6D_}+IkRarkObPM9v|B7U{2l(r8Nm@HT_eymlUH0Ymki5ea3eO+gmmZ03vLb z$2&GQzj(n0I6h3lv`bBQ1j7d=e>)B6{zlgFQ26K#|0F#ewoSaS_~ONxDEf_$Be$ zLcM!fw1+=yADWI{w&i)26Bw|oK5NF5Q$qjHY2Z}L)b-I?nuNAN_1H2-0X|&R zJwF~!oeYNyNIJ99y$#-JUW&P-<(g6k)um$Pj+DDj&mif>p4M^n=U44D=*uGs-KqDs z-760!=l8^;xh6A`UlfkPql%t z-etiFqH1+wLLXYMrq2f*k4>W(vl&T{a8q%uw-^~b$tc$=j9A`nIfe58ff+%@KrL9( zSzvW5t3J?-9@aP>i{TWzu;F1|Hg55jb2JXds=e1eB7)uSv3!bE!A+4~7J|vbY%3N4 z*~{@fn<-WEzMjZOj)(Ih*^8&BO!tx%^|3nj1NpC;^%_tb9R#zo5#R?jn_i8$7SHoM zdt(Do`QatTh?6%0wY$0vj(J<`1Xn%vIZ%xKT#C?dAK_tXY|IGS&!06#k7ht~`Td02 zkr<1|O{RM<^;BEl%Jdfy9P*9VpEP84ffFLlPjhthig6T zGjway7n{Irrr+=D>VI6j3omCx>9A1p%RvM_dqD)SlZ<7Fec zcY`WpyrVJ-Yd-4r^JN-X>qr256G-=&3g?Tl1}yjnHui`-0esz&&0RwVv{K-YRrpJZ=xL@zfDi_@odP&Og<;XMhQ!35Pzxh-cvx?F7!)OFQ`V& z_MOi@P}VNS)& zHx>r#8F#suMKmP9X$(%G9(L6x`ue)oxTtj&gW!*=BQ^ym0(Z%WF|-ZV#+4>&-veL{+Zd(&g|n`)nLIATl*i7#v+b_uaL=ZUd3;Lzc^&fH{a5Mpw_JFT zOG9E7_5Khv+!+lu+n{}iteA{?8xOY_-^4A`i09FqrOoMnI&HAIGZx4j;CgMrf4|CO zjiiN>^^DN)TVGQ|kkIX}?dXHi8YAG?kmtR5D7VfL?M=5^wY&EY+6<^#LU`LJYMBLy z6N$i?vw{lj$Pz#PzITOvTD*}XmRlF&PQ0H_zXO}p7?H57tXgg}e%V7x?Bb*(jPRT8 zNJ?4=kH3`VmrT`~S!LgE`9@2#VfIR>E+2&+^W1s zD>Ib#t&K5VJNDyXskE2NBkX;xv^@WIETz^+G?(rJ&9?d^Vmt1VH-*bxLI@m)-?SOU z+C*N_>gq3M3tltum`;U*^VCw}%j zZZ~UgSe|olrI!>1pW`)jJvB(hmRxWe%;-(|c_u05M4SZT!80Mm_QdN<#?s&nYtO@( z%b3H}aRu$^8z%MDfgo#*_rf@O(B<@&8Ocn!zRua1YJv?iGg2knC 
zgmSXv21XQP&#aIcP&lk(O$zJ#oMoYIBY+UK`7zlpkyiuV8}?fEkkxBbc3XRKchzU3 zM|)WKoEl}eO4ty3`9;HY32d*3xbvcG(^3F4Q)~B@Qo2gQLRt{^UXtfrrctf4*~>yN z+N+(8C=I_ppLWhWv!qf8Yx|(N-Vgc{qBDbuC=A0*6X*kjwSi%274g}tT}768o%g!| zO<2_jLDU359P!`!QGUIPWr?`j@6JQyG&qaKwjSFJiTKS`MHVmbt-Nu@jGz+#hS@ky z{|uLfe!hY9W`Z`^{32%7hhESp*OwBn+ah3z?`VI(Q0aS^;`>Oyjk;w$Pca4}%PKlk&u#QFSaX7fs_#+cqp!fjr5#*&`l1u;r| z(`bQhEKfd{TrforYc9VJ?fK2@8?VT!IW;^tGO+nbdJvoVd;o;Ug>@2H)D7@}@r~>GpvlvNHcKkZvHwl35rPUK-n5=tk2ZD$Tyj+jKUR@Bs+1()CegJy9%bPUf*?=qzP zXCerD4G2Sy#TP05f;5h2@>9c;9{o@%a$g4UFCIiQNBUQYUB@_kmn(8u4%Ex*h^ zx(WQmxJz;Y?$_7|WN(8wgPjbTHgO{dy+?dhgACm~Hu6R_)cNp$&}QVd7}>OB!!Q^O z0;tw$DhW0bU$VI-9#cz;zH)K7KT{ZcJ?lamPt`JJ9`!cx;Acie>$xyWTk_VYLdY&- zBVGC*%azO0PD2iaPg+U_%7^tqEHcuu*XS^6Xg1E96@W2|r#EuHl+PB)0Z)bwpve6c zAOI+HIN*AQ7 zubvDi%^YeEM2c2wYI}{mkWRoiWU~a2IKhZU+xx4ViB5hukEB_H^IrG%Jz`6yrrzyJ z;SA5O^7N^Tdt@w+!di`7A15@cTutmRY{4@3x_>f-#WrEDYpq!>&IvRK6E0fadoXBy zph)nx$|lQDWvs;s_tjl|*4OUFf+s4jRb6tNT$kj$b5RJ-TsdXQq=R(+I=7W!C=;&; zd3adt=JNGkuVt4=)StX5dt%+(;l*?INed@`hrjqwbeA~u*W)PjuZikU3aLsg3Z^Ju zgG}1FE`hA-HaTi3b7rrF0`u#y4_DeH*IJv15_|_RHN)oNA;VwX41>1HmTM1(hg@;U z;ZPon8%4_FESx6E1(Y&`b&Wi(qUx{@8za7l-iH>2J|7khTn7+FJDnqhCAZ-(P6*{J z$wL1!H@u5>a(6zyoaBP~c)}9kJ<-tIUJWr@NPei~k0x`ArFZfr3iLZJMjuJyTz$WI zN{$B!R29B!iQ=YMxF2g{ndDunYx|+3%0U1=s8N3|fr(&sQU7vJZcbq);M7yR8Qe9k zwc>Xs+q=Eq=6+C{7Rr$Ud9^)EWL*~TpK_;I3keCm_?F%F&O+cK+Vf<&NPKy0tSJ-0 zZoty0G;6BFwy(V$5hBzUH3K){H58Ko?Em4 z`8%p769?~7bKY2py0E#WlvH*^VO+nlJ==jPK_ehf;98N!YGxeXAB7G&gw%c@E=yvLU^)9R>~YRd^i4#=n&%{4iNuu$1v z5aj;6*W9fbEELNV7H>^3n4;gUQrJM$5;oTwPKU(h&0^I30-$e?+pWNHebX&`jTgr8 zCZi)soREwQ`6OxN-{u;qM0B!Q^HEr}KlMnhXsx0DJE}JYKwo**8OLvGE-*& zkvbDBU@PJbB0Zktcb*}qjSqLD#Qm% zXa?8}!dP@e3n`RHGL0lk>(&e@Q-{yaAY|M;+XXKXX9UZjrc9jb=Ijs6!vSH*;s! 
zrj!J8UIIBNm?xQ68B$%osIn%bE&Qq>gIi03$ayC0?G!dUcYW6&iNJYY97J)8cBb^r zs~I3?RU|e9*P`ovl}NdZ3rV6eD~Y)uK&H`AXx7%m&cH1}>w_K}Rw~I#b4(;Bv(-_j zAv+508dI3b1TvMRoo5THD^@;Iw1NDQgjG@^?xb9fy$?DcI;Vx^L{Ev164_UM&ENus zx%MDiE2Aei1X_Ju^KzfXlVuP_Gs;|+AjW)_FBoREi%)xiKU}vx(r^j*>ORr$CR5sBB zko%cMRgvy~){+`B7N2cX;7Ip*#AXRU9$?AQ>LFtqUGKQ(x<1YJL!HQQylce0D|ndd zrZ9WX@)-|X-?{iguMNZ0!u8!#t0Eg3z|raew8R*q*G3o7GYHKp&4d=m1rcENhThTH z-Ic@C&m6XB*9Uo{l2=PMykG zJ+VktfLp6IP^fZ%ovTbu*S&MMia@pzcLjw##Zrj* z?EArK16vXvrf<0t9zfC4WJ>kiGxJLWqS>(gwq>a zAvpS2n~Za@Sq~mdf9keJtRAHuZKB>gN9WD2vHKFFKE!Lf7=eK0OL>s;X8$8iyHdux zJ?nl*h;GvNU76CFz;!ow;l|7k`^cF-x0sNktSs?WhRSbND(P zrJIX5ugPFM#qIzqMchHwtUJT@%=nLkFO{N^ZJxW{jN;B#m;E~V#>dE*wQjv$qtY0F|*f?5~|IGbpa@qzhfxbwU-@pbUqezH2HNA zDk%+Ecz)oKdK+@A#H{3wNO-FdzEk_I@E=AJ@=TtfSk*Pu)M+8Rn~O{{uc;npy_D0L z%J79CmTstJD;w+g8n;+^A3|30S6z|1Mut_rZCB<}NQ8FPN9ASjZ{2bNbz{}5_Mdjs z?owk!KXmBh?NXT3&kez8_QG#Nh>`PeoZ45hJh*XX5ouH!bQ4`G)k#qfVrH*F05sc} z{5<>U+YVm{miA^@60tg3-Oxb0P8PQf5{cTbMH`H%&zDe5*qkRu>bDu68jO$GRwU*` z=*}12@#UTKrs>r@r8nKq($rVDv|Vo6zO*@ERXg%-cY>#5ZTBSQP|;w7{*F1s249ft z(iI?mLm`-P5|WS;Lt;+8u&KJA;I-(|7sq!x^0_9mDT=mlInaj>O2pLU`vmuWC7&v zFo>$Ojo4&=sOoUJIL%!)^m_HsVn=;vO_`U5XeJS$*OFq}Wui=)UXlVXb-i$zIEL&>l z|9WFar2$U6(hUi34rhzc)JXRnD4Esz2JtmizF+swwlATJJI*1I9wVZ7lg^!!NHOIeRODYD`foh1P=7Oa<(+(qFHyz{&qS})Vigb`4H@Y!Q=No7oo z#Dg{;gsp`+Z)Vmd@%IWgloi?|Npt;=F=}fu#Ox|l*p|M}9qurdJ$@*Tf#x`_?gn)o zM7U&IJTAw)mGkXRJ~s5ncL9X5XXHRrYb*9N4h5rfP;=^Ou?I^9FyFolk5=l9nyn7y ztt*41ow_XztF&dDhjtC30}JKhCASzn$j{@ZsCzN!NoCpAHsO)hA)!qkr``ce4{vzG z2LfeS-d)if_jCsym8Rm-vP9Wua&q(|eR^TKOR&2rKR7xtKW*6+@;;(qAEfzu5e0FYbBo?-egg!8!z^a(xYr6e&Pr1E*NgH!K^A`zTsrS(Pvx zP*TG9VG=^LfzJxsQijDozmj|Y73J7&bl-?EIXRB$`u^IYEmBb=W~NA4#QMGMh7>8j z_3fE0^`tJ>5e$Pyy{PvR$4WvbqEKzBhQ!NM1YeHRQqzv@+HEZq`BJ{NA(pay41V&t z4S?~_v|k&KoZdn0CTir1<}@p#wej02-bYD+rhy(^&c{Lma=vcq>vZuo;k6!|&{yax zX7N7Ez&<7!H5JL7!YOaFXQX*J-Z#;P#||yO%y5bM8BA5vBeA`@q_8`yHT-Q( zr1}>QO}+|oq#C_1F19adyC5yGloX{lP*SrHR9pbd_Tu%k z8zd2!@1sx7s!%HPmYq~&@s8N?G7G)8ycobmjoMQ`8H%~2D6+^da3}Q)vdc2v#~6M_ 
zqc#b%ZS*{F%GKAi+cLdSJ(7^s(Uz4}`ihtVQ8;(2oQHY-9frxIXZdxZ)kbwHe|k?3 z+?w}JrFZA#OI?-@eiongx7G#f6h+qQu<%qZ{N#@v1GEz)mG*@C$~6t5>q&bmW!!l8 z6d#Vd>}7s}(YG<)hafOD^>UgnPG->%sWLlQ$bPZ+P(;(eq?L>u>siIbzb+CD zB61CLY3H}nXuPLp1-e7NAorpIpY}2cT7`*$D%*v*;psfoiLY%JcMm#iHVhxcV-4r` z8l!CboFQb9-dV~PxN|XDuGxhjHZES(ARymki4M{z2l8_)4;`20*?l3gN%zc!vu`~? zH8{&e3tjM&rltNuz{>7AGYE;)DHAsdCd%c*oT*%O68Dq^0 zTWe21#cYrj(Vp8R!>=LJ4tZxuFK=0J5xA4LN)DJigQ1w!27(tfwh}|!k6hYQ9GQ(p zEjaYGO*>JY9z9HGncf_r1C+K}JBj&3)Y`yg|4*Lu!IdyuqAegSC%D7U#~*ul1tj=V zjP7!z_NKvbw@vbqUn{FN2ZPVji}=9v38i)e*rVc$`cmT{DiKswh+C20VF3EULS^nA z`??3b7UY%HfQTOthEw3#5iB^DQDi2y+vp6B3S&xU*JK%5oV(5;A)P8AwNT-IO!!!n z@tDp)h6{I3cHkH`szdjPh&KMvuArT1hFDIsB_HFANoP$(Q98sopj>3G&~9r4WRrxJ&boOm^cOS}rE%I> zarHQxP1)dgnIG43rf|m z$fOwF&m8kqr&`a>q2cIhaaYG z4jjrriqcdB>yqj{!a1l*_Rap;9rs57096;f!Xr7<=JU?H+ln9obr9K;LNK&e8OOtA zVMsphqY@bHRn5C2RAV-YBJ(0$M z$T_&~B+yV&6)!oDmlIb{1bse>7|Gg2BV)556MN^^x1MnFWIQTt;D-z67QR9@6z-;e zn~l|rc-rf9k+xe)X+A&AW2w9xYgKCYIV%a!yQ)|lPP2J*Xq$r?I;+ck& zO@q&}AQmi?vVF0VX+N_>BBF^>BU|PlIST$18Y(X6$w^Xa;)SD(-zHfT5c@KUg$C0{ z<$^?ZpES(|X%)8%t$}5JQ~_BA=9R8Jt4ldu5{6gHQk5#)7wT5dp~>kVK7L|)Zqw;g zyN1%JGt3P6tOSfDJ!s^^qSc$ujHB#Q&>$mhauWTQ*B@VmPbwz_UPy4Ay0DZy0*&*BBGAN1q#d43dKQo9ez9q-LKf7SgN zlMtl<%p66UrgyTdhY$BrmTdw+1N78W?t8WhxV$PWiU=u9ev^qlEtX zy)SZRYFY172LVH?$%#)(0-c(mJEZju#l`Hjm-^quC2?#|P!A6v2U!!mtDCRKUq#gn zey&m8%iu4b)$QrhOA?19hcRX$93K<=A)wSX9I6@H zpKqo)<*S+8$sL*F0_X3S;kJv9i{emYDoIR8Tsp65qn+xGU%eaICT7ukK83#mv(rav z3rd}OBPJI%KQVXdT6rMe9*oh~u4wfXV(+w-%16F;Z5(QI)lbcTBcYJLo6Wktzq1CW z!m`gD5$jtio`(|lz^L?Yc_*N7KR1iU>O6=vQH{47{hSdNe6JA^D$l68s(`*_G+*pE z*y`j3NJMk8LstkWfx6vXMTI?Z7lZzaPyFCc&XM=sF?VA*;IEo3RHl6cSL4s&HpB=b z3LE6504;n!t!G2tJ4Dv39V0sgKw7tf^H=#-ftt)dl^8fMml>t$2+uMMk zgdh?IB~l_ON=Od|T?*2v0@5WQl0zsV2udTNNJ)rvNGaXYjna}z3JUz!Z$MAn^S)p1 z_2|r=y;tqES3GNP*JRD(lJIDTS1OxArl*OUwp%bJl{!#<(e|S(*8K8QHBR-&tBRWG zB;34emXTb#@A+==NJ#X{-`UxAO5zx8k8m^QH~X&QZ-keBOXxCNmx_lHq#Qq0M)y~4 zJgpu=l5bGzAT`PhST=Wh&GOY&iuGc5wd@95V_ReR*h|ice(E4{Af7CZo}f}v9~i$O 
zAo+SBY`>}U9RMd#$Erw3?dOsu=wmtHA)9Q&GSO8(_AAB3Rfv2weir9urF(tt`^fv; z>S!PalyF~yWa*V`U1R5cDIjg2|5i4()AkFxM0lZQ1)rm&_Jnp65Q;-kS0Wk7C2tA! zorCHzn~Oy162nkT*bI%aw@kfnv5CJ($F+A0uJ%wd?k-MliW-4}WjoQyhN*-H;{;vM z1v~lcQoBgk3%8Ad(FgAv{VUG76XET~KUg8Khazm|NE+~3k?xciu8U%VUwk@$Y0p7h z?WLxsf~jYGB#ZjHD<`crh&O+%x#qRt%)K zU5Ac|+&ifxrugDA4>PGdlAs4@53Z?GRD5QWL7~|%S!LgSJ?w>~fs^Oo?5-A7?zFNd zm6~>?ND9!d&G)t6qq$T5*y881#~yXV<@TZhDBDLci3JT1qdWEB0n9j85cbsvdSxA)4#TI(G zrlFumDZ3H?W=`#-oxggcTCNC*l`l1S%&ZGOI(^CbwS_&BwZO4yN>e3OoO)U`XvUt@ z*Mv0Z*@R9RIj*U-D>L`7fPDMkN5wFmgDvzw44Xr6zPOBG&_ ze({|Zsrp3D8;<2%qvpAhub{@mM^9g5BGu`7v?HOTph9m4QT3j#2DOAc$~GHFdO=n# zBR4dB#2K2ATqJbDCvIVFLp!to!c|@4mgr0MYs+I}tW}HFV3{|RR<_e@ZFUq+_cZA_ z#x||6ncq5H0dE#*_c_zl7;v11bTePa>$`rhHO;UpIdaK*RK$KVR7x^F&owsvMcy7w z|E^D5^W2=7u<%ye6N|m5vc_nU{a7&p(&}#;Gs}b&E1}-uh52SYyZZ6*v12>Oa<=d6 zF=h)l2;ZMoUf{|VKQKxAbfAyGp^8+w(=%#YPb*o?U3C6O>UVpOy~ks*5a@1?yY+Ua zNiRS7Ir6^gusnb7&1X|+5i7Uo@9=ex*BHB_T$oyyy7MFHX^>Z8VY)Tael7?(@k0a8 z9iz9!D%N|oM=#f~#p!5kQ;N6NLRi~DwyVLcyPRkvz_d>k?b6U>MPc#@!o|S()FxZreqGu zahG?$xZf!Qf&O@c#n^4A41XKdr6|lPsFptjJ&a%{{Urs2XUh}qr7$hLtej{*G7ss@ zUAb5J+ltovwRo(4p1b6cRx~<`s!lhL(Ok}<|7nsGjrVYE)-ZxY^O=rva@A6)xy}lo z5=IH|AxVsRn+SW(QehZshKsw`{ z8K30h%2acCmDNjc8ECdFyb+PUn~>O|ZIn89j;$(Vf_0MTo@)>2;$WsuNhc958@^^{ zimj~!c`k?Qt_E|M(M$W#W+Ow(tEUr$oCu92@gxHsbbJS&wuO%hXK58&B%B`FRMc(L ztY}q@C<8bKy)g=2wdtZ={mfX#@loEo+}z=P=#mnm3zAk##+mzmKnl)4urDGU2=+?~ zAq^57*zZ_FeA*Cw>~N(vDS{~rmt=YQojpbIEwgpO!+0U7N(u3AvnTUu@#w?-Plrdx zF4kF~E)7k|IS9tF$!iN`LFLPyMvR|y!^uM8s;mS z_^f^|fGiOO6^FiD3MZ^siOBcG=fJ94br+zVM6WSDDD9`|C<;~~I9sQbYA+`iTDZ@cwGeZkWM#YjrHX*FeYG{LUQhVx!zhzT%vNeoHW-TxlPg}X zURxY0`Vn_$KXQMt&A(`X{OJd$x<&mRNKxF^G}?D8%lmI{5F@B*C`PeEYWTWI!+ zVU~+vaw@(5wR6%w9z&~crK-K?H|;vegZrO*rqBl}eQzCrR$wrRXuRCh$DAN2bN9=M zA0iYks|5u_3IL$>&Z&79`I%g@4wLlCTk5L=a{^!fYT86p0G6#KP0Y9B2^%k1{rnVM6jEG{Ji^ z&XE2?W`TKr3Glj~a8jH-jyYd2PPA7s)v-u=tKc*q{rB2MR%kjd&0MD65Nss`-!x-l zPXdJ00eKF#rYsE`^6kc8zhU9voorz78+v*5vDc+{d;0r(@e81QJ`ze+G0q=%KM(?Lh-%r+`>9q<-PBLGiC0DL>vFNE{bltw6w15}? 
zVElX42T_*|-eFQo15Aq)sW!G)-W?aS*;6=_oK?!Sly2LC51@W96?EYtPOcE<#2@kR z5{@gzi$oK(6Wf~w6!w81cp>L4)amg5-q--aFr=^1c+k8t^BY_4?-GZyMNr!6qsuXW zIe7ymF)Ze`9R)BqM+yg16sVKvxw3xYeUNoyW8YlwGYQ+Pw)cUC&jLbhE$hR61(xZJP0G#`dWB8HrmIxsh^63b$M|nQCdGKyG+l>Y zBpc=w42^5_5V(3el;T(=8~~Uqb7SM#);~E-UP_E-3krQo=&64=^93knv4?6) zDZ2Ym`qc{X2hZ`fj{50fa57OPM68E48_y;1LPMd);ME;S1uODfX`MAj4EXnx2%u*R zxR4WLHy#2%jJUr+UEgV0_Q)O6X{_U@L{AU}BERMoj4}ff6)7N5g%@r>k8BIA({SiU z4rRyUa7o^cM&$y;azf#9jgrIQItEZEEXLETu%Ms-!D$_Rx}9*|GsB3h7y!?5*Lg9b z)p?@fe$pM&GqmMWK!)Q6teH0jRo2J>Yl;KfnA=PqS0J)Q$q`~DuXE9s{YOyfmyQ{L zj}0f(&Alu0h}k=jaxxkDNHSMq?uvscTdIgf|AmXT^Ir8rh#bq~H8cf%^?#|2uNs0C zuE#ce*FFu%dI1awUXsyAj1_R81cg@@?0pV!m;f}_5)~JS@lC3faNgXkiYk6jjqqrN zyko2u@wX}uP58k>k_oq=j?=&E+BqN*E~sQgmCUO^%&WOV54| zm7+7W$k|rO#RIG?6>Os~8%rZYP)Q(Y^w4MD3qtXKe42>##6d5LhlyhJMdq(CpJ)zH-@f!M(BNbID_6WjRvcrBWQ48a0DD~DbFR?mIQ$g5QI;XAOvVMSQgLn~sH zG_)W-q%0Tibv>LO& zaIVBk^y!DMJIC;7#BpkmcPMy&d_;T+iU?~5&blBE2sRYx3*L*nB*^>SugsH+<0zfT z<$PSO=we%Om$hSmzu;vhJTEpU+eFMV;v}GihULzZbo z5gd?kyorz+=5^QbxT+zP@v3(*^n>v{Kd01I&((hzu_Aw6M4pU^o=X5eVL|6S8QRe# z-S$cZ$?W3b5RM}JrGeJel&gM32T$6>p*ySB*R z{oW)hkqZYJeGRH{RJ65Yhid>v@p-#5K)XQ$uCA^nsMoV2qgmPlb(IQc-w>#%f8yvS zRQ>oq(faMnM>mzA8!Uj&ghjNE-1+yP`2AySNYjFEoDIoISjW%Xb7^C#D^)3xop79l zRw%4Y_|V0Flkl&7rtzZr*cUwo^)Lr|MCnI3{ze^;Z5jNAa#A0c|NGAVL%Eu32q`wdMk3T^k0KWOf8jIu%Lb6Am#N%x*1TW>3#f!Q;^QTt+-R1lb7Gs!=N+tpo zTyw&zNAL_OBn&?I5U_Cg4Z&4=>2{4D!Rd$$M{-77%-iQH zG5cXoAP5b1-CxVdBFg{=VgM*bEe!9-e(%g*CqkVkD2^^p?zY}WE?Y2%f?y6g>+_m; z5R*^Ln)s{oH`!W_P_l1ds_8@G6N@W!0XAdC&7U#us#B1yv>?-4B^TZ z+|HmtVR(QO-uQgse-5I6J{IY52NH->4Ldz7)M3-r9T?vUoNC0DoqlnR1cS#Mh$^+U zh1V@_4Rt|M=w%^Z+`2F_uTn3d{EZVVDBJ1rALdAVwr*3Fk?m_L51|!~y)* zZ+pOMf=)GdGt`S)HTq2YbGe`4x1)#<6XP9>F9hs6gJBt-vTU1QyUy7>pgQRU2onEm z0)dea2)duRi<35pLf8Lv#Z`eIb*Zn&4uGPROeje!yf_OIN<>L~kR!n>O3P0-SK)t- z)R0uQe6EcI*S5ep*LKyfC1wB2Y2H9up~QED$cj~F==&4nz1`iJdLbtNBjTjO2SU=p zZ8WhomM&u;^xIS=J^;N5G--{8MbWwB20FvJ!pStG`@8(T5DJgr8lLU-3%>+zJa14Vtp z0}nm!1ugQU$=p@N#mOEdkOe``(n+dZD9>dXI6Xg^d%5{wS8$No<$yb9qAU?EPu?J} 
z33_dKaNQ6yfY&V1KX4wAky1oPN@s%OG1m3&xzC4{Om=9b;|g5Ei&hpA&Wv!jZn-+- zIE)7Lw5JmH59)GEIk(PkcIjPH(7fDyl3KvhLQJqoZnBg1ZM^BU$8MMHHO|+Tce~!i z?&J^C#q#nnKD_ocA=P%Xo8L5s%3w6#*+x9>N@{YpRH9I9-p7mkl*h$e+ic~QDyMw$ zMgFYcyumjLjI5%g5S++H|3qqp^5^l>Ua!D4bV%0{!0S$fzx{{1_gJy1pWLEXljx_5 z4T97_uj7}MIZb%9BV5fW+yFhrL zPOczVZuIZ>L%1hSi9E0|NbxF$dz$!%Dd*?D-snchszf61Ug=QMO`jM~5)_-*El&xZ4|* zy1HA;!4*SAPu@$gMOxxjdaE@D63Qf0e7*VEkH-Hn@aX@3?D zx)h&PL)fTS_Kw`C%F8oE&$&?Ak!}>`w6sLkrN7L7UNRdIg~#er{uOa`(ju|w`dzzT zHtj5n@t-6kyt(rN?HXnuaBALB=W;Jg+|wqc#5T~Dk~aOu%o;jzU);So2h=JRD;ouWmj zyT3-94|ld=v8?#H;M6mJf97U(w7b;s=bWFTc182;bc*WkB4hO}8yMwVVT9(`i7}m+ zOxahT26>VFYCkTDu$W5cGt#RUc#>Q3>tk6qedw|IotE_U zWKRqXELFbOOwV4XHf7ZnF36=?)e@oa_|qi8X*nN#de8U{`H642)m)tO#$KfZAu%%> z!m*hR_Xi>d_0DQtU05x*FI*ixwDjG_sCDqDMcE7SZ%gswmF2pR>w9plZ2H~evxnDS z4i!nq;thYFOJK`;{ZrI`oA%SP6o2K(jHjOa2Jr@tVYRlCrrT1+0_30766#%9%5EP( zg3^zIv`)CW&mz8j$7dV_{15sL3D+i|e(D0^0~`n~YKpL*D3ITPn)%DI&g-0!^Y>$Y z?;TQT63a(x;F%}IaNT#evx-a`kbj%xSuSmHX1+N3g__FYXBXv0Qep`#R*!^^VCDX- zxPPeLaU`u?Cn9K2lB;WEqS2^)n7ZU|l}+i+z*rBI&(TKnrMSImDUG3i zUnHPFvFaASq!ERn>r-e|?Ybo|r(4KvA?Kkl>ew;*d0?+0!VBsA^}FVXdC(*P^PnIw zLUEZTTy+VOV!y2en_QKiMhivpdd6!a*YlT?=$}$^bZ*C_U|DL|L|BMd+89-C9QpC< z?P^vMS-2U06%{2%Fd@ZIZtw1>L zPEUGBaN6L)q}XO> zFVBg^MqSn6j5wj`l-J<^4xI5vDfEenPg7o{CEh(1V4OxDDj6bU8ZZ?2h3vc;gVy1j zS|_}S%)|&&)>uT-LdIjL(-yd=wa!~D?-0j5wl`@To^oE|-*j)ESru`Dbb6j^d&_Xi zjMMJkY7m#?reS)GA^XjfWtLXE*#-I3XGR^b8Nu$LX_>QQch_GGb41o$%o_6+1Rmjnbf40{Xc9Iwj=WR3KeoBQ6R?i+5mFya^3b8YXlZ!7%dZcOHT zX=+)E(8O*e+kvC!7#Gp7lnFyK4?fdVREiT_L5g5RkzY|`8C@C=EZMV*61|8i z!~29Rye|%~bq)K^5o()jCDi5=G!$Ji)freSu{Dh$OGsbzy?;F>%dK#UoK7=({q%gm zhu)hSGtE8kiF^499T;1EPRTt|!Vi1o86WtN&g}N1^{7OXfViV}d*4QGXWez+9=c!B z-5XGp^sp#6jv`V{>@^zeVCScCN$sA0QtBo{Xe~p)%gcLBhb;q#oJF41RFw57hPF*g zpP#uew(?xkYtlN-&ilEOo1KjeMgJXFho^gl>+VOz<$*z(nA}lrA6DpLqZHz1w=iky zqx2nFy!<6qX@*nbMF{&l)2HU~-?KS0h3=9)cA#lCyg}bhg)Qt{OKDb%ipVFl-Mnq{ zt#VK27{1l}!o_P(Y~F9IyC)OS9B~+pk+Bs+)kq-HfQ4)3iFL(_`2Ix&A2>nCF>K8V zB%5v51}lK^F6ebGY=%tT+mqW1j*z-*^{L 
zjOLV!rA1KsddHFNDDdbsL9tgtS)6(Ji6;xaQM~Fm`W>(rOO4xZVX=it zcWZp-pw6y}V>YcO5&t!K#CW5N+IM*R?gHV2z0#OyZ`MLkU@xDReTInog~F+Nf_kCR zu=)DwW|Qv(cT2@}t5r8smn*uUmuvpB*{k+W+W4Y!Wx{ z=_h;j61r|9TKWzbMi2SRO=$?xAm>!p#pT5}>#OXkD#jhTOmsP`I>qyWwr(F*7D(MV zLGhLNGFlJGuHc4ex2gk_)J~BED#@yteNs14(`pE>&Qi*1ZXB`9Ud+Q;$vxxnR5aOY zFwitReZ(i5GM9RpJj!7>poQ$mMO}~V#IMssFBV;!n)!LEW)%DN=B;QimFqVMIey9A zlzX_7QE6Y^JM8$QaIqjs#>Q5n<&y1F;iq774kXDNB*}8^QWi#%!m+r*^B5Sg5%GC* zmi|+(?xvb|IH=oc_DM4vo-TUx9?zF&5kAVw)3?#urkMHE3bUtZe#-Rd%eojBUnpkv zvWAUa#F*o9^^w{}LOa{m5be2?(v;w4d+DdWENyorf0T)EJ$0S9RMJ@C{7E~APH3f7 zKxJEg`BY4|a@u@%@7bZjZmsA12^SqK`yDK9J;HrBLT*bwX(9Rp*&$+Quy9Ji8n*h9 znIiw$jfFn1l5$OJpwLnqY+{tO$Z?FEh3MeSv}u2(_38O+&yq1bSr`>_6^@+7g=5%U z{>?q5S5{+1Gu)W`ad>LM>wLA30eKN0b7_KzthS4J75v*%M-&

}91;efrEBsg%pi zoo0)s=9*c2mXxGsa!x_YWc!;*j359Ruv_kOL^ig^45@1z6@C3PUwF*AS-YUQ<SVobx>1l*{FYp_Z zSD^2SOorVYi9JY;`(u<3eU~hR+3JPgQ0i)gadm@Lqs6HLj5wnMSUv}QSaRfHcay2y z2pagwa=sQ672>)3MbG;Lj z_U9iSlqN@rg{r!002njJkE=J@q%WYaQy`o6AuG21By?@qXM?uqxB1yu-~*MwFVQ)K ztZ5eF3Dz`Dh_7=6VtY}grKNupz^7a;;y1tB9x19P6S(4wj}btOSp2mmWoKM1HNEh( z;~4R#h(%q!dbNg1oh$sDLX7IdtHv`J!V*fO=Eo{FHXe3447I)=Usy;T3kc(X<1{Z# z^PlvB2&xp;*`k$gA1Xv%S=kP5tebU^%3!cP;I|R^Z0X|kbku2XKDFFC8LjeTT-tm~ zj|CpB-LrI5h`ai)tsd|s%@y2^@G+@wQrH?wf}jNo{+=adBGD77-+mEWtmm}>vA81M zrJSE5oGDUbKUsJ(x31fsGJ~z z82am7T@d|Q#N$HU#$PCIoesKURVdrslj~5#F+Z{~WMO=N_vLx^(Ouo5E5ogaLni*g z?1SYKeQ^0X4K*o{I}k*O<$_UDm^5+DpOs(XzZlIkH6Lwys2=g2%+cD-jF!Rb)x)@^ zEC1j-W@%-}joO*Nj5K9LWQ78V+uPSCr6C`6;VhXH;tOEoGd=bkW)cwiH61I=CqUHp z_^(6$jjF5{;9|x(#4L<#DO}b7@!#G!ERT+K_w@XwC8Rm?ll%pBo5#k8&4*JyOo;ra z99;v_UH6geMK;LhTLmTBEimh+^9^HU6I2H(2HrV>k)&rbpTpb+dDF+QKFsHrau_L< znXx-Ct^YbF$`&l`iAe4e#Ei2FW0$DLL#^p+I>-dqQ2iF?My8I+c{OMC_uz1~o_YNF zrl7UWi+?Ha{<^Z)Kp?Lro>Dnz6V)Ic26an?FQGr@JFFkhS~Ky-@2jAB^>{Y zhgdkiOr9ay(lL`5Pv!#%$R;$`2Y1W%7RL!W1k{jYW4PPMY@<9gtjlDstE0nquIO#L z;D1P4fdp)be>++Y7|bUN9=>^Rri^)cZ(SU(=}j#fVax_XTyY*dja*EX%;B<9jFQIv zOL6}?WRoH+w4jT^L-fjMY&?M}N9dU@Xqr{BW0dzZ7=Z+5A-R>57p3+F#r zXyyY3Jm@}eE8M$gPpe>*AVW|D-%`iRtBl_d6xM-swo5SY))M#A`J1%F$(YB$(QM-Ud*psG>rw^Bbg$u8sR)WDh-e)&}mJcR_}_!y3F8=jpG*94tpE z17~6=^OZ%+$2~eEX)SeecC@@KJtHIVH<^2`Ce9bex==I9jp*obyC|_B?)?7>dLjpC zOS4zp6~x+7JpC=240uoIM1_9qi|R?VuF4gw{_@=3THOn|+75hSz5lWf`)o z>RQ`^DUj{I@vTZ5&7zL|P+A*T+k$G$e+Vq9 z^*Fq3OSn?2K@s`L{PHjY&&MCh|J$|sA8EjlB_71H$BBm+5l=Mg1)mjXejNXQ7{|Z9 z{U#K?!neVOI09w?RZwA%sr&1lhTJ(-Z=J%`4#7)B#P zkvf#jAnIz&lk2~<5hK9#P|RUR-gQfr0`pZ+$@%&DE)uv;pd8=S_CE#Vuf;+jY+2Gc z$8AOge@7qW*-oW?#Y$IKS4(K$%h)$)|5iNsTj>*E&+o3EEbs+Bc1VLq@Ah=+DJ4e>@5FFfdB0CjVG#^4MJatcq!ZC=G0-BipP}r zQ_65_WvY5CfRf#7=T?RqUc#?^!TLpz4=^?`FX!xK7*r4}=>bNwNu06m-vSChk%;h) zk&qubCDUWXD|wx(F(Da3ij9`UA+v_Ov*l9do_n6-;$Yna%%@O^#khrI?k<4RTOS22 zZS)H#PV!+ckH0UEq9p5|Tvbi`;P{CN5$#)YD6eiPd|_BDg8?u$8St3C#z~HPpEWg_ 
zx(IA%Cd+xX1h5~v{yj;Ge?;Rq2hahGRa7OEeaxyDkHHv6%zPMxi(%ZjuI>{R;{}K^ z8+`Z(E%sh`Rzd5ct-u$yNKX-l*^=Q(505Z29E+mpLuc+*F%1G0W00I#`mP&y+l^C-KBQTL5Ja#{WkryDJ zB$oLw)w~T9w_%s4kSN*ZoQ5eQ1je7M&?&lQ8~g!E7%~Ae2c)*6AH!tTmIU!J?G>o6 zZvnn9ib^v5dwEo%8bN+$&!a9Y< z2NV975COW0!KUea>NU34jpi{ml7H9s1Ieg5<)f30@p&laKSRUFH?0X0I+0>8bA=vy zWhxxNdAj>L5g91+#~WZl1IJvl>`rp&Xhc({sQs_zlaZquIC*RW5akUyhJN(uk+fY_ zkoG}vwm`CKMr|v6u;y+6UN~LcUOds=~Zmn|7c(s8r6I; zOePe=mJr{nOwBxo^RDI%*U3PXsaQI)b<3!%L=})%57@L5TsrqO6knBYV_dp$8Psz> zYAPzPCe6136*AAmHC@cWomahxy(75{n^CR}rWIbXQg4jAK7!DY1!VEl$ki$Mvldh0 znNB1S&+7*FIC5&`3|zHrozteJy_!sz6Qfq}7XU$O8k#+&^=i|RP&pcxtYf(q=GVkR z8SwS@DCQhNRWR7nq}a}eq35yy(uX?8mdBJ^B2MZS5IUC(1dH9B5ZOE_vw_LgGAY10 zX(P%e@>z^Imjh_&F(z18h!Xe6W0~ieE$I#(#)izfC+;u=#-c#s>?o9?5z4fH-g=93 zGN+h)f5N1+>vxR3y)By={co3qDX!BlCPkkoV>C872P@IBUcXO}S4kN_tCq(2b`|Ln zkT(%*{}Qdf*nPF5hPSTO-D-sDAOJArM)CEpPf1cz2Tvxw**eZUA!9kRVSz<@Hx{kH96l9W;+)2*%|%)%S^Zk?flS zY1;#wBM2c?>9L1DsvD##Gs9&9m5O%FkK(6H10i zt11z;Xcw;)=8}O69-=Tigo!&~N;L^M^ecwJ4zXmBLW!lS&yt^3e9N zA99`$4IC-~jGY8Si(gXUj@RK1l+VOUoBt9U`Vjvrn4x-+b{cu^p|6pB9*AhLO6mkUppl@>Db@i* zB^rF;;d7E(k48r5!MD%)*G7S|q!tS*DOWg)G)|6=fCvQQ`_0#xp87 z4%2^)ka=-Wc{AX;HBOByl^h@OD_bh_*6a90Z*>n%`3E^=YWyYiFB; zVQQ(#@4`(}F)QHtUHz;rQORKMM$swg1* zAPpdH9jDOW?u`5$2uJvemSw!K#m(N@lG+#jQgkp%W@kuUYd!7QUx!9rfxsHe>!dUm zFdeRlZ42hTec*<`woz44+#b$>pO2mRqpb9%xS7mLD^7jXxFfIE>DK>xD1~3g>T3$` zInxc%(6pO4#kaCMgOfG?d~u&)xuiK>R*@NzQAhM$xaG`9{(d(aAX*m0(T}89;EES3 zYC3d;ghR{uF1+qkxzW%Al=+Zas`)Riq$GX7X>6%OP+H&epz+0Jm-F*8Y7*jI2lWof zDug)2YFv0mO`tw-&t}@XS%pIr#a_(xl?k}l7I5{Pr;H%eX1zwES!5Nr1^hG>+x>AT zuSZQAW{PRYLW9I78uuSxjK|zI`C=`e?frTWv4@yPj}EUQ-J>ufcaLaA{q%g_TLEpW zl4Mhu*!Zmo`jDd#4DI`irS==LCs}Jdlix-IVgv89A!sHy-QKX=URAmB0~uxv)2z(_ zNt$mJKuDsIG1-VOZTy(l@q+%&oH{ZtIW|(;gr{sKzKqcU>CzBf$oWV;A-*+v2fBXW zC+dPlCD{JM_ScO z56!ilwAZ-`P}tWc5{530!ARw@=#4D^6pDhT@C%&T(2rvb1101F1BY(JA|Rni2RkRL zQr~x&2Fkq}{?@hDqG-es9=nd!1wGy7m$Qe9e-u}AmUma~9S=$^Uu_$zE>C@!(FOy0 ztZc@>Q>{AE^U z&RJ&@%l{^&g5WB*9;awT#EduG^RNye{y%dpJv=f$H+PV6G$4@9EP=k~Rv08^meeW6 
z%=T5uyKHr){n)j4FkO<5uS%4jGtOFSJnL{RGpQ`i6QaVSw2@J%_Dr|qZ0k+~l=3k@ z;VgsiFz3LM=DpLU9+ZcQua5smbvs$J2I2y04ad;Qc+%D>{sBI*P%N_ zW4ozE*<|yX$Y4bxWX^!Md){ejzja^0C+I`aIFX^%SLl_0bp8TK=GlHu5AE38N4{2m z^W{dSP&B}>Y?cgCPV^^%Mpm6(7&CMx_@dUBXlw{O_NG(i&p2oBSULwSE_Kyl?}+aG zT19tcn9Jdmu2?m_#*MWuY%i{u>&Z~3pLCBW+eXk+62$=vyDNWzDRF)ouyKyg4MSrC z0D#(bCDn_WeqCtm)h@c|vDK{qelvP$(Vv_Nhf4q<15`j2 z;FG`@>V!R!oM){^{4$Ti7^gvl&IXuvZp1&tG5Tf#hCCB-5U)0lrG$TT`I;PgSWSiO zzINd+FHzO+2_8H1doj@7d;Qu4{aQde@rV^(et+q+GX!RmGxfB$o=wp>fd<6@>T(}? z%9`t3BjFk}`nEBfYEfyLivuR@bjeKoCGo9Br6%K-Th&LFW97I?m`F{#*rE)j`g-5G zuV)x|Rd3D2CFB{|KH^vvV2ry_{Kbx-NMt-{!4%-NyL02 zYTpW`9&K8>W+!APB4r6tNu?1RE*JV9s@VxS-+I@6sC*MaV4(;WEBEx@y>89OO8!E6{DV zTDw2D+Y#W08YMoOX&7JESENdDntspLoEQoHLTI#FV;LF<_ z9kEocb#J=lg?mq6Rx{JVmTyA8-Awb&4=;}EfH*Q2CO4h1MPl+q#lx)X>TP|o>Y3YX zFkAT50Kmd{iw)im{h`kD9Kh zhSbblp;KrTPA^^nn^oHGE>pFze%mxfU#Gto`eMcqUBr@%3z0h<`!b+Q{5 z{(J@Tv9u0_U_3o$o1Px^+5mkma+`*bv*`tD$KtQ3|P#o92#^BlSbP;VWNz zbzjABB*GN&dpql32cwV~^26>w7xfHPt%A?EKZqCn^=;RgPv!)jRS0|KA(G4VIp0F= zji#%d+q9V|^cjYH$3;?syV$S9Ry1+VMf(88{PHa07!#KNb3lgiQjK;9avll-e~l+A zOGtm7;qlcn$Hkiw@3fPzn~Q4eM+sP4Wir1T%&5M))5xY{Uh~F%8=2?GI3V*w)qUsZ znd5?LPw>@i!j;8D3O2Ha8Op4hjQF_OXAl&tP_|k3AGO@>hF0&tOC~jvS#^}QiJet9 z*I{pOiz_Q`EFUUucdOQ7+YUZ{1TD)#|CSFfM>XYI@JXh1Z^gYe!lT%Lu>bq^w?5>V zL~aVN=fJ)&tEL+`oN{T;^_nueWH-V%RBX2QVrNa;7)tJldrNvoyJ{^HSh(W>N2$7# zHKr!5F$uq<)<|(_-&a9jA(uX5#Cm@T6K_a-L{L+5iw=;wiB(>HC1pK|zRdpV!=&Dz z4s6hV^Ut#%T z-l&Mda%joGeJ$xsEP@ILfyT1OUX}zmz%c~8*}@>2j2b_v3#yv3DPANSt=MWkD|y?i zH^L1bMvOg`kXH)Id{yzyx0RvZ&Oanjm!P2}(|>!4+u9K78>0)Jc9Rgqi9TGr073k9 z!gpG$t>-4=ou@ZD--sGDtxKx)en}KMW7qKV%WOl1&R&yS0rzaodl<~v$-=Nu;UWOT zNa~~D0zPEFR5C-QEB7T)1~-|^&T=64Lyu60%hKrr(y-4Dx2Duz4~r@B$8a1c0d&Ze z5_tTze6LRkaZFQxJ1RL|)P7TQ>j~BRNdDTFIHTr>hq_{N3*zH`o%kbPgx=|k;^tdz z1Lj*0$CoL8iT0myj)Ui;8gjO#zt3j{dcGGBfk|nS%kOhF`YLb_DPV=xAJ$E>{Q?)7 zg-Exs!_yyL@kA?8i8dV)N0K%1N@mP&J|k`yS?^$-X&1H$h2kwx9PqB86;FfCbTa7rsJTVX^D7JP1#WYK z7obJb%>2Y6rqB)|K@@N=$IMw#K^8gendf2A4-($mxJZDAc#Z8|P6tWLpqpDMLp{vN 
zp`^s^>+BdQtbe|H+Qc+fVjkWi;^wPNuk@U2!>8+KeqBD53Bytl^UvOoe zuVDEmeZT$OWS3NKuPDrHQn3n4T1r3=@9ajlH0vB^RNfZnv(=k?nkTm%5h?7y-5fSF z*S#JalVgx@e4}`26Vi;;_va&Yd2?oXXYxQ0jFH*T$NO40AB+Eb`RUResIEhj_aXu- z#xObOndyQ+QAu|0!4xH)v8^_ziyKdC4D~Pn`u4GlFJ<~x6-Do=-B$N4*N-PvE}8?352Xz*nkIpN}>B3o&pPiS_9P5p|k2^ z=D8Yq%QSYUt*_b-aq$>IlevUrueJ}z26H?c4GdwB7^qg9(`Px%AGD1kH(i3qVV%&> zo$W9b=ll-hd#-1GFH4Ll!gfwk4oN*iFa-dSdh{JfLicu3SD8DT9Xx211l-OSdDz`^ zxuYw%97d)|$y7Ml;V?u@PEI)WNOQiA>u2cRU{6f>$zA8<(Cd~J)r7^*w~N|0RXb;- zQRF=N7Kf48)qco{$rnO{Hy;K(gz&!qe!4RbI;v@r&|uHCDncGd2Jgc-%n$Be=qB+~ zQ*ntMia^{xjh{ZNq0BAdrBen6kL=wx8yN<@<{Izh+wc)NCaoIgki)_BQR2k)aIQ(j zmmxV^=eCseaq;VD`C~zt++42Ksg=sBe^&j3yj_>}0{s{LY`2eAh0NJauinT-#sz5M z5j?9%oPV&|x-;&tPziy6BTw!UP?L~R$TCmHBT3%Moo$~C(t*C6#N}0JSijcW0w^P< zbDBj$M&VIQ?ph}XDq#Sbe1B|Jslz;udTZ|BI-VxS&wjJ=Tx-_}>IxMQ5H%b-Y_2cu z0Xe1djtEW&*(N~f^Vkt_%jqhZeOgud5J!Etgq`Q+OyjN62&|B@Hn4X3)vJ;xvb8F; zr}9m^>Sjj?kC+F&S#^u3Bokb`OHgw6*pba&k$MjYT=#w^d9P2b@0=&nB?z|D<&e`> z`0y!Ddi_S+2z}p@qg^zvq5n$&O0dwmq}}_T%*zri&6Jm}MW(0uYIkao{fNBDoy1ju z$KFDz_T|+dCdkZv(<>akeRpSjU7{@i#CxAF7Y@alIL`^o4KsDmZ!7OLlvlV#s>H3y zlQHhp`fd!}Eo>-=$dpaZnc$C&ZOh`v&oVr&hGW4sKEL^}$Uj4|al$^`y7=a82Al{YrP^Sj$3xic*^|AF9aI&6S!t-833{E;YNS z&T4J=BKa~~+GQK6P1kT9yUDK-+9~qUWSJkcO#HokngOe#{vKKIFjMm!uf^HSnNqhU z>8kgDx~4M!@(f)u=a@}Gfn0{3{UDd2>XkPiz}F(fPsx?u-D>w3)u;n6t*713Za*)lP}ng^vh#JboTXwm+6k#c7^@CTXHhS1k5|`s>$%riH21yd ztvlZX@s9Gh$x6eCGr3Kl-XRX8jrQC3Yw=vmSPu(LZ~HENu_4_gR}$gzBPp})b9py< znsV)ZCB(J77QcxrOosAR)?KjBZ*oCz-&~|D(($_7C>%8R(j=sJrYBE*`I;k{4cFFP zIYw^M7?IbB{`N7S`vHy8e)i?#!yYk{Nk&-rX^{aa_jF8jzpfvpJXv03-fCAe77@Fx znP+@`bUkeJ_l2e=`;Q9VMT|7VyJoXnbZ@=`1sP80&`ri6w_|Kh?N_BDz<&0H_MJ|hCL|L zj_(IG-3!j1QU@{pS_%F z44JlF%I}^tVmn*qFl4hZ=y>d`kFMWY{}l_ByYz!`fM*&y?C`b;&qik!`VezcekAPu znyI3G93Qvzu!NeW*7d{-y3Hr5RMhzuNz`j%4D)Ju{#U4Y)rWL=wCb6zAcNbG!X0#f zdbNwX{0*x4X^kN=J7!$zh9^X=rez#YVf;xBz4?5v11O{U6Qamye>6*HV{(O$y1%|% ztn0$N`V^y=n*`r?K%6M^<;dOEjuH};a49JaZ+}(tFss;If15hR9D4Q26P(hk>-|%ePL}tqLFa23wDNSyryYnL 
zJbkF^=ro&SIQ|S*P(iE&9#>xTrRn3(Yebz|HOdc}klXd7K>gHDNx7(EiB}0r=FW4< zBTRFTyqDaHMEpOI*-iECOg>j4c_w%HzITnbg3l*68WJd#yBGuN8@n_BaCDbRi%0Iy zJuNoTgaL;^yA*eOmdWj={AU8#v!z`e*~^!xk(0IO_F&y~UW85F> zcE8?CM11&;LQa3eXw%8$xkrBB1Jh+9|E+?7R7=$YgDyhVrXeQeZ??7oj!dV z?^E;rb=@%RIChbGgi@*7#9|C)t>f_3WBuo|Jh}RYf?d|m7eAk1Y>^RGuH6rLsBg5> z2Bx&FUxPoj)a-IO7(yP05uVi|U-OK5&{)3{V}qYe_@y|M+vtW3MN`KmPLy!ceI)0s ztWSEZ@ay8qdo!l*^T-#O!vF-`5!E9f5{Oy)F~H$c2miS4{`e` z)k~C~W!@yWvh#9(6^a?xf|T_rfk7MW;)uNu%IpT+97#6M1!}Kbc9BP&&#jc%-GI0u=Siys>odk{&MgQt9HW$E2;tn-;?#J z#r^Tngz^ZRu?;o2E5w-5)GxXl-o^vACmgfI>oCu{^8QL2oJHm>^(r>x_b0$&jYTR^<(J5gQ$#on)1OKn2-q%Ex}0Maqg_Hk1BM7Gm$GX*Ufs)_Mp`TL_+}B~##*i77>L$58KIP#a|f_u z>9(pI#3N{M*855T(XCeGF~LBKWa?W`K++S>YxQ;+{i5SBJ#Vrn+uA&Q(UknSx1YmB zX&II7LLlXSaPzLzhwmV3b^li1b+!@9sn|vw8PhjKXEM72JoSsgFX-*_9{6=JzT7;M zCvA5`{7J3lnzrEUT_?L44=aVD_%vg|$pbQ$yR5#m3dMFDuwGtI zu*def)uQ{U9C}@FE6WFXV@D9@uYJ3w$}!V>8TrH(@vvu3SHBEpEm+R+bbRv5AN6Xb zOUZ)}xe`i8xM8{WslDsH=Kl?2VI#&Vwf}wgmh=`qsWXG}z}7-9mDA`Db8cjPBZ85= z*Q|IxZ52&SGaddY%AaDZNq#TK-t*`8t`HyfgtS$E?IE(YvO~y>{xvNK$y$v>rfd|= z;fwf);xo;K0qYdl4YM`YJ=o*^NAWUN91Z21l*9Xj6sR(R}LhW7MLr`EXyZ zYuYo1xLp0!(HZeHMg3i%r?C%;qA3M&&>tgwL-pC|=2~vu8Fh^6(=qvg37G)z)+sdlv|8PVW;#m*PVnT`YWM7J4^Hmg zBSZ>T={H`T`*pTcyf?bX0uwPUW*$;m9+YTMYiYh+Cb)Z>PR(H3*d&jCO^De^XP5FL zHx?h^yfw`{%7iuEg>lldd`6K@yYa9_s#vhjj_S&4#s?rRdvwPpFmupmUH- ziSU{~f@avkdY5V(u~-lDV0_={)uXIas)5MQ@};Ve)M}uTL}GE^Z)s0Hoe(kN=4aY9 zCD?UFJN2&;M`o1N?K_o(*-+K;3E62JDEPkeZKsJb~6~%%hn`a-dBg7pAom&n&aQov2aw|JH#Ma^z%p zaV*>k|5OVkf^Sf4b*uF9cm5L&y5H->Nljf#n#46*Q_C$5;&U7Qm8{O)I8rx#zJi?! 
zg3T|PdeY{pthqIVb|>q4zxWvxY2)3EHt&zSoz|53aY>t}5Gfn{(FX#YzB5Oe5-I^P z&Jo?+@v~i+ej`v%bEBVju%X$14lrOB$Oj{Dm1Yej9;O`q$?HNLb+9*b?MM07oQ|2N z&u8qLpqia?K*w^qYg3&j6Pw_>FGWkC#s%yrLs*2 z8`IW-XPtEHg9F(xPq05F=R=jnh7OU#eS`OPsw7){MkPneOfh|}0K1dvgLJ#{@Ofd6 zuKRwhq{JKsRkLQ?;(C}xreEH7AIV*RzYYUx0o#gGaFQyZdurkAviZdZF%Ms*(&~0I z!0(&0sU4oJo%vZ(z%6G|@?@L9>FTOUhv-8_F<0r5w%2lTX0Hh=J1fP{O+;8akHQs- zdnArCnDlgg6uAO)ZBDE$zU?FkMml~ z^OGtjUTSHCO$w{^F_OX}#p0J~x65zt=uZ>m891-m{EFYOv`1&akm^WG zR5_d^R=VO&uK*4v(Z3sL!ttM*Gw*2}d#hhO6kYF%KU4kC1ZPbqt2B6K2w%l*p#Lq( z`)7MHM)$6$vmdtQWAwi5tV89S92H3B0Ii_&RrsQYW6)MvUIVYu@B1;_m#K-d;I2HL z2ai1m$`1au;Ns^o?A`g25;}w@A3RzQG7>pTJ~TlOk~Ch{-&xV$Bwm-eM=MwBizVHV zd!X~v9eCxGYqNfu?rA8;%O7#uCh#`o*Ww?gOH`^pPC}#$WqD?DJ=}WApU$oLx?+RB zeT&+1LM%0O76@E;-2Frv92wt}IwC$Qq~_Xs%SF{$AowT5Dr#`8Wvxohk})wFN)oZO zPjpz`j^a0%bPSQPuIQuB$izE8rc_*sDszIv>}U9u?Phog3q&)Ybp#{s zYfM|lHsUwHsO;)=)wQ(d&S36&uiWx9&>yDhjWg z%5~j9KoRcwGqQ`iG7V#&zp9oXp604Y>zaf}7u3 zI!7~4vwcM-FjsVM@^ecI^)nuZ4UJ(^id5=xxPyvPaz$K1n?$Hp2%yuO;xv3`M)SvVtax|5aG@On&|d~9@D zgBfv~^?83?iACgdk_*=3^{?I3aXXO^AXo!Q2sm5SC5A2ccU&9V{B~-zLe}f|4CjC$ zu^L9baBjxOmv^1EdUXYO+Ro$W#a%zaj4f7$H!_myp7|P)9mb~EHb@5o0IuArq9~Z*mCn}ts;)d z*K767Hk#ms-deb2Q`@&JeoLu)AAxrCV3v*SQbtF`iH|eO9v)e4B#k~m=0IXkFh%`i zoymqu*k?{eFXZ!ca%VsLUG+TdUS~Y8C~0e!aO7SO8woVzbtG2?z$Ml$(c)fWZ1;m`~rim@e_`?OVlcVI5|9Uytj z7#D*rXm|+40zbmk#5*%N;R_SRW%ka|UkR3i?2J+wVju8oKjQM0=XPAdzFt$yb~^yw z$!6~Jr*+j2?=Ju^MSoOKa6rWXq$Jx|my>I14%^C#>c*;wzq0@e0ITinX6)0C^o2Ct z$!jTF$13Y=-S}U@MmS!rA9?YyFJ5fxK?rF^fNrs}_>cp_TQ+1DuJ=d)txDNWOJw#; zco1EH6r*M)OmM+>%-;NGb*U&*rj3Z2pmrwYliBkmHf<*z^dN(4{A%7WH68|iGe!3i zf#$9?)~*#HY67c&NJ~$gv+q&vzgWh%Y_yN&T4fJtm>#(P;e%eDi~*!)nbW|I5<*zk z@P%r{N3py2Vka@5W64tFpP%88+jA5l{firSB!Q9`yu9snqKuxe-fRT0o&yw0 z!c%usIn@Yv6&ko^fWWR_QP}N>xyt6w^J|-_-AtAWd|64xc`kE70)hsm%h^7PF11Yf zFoP!Mny_c~b}=sETq*9lGxaxa+tL{K;a&O7phn z3Ldc~KAMhy@4hNL9U?J{hII4?t1HD(0~QtZ=PhkeyxVHSrlCMH$tg`w5mP#m+#0?e z=$8&Hl{6NfVzI4YUmA@Xsq`H^Q=WX)T`~hCY*Yx$n*z4lsr8GxgyD07OUo^Mg%;&Zru12lufxU8%w`Gl;c8pSMv9<2S1e 
zRge8_W>rhoFxcr4=fpXeJ-lc>MXA-vq(XN{=j_2?a*AIvt3mxvgM4F8azHN4wJPfc z?lVjtt$Gcg*|YrN{&5e@Nkq*)kU*}zxHDUuuEcozQjvQ-37m+je18QJ`<3M|O%=$9 z8t@sH4kp7vCIt2@*v}s|{q7FLxiG{whTdnP$Y6*ckP**IbNPRdHo~;A-Kp#=jeoWj zfkO)u6rA^bw&=X$vv6kE%QP40_(~h;>h12fJInNHK1RFByru!{V9yq$*Uq_}%#xbo zY4p;e>F!3NG;rh7MtWwPr{L7{u9J=R8vU?Y|AUVmLMizTjRH+;$$IiGn9d&M!zEvU z`f+pjvptZ9E}4V59EpkQawT@CsaZlXE;mINg!goHgGjj6`QEMJ&Kr0fA=rK*Ta*ak zM7@$-nWjxXF1-7C_uZg~eVtkEY`zlhrALyK+0UDIjKw+-1B2RhIK@$cp|TVmkDbs;vE0_G2RU29Tu|Js{@eZ}NXO-o#YZL2qx zYHFAthAE)N3M$QT>eJ@dJ5GyROEHh$lHQOS{GiYmX%y|Y|rQr(|o?S<;>%=zst zzRB#t7xIm%->o4SkZ+^iM#4%_Hfo#}Dy_j4YEabr9IDX%tM6@I{O~27Nr54o_Wsfx zkc#Da{fxLftsRJEV>o51rBqAF>r*AOgMOviH}>>r2n%W>NFCzz&`G=><; zxz=9}f#5AcSFlDDx^@z;jqzuqW*1q?5CPB8Y-BlaD(%32E=b=lYWfzZS zG-+23o4$@RzZ!dI7HvF(ECcP*OV7qqr{2(G&aESLatTlP%?;Kpl+#$Zq95`j?Zutf zk_?D%PuXlXv(C3If(XnwmSwwBqT%Hl>1&1kygE!<^&H&c2oboGgdHUAd4q7Id>eGR zxx$qGdL}E2+-!Lleny#%XOqB;Sugtzjp8k|WoCKP{!W)W5AHIhGj6{#*qRrF*54vw z{9yI!1y4QsaqEh}pyG$}Gbg|6{{oy;%8~B^#EX2B{nRzY~STOi;PV+L@~$;c%Q4jj(zK z?RK=71l5N(xg8d2xYF$tfmSEeQGIfV^b7vQ*qK!!*(8buW+)v~9}8Z@lFlqjo=!K~ z+mgy@&sf{jt5NlHjawU|7(S6c!@Gq(79;#MJLCm?o)CxdLUu8c$)V|2BUVD1A>oy7 zkvwMf(16bHgNPYKs*jU6+~ia(b*n53k^;;AZe6S|XB{IJ`4WB~SyBh5#G6LhFXYfI zlEZ>N(*9h;xu=)dxc;TF1xN(afE?~)C_;M->a>=ml{%*MP{hp2Pu)0)&BQ3#ZZqfM zg-WnzeuoT0Wr=C^}iHgkGj@FuOP?HBGwhNx5xjzk~`JQ|ux1$y%E$Enb zNwc4(C(dT_{%UwQXVmKqPOSRm9Cs#)J?LIHkW;o6Z5YtwZCyRvRMa;k9==i)8XwQ_ z1RGI?seaC8h>SiHp|6k_y6NE@KH5Wb0-RN8v|sgne3H_B>f1HW7^@Lf&Jm=VK$RrL z9dI;zRd2j;H<9UvoN}^8p+T(D0u(HBZLe_IHoTWU6MgRs)VRDkZWMs|bUh;Rlsmqr zc&kX29ws^PO_DjX0L}}f0gDnfbB1`S?cYyUVsV;Kc+}IT#bAQ|NS4eSoo3InC3SPO z!I|HdjATL*(13Yy6mM2M`|SP6x=+o|j;qf12kRg{(dN4R!j98AsZj=|>-k1qRExh` znE{aDc3_GvcvS2PDpbeSlyK-jPl+!lz4c-IaIX1)TR1#t?AX9YOC;P>X3K|1Mwl&} zM&uz#;BDTEWJ!0Fu(nFbpmG_hl3Bq}AUqYZ2$`m%Xs~bEt^j*6Qv)xuOtwZqLa+Cj zjp|G0kCTx!-eJHBZ20V~!#o=)(K0lJ2aQ6gl6hxU(l?&W0^ROBl4XJk5ouT3FZ&}g zshVTaMk0aPXp2VVCU^6<57m*1jiTF)E3$f)=BF<`x-brfrS_KG?M@uetIM|fGhV-~ 
z|K?vSB-8ZeFBZTa{vVQ6&@w7FlSs=M=-#bg1kzk}PTRS-;<7RoCdQ0@s**1roaWGa zPx~6Xny!yLBGunCmRfL7>ie5J=)ejC3T*3qfmdH%-509B-iewD|4<{(j^`vJD(_Un ziD;Vb@0z(4J|s-Hy7Sp!y;oDZsvr85`a;|x_DmF<>5yI}3df0?tv2~SEuvYlBZ8bA zlkU_Q7ReKLIFC%iep6Y3#Z=b;B-KW5i%8RAu`54FltScSiD|>s_Vpc>Yj>+7etgU*m`zx=j0+%u}{MHBA$19=}vV0-wGm#xFRD9C`&@dtK;E!7#D zicmtF%^2CTn*Jws4EHZ1g7}QHoG@fws0-*A&Fob z%8jODKOi010MX(1g!v3-Ja`+)czSCzD>TG$*p$I5w*LHhCd5Z_eM?CqA6CoQD@&Y6 zpo~R07+&%Tz!f@~bW*I)Ltyp>XipLtw^y%vIO_k{xTWree?QN;tm*i^%XcOUoDUUe z+v<9ZoITDH2y(xLDLmN*?G*BNJK~?zewo&k$!O~$)$DbT^rz~Ot;ddZuAO%+X`P=mJf-+hFL^)I$W&q!{FbNfYm@hx0zyQYso`oT?JQ)vbvgQnC6 zi~f6?^YK^Gxk8__x1?BB8qThE+5VVX@!qw3m!dz`^scD6=EcK+W(9IzbMF*cxf@js z+}=?w zGViUMGIzY^X@NFeT;xb`=R$GBr)WOs@JwA?GrjHP(~GaIqS%tL|WQi{64m`GnJ0zQE(Vz${3d}|;m!WtNGDKAn+rD#8XB98{L zKe&kFj>Lr)vG2->0l~cOcax!P5j;a;7$p`&znAvY)Tu0i+GeGJyK!W>u;HLECY2h+MN|R-*#Yo z33OjKy={${C(KXKXsUq_(yGms_!x08_VOwrV`0HZw{k6v^T(+sbe{;Q#dT9^z1(Y- zs4?)0t|M#mGc$WitXO7IdvTcVy?d`g6BrTg(+FCcH@A7&gTavEernPY_#9&QZSQ zE!w~x#Pn`MNy1Tk*tM^}nyX`HC!m!^K5lqbR3w^TT-ipcDY?G)5mHtSj_2N&GSN!r zOvZzzzc=RHHFlsokKHMDwIt$eCha_&Pj%>lu+^g>*)s#WGTh4XYVqW^kk+~Vx%FEA zN7`Cv$2X6XI9AyW(Z6BG(w79A(%i2WgosWcAP}gy`j9Y|7gg^u&2>gtc|hZ>qn3gN zil;@{{wn%;bghuak8k7p4_oNAJah?`YD#<%*D6B$8E;^-`O09i!oJoj>O0$Q(e0P~YWJKo z?)jd4OVs#W$H&ySF$wfHw+`I|v~s3+$Dphf#qNRJdZ*u^xOggLgnFhB#;Ij~1zugnjwaMyK_57p$IXWc_i*zTc#V>|! 
zr2LdmBn-I4w^w-|rr^EkRZ8yCZTv_94sJ%P9Tr4isVAVXolYI~=2+BCW;pk<#q32u z@&oo0vLpwUWJ8l1ReUsI(skZ1i+>$%j14FSoT}|DN$@2guoOPH?Lqa*%V^!^HqXWh zA;kgrM~sXFX@0{_m3;vfg*@vnim|N$$lPj6I_If7>|3|cF_%$O;Hx?V{K)fq=bh}*M`rbRA3Ai<39G`?)=q|$a2_Dje{gfOC;CK(oeirmYpOr7H7NW>s2kW;{( zgqFY3pxjN>-0h+A920Bl?APCS8*-~#vp!~dc#lWhMcrn%H(1=haW%E1FuBYA;)gA* z$E+X*(w{1QG_|BPJ1FLn+J~yyXn{{3MpGKaH%h(_tCvuC1eVbcrkHoudB2;}LwFabUI?!Qe9fefMBQlx<6eowh zDwh-cj2a#feL0fdd!1VE0rE0%l8e3T;+TA0LYVF4$`d?`WbX7Lup^Uz*RhT98%m_1 z-Po6XHt_+)Z%J__3&8>foGof|o+~jyZj~`Z$83TK@3vQZ$EbWgDN-o(-X{vvl0vs` zrr|A-Dxlh&$D`kO&+#8QYvJjx*^^<(xGt}qyk%=cSyX+>1tjCwa1xYD&Wwz1eTqmZ z--+6pMn2acE6jKlo=L2aw06+Gs-feoP$Gj3uL>=D@QEz-%>#$8{^NdyqPPeJv)-0k ze`>4!z-GJ95YC8C66;&%T6_7w^PgEof)rG*J^Ll7xv?d2iMN2rh$Rd0soK135U&c? zDH~_{P|IYDHTw;|{3KzGCrp@ICI zFdulK=~D;JM%y7`AN15dhO?!3C~vmY+b)tu>)NRPqqY z_t}_@F0aylcri?W6>J71uNAzi>qi*+K(*79?FSkCZ4@MrT43gNQ|2pl?~tqbQTlBc z3=BwHs=)Cq7?CMS!FqcAR@wFC9kAi_^iZA2E1UL=Qo6yasrLRS^Mm5%Eoo-DJl;r^=oP9k!(5tnjY}VQkHrz`-y*1&T+7OlQ{V=YcrLJGMYl3X9GRYunB~`l6t^H`X z0O=Ch3>?m7Mxx+83t*uG`FRW2YCj(7jqKCjduUy}`L-vwG8^vjO8-plhpapWpTg_0 zvpS{zSdND>-zz8KZe-5 z@4U$+U(HG9OJ9mTQ>jBkmWREHeku7NX5~bSOx^!VvaZ3XXI1&3WYz5rlef%kxDi`$ zr`UNCN4Icfr`9@WI-FhIWr zL=7FSusL?Q(2wVK4DxF)XtDl4|CS*UhfKyXh;w#VFV{J)gpI z$%6S00N_s|K`glJ{^<)Uz!D&n6thVW3*ltLBPI^gpJQcMbZ$Y#i2@pQhnAW_%YjO; z_PFhsbj!X0=7h4GdTIU}M*Od922r7itRWeS0E-p$fdr_ymb5kX6d_K)VVV*qOo|ZT zSgg!+zIpo+Os#$`2<*1by`T|>!;IQQpXI!KurEE<;F9vfN(g{0{)DOj+(66@tk_Co z{PU7PDUjeUYBu017t{6W!yHK*i?egAF01_ko~1&IE&Z-m>pmIG>0njX`G2ni1+cJ^ zFY~!};5sUob_4qJT#8<| zO7tc)KU#79jf41Ipx}usfPmuJh$AY(ikV;HoEUG2c)K#bi~xoJ9JAo`F%3BEb_$Sl z{6$+;9>c5!{_if6CgE??6z_eq1sxqa5R3vEUCzFLi-#^h#sRjhUKA8vkQ$nP zN5k)({wyoO2Y{3U2F1_HKa`wu1Y?8DkR(Op8N0TpiXhuc8Y=9*eIt^?wNqfm_F33f zN{DGw4|CdU0lVqffOnM!guq?CO9W|TjY9h<4AVdEi29g~kHT$>vy6i92ohA2d5mtc zU9amUVbq|xP2z#FMjl9oR_u&J?*iyLAK*z&{=vuwjgkygReubRWi<#=lqu1*dj`(# zRTQtL{r7vJ?n1{0WPts?LLhrVtX*yqSd+i(r1jaZ0Jx5#1hLv0N2eV9jB~}}|9-l^ zODXXcXwGa_Y5?1$OonM1sRKapR(%&|d$f@Z?;3Gemt}S6cR{r}=I9$3DB1k~Tqqq} 
zs3%?xR6Jm|fK1Q1CXXlea^79$g#iLuz1mCWXx%3N)2#k3xbA51eZzWbK;Hly5Wu3t z4FgBVkC!6~ZlQx^%^h%}X>J>W4Ke`$&pPBatz5(k>3_ z2lOjU84XEOm)sX*_8P8yF`zyCAKzAvM=5$L^$GyUT)7aFahnU+43~IA4mK~L3^JlY z`jYza4+{Pd|DPD*KUYnX1t+VjK4ISAgq};9WmqDnvTLA z{#_gY{y2qan<|3Gc6qsW6jAHtobAuf5@&{}qRH0}yecXAE#rV}Kc-FufN=y})(6c* z^m%CLFQG4gT{Fm;*bVvwa02xf-qXdP!4b-(rMbXg29X` zBvYWeJ}RhtsOtQ)w8uGv0J8{gJ7XmT>bV8;K5(0FiODg$j1-JuBz&1IjoKy_K*RHg z#R4M?F>s7rny7QFuJJSt{?EF~PQ`QGo~%esdolwGgNi8-?Qx9zg8{%>1OC{nc}vD9 za(G}?`~F%V1J30$-Tm`S@>;5w6y^Vv4&CEyKm*GI^xAm5?sn8-EzT6(yOsp??&J4C ztJZBpw|ofxN{Y5{S`W~;gB$t~4RW-|`VefweNEZr2lS;Bgz>+x7xa!qyT)2$WS{xj z-!Bdf6>fl2FJ2DC5>!*U{`!i|U%aacnf5wSd$-!^M!h73P9U?lzmw3?$fo{e%N<18 z>M)pI9k*Q;&E>VEiGl!~;|V)pIyU_U#)^A$B_Rf|lUTv?aiW9GIEPAhdArNIM1t-e z-~I}Oe`OI&#)}G3`~5RM3KGE|C9oZGPQ&JcTC`zr)P*%9vw^{y#yC|fYC4e3j|n;* zI*C0pT!O@5&_?7c}C~*-+Q(6QHH`K0!VFP^L(q)@|9rv@7)X^WE$h zp&(dm`O*}ExCFkvDn<9eguAXv`ZvQ~6djxd!+27KP&baGcU*c-!Cch(o0fbRg#k<8 z_5wqShWI6E)E(tM3wa`G1`+El>G!v?FY)CT^eerWrbo1XIq-;i;i8(s11vRmRHq0f zoecPOJ^&Jgvc7351Jw?2-uOxx2C&b7AW4Hxj0=WTLQ)gdlt7Fe9avvOopl}qbSA)q z@?Tb_`gsA=39pB7(WN00z>QnqU&4(6G>$YVFpgUMw`Wc|uIuJ_58$SXter5JZ3B{_0;C7b8riC^lylI%0f&b1(G#Uvann@+T73~59SY{Qw z?8?iQQy$a~QZ#Oe-@Y9TN(~eaIN7scwUHmq=ekbWOblWz7J|N2o*2O z)d~OxU-=#;6%`eI(gtW8U;yu>kCW-7xnq1Kxjt#aK%po`sXK`n^v!q7Yh5yzw^F`^ z9&kE#9v^&UaLHl567+>6PH&WiL5iP;bP(knp)7$8jG=LwA?W$6V9fgmm>R&h?zw!C zjG9P&6xz!a2bwGZ&#!4_rmo40X<~yBDE55-$bI^YQYE$qLG8LSB+UR;2g6?;4i}h% z;3UBX2v91_?JvHfCXB4N(MlpY|btjk}(tZZH&NAs9#bc^r3YsLKso2tNUjOb5t zEA50_PQPStXIs3=R5EY_rGks7rW!!4>qr6rR{|37cXFdqSY( zEI38+<_HIv+gB&jSw3cX=m6#O{N7&k0qKG$6{^0nr}0pStfP{C7FDA5m;?Ypa|JW? 
zvDlD?&N>k#Z>QxCIzx2ouJn(c^rcmaF3)vDf>bJ&D(8o!FK)8K6O%ml=H`&skL`FblRBb*&< zfy@KiHuofOeB%L`!gv>#NWR#1=B#`P|NJ;A)D7&2Zvb_MX>uie8}QuMj(!19(EyNb z(36c(6`>FjjF}Oex3fF)fz;|T)j06s9V;_>*g1yYD%YpUgO6&`zdP0MAPq84>CyB#w4RnJvw*7G#?+<- zUI=k&SO8wIQPY_eAq;q?>HBml`*_{Z^x2LH1R@{$IjRA`5uhMT{5|Mt9@9?lm)Fd8 z#a!JtB(*)XJ$?>)&>fROoUqH__SdaYZ}Cr3Q3S)H9xW1BxW~vT`cmR{TYE2eM#jGa z(E&JdXrHk;$3+w|-7)XKQRVslDZ>by5OQ_3RV|=i&%LJ{g}%%_xg=WO@?1o7>7wy4 z<&>AZoet*4lw*!4bCZ9+s&X`l6E+nHD?TtjBn4u=b^JP6@k}V{oEO;G2s^JzJ8S?3 zTj#qjxx7Y(wWnS{=1~ZBQi_des9l4&F)?@3%Zq;~4Gk#isRuFy%m4wdtjk-((FWyb zdb10OlUk!%rS|-1orT7GRhcbvA;0qy|M?tZ_b=Ub4q1;cTetvPWGD!>vLSR8762j+ z$o7fhfu|NI9}(yajkG_8J4yLZ6Lm>2UH$qqMP_E5`?$zdXxV=|Wqik6Riu=g^%TAN zhgyIt;y*XE1-`bZU!_?d9gR!%JMayNjH5rD@LJIrqMZRX!#Db`;$k zA0ce=0TY%Rjn>k8Fb>V7)7pe*oB}5QMOKS2TEaUaYM~2uWX;W{o2A_!*t6;A!UH2% zSUr8CD5!EqvO6uuJwHkE-(~E)A^%M@5|^F%UBo-77ugXJffAV4#7rBKpFfT}6a9H= z2YZ{J-(+F#VeauVKxBw_ZUX$Y-Ez$Em@m58W@C@?nU^jNG@lW0n4i5^BGBxq$@BQ_ zaQuqvoBfD6<*9h&&5`Udzrx^C=zAp-gAJ|M3EOgG5C0fM3Sk!Aubo0!j8+!}kou|=hnj$X zKO~#&GXXD@r#Wyr4Z?>ARR@=TG(&x^CLXTgpUVXeZI}I+LwRvR%P8jcrY_F{K=n5O z{uH|-qyAssw&4GNU!b32*jAr#xshP1-k#)`QiKebF7R&?Zqz4b_n=SI?o(>)O+NJnYNbt4e!okL zxZny}o@VJ4Q|_QE=o`1Q)lLSp9zGPoPQ%I>Uv~ofY^m8oY=lmgQ1Em7Ki~NCkib8i zDO-H@*{z=+s$#`6@K(J)C`Vr2RO}U2ceeuXojHa;u~XdZ6ckUcY9fqt)nB%QYzp$b zxs}+LrquT^mW%%TIZX=!74x?#I$L&DeNA-NuKInOB}dhmST8|beA<3&1R8yy*ke4< z#QBz0VX@TWs<+8jKF1`WIBfdGV7(>pM)Otz>rW;Brx46YhdF194fQq~m`jf*Te3^9 z?xh*k4&EcUEN<{7T)?9lOj<#qy`jGYq~H;HR`bnXk~CyJWW#Jp;e^!WP9y`L%?xI7 z$=Uy^r9a;#j?fdss7E#%(XXtRAVsu7>iJ6VO(TVH{`?=^58wft;LuM-EJjC&*$nTR zXg7%_=NdIWd;{$SjuMiy#A@tM;*>%8|En~_^wFuBY3^CH$Q!jz31T2z3&z+IB5n9R zf48#ieN5a{%}fz3Dd5!v`KC*&6%w8pg?ifcp)6V_6@%h>&_GCz)c;cme_dUyJP>d3 zf`-YA-@|aT@03M(Q40QJ=2Y_xnu9T76NBqL{$s2+V`Ht zp&*Gzu^Q{O?L?N4rZ2KvVNm*)YJXKg;cYQGL1-pM4(PqI*N1JDJxu)c>!kOOCj!7+ zl7s>bkU4j9oiPdiyO-%Of?OT?EeqKZO<~KeBm=!lr#c7VpiA~1iZ;9CcTgoAulKw1 z==^}ec-~&svsF;WbV19@%MqEEYfv^as?HWbl%qd|i=Fx*=O|Q^{--g&VO_jBxp}T& 
z)Ov3-vHSB|&oTQd%I`%}wG?i>Ng&$!hP05dsQmRdYz*LS%x+;}h+hfng?KJUve0u6 zoYs>t$`pdOA}1%uaC+;3XL+^d2>rXv|LNkRI_)A{mGjYB$#%`i6A)KH7+vw2?xU#L zhvh^56u2tNv3c_$gA{PoO`>Z*t?zk~b@y4=dSdVS88RgZ1BW@_ivDje?7yoh2oujT zP%~OF1U5!_RwC>(VIxNhsdppN%a`M>g!kqCX%=E%@QAwynk{;1ZU>6pM899N2hMsv z4jTK--g)_c$J0!r_#9X-ysowxz5DO(a|Qi7t`J5w6{o@Jvv$`Xc?OlvE8HVTR' z3+~RXd;Fh4gXksep2#jq4$9cy)`B7HTf~}Y@x`!fnh0>iMow@J$gP#$_0b81c*D#-+ z>)m{?Of-GW2HO($+wZ8Fl=u_u|y-Nm-ZHCp`Q(hXeS0&S&2o8xDRQ$3Gj$QqAWBkY_SA1O3pl^?A(EA)Imv0C+ zE=kS~pw3vYi1`yZr`SMC7tRv)0Dnjh1UoUN&zbjj-hKfaGB4}f0WauNVlN5%pLiHY ziN1U7?5GrS6x5~|?nSr7HY!_H{!?}s=lC?}qH;#~G=tsZg>K@vYd?=L{MxSVLnC~~ zsQY}o)ZiEnMsuG9= zl_z|(`QYn$4q_bERGG*?<%v_KR8vVA*!0QTvfr!Q%fzp3Ho|no zJ~=PlC`FzLzuQ?prw?p$XgklgY1MmAnG(sy{FU-Bm#Ael!391=;fnqET*{bN?fhlh ziwPg4gI~xeWyrchCbJ8lWsjW>0~fzvT*Gws!)+t}ewyYjKagU-?6(%w&d$?_!zw>{MSJGi?+H-6G^00lyoMP@KuS^{wfNgP((h(ETY~@3J#2 z=c7G0Ui8)2%mr0lk0T-?tPmJy_YGpXGSSz4n-0BWLS?R{r!OePwK+QU+bg{U{0E}tN=rQY`*NBq`qfXCw zt^=4US)r93IW@U%yc&I`xZW}F`tBo;Mo#NQ5Q1~$ z@1XnIX0o=?#JO(hrJreOUP&HYWk#}k^Ust?Iu8t~rkkUJVDzd+6)rHyk>9wRzqRNp zyT+2~N&obOM%XyhZa%7U6mmLl zoa6?HPgTB{!}RKNw$Dq?q20t$5G+P+5aHJE&0*rox8PlW?T6fISM-9|?Z}99QCd18 zm*x>{#ph9_kBi18nHw=n&I?zpCWg;ad+<-MZl+V4kVm`3$yHknf7I0z2v{l2=9cJ^ zm+~q2V8Jkb*0s)GL?SJNM)>mnFT$APya93I zr8g6T^5DSYe>Bb!mzH!xrq!zE);(v_P7R=kP+EK-@p}#%q)v8csw#Eke(u-yRM_AP z9U70}@S&tf^%IlqDiwpH$6OUrcn`uwdwu`bi1j92jc}_{knfbFZ?XK?m z7@rPcx>3|nw+Ob?Natkgf5sLUPv&)}JmPOLe{6){$r0H!?i(@Q%WC}OTKhwgCHb@X zUchl-{@-4ghFgHVS84II99ydvwtp_}o zGc<>;bw{VF;Xmp|=im*rhYO)}T7H9l_IqEqqwM}OBM-s{^{Lmfs8583sro3pzFz1l z9cp^~DlQ7eI^whRUx^W9ejjjO{BqEtBSEL)|BgRuO>W4kW^tb{x%qRB_<;85_xp%4 zZunL4^&f*y_c2PCqlYP9+(E;#h@)fmRHLLl!NHJ+X+r+Vhi$r zhkzhSTubwv)puOZeuxJPyM`ye1|Kl$M=I?yY#Q!kRPM@a4+KLg%kPt!>x7h>i40N1 zVEk*;M-4&BOJMZmNKYpKw=^TLO#X_Rsf4}lcr5!A3pgQ?EWxyHJn(dU$|LD;`CT10 zWAxfa^R2_Rv`->fmDrEY^Nz>m;gS~1){dH8s}y1rZu%V0L! 
z5#_2^+$=UaZYTF*jVO1}?Bw}VtHO?bR6KcCcpvDI@BAlZ|HR}MDl%J5KdST{E5*+u zn#vHdfyeErX(Mi>7?n?oem(if)4pZ-f%$*Rk&c)d6y%E#8cgDD>>!X5D>eHCMFmiK z@^}hsd@|z*)p#P0@ESjl7}RMfAG>*t03?8S;`CF&|agD46domNvBd@eWk* z5dO8Q`#pC<4IgyR!9px=_@glxe&T%52FTqg;9YLfiGD_YD96+h@%6!sT;S34*HX;1 zH4xi68b;ezV+t*pZppUSS7Wtl7ibfA+c1b@D526!k=W`g`_)hJh3N0@^VeEZ4Xhi? zHxt%ciN)I0P~H4pD`3czxTz;9i21efW`!s@?YXC9+RIF5~_3#Hqt zBQNFa(?rw~ZnAJy+~EJ;s$K+Z1q4o*HK=NNw2Whf0dQ*BD1F-Y0%iCi+4&|lM(#3x zbo-)te1m&+`HSn0Nnuno7(i2jPzpL^Tz8nJ8G9Y> zue?lMFyZEW6dwGq2Kc*=IVuN=?TQoBf~x(9r3up}aSB&R6No9uq4!5?vz-No*qsZM z7MK6~9v1jU)T^`l)$6W;F(%Y<^1cQy*58Yq|9qlz2cuJk-UwJSjq0Q7ZJDUibprxA z(NA>$sd`i4E{7gUR5`HYcT%@QJ7zQqi~EiY4pc#I2kHM-RNOuYHEI_mcg4UIClQs> z#n8A3meydy;ya#YUX@R;^-hrO?8!|l)a3YmQ9GC}_9!@TMq7E-Wu)_9Iy`&#?$5yf z&%i>g0Xm>MJussc%fm&TR;bxCGRn1Hpm_(|P6K9TYrBg_xC<)#O()LKDO1(xWyJXjY8_gXw>*o-Ugxgad@Pip;~XE9h+O%KG{@?^c4X zK*@0xlBPiHE>!M{6Z+p5Wf^WXA@p)ko+*dv^E!_0-sLs^&lvnyWlOsRJsR`m9=nf*jtE%bWkb>51M6elx7$3mZO4d1RX&%nf$y z^x+A%mDUDOE4KTU>t3B!u*`l6UeAX7+%O_qsGG9EUm!@LcfUE=O)k4S zoVf4l9=NDpyC+ca zummPCHK%3vWkCRXsdh6&aLdkPobc?%ThRwSn@Pgc|c+egpHGoLKa~86nS4O1^F54J+o-CK%@}+auR({**xK58Ho7 z)uO6tMt*N0KfQH}v4dmG=8fi8DD_t=HIaoch_!?3x?j8BSas)7DLdZR%~VIQBwRP0 zx+R$NJ9MBl7158?G-RJ(za@F8uT~L6M~SNfU$i>5^0NL}YcKC8g2K%QPg+Q_l93q4 zZH;7UcKe`a0pjjk{&pwbSwdd@WUZ!9Q7un)qjpKnWrH7u7?;9 z&ytcO-^LQ5f zo5YA0e@4Lid9Z9~e}!r2Nl@vZeY4kK6k9r^RI8?GR;u!yZEAxy*&?QAELR*|=B=dl zwVTyGyuR)rOr>>Gcb|_|XD;g=4ZyLqq;>KIg1jh6)AnHPaqZADPjABW3_fOuffD zzYi|ac;cB(OrD7iJS6=HhCup_e`(yJE#Xy{;!+$uCg9mVZVo8F^)ddpT_|El_1E;@!A6&s4bg*9Lh~Rb1zD<^r)%pHx|*Ly6ks2O3LL!|ojn(B5KuT_4bn_`D$Cu4e1fiE>N2T~tDvdbCXFl}KoXRU9tZ zCU3QuDvDz6q4G^{J~4sf(w`5WJ~C&F5b>`OUJ~)7iRh=6uO7U$n_O?5z0mtbO+4ou z4S`9Rs;h;*$w8<86a&2)PJ76~>v4wIGtJJSH@scQ!?ofDC+HVI%*S5(Wk9z|k?|F%7DX_bTtLtmS z(e*p(P3pgXLzHxTFbTI@di5Q`LZ{3C3NM+#hz?eltcZ}##W+wfZWI2y6{F8n@^xMZ z6(_ougxhA;@%9UYag6sx2;0nGfvew1{|vOvdt7$8oqtf0c?i*is;)T1LbewIj{KA zBH!_8zP6LJtP6gLd63r`yu|5rE3(i$EbzEIssi_AYd{%p 
zdR5t7TwoT+vFpT@Gzw+37aU=NV?4DGgddlg03}_%1ZLfw-na7>_zD%?%Zuj=wUf3nR@D_h4qKRl@sQ#9~ zpaPn?)^T;-(6YOG(zQn_bBC+=GIZ7NJz_JgDHi`+354yKl)=}P8#>+4-TZcFy>W6KxSeECIurFoi9~@oBI>u&(ny%lm+FG z3rsqHPC9Vi;REJ*W7J zHt)!ir#s~J$xi;9*Kn6BN&EGEOgt$yX*Q-5BPW5=(KO~oRg5A+{h1j!1Rqe4baWKX zZtSjLea5I=li3~r+|{DFD0w+ZwuwDsREFLXB`F^{drMNnX}dIau++mORbP;5D%Z9i z|4z`&__Mx3zMr~1ujuU41Uy8O1TLEt+?bp){rx7})Z&GH?Q<@-s4&Y64jkL?>9iE} zBU7}ikHMUxbb@)DS>-*P&-|WWCz|iyRcHv%rg237sax-hPhC|SNvp3l@px=Db-ai1 zV!`{_{pX>GZvfx)I~}_%M;?d;_809L(6bHqg*j`*-&e4x`~5j;GE;+TJvvU8+@|$v zd1p6vTRuaJIKq8FmAo1FJ1coEz5n*JPejR7gympdZ*ztyCA+HxiM#8D2EAWAewwIs z=SN$^`u^vKmr#GF(w_gZopGd$WoLDY*==U2x-ez1pcMJ&=Jx9rq+&eT1Hc|2m(dkd zKVxbROMDCi9@SKylH~oKBGx=y`IiFU)OH<}m z7|*(fNeg7m6Fd1g<>Yav5e4ygz>wNiY%8KAnH<}?sKJ`KM0B$G{l1Ehl+>5cprvihIfg%HW z-MSrlAkZ`_rpvG??Rq1gisbes}L)npuvG z2CUVMaTjVgwYI(klncqtce;f(_wK!Y1IX1}M&uhmVE*u5zx(2S8zpf!2{M~<&Ef4_ zd}8vqixqZt@&63|h3MmFDu{5g-(zx!Qb18(c$o^kSe;3lcYTORg&m);lBvrDhu>Mn z#L4Zoe39u-@#d(Yn(c}3Z7r?(egG?B9O(dZA-a#B9iyW-F4l*Nr%`A0$<;XsyS_&= z19-MTA+39t?yB$aFa3-@p#UIA?%I;SE$tDI{X(@H)tIHIxcJJ*3+F*_9e`*~1NP4Q z-R0V`EeD|0NXIF=R6cogD7XBcv9W+v!<)B7R*gi)Wx&p6;=}>+vRx7b^?gmn&9U6;-qZ($n2Q z4Q8O!Dd_|7%B%721q~dXv@M_zu;pYha9CW?uTRgQ8Q2u^0LWn3@?)~hfHc7O;U@uJ zli<_{DxgZ>HOM^%XJ0F$ACJWo@?VKyz1xoF+aCTs>)`6B%Y&z?K!d`~tZdXh@)>mJ z)M-L3L4yN!%K}5*v438>$&n(N&nUFq*aL1@+g%gj*OmG}1Z~?<8d@&Xo)%2DFZ-}i z_IVosM>{R?-3*oA3ZkHldrl5?A*P}Af`1mby>Rp4yGWy#*!F zlL-LXJNGQD&Nk=XYZstg*unKsODpmeD{I!a-5D;*Rk~wKFu^vQ0h4UA_4Et9>s0XK zzghssvf)yTG2nJ^yHCL%;!+ufj{2SWfU5na=>9Vm1yM=9E|*oGfgff*J!G+V=XrMB z>4Rwj`5@c&LO$|VsDHjbJcdZXsRR9SGF&A+BxtSq63VuNs~IeEFZ|&11y3;TXHeeD z<{efK8Q{sFsPzibMRpZx0<^X%dB^4D;R;=O(uS^4(WB=;UP|Ai766dk08V*ewij-K zUB3Yz#(3r@j6;EXGr1<_HMEQk5)rd)dk?H@2DxT|KMF4>UuMWLj( zXP0oeNXEhAYmG}BYOtMD#1vp*7x2SRDI2v1tQ7I`5+^Y}>oXZvowM3XJg{J`qF+P0 z>ld;NC5^Bd-WW8*q3RtSMy0e(LZ%xG#Wt zVC_7F|H7njmj0HE+R!I;HlNMZj+Pmrr0MwDU!UZ7mjRcwr%-VTEP7k+o^?d|#Op$< z_L}dT0DFyBGz$!@0nE1M2SZ5qv?^eTaw}V`Kds21D|i9usqq8VrE2U$AE5O;4J22K zq8K(Cmy_vdnE14DA}d!m0DEU 
zz%k@l@;acHy-N8wO~6_((YSdp{x}DSeDFm82kn&{Yyg^3oX5xafLi?&AoIt7&ZVTd z5UP@!Pz5w=GF`@MmXz1Xj*F*;CAU7-Zvpwa0VkjzO}R74y?Fhe}cf8^H5ITOQ<_7=~;T%$4(&e{hctZ=(xWJ9GgDE@*sqD3j~Ih*<Z<| z$p_%ydAmZ%-sv~>R&5G6HJ)sH+UxfHTH{Mdp7Q`o1;lJI6KL9lZSf85omuv zB~3TmD(Vd3mhxI2b+VmWKtO(B!fBrqU@(fjNwPoCCjeC1WG?~O;j+*ocAWsQ!|(-z zz zN_Y|-7KW{)j4ENxiysh%Vy{OATHE(LOsEn$q@0WM{Wj*#1)bSs^FC=*{y2Tk5E!)C zSK2|`zPwnUaZN;5ORGCqaUqQu+{Q{j*rSdpx(lxKTMP>Gh1#Jtqqv+D4v%16 zabno5j7`EKy5lv(FcnzIq(f05!STTs`UnGNgN_cD$1BS<20^`lue}RL$Xmqo^clL} zTO^O})Z4d74pGlm59R_uON3W|8oM&ErSb7Q3v1h{qY5gc^gf(D)E7Jt+SJy7H};6K zScVhjmu#3UV;If_qEb>dek+VNOfw7K3^){hb@`CR6rgX|ou))Jd(O`fKvTW=5L%N3 zXVB&$redCjw2Tsy>tOZ9k!4^CzHAn5Ii zx6$?~eqv=1l?{h5LtV8EhJnNG8CkCawEd6L!TIaOb}M>RIK4c5I_O8)1!SYv^`FmP z^2bZ85P{R~kHiAtP={L9qsq?8DI371Rcocfb-5IPcxKj}A7XvMMHstPUsd)~{a36Q z`WM8oww(YoIPY(2>$k`r+*h~hR5ro~yo{@pJ>knp;6BXjAve+~awtI&1$hpw+v*6~ zLx1q>H%d>#jCQ2%ZCvsJM;zY$l12ikcxt@mEDKJQ@foi2TvD#tRTH-~wy`+N_-C-A zA5nI(ow!p(AsdGF?-8CA9{Rf@{@okF*}+=-(5L0F`rM&*9z;r}?&&dbG{wAoXLWd_ zSPLuYdDu|mYK{upY@~6KDpEnC(;$DC;vu|8LpON_i{x%Il-rqlSCT82jHJ6h$kNZv ze#{ggSgZ3XzKM0*&`klB;)Og2uHH8tM@6bSowI}Hh=}$KiXO7G(kTzb{ZFrx2l73M zR{bM=?&5-;Ksso1-aJIxwzl1gAz{>iy)C2uE`x{s`i9u1cxSsL#02|6j?k-n;c2#l9FLaaUkU`60HT9C+1`3Mms& zLR+tS$e3!5{Yn2gN(%~C$#(T(Lq`_nVz<&DLo#9?Jr{$vzi{6isDh=dE7hq3P#^#T zy#@Hlo2nXu$7hRM%<;M8J2Qy9AseS|;}o5JW5htO<-dhq=A7FU41o@k2v*sbvNJw@ z#7D?$c2~(@&vfM_{^tOjXpbDqWBBfypWeX50il1ya2A6$mV^1}&wI2Qizn%3k9e{& zFghGE2&at$5`RlN%$)=6XQez#pjgSIhe*CkgT_4F!+x=8b=d}f@Sc)j37))3Y+4De z9LKce8k9>n0E{KxYiYbxdO$kXwG%DcaXU1JGm;_I*a&;JKdr>rF8CJ^L97S=Bb zX9xdo`{^TB0Y#wPUGHEVa;PcH&`tpAy%|gRa?8v>GkmA6d-NyM?@?2vFssy6M?=~c z-^h≥02v_MojTPI;=BcjN|`+w^1i+m1xADvF2ba@Hn38W;{os8s^Z20v^?d!F=C zpac}TaOx}U4m;egP#;^|H>T^sX~b|ICNF;B!TyqTxY(pY_DwHj>w+WLmxG-M+JF%o zzyR4~hZ}RB+7^edEj$?xarREdAh)hPS+U%$5$7RpPijB2+4fDIL=2UAr@k)`E<(qY zC~)RiaJM;Z3bg0G8R_U682u%j6QcLdCcjYu!~h)#exEe9|6EfAe`Bly-eHuj*}wHt ztJzW1KN-pkjlN+wViLfV@W!?cD2$As0wGKr!`2%+(8{E7c~>~y{tNaaz~ 
z0=Ath{zu}Sc~dogZ4bytOjLzze~F!@qVVOp{E%8;-g8@AF;AB(Ny2l4H1CU9vMg-o z>pM1Nw*|@K>9>D3R05q{GXF48T|_M}LDeTM^!Qeo{&V96@g|~dqWRWx5R*mx58PRJ zPu*H(1sdS2&|q6qTpa-6kvt#_WerN6#xcC*iGl% zP5j5vAz(iEuuSO6m1FQ}2VytY@M$lMAzw*PXsIsSY<^oAZ6`tHTmO{CYkZBYjAd7C zp@uHhtj-3n*1YF^E40A4S1GhXCf3c_8Bty9JR$=eyY{TOw~|nU)D;q)Lxm5OE8)9L zvvRVbSj=Xj(X2kUSMJx!4^b-vxG@XnnHX*$QQ8URq0C^$5oqNmVI>t#Bc32z4n+v1 z;U*hQ=;7=92ZukX(NMefOiEunxRf)$yr+nkBh#>3PK2+D!+vk+i-vX7o)S-70ZZbW zT!h2ZFlBF}D`^9WEHVQHBDT_}VKbbryzQF{8_DmPc5vlhU%2W>KcU6>1y%*Fv}7&E zAGlGn&7K>f&tRx$1tVrEaoV1;0z%U3c}sg6B%BYIRgdPevaz*-%L=}9DNrMb?>4+g z3d%E(Z}-@kv-mnys=3oLyIVY&2qEtQ#<~@-fadzHJjhlW9{V*TMHXItr8VT=%p3nh z0o)lQnk@U~QJSmtZyZ#S5Gg1fY;V9Czwo}d{TNivV7GH3z75Ll@hd$92NvX@1ndsZ zH0%d;EMZ60Z#iK$&8V{dnzZmYK1wf$vr%XT`;n}_>7(xv<|fUk*{_!ml6)RoiZ<9b zaE&~p+`7W&A(Z+fD za0got)ynp4v;u=>*dpT-(4>4&>NEuLPKTz`vM}5QrUOIH8KmAE*Q<}$fDUKV%Fi!& zD_D^)z64A3n*QGIarjDdV+M`h$C5stDIhJl=t@n)t~oAdhk1mb0*N=97#>4wZMDPc z^rP^h7qbDJY&=`XlSLN;Z9rkskF{*hio;9jU=Dm9T3T9j($~eHBe)Y`>=lxu3EMB^ z^w9cSIIPHG&W>9!gcTYslap}G9t`JmUFMaOoxzJMHHT5(T4NHyzajixP%`8$_N>v> z69=2~a#$T8c$^^?b|N_#ddZ1e26w}J6yhC_G%wP8H;cVQ{#8Xu`U3L+o~xzcmPh7V zlh#`Vk=yqZ7EPTLuVUk0QQbLlwMPKDg;~mxo$L!32;=s-{$RTOMQ?$=?%H~2P}mb3 zyzEx?eqY8wY5N-3ELEAq(ruM%oVzh#tqPsVAFZvt zo-MkG9w>pv`vb4x&D^B|GlpMDkFsJUcLR~&HeJL7IGG~A8?r**t@yjW=T!^&m;L=N zXM1f^6>roJFVP&vI=9R7FOfS)+|n(0*xv$Ud~Zma+x5Pa5aBBbL735B38R^jAe6a1X?RK zLLqD0p_`>aC)6=6L4(+Q_&U)GyI5(VsiTekCok|8qv6%2Y|* zLBEOu=0L(6*)$~%9bFi#Xt(#TCW=xRl0=gb5;$Q#4g(yJ3dDor08TE+D=?BQtY5u^ zOi2s5-FBmYam0>XW-6w_4+`7SJ0|zhwqImvdR zeNe7uXf-SG^sLTvs4^M}`a$MhZ=8h4V%WE^I+DD`lEolCv>z(=(`ff{yfDuCQX;|c zR%j@is(jay+>a;cTKBbi)&#?IrBr`cM1JqpZTdM_d7v)A{*ZTX&b>8Y!E4;(d*UlW zWs02e0()$Rg_M9z>9z4trh)xUqNc$_+v&h=OFGqcK#k#BjpF z^{|%21#|RBb>u*7q(Z6lhyW;&prOM?)me$_%Qll$4OudadQ>o2U%@xQrfslaTY<}+ ztslb(aA$6wj8fo|{N!P1i%5>(kb2?pOVm>0W&S|*Nj{AuC4|;eiGdzZTt9;xy7P{L zbAo+_C$^f=`Qxx^hAs!RYu61?;%Kn9-AJzaVQ`3}HYcH-dzTb;UZL+?&X*bV==F_p z*Dz1)dB}+l4OahCTI<$p6(%2BW 
z4y9Fzy8>Wc|aSQMA=LuE{yZilZZkT!ZcuOZVM=F&>i>y^Z`Y%PZdRF~V{t8x21 z$O0t?detOf+tM7_mD~Y1*~HZv2sL!)+`HfdD9ekxfCP{&XM@J((?T@o2hiCA#kI#K zjy=IqB;Va>;Ba|Q&X(xu=@(1cA9y1<9A&O;eWaKGKIMi0)X&fU%ZpV^q9oD)+vLf% zzPLdGl*O_ilSCp8sggP@iNRIPb%zSYJsDHpD|T&MwN z@2XYzo6$HS6HFv3T+t&0S2@#6#-B@G$e?0DqtZud(p4+*aK2mU8!&4qo;2OI4m1M%%-_aeJU%- znI2xWOWGRq8?CfQB7L>FtM)_eX7b~f8ok-tCi>=Ifm2!C&&jNf3vut`3%eQA9vE#^ z^T3_yPS_XrP6-N=%9D-UvnhUvgo1h7)BydBSs>dIr!G?6>-%eP5yP4|&BgGQh*G& z(y^!HOTfh@a)^I(`oxfM)V3hF`F-yd69$xnR}MsfA)i(WNE7k1nR(o^Gd5umCDPpR zgbxA%azVhjXG<09|{TjLq+hjGn3=91p zLS`FVf0d|oTM@WB7>Y%oW!+xXIHWK3&a=f1nGs;uZx3pXa~PEIaAFN8(b{GeLfZEx zFc#1H3p_XBkzkSo#m93MAS1>nzk<8@AsMelLq7&5Cu9l+fuyx614)avK|v$I^qmpM zsnhte+FH(wSf0ww>`x}xP$kD?qOlmXWZQ-xca20DOdDJhLHjeA_tK@tz_GdY`?B85 z215=r#mzUo{A6~!@jL4?VOa=9_IJBCHjl5I0C+0D(80UwL}~z)&-ruP<}ssYEDB7# zaXm`&bp&Q4M5O+OYF|-$Q?2$9e9NcF_?B%S9Fu3{Z^m@6m z&+{Q`|L6G&GS9I^@qv`)+@tn!8h|X?vkW;+W*kM=Qv%ju5!JaRlin+3Kpv+ee?W~V zmy-QSUvMVvJx>|4wjUBISjj$q|B^j;*{En=2oh6TKx;`jdek>X_6M5>3L%w`?cr~f z;K^6&2R96wAw*KfN?RWm* z5X}Bs-r}zPA!&yaGyzW7LJ(S&1xn@z{iGwAVTW`Dvm4zXy+2r4<0m@5ANKBI(xIEc z==~tKVFWA`sjk7kl>;N1!umN(47J;r+dpy-flQTPY@iqVV$6R%LXHx!C`d1|>P<-e z>Rpy@eAecCGEI4j?LeDA_^AE;j;`Is&U!!gnN0J9_T!hsf}6j4(z>^Hac2L)k_0Q# z90m7$noxJ?zxS{=!Pw#!n2Pf6j^1rKisVN|*g+4rSxoYh)#3%6E=t&^{4*%y49yUe zE~`mj@cN2A)EZaI{FKa4uB}wf6@R|agQp<)+*O>7Irx4$ffUJ4JI#Q6#942a-QPdt zKR?cLx<^7DU{2qjzhHfOKzRJuqY%Ik8gTwfB2!f9?XG$^NT_8mGhB81^LnTU;MN!? 
z|HPT&Pu}Ihx&qVRd9SN=XV&D=BKch!j@s!Xw2&G~JbE-9n)*T*h%*S-c&EMpR&xY& zfdG!6ZYuO3UsL={4YorITRK;Tj>T3I9TROh|ZB85n_HGR@EHJsN zFTxM%faiI6d5<3hA{B-JdHQ>>b9u)JE`$%#?)Rb5Z)gvG?;r{RqUv!=hv`#3%*X!l z>?o)KPy-FoyfyiSu{G*nIu6Rfa`lB7`AVNyy8sQ(RFX(3i zAYC}Eiyzxm(@Ks1y({`sf6G$8zneQdy3s zC(E7s3NfZ*u_DiW9^Frjr4-aJhz8w|7BIusgoVEQ_uY6ZlLt~S%(wQ?U^CSZ=vo9h@r zOx=rTIqicDqB_iLBo@ovG1d&5M4G2xv>qJ}xpKU79KXFfISpDMtueAQlE)R(&eQZG z%%B#|d%t!zahZ4CEn-2WcuDI6%4Py&(M%P_S5WdO*zx1Av$zhX5GsVX7_4IZ;?TIy}_q%2lg|V>`g)fQPRs=G&=Z8PJ)2 z8oW33QGtCt09>c0Ke)G_OEaPC2vG-=i-n`$vQ4u6dgT8sTIME03Wi};BZVS_OTJMB z?9&kPYGaS}kNKUoH5K@csC$5kaF`mj)97WNJUvXFGQ}XByIiC*JJS|LAb4;{9&9S`26~iykMpJE9vRNL|hHWEo*0OMQ32S$M5DRC7a445{|dgEaEg%7?UQ`8mn=)i$J zT<)q-^udAY1!xc{KBs4Hg6lX70}%IrA4;T8sKWz-&6lTWr2(|B?<0*eU(B7;qqI`Sk3HdjDR0>N^9?m4uy^+ob7<5LV**88$)xNZh4mq z8`nHbAU(qT16A4n$%eMv&Lg_vsuCHS=wG|ZbQR{kM@c_;vWe;4^0ivMGaPW+YD@fP zotGv~|J~;*Scp@M>tj-B^;(;^Y)qF#!2c|Eo@G`i*0wx`77W9qZVfa1M{v07h(!XX zH_cIUHtkfu$h_fx=hBItqnxP2_ku37gicM+R&kHE^0*nmcNIXSd-MU8%i}H@;L3 z<=@JcSo{QCRc-gt-@Hn%xTe77aa5L@_i~nvoZQb@lJnhHzerzV+_p@R5SAS~ouM3( zhGFp?;WL%j9V%~0l&@{`tv#-txmUTn!_oSY)=gVRRBbQdUVcshFQ(poy(YeXGW^7d z5b58zV4bUIgmSLVA(!<>X{1F#wbgD$2=GSktCML@xe`38Wc1g2CB1I_?3vYRP9()}3a15ir7sI8|IjU> zamb}cpTZUZ>kyKI-IB}yO5Yn*mK7SVpGOci`Q_FHraCE#s4 zkd`WIG`u1GX?|?|+D9 zq?Gb-x)SQA@0exh49{C5vZ&o5vCpr2^`xDdVU5R?>N0L zFGqZEN#9WqF>iit87=R`emkGGE4wIg{N4S58Yy3xXYAdhQ%g~^*E(7Mi93fVEJAcm z`q$-VJQdN-F0#@Vtiyx#<~V)qXB&DO5&i|{+d>p==D%AgKnMhM%rz=lmdMG5h*#-1{?HTWs6}KjD0})y#MCRZ_)ZUhxx)jwTYGK`57*F)5mU2Bk#~R* zeR1SUpUBl#(S#be{0;dG=W(5&0SWCFwY7Ikm+m+Rug#ofKOCvdUKqzWl1lsZcy^yT`R8NN*%e`?#S}%+|B#WqV>t8ZSZsZCH-4+KE3A? 
zEl@)B)!7!_TR{&5T+F!6HaS_tv;EA&OnvN~9VvQIm*?J(kdC@%q!3pYm#rre0r!UN zLk08%F2z(Xhs_OqzBY0w;@?C=^lqICd}un!EdUMi6f$W;C0G0BJEeyV`9oPb%y9Z> zr@u{wzp2`R=!SAjL`FJ`wI3;4t(m{$N;HFh=_An=iGIu{DetHhp~N@POCLKheN3_Y zhPt=5P~Y_(HTDLP&E;XY2s>jc9M<)iWH$}-wmRXU5%it=R28*zTmeqQ$^9NJ9pA4o zoXuXhw1K~^jJl3kzg9@5Mp;MHJ)=W51I5|%OJ!>U%iKYmT+#Rl5Ts#^dZh$Xk2jP+CoQsww~c%*e<#a17=#6Q*zU!me7xPVk7+&M zC2)k+nC;0_KS<%$8OMG_EDsjMogK?S_+us%`4lZDuQN&J+*3i|KX4EVvS$k3b;JEK zO2rri_|F-5H5LXU&s_Ke7*D)V2>PC&>}>Fr0Y7kZH>mQwRu*8oc5Bs%;3Vf%UrXWR zyu&4TA=BnALY_B&-@(W^O}9!~=GXnBj`MCzZ^&r+%i-X5h03w%lHSM4sO>$0xL5df zw>%s1eCVv{(+0yBWj{s~tbJzVuJ{Y9VU}dXcGsEJfTZPAF>FIsigRblka)h(yMp<0 z&)4&IHZytnwYmp^MiDb#UCDvgj2iSw1Uwh0MUynee;*(UYmtg1yMJrwj7k)@-8nwwVg}Bpa-4-VryzSwiL*NXt zXuJwv3)fc+sv!I{yrF{bX-!HWonMi;cG&fjiSC4%%%;h}sY&*l)!_r;wSbmoPgaDe z=fZ;>ATw_RaD8i2@3i%Xzg3(7Romy}7&Ic)U|jZ(#MD7r8?r$b2N>-@epcOI^r1oh zFkat4yIE`LqcfmZv|MLAM|$ZibFYgO&sB1E2lbvm)4zYDYuro6KFLIxSZsfJ*$Da# z;_d@ZGwV72ofzFO3S#Ssd~pLM-8BP8-R zvLo!;h3LDKGBj(_A8Yx0qQ#%c0!~+9zfH|!4){b>~dCZKThQXo92G33%iCc?|K*>i}cD0-jnCdBU z(_V&z=u2^>USbM*aK19b`s`t^iR(|SI`|^xfaHD6z(ouAz*4}~J0;b|eed>v6?^vb zrxMma&IX7`X&4GrlDaYP#8olBFGaUM{D#`TRpYv28nyu-nr8%ceq1HSGq<%t$4kQ` z)OD0d?4ngJHMuh^|6Q0oROAcZxUy!ykObUJ)4b%VPQZSJV(DU}i1JDcB4T>;^iyav zyWjcUzVgB7JA%Ll(;1=l6K7y7^_ukMbgld_A#D3nDJCiFdySTQKz3|>fd%b~ASTjz z--^A3!y<4QdvS*NSyf`Xs-N(&lzZ>qH^^j)*Mk6`^|gTgV$Wu>Mw~u5WTwZO48RZGqjCwqqYgv^g~EvazPy9?5;<7YxqFX3^%U@bwiOyXaXsZ0*J6-|vJW2hxz4h*VzinY@wQfj zU$XM|VmxA9p@<*!ycGn3F6fE>dy)Xc_%jCM&?2QzfDU#!8cJCo zr$lmqMyI-%dT&iH;N4csc}3@6_lG2A#T)C=2Mx?V-od?}gThK34SVP%b7P|jWfR5Q%{`aNF5YxfN9_$x4Ji^Sq&W@%^* zwTt>ANttW8Ij1-6-o~o&SBH&q7BF5qgTTvTfV6sMhg0yZSlDQ9iA3{Ha4#o z^wZh;b?pw5N?ksYL7f@sSNDle`YXie6hc(Br_)LE9wP>|MW*JkU9%e_J^yBbX(y;3 zF%0`MIFWB|iU`^#q^DIta^??S{-2qOChN9|?Vo832-9x69P1SUZDHuglq%UQ(g;m| zUG{Ef;~hE=v)}xQjz4wxaDHkU%5_#=djGkL&B$i8QrrDC!}k}h{JxL5484o7LcURw zvxMK_5Q_HmY&6}O3<|d)MQ)EV!Qj7er~4o7;nec2Qdp~QXc=&i-BL{%I)C-3(e9X& zimOObmPF8Q{u+|WG-cHdwA?aN>I#g+ra`+S9u#}j?d@|Nhe|ccxMfY4co9}ZZt{&- 
zrt^qzdWd$y3e0i1{93V#WFWC8^)6xIEv^)(A8)NSa3GT)>DXVt{O7UAAUg#+tF0w7 zQP9?S$;ZGh9(`1}{A$Li*(&+S1=WwW`Rvq|mRLa>Aeoyn+088^EiHW?fTHbx6sCL3 zcdL+{%9>k`?vcI*L=;(ByayeJFg=5OeN9kPY0(*hN#xrA0n@de+0-GcAE1LK?lkxS zbhL!Y$X%e#d=KL(E8jcnVzoY_37QkB&lr~nr%9hg0swkVmcO^%@q^DyVtzrzW7|$Vg*D1cx;U zHJj+3t??|)q)brN2gO)1X)=B{Xnzkt$T@G~t0gx^f#&YAXOpj%T&7v&W=i8z0fO9p zQ`0L%{|#HfV%W^zle?@Qq=cO?EL80DXs)+aj-W_}16xaKwocr%KYh-3|ZAeVP%EHEnm%9SgYoV^1DOm3!3O z{5%Yv82L%>OB2(#dt(yTHlSj29NU>@QxIax@Whvp9)e0!1h-xHDS-KS3o!M~aYB30IHX0b`G#66 zs31=PFnR39D0yS&ifLBCy21007R^~eN4qd|yxmFy+Fwjn!G4?U5V&~g18wsM6)HQT znEQ-|59kLkj_&|efg?>kDX6w9ZC9B|Zfdc?-Gu6mIbKVFIdNj=_|Jr&^M zQbJ4vxQIZqeIxu(*Xz-aoO>NDphI{gv}qwjf&DTzc;{N<*$WCg&DvAjla*>XjA-+Q=QXQUIW=$Id>nzhc(OM?uz(dpqAM71j?IQjdqgO_C)O z9fwdy+zxa}4r)cwBa}Zab~>DoE{&cmY=zBizYY>Qp1gTIr?@Fscz>_??fuGy`aYAO zM>G?Gwlt`WSc|HPoAP%j5A`Ri5f4`oM&liibnLyFdHZtmC%4k2jK;5<=>vsbEp2k_ zjm0Sim8nUOni@z$ZzQ~E3wDQZEsX`<4b)H?X3$UD-{=}+{;GLVKHyw51tm;Q#_`A1 z4j#nrruYz<4D-JBmrVj^v>UhfGzmaOT7d@^RcN{SB-Sz?;^&}9NcP^F?7N7-ka-7Q z(fyoX2@6vIybxC(de~&(3%Z)o);bwtYvDCu6S7rm-CX9@qY1)zyIo>k)<>G7tWBU5 zt>&}0b1imrbOdc%2EbgkZ-YoUiwVXFn5&G1M4BuBAJlCnLmv3CFjzt@B~vmI+sspd z+*YyJ`s93gsSVp@b!G#qG9ZKDvcE{ul_IMyoEgQ!=Id%QZH}`=$N!y%ijZti=jmPs_C7^UjXCNQoBzQ z>F6O#v$qPmeZIhOsumb*rOZCjTNScl;;T>X?T#X|uNsU6{3fAFSo)pDsaKok}fvnyGKE52i8jsv0-F!NbmHvTjv6H?4HU^O4tbdN+2t;7Q8`1{dG#2bM>z*3gTJxUp&=n#T&Fs3#j$_HWSAe&E#qk zCHm_OBka4LHdvQ|Y75miVFt%{^oKdHdFR=W)qI1Q6Q!2nF>3HMZ{I!5J2aXNWNl$P z#l=M1lvU0`+FlQ#VGygI56(%mZ}*jWv~A?Kr`hGCm;7YQaWBoC!!Ptfo4_s0-c|%) zyY)OUKR$oHAQ!d@G zKLu27qI}yNc7S;V-tnFpwlBYGkBD;v0qCx|`F2ncnNI~|;pvhyuw6C7pHw5*@9Qv2 zhu*nCVd$L-3HtmD+Xm^yg8o>Kd05{_A$Se95zkr=gD|`T0IzSx(|;BS8t#&a@r2tj zpril|s0P4Rc4vX#5>ngvqk>9sxg=fR1~QPIlIj zMbA*Z#tRn*H=5YT2}h?`_fC&XgqT#zB$gJ*9cZuoo2^__<@xn4?CZuIku#T977I$c z5w1czo?9zSS0*d^?h9INeJ%>O=|PO=!Y1d@8H1%R`X<8o_k!LZIxjjj6h+P#b@VrD zL6d*pidXKI{*`j6C9E- zyUT)8in@-j*bvUSgpCk6VWu5_qPEThI@pUeS725gxZl>&FS|BasKshhpMG3-j|1pO z5RZV|SSWy&!zPoAwM1)I!^{Bc2O!22!mmHb)-pSDedRtmt;6TqOBB9$o+fkFlo?;u 
zw5aWFHnxxvv;+-LCq3^(38?;Xh-pt4#6lj>a~@KB)rZpq-gbF@qqtSCnK*|rRt`V3 z^`=l+$oQ0DVolVu1@ZB~)o*W&5@1GnM>d&sN-1&l6CfX)Vo){lT96X^P)NY|5%Lwz zlbjce)c%?#JPaE&qHqhSUGS+T;azM1dFf$e-}>18UYUgEy36N1LlU=2Ds!mWPxl}I`OW4Qz=3_{d)#jvwdl1lmfk`?EB+Gcx zo2dqgtPJ@kHMsBNNbV9kCv0RUM|OjvFDbnG zbyXs#V~wll+4_in9VU;(qCd^isP(xMPCYjK==1~|+q*;)+OLV+L03MZ$BH_Br3apT zTltx)LM6N=Zu+EZe7dcc5|1k-IGZixf2}BYFiJ$vv3&KElwbWi>IV2yLp{{lk7nTR z1xt9tr{yze+kC5LQ^s050=+(!i$wwXWR*}0A(K+kHY;BVr5m+ZblTdh2>mAbV7Tl` z4%=mZ{#Wxdxr1{cHDX4J+yZpoyTzV{*&2xxJLxb3W~|RX%u^1eP!)i&dGk!y%r=nn zg3m{8Q6rk6gM1TuSQMwI^-Ei3?7+TI+3+I;Xq$jnJd^x3&-v(lC-KTHuz0+s;sMFF zxddy@u}wqEz~~n%A3%I&bIh-#`2_|PdBbVV4V;I0dHeSbPPqu|@0)x#F6E7m_U^JXSFVkzTMvpvzO~BG5L&Dm5Hz>obJu@iYf(P*V5+8C zexCnuc8*4GiO~6qCS%!7SbCC-`!1F_>&*{jAU^n5M}BaQ zsMIg{L0L6hmMaqKD}KD~bvCHi&j1{KGDv_T%datr?oF^^ReJsOHgH%Aj}~JFgpHeh zZ_ZobZerg1-OPhH^n|Jc69?kET4g2u5YntNvcwDLvk5DIc({NQ`9|G4>A^^<1T@Pw zYZV+B4nfy=jH5@O3*&&1pk_VcJ5mdSLgv4|I{AeWsskti)qbg`^ivCx$vjWup*+9d zrM`(59L~`IGJfq*+c3C@+_j&F720={d_i}}n>0jna;$y|1)Ft1S7H|~S8!&SebJLG zK$!hv_RH!9Jix29YNqS*&PwGtw*|&rz%UB`(VPo;aH5mSJxD$nvIk;3wAKL1(^8joSD!o;og zx=8UlNi}Bid=Jl(?=o6}^rI+#lk@eL9O-F6Gf=mHLKuSSEQE5OYX-3D$!94^0zVF`qAI@!OzjNMm&KuABJlvw&j`nBo zDVr`ciZV7B4iK?7cG*{Aq{ z$PY2A^(K7O+X3G%L=H0S3m^+$38@Vgkub;Os?YFS@Byyy=t?`oijjukRA#m$(G~ZE zySq{jx(Uk5Okz$Mu7ZiYq@lH+}_pLGe#DC68#AHE|z}}B2nDXw1pLY%Od4hl{B&s zXcTaTXhoB4KS16<-ArhC7GEqmc(vzPs85L5Lu~08tLj<|Z%!j(Hl~*fMp^(zbZdk| z6arndq!%_6mTnR~c@mv{<5AC0pm4w?q`tP6uHT(W!hIl{TzZE47rP|@u&4EmyUKzI zUC}t#>B3*5+r{?Gp2LF-4TEgkY6fwAYnh6(!F#q81~IO8BS4L3Dxn%XH520bK~gx6 zi=!f+Loi-J)%QcHmI}WEnr+p((6ap>7r>iIddFC2^b)C~*5$KE{a$&QO^w&W5br!f z>acG=*IWo_*`B0T9_o3&qqY-iCq3)Fv%pfx?jDo2Z zh*?tihIpuqf&|3#;f^Fxfr|EHE<8eY?{AA7o!diJBdiwwPdgnUSgS`TY1EoP0|M=S z6fafV;`ys*BCYNSw(c3$s~ZGNlwQtq>s^a1vNJfK4(iXyvtip_m`r+rzxL%cYY}cm z3=(CFSk@WP9yK~c+cMGN3u8x>S+G~{r_h!48v1{@nAG@PrrKEIyu>ZK>0!LEWy{R! 
z1}eoP-x%E)T(r14u$YzD2;|}=AERXxdwW(`Bv{cyL%a(J)uUn1mTb@Ut#+h zsG*L1($qs6ayzWKZ(`MQ;2rH0hu-Mq5x|92o`5S>iVtAAomE=RWl~eAnlh*LYp;%w zyRxxT^)FtrhyM=f=NcMSy0^2nbXS0SWX-ZB2@BlDpN?U5(7}?cj^m4Sn?C`rBD}?P zAi}8q4{%`4_x1(h2My7IOT#Tzv=x*CZ09n_N4nFT0M}7wS`Y<*Eu&}+Wk{e+uLh47 zp+y!3di-?{*P??hCsJIM{@VBi0DhsBUZ6gbcr5jJso4KqV(A^>AFnXt_q8i^#FWcq z*Y{L*<8|3I1*3Qtq)-1zv3sn)iC5i4qnW1GaTI~g!E3j-raO`s%Ir6m)su#G!xkcS z*+g?|9jw-Z>g|oxV`9Y+or&>lVPtKYCX>T%<{QKH7WS4!mHvv@GwaLuRwyF^)qeds z&1Y2k-c&~QL)AT+av9R*7N^k%exBq(tS?f(m~xY87n@4Og=x){`asx(TlC9y4|?;k zzNRt599@SyF* z55m~WfpRm}psm+)O~uYOfkk%?@U89bo?g_$xIIb73!gv|bNyhWGYQjG0JmAf1%bA- z`^_m#i9<|I?dCT7%Hu3ve)V%|*s$N1+Y^GuM_KoP0%ZKMQeldBgyesv`H2C_zb+SJeD@Y0=C z$>OOS_<#n-{iV2rAA7|!v^d*QRL3{dxmx^A9_7-D-!J_N0*gB%AeCfOdqC8Ht&I3+ zw-eF0dkfsqEJM;+y)F>fVn+8kyX$iL-B+>|1(4%G`w2Z0A`Sz;aKjD)%??0_>S>Zj z>I2yft7pHwN1R0We^Um3v~1gvuRfnh=zCr}%ai5}%$ri#>=SPha^^Q45Kk;B`I_qJ zSdqV~VSfft+~#r{r?I~ydQ~NhLTDKqxJX=>j;?eO6RO5qL{_4Cxw*Hb#B_Cat$~nR z6v6$;o<9Q^VSihehylVGtk*su!x(b!l+xQaJz@!3@gFbnZ*Uwc%QTSnFqxEol@sh- z$Mp)6GMAVE0GeKaJYtZj^lZpBXxi8JO42Qjkin*KarQ||`Ot}EJ;%9^`ObdNwjWIR zE#I4Tu%!qH4YzFOtmw=RwXAbo|c+ z<7m;d%pPxl_VIPf9M!s?PoTIW);V<_fNouIs@~~)iFZ5b3*ph z?#f|6;gRCVjfx*DvkI>n=^8Hsf-pJrPw#G(2kw^*dL^agS@f9&et7JKpToE!+BX9i zHha~OLHIrKR9~qpMM4lI383qXK$gWspy`WJ+cg zJSy37820#UorQmXXW@P%s+~hg7FZAsq6eNwk7+z9@{KWBe6IRa>-^!LO#D)+GqfbD z^j+32=WB+MT`_BT^k6M^KeL7J>cH=*zZu^qHDgc<}U8spMxu= zytqCWwp1&-5SLhU2bZvj;SZoS70*y3pK|OzPg2I7cFM4;76I12y@QC{LjWCBCvqz- zH+ZrhWZ#BCiPW~vZ)?5%YW_0kggl$*291%?{yQlUV8WO_0pf;%Qj45ME4eu6>TO$t z``APgzEO?%5#&)b!&{x;CWWsK25s^CVxq3VV<|v-PJ`^6^Wu-$!a=MM5@L)#}%C*u(+{ zOJ~5;cy??W@T%PFkzEF125U{!XhNOMd{0)rEIia;IJm3f_x_66OvQb4Mq;rSsPK4o zN^O&v0)m7E008wG&@+QJ;Jx<2nki`#};@&_PiS-j!Kz~cIiI=k~#$2{Pc$1I$|>yFflum*0v za{|4E)${|^-fgh6%yR)32t9*SAuiqW{Fkb^f$hkJgmHF$A(-otr@W^)u`&FFDn1U zoHTTMKiGAR*<;S^yTYzQX)S6mCKEP#8aK3SCG;jRqn(s(f`K#%t*hpDwA~=-_`#&U z>i6{cuvtt8&7ZNNuPhhVrQa=^$jns|YbL~gFims_XArbCogqm%i$3tz|U2Q8I@|qu99GOncI7L_Y3}YaN#7kw2k7&+3B-{iP5c)=lxpTTjyWcAl{wG@`s$ 
zpyVMnUwl_e%bc>QTSYM*F&pArEY16dsZSyObc1!E3;6W@oJlJB*dN*@(3G34q)QE# zZ5<)j%>)XWZX29u?zf#(I;tL>i4^0t^Hz z4+lr9ms>%7#ZAHcdUk&Ak))D27v7^+J;Uc>8^ESRO;DLK?{B*Lj~(}$(0*$4$9h|v z7-(+$C-IpyY>bCYGkNpxyEHK*803rY90lrEuZ5$K3d{y2?qLqJ_U2UHYvx(b_!>Ur zsxC5fuJ$Fs^X)L3TrPNDUb^$6CUx-}6PeBg!RE(-g&IkU76q~8jKX=i8u>3pKo)Nh zRD7|zyFol+IamWJeLy&)CXpB5+3xY z4ePZhGJ5w$5IcyD2e{i#Xc9eu#9%-{vl0U+Zk9(-PFg zNU4_mJ`5u@*0Zagwo4`85!;A0<50o7KTy^_N|SRumDTkxSpqk^RYk6gzJhx#9Q_38 zMFGROW70u&Hh^k4H}!z_zTjvD@_QEpk}{4JT3TaPRH@{*oV@+5NbLY$(p^ZkBQ`OZzX{vj!vBL+dhr0<7=#v?MmUjrc=wjJ9&VHL9%Qw%vY=jlHI4Hb|37 ztmju#>EK|#(R)+PqB!Lx1X(M}RU6rLMftV7XuC%fZ%R6oz<2dfh31)bv&85fc+E2T zXyuHPE$ga#q`>PVi?`!qzjZs;1O~DGpbo z+B-*dOs;FMP){;@R_*O!Owg*&8i(z@uFu{KXKCb$j`?K7i0ETEPi6a>#6a9C$81Wm z@Nj-q%a|XQugv?eGdn`QsOQY zSw;0gl6Ep4|A#?z_YRWtR-UgpW_=A6z4Y}rg9MQ*QWgD-prC7_Ko9W*df<{NFBS6a zSLG@ue zkk4@o!~1yURI(M{t^?$2*r)>>=p!!q-O@F1uBBXDaL?>~sgPeI&y_~Qda(tOAelcZ zH51FFmAu)x+*Hp=gN}qb29yjX*Vz?Jq!e2{>g3=N8`|-FJIHl78bB zE-wz}${XlWwmz-zvJ_wJt%^`Ijh7b;sE)Dzfn^=(3E_yUb7{D&Z%u9;78jRvWD#r@ z+mLkdn~%)l>H1z2pO+fy>%icZFHD*N^fAjJFF?103$foH@m$WO1&&llGLUlz$O(Xm zu>A^%?xcz^c2h3u8m(jB(!~`-oz^?efR^J%#lsg1KkmoAnfdLO3f)HRgR*l}R$m)0 zU#0iHhZ&E`r>Hpn*lX(ZM+;gY!f@`TDEw9i%Ju$-yR#Py&f)52ekdUeO--1+o4RWN zcW*W~@U+n>yCpQ}-gd(k3Tf|Zma?w)2gjY4vQJ4l)%T~~t9YS94YkyDW$?@XBdAeB zs~Qs{I};9GuXqLp2c`x(MT1d5%Y+K!Bo&)!wpa+QS|*OvzXJHfVqX6!;)xwkZ0a-W z+4bJ_ZrC?5292-I7*s#BZcBFsuIx@Cs=M3aT~8y*y0SP_p0aC4Ga?AcnyaXjWl#$b zUFD36ieJhJj~95il4q@M(VLw14QIKvdBzoEwWs=7BcqzrK4-=pXD!=pqm~g$6D}P+ z#s-gfcwULLH8peAIA!!j4+;4EzwlR2T^twwM4XezAbOj$kh)>)y^3HUWp|4=k~6JH zJOIN6H!6NNuy=rxAMdOs@ezJ`U$UR2Y7tJYFRxI-O!3`ouF8zkQ+5o9Dxi9@}@i zL;Vw?#NV%`Y3uH#C+mwqN9O{%)e0&7g3WmKb7*b0xD;R0Yj0i;R6o^Kv&>WcOB8dS zlzS}c^71;yYXl>N)-~5~*#`V!j|*{i!OPV)T2aqeo%&kn63>d;daOq=OC}2iRXdK2 zI5$`k^iC$O9*!UF2bX&7K3jtEoYh{KF;&-4tbWLnKwrUG0iFB-n&hzI_dm?qe*sy0 zr#+7&irTbEyTkf#@34l&AYv-*3;j8Z?)Bp{TAqyX=)!uuQNUYDJoJ*-w*qQ9V3W8I ziRv@a=Q7{jHK|!Fwd!awjD)8&1cdwcUU|7PR2b}AZW$g9HTdH>A#=D(X{O$`Q~`gz 
zurHfl6CHM_>vbt>3y(uKEd8R$@2-*oIhaY}PQ}Qid`=xF_3l@y&Quy`%18~NR^5!J znDq0v6E}o&eL6pfb_3uOxnVn*w*TyR9eZ={DE;c2(yPFac`DEDPIEfqDb`xExUsa} z=tex?Egz0rGCM!~_NAs12fdWXHlR7^)8U~iekqK3|Iy$Pd;7C8s%osmQ~}cGG!i(% zT?QGEnDis`*9e-^iv%g`n#&Ex4ludw6C|EpbA6Z?v&Dc`f|qq*vpQ=Aw~SHS+1C{GUZ8Rt!LEkTrof(rKA?AdLJ!H zHu#26;IaDyUEN*e=UHR^Y)p4##ePtAqXI^4R=+Fy>3zVY@?0SHw(q4lp66>Xq5mQirpQoM9(KvsW+2`#N zHPwU{vbb~Q=+OHmVVSm4^p=$JLOP}Q7xxwsP$sF*X)k=Oo`_qQdUwRVten9fB7nG8tuR8zHzieq19>ZoRYBXl>XJtZa|$geF4 zh@pAwG&tr3#c#gn^Ro91cU4o%N^aZMIHI}m#Y~nU zy^v=3r!v`J84`#|mwuU*J|MveTjFcwO+Q2c(NX@1ylS7i4DY&;xh;;=0CSK|^cuBY z{^q%`VVa@x>`m16u_ixth^2Cw~@qU7Yh#mgEw zr33I{fsP=a^V&;wD*wj&xb@6*%J!gL$Ybz{N7`_sJzj1}XzIWm_wdMLh_aWi^6 z5dk{A3Jiqsd;axm8WUGy!(-8gunWu!qkZZVdR%Yf)|I}Dm-tHdE@_V-mXfuf$f>0| zKyyG`qK|}Pl+`SG2D1@!uLGpwSiCX_l02VZq7T5Eavd(*Y&XiUu8KwJS+ytjtLE3u z_vV~EvwDwG^U1jjAjUQV?4QLtD4087st~usdQ3iG_dZ&ExgUV-&$$O%0WdxYG=CRe z>;3@(k5$iO^594vZGh`$6f{R%o;)CdY|y3kM{9~X^o2@G?$rWX-vOb{Qs|TUfEszH z{#nC)3Lex@wr|@-rLHv^G?{;KV>$5x<6~ElVUlq2JYwx_zbkG>tD@emQld-@2a&>^ zA?(%Hmhu@>!l5Tb*y=Jwk%c;BNiw=z@7-^kIh75>Bkk?UTjG~`zez0j_DLWe4Uv09 zMTz>pix1_odCw~`2A5z**R9zf3nLdn>1Sh@M@ncbgB9NrYb{pFIhWs^L@t)kii&p) z4^0C&;=%hKPuZC(@wlS^SIp$HKxzYB2`;J(KY^!I9vA%jX8Q{FD=DAVi4vW_tz}Ym zh)3Oxl=HlDTB<(V8mZ{9?yjGR;uYa6_)6^!$A!x)6681(%(&kH1zMS`M|fQsQ_|)& zYM0jQFSIgFn{=FHr49)B+G5d%;U$gfI;n#zrn@ctE9K5TrpCU%f^e&UJaG%HeRkJk zmN}n1eSgWVak#q>`pny=M1Sey?TH|caix#g zpX$_RIP7~st=%npCrIc-^%*M)r9Gh`x5YqJJT)))`1kfKi8=zjO^*h6DvP`i z)ufC?eb+5HzL&EUdOWKHnOwC|zg?&UA}UAYlMH@dZPLpdPl0Zh(q=&3!YPdNjJ|6^ zsiM6@J(RkHYZ3jA2F!V1XGGAEpih_nR5V`|)w6!R-|xF6t(;I8nU_8E7{S+e+;37w zNc!w&FP6T71g&i` z$)^}ueCBzC)3w4^&RnnOiEs7u;AQ&+?cQ^c`ME>`m!kVtH_qO- zgsV8mVqhN*u98fl*Ma(QvCqGtcexJ9I?{p~%Ao~PN3dYi4{4r&fD|@I=7Al+z-!kP zYk>az0+i(j?w%Ry*^GCO?ZZfJ?I&DoZ7AHkh3v<@+5Ur`7RBYM#(0|jxs1J>+@n^# z`ir6~y5{>^rW<9Z0c+p0V-a%lqI+LIeUn+p%~&)V-_xBUuJ&KS{OS;&O|F=|d%^8z zrYcullY){>8S4rPHGf%KDAX{u7$ma0wq4}9_I@r1wMv@7j3aLy9%hXr3xfSB1r-OK zK0vk4;ALj`(a!C!apkFa-lA&Px{>+H8Ck6fNyl$5d*08sv6VX25-3c_b=V9VMGD*d 
zOIM1X%FF3M%z9JC2Ppbj#(iz}bi7HN_~T=|cGpDpJazZmh|PxBp(C_;Uzq1r-Eh|IKl0ZyAll!0&W=V!f4BfnCpHg zr}v5`5R%+_T+id`uCLwoK~wsr_^70IBp1B4tA2(vvh{pDq`R5IX@FV<)ChyrpI$P= z#g8z@eF7KLikIK3tNFeCw8OqRQovW1t%uZCp?+?^6K5f&rrp^oXZHI3i*o6Uv*+XC zJr<|!3Z-ZVXPTMCWPgtNRs-WpHAk4V^)@YuL9Vkxl$d4#v%=C;ncb&z>yCU?7R!t^ z=_^6mX+HU7>;ra1so3?O14qMfuHjy%2C2o$cP38Oye8-(fxbca73cHGlHRG=rQ)|K zN1EY_=#D68gN>oe(b7R>HtjeqX#X=M%7XW|3=?n)R=0lYbVY_TIaif_aW9NtYG_bN z7V8F*02lm0zYhkJv*iy`g_WQff_|*_p@a0hV24<5wZZ4jhS%)jO&50hi-Y>fJkGWY zGa*#2_ETPVLkMyV5x64&;-Sr;7u3h*Gex5o{GX&urloJ-gO5b}9@hne6r6tGr%`mW zN1}t*L?cY@Ws5by<17kfL zoeNJD(<_{z2mSgAw%(w}fEuNIZZj=i;UbD?6r5vsJ?e*_U65$`S@L^Xo#?T%IH1O> zbxwI#B}b`0FIO2jvdDyl!pE;i8RN1&;VY$EYy=>?39?#YAc|F{emQXPT)R%eypHx1 zoXxRp!m=yKqc9pB=x7G56Z~FAjBBl>@v6(MHYezyRTPB6{Q(c=lq|w(&a5Xj^>uW&GO&ll8ceUML4ge28&{((f&-0(aS-#@v^$2dFXuV zugcWrl9y%$D!#_LVB+V_U2*-U=eUlKar!YSsLW1s@u5(al=iBUU6^nKw9Ho?v(>v> zRBhKBv0It~2p>BRvIXikH77{Tyv3PuuFiEbKLl>Y3307Fh6KsA*F{D>z-WH>C)Q4QJB`b3W%B(BH|Cx4Tp4lzG%`h z0*NA-h-gL8Sr(rsPuTSAC_N{m3Hh|x#E_Xn5Wisxy^Z!Mn1m&9nRjM=p0<^beY`mG z8*GNz2sh7S0ipKi3a~TnlEu^?6aCnAd_nOStI%WrAY;a#!f)r++PEgsmjh&_M|InQ3GYZ^FLu4udL=370t;e9Y#~9cZB3A1oe>tcaq-#_|QI_rZ3%J~a z*22wp_R_GjR+{Vdfub%-px|awR#w)tJCD=zyTocPlOn8l(&SxD$iTVL$hjD5IFr|Ee>`>e$(VVsxmV|a?d-rXZahytV>SKe5*9=763UZ-hUE3s6rJvq0RUkIIiMEGLbjCKGLHkQK$ZybT7)gts5n|SgASOB z2(c1V_R6goRo@BN*#+p~{F!incJ&dFDdQ}6M~Il3#JuIK4QRntE%5_L?G6f-NPhJ$ zZPBXD-g=Y$h==c@nYOVWiSO1V6axcyr}YK_F5GZiyl*xT1Tl!RvO6O(B5p{noQ}&Y zqPEWfQSC5jr+7v$PP7El(Bj;@E)YWBB#e~2D#NkgBTJ~m^Re=?D_(brtDfE&%G2{3 z1X60Psvv5!AqbcXtKeNgugVXp%v=XztLNQYR{86os}>FNmCGQ%to#MAoCXc~`X~?G z-|ULZUR|Y^x-`#6ZclOfP9}Uoa(p*xg-FPXTmv;7z|h`a(3r$T)fan?K!@KB((kkn z`~j279sBnzJAKll>1UT%{1dCoerRxG_q7Q;`Vc9TWK)$MM;6}UHD_k?s z-Q){)lsfMtF-~X$@HXAca8j-T*%St(llE9HD*yrb8&Bx= z=FeS)fmpFe$Xrjgnb|wJ_o26C_1-op$_)eVcp7xSV-oPO<75k=zNVXd@UCMnWlTLdo=2sH7W6x={PS_NbudcIV5@LV4~;% z`vzY-r`C|MMt#5R6_B+!ypq%u_dcN!TOd=}JU(t|CX*{a7Zx0BlTlZR3(q0GulMUu zC^ps5?ksYwsovAJjdxi%KyOwC@rzq#^kO}+5(vU=A+$bP6PhxS8nhqVngdeEp%*T_ 
z9j`z|{!WOt5b>z6MGOz7%PZ}->P7t;SY9t z8i5~>>ITt@F``WrT4@*VmHkMURUuW~dh*QT6-d%x&bl9o56x*r<~>zyr-N-m=f@(@ z_wZ!t(sB@5*ZqP4jWuGhM!*CoB(3mzf z`KQ3`n(-pZZ=essa!tnGZ4FW+{YBZ!5|M&hH2LgJLv^j#8MxDk zmt6QUl_9xiBU6Qn4Zl&51^tzl{zn+;l%%19>BElo&9H$BqMCKsRCw@&Gil#dopNB89qq zERb`nG^mG<03Z3?^itA&p|!;*G_*?MA!GkM#gsi0WHc@eP4TsWf35BgrQP#Ng=ual zFWR=jQi~L2Ax5&4{5hi2S^2uSthDz|g7}6Ejq)+~2^P^gV{RT5bKli3XHz<}y<@%q zNF2NSR4pms!WkVZo4v*`SmpT=?=qvf4;Wq_4fpDnZlW4kMt^n$&ppe;T3I~h#p!8J ze;=$>5NKAI!8x%WSr$p`8ZU+)^6(s(;WmiaW?#EIEWXG}0{-@gk>5_ZmW8Jb!JE$e z%NcEsMC`8R3#}c;u^d$kvJvH3iSSu7-v`FM5|RD*ZoR@JcmH()_Lftd2;yWTFyyF& zb}sh_MabcE$4dBeCE%)ZBlV>bx+z8?!U}^wx*y;oHCJ_$wSGt3BGkbRM9Z|8HQ4~q zjDZ(8$ojHYRQfd_`f*=%i4@vv7P9eM&>W%SZH#v+m+Tc?AwM@DN8q(fDS%u*-Gb@-d?z0Hf{LOYqB{SIhz_h_L#0{H_bd7v7O(mdwFfb z>4qDpcrG!gNavyfQhNOvq!nGEC1X#PalG$)|A%7&%ve& zHGv_`l{6=HBY>;79(Eoat#B8R;I$Q8cq1zdlZrf}9io(>9H*$4XCZ8kte0nZ+RE^K z2o}+dBd|R5b)p##l%7<9WTj{UYr9^4V>)q-V)r7&z>N-MQkBJuan0+ti{rr?M&rtb zL#Vs-iHI(Mg$6(DAd3OdM!2&QjCmc8siz+Xcxm zEb*70xGa(Rhx=Wx)V8!{#wON)Uy2jMf%v`-w-*^%dj}D?aR~wb8<<$HES@K(IyYdY?5S5%>w>jD!W$|~;!1E1s+Kgiw zXEQW9Z=O97j$@M~M!Dv9Q~rDdwH9O)O`rn#mbBX>@Mx8*w6QZ)(oSnpWfbTB4A?X{ z(nt|Omc~Had!dN2+zmWfckRs3^zYhb)U_1r>ZjZQv0{f>*SZIGG{hQ(eds9_h@j0K zP`eMDVFCirF01|~}6eUU&8V<>*2AumSWV$^R^((Nq$>ImGyDtsBKQ- z>A$p}z?x}4gXg(+H_rp;STpKPOm=z=HkY1qvS|N3QdKuQ_ZP}bb+Zc)OG*OhM2h$X zZmh;&(7uoG9b5G3UAog_5Uk_wk0KE#4Qnn(y64O?90#l9gZWey6r$$GLQY^D!nHO; z3m`BmD!3DpUGSpN`$m8(RNi=YBmSleqe!s#pF^K$S#1)-%Du7_(34#c5Ulma4DHMS z+_nwK0DdN@scw8>_UzL@>@Oe4Bsc`)qGh?rI9&?TidxULiDn5W>bg&aWl8XFE8AtH zISomxC}()+T(ZO7T4nt^UxZ0p=LRXc;?XO|rb}pVY>8|fvI&+Hm9#DuE+sy;WP_V$C>nH)r!q8mKqd5|a;in3fI2!^h*jxHzYSrTfdYbKG7DBc{D1Sc8(28`<;V za;$&`;=YTJW?szA%X@upD%&u1`tM4F$mWqzweVF6ss1I8$RvkG*T}GfD{Kqg5c@valyXVAj{m*MB{|iD`&FclWP@^pIPyXFcKa(V8h{OPh zA0n_u2-(4xbSK9tm;2oR4EBo920E9_WB6b8w|}Eh(>xMJF2yK`yy$rBA23TV61*GV z$iib=AX_{z4>BwstS}}m&~xzL-}R1)?Jd*m-Usae|09tTlaJb06t(V%_j4s3fGX8z zIemz2bLUX?Ip;T1uBTfymp>d!xcr-8wmonXWdvWCC;FcOPKFW+4GBdknahy>;8t-P 
zoEx%K>GiVy0vlS_RdM=Ja{u1xx}UhVJO93jU>C3I2~U}N-|yeE{r5NfQay^hDL$ei zCPv@VPkGXMbG(uuKGgz#qTMHNKZ4WvpUFS|>oO_ls}awyf6gOFO?9~5?|a!nfmdJ! z9eHxi{(m2SA{acyF6aOK;p1<(O$2dhQfSPtSDUrIoTFw9HIn`-E`0K4bu-X{w-sk` ztjPQK2c4|jt3uhq1H_2)oXLtbm#q;(jYhKG{hY$cfYCdO{~fy#Al0ynt0%0-|N9MZ zblG@ak_LZsd~ht&YBRd7E4f0K21?aRm^&-BBZt%N_mBl6Aq;;XSzQ1I(g=P|%?jWJ z**?Hiq<}x)6ZF5+d%OWw!Ty!>|7mT63QDRg3D7b=HGnM3`7?jR=-%lvcE3umf$O$d zqDiYS|R`wJ?` z_Yxt|d);@dC(n=w6rjHh9@8%q*7^8{Jh}!>1+oj_|NZ*Ef2FxVe*A)aF^UFtbc|Ej z~Z!$PT_Jb->lCT9eRAOk8}pcFul`^pbK2r}&8SQ!(~_H8hsh@oMQS#sDt`nkS5t*fHZ2m&+FWwN(cZ0!8Hpg?(z0HuDw5d;5g zP~60*c{3dyIy9lOOVbib^Bbs?v#I-lF;wK(lp85V?W;v~+>X|*2^HkV_8*_Lt)Fo; zFjYk~I6bA$$+w+Uc_{i@afXKduMA=R$r z6W6zAOF<;kC_-5Pl!-Y92-dW^e2xN=!A&DgsAIy?ZdP|2vCie zXG0FE-ND~aBO%#TCqMSgb-<>`@|&3#J=u`*H%nZ8-@^`->%H50vrKr-?>DGuTdeBK z|420d^vh^mdOsLVT>keoeP3jXRNk0Kgyc*N)FK4Km@hKU6^)PyP;qL&2mv#IS(&BR z1&dC9%o?}X~nRG<%%U}4}xs< zAeSuL2Q}i8$02>7I`PpWYOQh?B$+FJHwmowpHMQXixCr``Ng;8f_npbG^j5Vd4C!S zBY|Q%{IqV=c!JCw>=&d%Z1Q{$k4?u9dhh^j#Lv!9+?i?SdmAGs2rNkhJcJ0jp+& zmp0B3Kf$uA#L`4kx5_~e$7KWR03mJg84{99kxa0d2Q;7&Fo(Ei%422mKg@eA?_`Tj z4|iQyeI{U6m?JUAl(9nE69kp!EvM9dgj@QdvAi;C9Q*IsOcA%~PiMtZhJ91oK1 zwN`bA&?Ro*%KPm88^sQ-EFwk#H?1XAR$l%%A?=M%ZlHI$PT0qSL{=9+-my}&W-29U zxv4(~`#O9zv**Uiu2+;}ay)&(*l|HpeO+<3-1|TElHS>R5u*lB!@r~ui zD15Z4{gZJSmq(u?*nXuUD;;o7ml33rJ9e~;3pNreUL%wl3)y6w3Wc2eD1eNWA=zYx zk&+Q4J*e&ZR0xae1|V9Z-iXCxEp)=adDCK(b#!!G2^1@{DcgYYrr&j|_SpHgy#mw7 zfZwuGe#8r)B(!$5+dqksaz%cAKjXWA3_EXKSui(2L@ng-tb?T7dtWKZU%Kl)yTPPADQs(k12rx zib1{t(CbK%P-^TCYM0YU2~^Jw8^dh=d&oe8J%l^2b+{Xi2bKT?yHI^d zXtiSHues45G7J@93V)rUV6>Fk`2uhSYbO+^1di`XDW8AjV$?KU40Ql!`2&iYziHM+ z_bYHRQgrbZdsK;*s`!g)_rQB#a{zytPW{w2Xb>p@c{VYRW}v(~b<-qQ700kyv*MHl z?%g`?MT*foJ&3Ug$5*m)AOq}9*Oemb{7FYw*U;0mQeZ?yPUMpUU{cP6#7b9#{%(S> zw@SMwr022_eKtZt^hzWGc^YsQ&q;XZrTI!JW@P%(5E^e)r-|2j9n8#G2Y! 
zKy)WSOrAE^tm4~X{+-;Y+AV7%<4hr_&j87Rln)3OfYjoYh5_9n$&_KRMQm@;xXhwdL^J^`K%3nSqk+V?B@N|G^DUq+A;3uK zz`SbYd#_3W+kHo)u0UBs=8g1L6uSeA zt_)n93erSZ{w-Y@Pg=+_D5W|MSJ*v%IBUI~yLhZ_&-N%pqe_lDp}+de2t&LxIkSO3 zzP$f4yPfdYfh7KfcYF!E<^R+qr}QSOt&*@8s@x_;dZR;8$h&$f>PXcv;L@op0uxr6 zckKNuI-&TWQBuMiKx%P7tE&kFIf|Lv9t5^nTVta3XCuJQdN|P=LRBy$r)6f(BuDRl zCd3URM{q93dx)R==WfF8QTq(hv}_@`Zs!t?Ey>%s`(D(=7eTvB+9lvI(oY&>sr_~A zCP=F`{B^Xo2j9@~D9d45TNMl7V+m1ipu5j5>q!`4Y}rPEwAi2Az{e^Fg%g`kZdp}1 zmi!ZEoMR$UDhNHyV1V1t>@JP!p9rhV|K?eplNj}>+fya~4~Zv3QMp|yYh>Sz+$6;a z5OGmi!5*D;lSDc)TKNwjpBr!Q!`5{VnBLtVd$c^v)Tgo-5?Xs4iNiKQmwjx{Eh15u zg1St}1(CT|%f?9Hz9ODgKLPEW;=)@W6cdpxjsEfd)w* z?;);r^cmOm@eTo$YQ%??d4hb3<;gUh3Kk0V%H~UrQdsXa+;Bm z&;71=<}d6C_Z*X~F8m^g`7LD_Q<+I3^U&im%>3dj4>mnB)Bl%mnUuC5gsIl5&#Cm& z)4hLD`BUN{@`VfYW5Xy4vE8&2&Ab)FGa(hesujyoQ zO=K$*%I6+aFNw>3_Hfm}j1(~OGd8bG|K=KGOJvp6w_z_T3E|)b#obB{FS}*mW+kv{ zY=*?tlv8((o>oXhD+mmYEzu#LDl$*pPw?csR4&huU=JkD*lHQn-@f}I6?%_b9}6$o z!c6dG0ClG0%ZcD+y+8L2;)!=EENw{U@zl5%RnQ1t~7(a&}R5T{0RP|eH25330N zLQxKsG5RC>e-k0cqtS+k0B0=ttHMBit)qL-GA=TbJkNHh?|tN`3hFfdu?uqiMls^N zPyiQ^F?fp$eHd@J73QQ%h^JO^MLI@ny9n=laUBc>y(-s3e|#2vrDn5I{hfTykWerN z^LLxH>UcjYV&%*u5rG7|+-&{Tt$RL|d=Gs3*ydl{q@wLp5thnGN;E(_`vtbS_h-Lc z4%;T}EYY;ZX9L)X`Y7=`$|;cP`E^Du2wBy6vF=#y$aA@T>mL_D%Of^}*5nAUZ>@j# zk$M?1baf!aDb*4@MSPjBeVQIv+4l{;vzR0e>i;dlg?!?+`oT;)Id@A-*H66 zo3;`YC?Trk*W1DD*>+(2C9Lm1W~V{#GTYlvYNUd61)(kO>$=M0V#2)Qg zSQ)YkX4qF+!eF*ro4@90S{I}~XgvMIVtBl{-@-#!=05*tTfsy8X;e27=YI?XfLwKW zKtuG(=vVKIs3ceEn*_WJ;XnnqB7?fRx6MFY87d79=g^`P-CAFdG`ej2mOLD+=-YR) z-6U@P4x&Xm62ba{n&wu=9}1=aczcm*7WOF=dOh+)fa?@2`J8S22r#)dsfhw62gB^M zA#J=VQ& zB&XG3)4R3I01d4kI`~*3KzRQxh>)fNyK57)mEO9*?%z#nsl~?YljL(A;?o4bETjf4 zqpo?q|L4DYNV4;!-?B_qZ>Pe01YgowMReB_IBjHa5<{3}_d z$Y&O~5&*_-K`A=UQ8NCR()Ynb+4)V(OK;7}y(TryqAzZQ834ck)tiu{I|g=G&B#}) zIJ<@P;Vy@>&|KdvpZ{issBTg*^x?+?P^b>=Gas*Fj9xm;^qFlaSJqu&>K_pmvG?|g z9{5ao`6h-8T$yQ8B;*orN1&ZMvT)Jdqy^=9%7xMJ3-0TKSW3&AW~*9DNhf z=L7T<+G)Bq7V<+~=! 
z9N>%F8d(HKyl&moq>b1J=hG)L^xYx~TszRMdT^G@#()gQ?n6!^9 zV17eC&tLC}K+pz!D06#~slO<0$SgXt*kMi*5SxOPTqCPp+rxTci=k0i{qdmYp&K89 zWZ+SvKOvujXAJfyGyMa3R1(+Iiusx7^7sR=?h#F_+s>ZOslQmxudl!u|E_0CT;s=~ z{tlHQzQwlQ~qrC zZXgo|4h8RVE+jNRpWBR??kD8Tg)6qg!osdR1Q;EGzD7^)mGj}alC=&N&*#5^J(RHj zyKrf@NbZnLp+svQJ=VK48+GgvoSZ;A5TR>&3HN3N#{ZaIQqOQOR+vB2sQyBhjC@hE zB~X{#DzZ_9S0?=X6CuN6q;=*+TTs(=j-%tz-C4+e#P2d}m$R$uv)TPH(DQ|M>9u%O zao$&w2Wj{0xtvZe*0fEMSUN5P>+6xSk979{%r--fvwTT(6Je$2(nZo04VK46ojc>O}e(-)$-}Hpjo|^M1OlCCjgHHXXd2%CVv#}G1haMO+ zJ8PbcxFXVr?4f9=@5haxS$VgMKIXiME1f8sX)21s|1zcv>Pp~9@cc6#;ldVwa_+r7 zO@#45d4G}cf4Y0q=6Wb`=}(1jzuwO3jAK9CgC^pCjV~7B;dB?|CjFs9)*MfgRLw>; zP{42L5O$S#Qus{v9Vu-9n;v`q58m^t;Re@dL!iflOldq>d}lrD@oSPuoc82kzrml@ zeHtJ$wlHZuxrUL7q&$i;NDsUS)@fnj6dgzG}OVeg|+k2j$x z8R^o95U7Mc81fs0T>34KS!gx&QEYbr;nb5QA^k(&@;dGE@fXMoOpHY^%H&Acnc19| ze9q>idG&wXme@D4*%=qXxVeS$~ek zzC?U$?~B|zan@TNuwf>Oyi@afc>K}R|GBTgm7aBN!qn(xWRFT5CR;msjstkAR0*5`ly25w(l+tt>|0q$+3c4x^r%9IdPWq8VUM zFdrBp(fqi$C_E{D3IGzWs}vO4DYhC3U)@)^dt-&_Ri-RJ8N#gIe%)T#Rxq`w)LP1avWJZw&B_)Jwfg&Z{p>$kO8YCUXB9v|f>FzFr?yd_+H%Lmq z`$6YBGr#wJ=MO(KI&z#Q#bjPzufBM9A^7!|3Ys~uhif79Ro4MM}Bh~OwMiNi7M!w^%db6 z)GI&!U%7y!Fqa7(GQR|C^>gT%>2gK+H**f1CA;-_}W^$PKsbF-_ z(2PFm^!Fb@vKN9X!OAv;eBpD{i(*rMfe}KLe#$*FfAklx$f*^tCFebwR}tN@hQa2a zq=*bM1exF4N=_5S#Xh3XCV+jAs!;Oq)3#;dNZTNC6o&Vl`nCArRD+Z6+ zZOc4MwXo?q)(ISZ@n+B&$M6CaC_XOI4i|>8JEpB}aBgny#6otSK%fy6-!6pE``_w5 zN~?~Z=#tg&!}@6sNZl>Hg+7d0{D`n~`sZws|A9wf?0@4~vH_x9Q0+gG@48iP5%vHH zQ7wj?(iIA&sRF>JgpXNL)!9E^x=Oi;J`EySk zVmp+&HI6axviu%<#GL+wPe=QbUO(jqH*29@hWW3A0h2*6L@T5xJJWj3?q0{k;&)}u z8|NP#{{7zI4eHBcdJl7h%?nOa_4M4R3N>B6T~b&8g`k%!*S-83jR3Mbqnh4_-*~VI zU*R-GQd-(O)wjf{#W$_VQeU7~kj&~R@yf!tSu<=}Fr9yD7U)k8*FmbXSh@c6%uozH z`BxJ8kAD}l$SM7NwvBIZ%PIc}_i`EE+&u+9ex)bV?cG}^U3`(tcO1GP5JG+49YZ-% z5~78aRjf~Uv?*K#xx*38C6L|tXN$I7M-Y#E*l_8@!wLTK(#b*pDOly6q0n=pyc5S1 zF!t+21_?F|^=~S~R`wZ?&VOC`vE)E&%cj#u5z%wXhLObTi>wSxO+6D11!HdT3U2+y zl0#1Lgf@fH+(Zmj(VE`1WeY!2+kdShWgQUMT5Az@u{{Fq;3r|Q1ZN~h60CU%tTrwmKyuv=L8Qqe 
zkd)@=tp5Hixyz{8Pg=_AQ(3VXgw^_#ElUGy-#U!o)1OZp_hv+9UIV62u1uuR_FVEk z9bJ%4M-+dGNO(IOK{6dcX@#Rs41A$}YU8>YD1y{!t%nT{BeEoZ=PbzkjAb58&~ja# z;4Xz*FT5nF$MxAGwT=u;B+V={&&}1&akuP<%!2Rs0e$sP0TwbO)eONGvY8p3moYGi z&-8iIL~t%Jy!gDv(e1|IN8*8unZS&EVm_O9S>&|^zmoPs{3o4{9{)zKkM@zIBxZyn zAl;~E<+ehCVpM@~qzOH)kzT_Pi7MA}J9n}2@-a449wz|cvtVEdhwRb@AA;mF@AQnR zvCm%ezHct>jh?JXA9`3^LGZt|?ZIymW)*1;B^^tU8FsyGDbHji9ZB~ua}BvYR5JaZ z)Tsg1m{cz*q8X|~qv!Nf2>R<>54UST@$lXsNUppS5u0ofZm zT)+2*wFkl&FML1M@~yY_$iNxilnK#LeoT*JSow&HrB_B~;)Iutw(i0M{V1a6M&Lbr|(7qj@=O!@DaHg~Db zsTCKoOg~NEJrKs<-z++m)xEuRCXdS{(a~hv(0L)9WV2&VHvJoY=}Xr_qJbzOd`T@> zuVgpWj2MdX$_j{s41mi2m{WPLw)1J@cNEEo;h>4!4;<%lyA?3F9~`ZIo(?4MzgrM+ zIJoeKjxDu$iCSTw&KPWc-6Qr>7wky%t%1##fI55jY$*2g&Ur#m1Z^#`ou3*U^ATQJ z`R7hEM2|-CSR~sB#x1F!t zXhycqNBv#^()5N(PSrDpe@JiO#+THFrKoqqKZbz}d!WFAw%4S2T(Ul@>bz0J_rcJ2 zz%&+nV2S7AgzkY2v=#Q<85M3p90xlI)+G`xvU2N)EH-Tt3hTSJ4Bu zm{dm_ocod%v8gpq1c)-12`bhd)9&O<^3uSMOiHW*!!!144yiU(pzoV9P2bP{P!~Gv z%FyanNq=7g4hFAN$B|Yqd`A2gBr%cJQZy^kd!J%s-2aubs@s3@2>LvFKI9jMH>ieN zHr1CNSU@wACH$}_>5_D{E@&Y<`vU?f7rO(X5R8AuIBHTff)vA6c_D$GZf zzH8XXmODc4<^DvWM@{iFI+zmCW32ANJAZL&*ck|T7BQ}F$Ate5p2*h*@Di0v^D&iS z(QhgJc4@K!{*&8|s&bzZp!(-Tr!BsuQViIeS7fLtEAPxDB~@bjG(peXC;d>S|IDvN zB}6m3mk#C!nh;p=`37LIwB!)Yp><{`R?!eWSz+3s6@!DCdv8TgESF)hXAq>Q67g`cp{w8r>`o;gf#7Af3j;X|1 zcCwVbz7|U@8=d(0o6x^MOzr|oviKQbJ1tvRyla&F1k#KThT`W7-)A$QdbJY^j$9gD ziNP(<(Fv)l@;H8UxUa2E)I8Cjlx}Lc$+IIt{qn}8?8bf;qXl614LQ|E}4PS2k z>q;PJK8@$%CK)KNrskvglV0i1i|CT`OptV4CxwY2Ofn&8RDbDJnyTM;&s+2e?j4Wa zqH!N#)O_-tqhl3w4&R1i3{rA@yBFJ*$JLU4{r2x8cjoN9vtN}8E)}Lhck+Do;A9VL zC$knB$^u{?QMPcq!2cbMSspHTv2F67zf>>zjjva8e6SVKPk?)^!s&N1&P?TjH`zR1 za_g9Y&vSu)FB2H=p>8VlFvmFeKW>!5!$F-OX(pET7e*0bnQSz*VO4kDH`&UXPY)CORoJV+{33voHWKQ>n2}E3hc+SthvCj`J^5= zNq9$Gw|D*#BX@fIM_2<46@j+fzO6HGLdG}yK!;g)?A0ZxAUdrO8sGrv#2*0)!p?{R z3P?@*{_hCCV2vpbeJQ%`QJ(iKxAUvChJ9ZJM_6CGfinSr@kUfBLC=3lS&)lHxmT!= zypz_lUV50GZsL!dtMDbTC}-urwN?FMe!<(fQ2p%V z{2A#ZlI=eSP+tSn*^7izs3JOGpdrX+3#k|$98`ebWC8DN!1ep~pKsx#??5c|h#(Au 
z7_v31e)yiw15&8|EWIz!gu_J~bJA6_KmRWiEG929yP0mHZy|ISPct2u zS&xvu>gbg)f0phtX%AZY|M1jcNwP5|cv>zhkb;hWvL~D|Olvyv;&Ro6;ukjj#-M;c zq2=!QbWH|(a|&n_InY0zth{ysTKABZPT+&~%jEj@lBL~!AotS7kdYLKqy3Z)x_TD4rFBMamVz?Ie$ApKC+snhr+oPD z?+XV2eH(*^Ln3X{$8h!-;W7z_tir2Kx7f&EffF3ThNZP^>2QW`mhLyJCE?Ka-Z<` zAqL<7itw~}mY71I(>*!az6+=BYxj>69=E7Jx1rOb*fFxJwoJo`yW4x%Dv;PdNE}R&FXDL61fHQoXD} zBWmNARnFv#!3XNfP4HSdrg9&Q!1Hr>fK}yA&*Lb~i}!ojupk~?4`wlN##zz6YMJ`?S8>N&A~ybO2|w#E5x+_6t0=k9I3#9%MW(s~1z$sDbysQA-PMXQ-YMKs`o(qHka^ul)LK0bBW zqrfhCAEQ&p!$R|V7g|8v{eRj1C)@+UhTRG-6qLp?$&BfNCK~1a2i#cKx&*I4vJrt9 z6DgKleo*i0F3&CtaIJk-SQrg0kKwz_DD^WLOJgy8!Qt>}&av%_OK zR*;;CY~$rw0voRFU2Y>A&b8^6EDxjEfLu`g!JrZriv`)@_ZPz}75cyitBSIb$EfA< zx2Fd($0csTqQ8-0xOPLigievwaO$?C%G>{bhKDing`t#Wo^qdr{@J#i`?^L$?Zr*e zv~{-)NRg9E9;|g#XL(N5YZX5=uo05O=ZFLvk}lW1>J7x?g(e!;ByZ7=?0IomjLO=yp-`agjEtH)<_vt5`&O!eFvMR1 zsOzMT5u8^jjwh7eDI1G7VDuV30!Z70sl4?Y9N0Pb4#RF0zs3J76_6*5ZI+;FPOxcA zMwNtVsNiW;2#7C>NF;yL*%48W?b01Uh{qF>F`%njp~a(;;d^=dEM$B64E6+$fu0Ee z2>Bc#1eh7luGE{w@!>8@9?<@P`Qm!lXUcMFB?&|azQ_%y;5=u-^Amg$)0EnXWw zSvW3h_|1B7zE=op-kpCO=HIQ6ARCQ^ba_?zhY%U+_({?{*aRc*JyB&9fk{X4huIO& zrF}_A?@0~m&Z^dLlI?Gp_x%sizM~lR&o41Q@dL!SgQioY`@=t@@o-nq&(Z#wrE#0z z5XezT9S$_X>G>AOmkB3!6rbumfvMp^B-1$AVT{WvwsB$m&GgGb^@=D&Dv#i}`qdrp zWcDObs{c1ThtE7i!0WaIZqs&X z7C!qj{^zOs^rR%_*9X+{Oo6u?`?B=-9aq8o$K(!T*>*qsLgMdP$vYkM6r%=HR^c86sv_Y2~E{-E!|i3>ZXLZ1Bo*#y`OyqHKIEk(r!)&AaEf zo0T6khFsv`Ut91c{!^Gywr#bkeId(Nys7CuO2;GY;EK~M@O6>UfUG62^lCEF zvI&}Vc+MZucp!14dDM4;q6muHis9A>LAaRjx)Q*xVyGZA`8C#>2h`xdn*I{z{O>~_ zeFuU|F;$C5K-)yJc_?6m+lvXT?a-xvz+JMt*Cqsa3BE^+`*&dF(0pXP&~X-6#r|4u z`x{GxMI(qPstG6L{{Lq^e*dT3D^&2FOK7Ir(u`&Y$F6bG3gN<(V>7}DIpz`KK28Y2P6^S&V1(M#BE658{T!SA*Eu3rsknM#Wty+w~M6i~;klshal zeC3_95q|Oi-ac5tnZb<7-5c#|)SxfKlaKn3`2pQVP^){v~*L0=)p^ z$u0Cwn+qmJk+xK1%6x79&Huh-;Ji~dl>-vTWK7#rO3D$g3UfL9Sd3a9Ygcd#aJtTj z%pwF|d1BCKX)ZkDaq;5)IHgl`LcCYW6W90+8Un=4s84c=;0ZV&H2}5txG`A3G01&F zp%>0{adh}UDE#+Mfix05g9Nvd?WfP}dJ?U9cE<}zb#1WZGDJP6>o}_YWr7hC4vbuk 
z6^N;SB9oF1F68rUc~9ChGg}tv=_%@mDnit7jvqfDz**5CsUl^lyJrc^e~!_l)U~kT z{ztw+UfK@vRiVNIu*I3=jPEl+N@EyeX=DEIHP-Pd%Zp1^V5Sq^!NNqdb+J^M?a4mPshVYM-qL1pEhPbk88Q9 zp~8h-GEOJ<8U4cXs8YfB-Gu0>ES9%r3qqm=Xl#0TGltOC$>#;?%@*KD*^HfG(6Qen zG172xFu1t;S3j)Ivudupd-E%WY#)e;MA%q5Ss89F^xyB*`{Uo6mO;)GHJhLj#kVIn z)xwcYR-4iLP4e|d=n|4JnGmsSm2@FPZAG@=fP#fd&3CEe(&OlvXU|~7uK)<{I=4Y* z^sMmV-kQsXB4soJ_$+{A!KsB1RMGR3WCbwkTo2kNCZNhprs?}Dz1$(5u}nRv*<5zZ z;WO#@8X&(7Vu>4zEB>;g*=~osBX$efwL3xoYxDd%VgV_b`5X4R9+S-$BUIAnKf+`! zI(z6u(cL#TdrqVt*m&h;SQ4Ttb<>Gtm;<|WR49kKSS!G=XICs@@y(;fC#({WH&;M%8zoU`6V6uNL33KlG zFQnNJ0Dj(6bhAoL0NCRVGB@-jO2rA*M-A!HTMR`RwnUhqFFZsmxa(r|qJlLG<^Yl; z9J+CH2ydrlDydEyO)u26Zsw~o*2=u6SFRibz?CeSc>rn+)QQoW;s(3~Cr+IkeYdb$ zaFF$dG?$3NQ-Q;|cl@XUj5)w}-AL6LSZfu#XkP`?*H**UD7xPuv&UyDtBdB^kY(x# z1oS|?7>5k-R13j=wK7Peh>zOZm+U^e0R^Q6-i@-}TqD`e8~g+g#hYx4 zt;fqVE8XQ;5-GmkFdC3|Hm<|l5F^*1ia2)ZDe+Q38w30$UP#eZQd3gU>7jW{H{IVC z6EWQV$#48sgeCCp%AHQ=f-ut{O!+Lh41_bpenSD@psqwVO3V-ZD^el4_*mN3=u4O6 zV8U&IlwPC{R1B2RxOiW}38ffl4h$lfuA?nzOgP%CHALq=zb1igG46+PKFYU9DKZxv zE|a3Vqxx>&N@D87Ml)S#qvNTL*D^Tp;hi&EU`dP^ON~3ij-kB>pqGfwZO?fTF{AEkZ!{EE*Vui9+*V#ogS( zLPR-l3c@{}oF0X0h2W@;3=^~*7J;ng1(P3Zh;#d0YC#q{;>^iB=0J>`xOHcOzbX3GNz}z6Y-pkPnU$P=_+YkrHCW3Y|AsiRcH<47saY zAQ@^W|ALV}Ko=W(k9Rl|NbbFD(9ZyTKFXxKAz+J$8W@vJ0Z3 zOV$eMwZK&)q>{U8Kjh9G4sE=uv-k2Z)SY=Pi7I}7U78m_avF^oqt+-ek&(^t{WQu( zI7PL`Vk&q1`BW~pjkN#V(N^L@iA4BQ85ieR@o6oa2Hdp*$jgS zNN#}eBoR(fyk!0!4@m+5VOBdDQX z5qxRUAx*(qRXL@*8jaoU>`Ds70ew7sNL_1@#Jy2ACVHEkyn!m(pS zZ78ug1V7Bra@ieq#5`mBHTLi;AHlpv8^2+wXbhUEhpDeSU_bNJu}bt$rniPvdF}$; zPupfREmJ19dMr6+DgvCw0SMV)azB8=djNIQL?iISn={gQ1lC{x2-Y(fQzc4@uR&xq zw#&fRz=3_~nFT&Aq~4BYB42NlZu=tGK7XF$=r{hb0)2=(aUb*G%}A<`MmaCB%pP<) z;^(l$C^jPPCLoRy4g}v8+ zP|)xfF4?P_{AVf{pU^Ld_t*Fw$|w>EX6ICggHoEMhD40XO1*GWN9%d({%l zO!6Q2oB@Su5v}Hr*F@(PS zAU|!r5xpsomVhn%o7#Pv9B}VK7(PI8B)*#EKeYUA0YxFGYFuL5?qtmJBP|Wp@$x5Mn`;wRFUh<2TUg>(HN0d(@ zc;{9tgaXr>57)(j%{7mu+`BCoCfF%RLsx$(6yUJFUCnW|~`D zR?$JR#Z$+yM#DbL@!TUcY9$}lP8Z0^Yc+SdHKd^T->b^@~42Y+6W43 
zq^ll<>APOs(DCUkOxqRLzY70v02+3Yt@7>OxJq(**uqp6E4 zvAxS#_5Bi{(gxEl7up1!Y4snl0INj$_1jRSvU&`dERAl+4QIO;U!Tw9S>sdUf$GZQ zc3_xvs(SCULxtdPc<$&YqLp+(?BRuz@dfJNnRZuh)sR8BbYgf)Q_}Xc=p>`LsyU*G z6ZdA22N)y1VAFQf<2-Zb1qPNRQVml<)({xuRp$Zz3XgGHjHeKm^#5ZejF~+kT)iCN z9Ry;%gzjt&N5aT@VRMSKMswP?d7>#H{{Er|3<9(b=Eng>mBidE&suGGR?ME1D7daW z<{h3&0JlQff@Vz%$z-0R#<-rP;*{ur3Z%b-32d%46opgpofFndrfSui-f<67 zFCqh5;|=QY;Dq?hnVR^rPtSNIEet`wA{4CVC0f!KhV;I?d>={6^8fqLN)V?^}X z#?HzWPlAid<)#)o0c^y1USsn3qPJV|pWpwt_y7BY{Olo#Xc10LeT|+PURgKgF+G?o z6VA{a)&qcfZz0R_Xr%xQ@^HC4$c>S=p{v)QhL(RQ^8SEzQG3-D+g^U>y6*3{SO*>% zI<5xbMmoR02lft)D+9E4wv9m?JcvPYc{GT_^Dney+5YdP!@xjR^$;FToq#v56eRa7fV4JTr{x0uRnx>5TKCk2-wXfqAcXo4 zG1Wsv<>M!<8<(AU-a0FWgqtdcczt__#L|8WW;Qk+R{weazy_sbzAxR8cMFAe|2z5{ zLMu+$YctABYrHR)fO6Oj(7ANqH`LPJ5FC$Xd?>6e5UJf|p^9=9b#>D)0n5`fi4Z*? z-Lb+q&&q{eZP;4A+4S~!r)V_z3_>beaSf79#|hDBkklOZyAbvf(2nKGz0;`hX(Rg^7F{?qxBkARfv*NFmk-5;CFC&hl=lvz$JwI-(KT=y)R z+imxCosfA`?AE8}75dmmux7X4)mdiz>*&fq5rcsV5VHNj(Pgitw>8a+Jc}H;Hse8U zvokTT)i(REH7??8Q%$9up)m2D0^Aj8Jtda z!cQ;4$7z}8lnSL`?*>mt2N<$b(1Nq31GRg4#`FT|W4 z@$SqTvMY%YKF_>?TuJWiUmv)ynh@S0>N}-Xt_(WB`eRzhbdQ&Sq0%S8otZc@PtHqm z$`8cgXOpXp!b{r}wTyDzFuq2Ftn@!5eEEf-DaK4u5%;!=@5wINN+9Syn8BvU{aITdx#lqE9kOm z60*w=WK(g<^xYbEZVdb3`V+Pm5M{uN3C~Kw(a|wF((dMNsPRRCiF8hWJF!BL7(SnCK&+-fvK^6S0jGyS zhhW{sa9B;N{FJ6kZ?<%Q33?sb|9;^m=N}-i5)Uu3ZRO;5n_KL2_^P@DHkA49+DsHz zq^^RT>OG;DRCcXM_RC2cU0ige^d5QmHXSR-5~w%kki-4pFYBl-l8TCWpVDE-Pn zYa10B%VXGdb#1zJivh73bGdl`Q!KHB>vp4#Q|=;oRUwI5vepDR%LEsLad0zq(-kxf zi|obtu|NP9Wc2wecs?i+QrJoW_G*= zE2R^}c36=BNwM*1L;^ zy^6*TUE!OcT5z7AmaBAuo9o^pXUyc;qyyZts}W^DtbzFG$8WSv#B8G1ZV2;TiU~j8 zQDlb5f^HQ*HYx$h!DfiAW*2*gP0ulw2V0c>9D93PjB9cIP9Y7q1hsx&!d3@VNDE(1 zly_&_f(@bFMPNJd0%Ac(F(rZk7x&cf3Q)T9@2u&TR|t;PSKH%Kdr&rEsYlnByv zJ<4cq;Unho%cV8Tuo^{g%cf;V@>Kad6ty{ze>OJ;i;V~NE`MS0;66a(d>$=uRk z!kWzy@UU#-=XAD*wm~jEn13A9U-JztPG!hYR}D4nH;F<_t&N>w7N$rL!leYjIv1Cc4(-76oIvT+g5R_lj1*Ku;v zc*+LNbAn2PCAO*569}imEcL1bMTAa|+)eAiXUgPGz-Jo1W*TjS_c3g978FMK#%sK- 
zgQl=o@B^}A-7{)@Y5Cmv9@iP5_#V-fFx-qjxydw#@_O}cEyEck^nQrOtzv8YzrQ2u z>n*`5J-ifJGk@hg;}jqQ4v?!1yT}`P7X9%15Fsv({UFx_$q(UI3Lp+Z;a{VPmw8Id zK=nf)t*noU9?GdX$tFjp;RfhFYZplMJu7HEVY8u{dD~!?94oU z=_s>dmo2SKjq@=(oT*jOGAk*~&;cVZG9Bul&nRta++4rXwbx;PYLK$-2H1!#d}-4l z*jxCS-|%zQO+Z;!NeM*?{(%^B<(prGmx{g&InAeQ8}>nTD?6oRe;ZAix|bOy$?wG{ zOl1KcFet{z{k76+69Hq3>D}nV?TcJj)6MxJCpHVIwH>O7Jz3ACEgPU$2NIgp4EjLc^cR{4o}+W)|V7A*YJ2&py&Ozk~EyErQ`|C!`lBJ$$IQIb!LSdpnMsL@h`$1+~-7EOhjndDN`vR07&%5b;xK+W+&~pP_5@0M!zygsde7}4=-+29_|l_9tz>mVy;ZyCV*gE<$9GmEpOnj zbfU)s-PCxe+A7d%zw&TbEc0Eim|yCt$7Vn4nW94wtp9@j(37Qx5ybk|;4B++ z`mvqh(tr>HzRb0buy=3u)nj|zxaXOJ90_Om_jndvD~v4{?p`9PG^$_p;3QQJ-O~>f zT?SHvmT+p#e!|&A3~Ofi80Af-Qps<|?QwhwwY`6-{Qz*+t1 z93>03{`l;2TBp)l&ezYI=rnj=KhrUdFXY(4IxmS!nu*^2tgfg#NTRRKaU?Rk`5R#@Agm zb~}w+T-*)%9p_%q#K|yywf(aQ6upOiW%^iC6Q7L#WMYE6e|A9X^46<>NnYbNhR&pR zAM8(6ji$e~0PN+rmP?nlGrz{W8G4WO#?{(0{;}WLo2F(##jdIg#?y3h*G|~VYm3p8 z%GM@O?C(5ul=T)=RWC_&JG-%}y1OzHmwSA^r+%)&g$=c@M+e*V8suk|x*r;_Q4VAG zJr1&bc2@h#9R2NAhq2a;L46Ftiz%cB=GPG0nYwycn?3)g7yoWi z2&Yjlu;G~auI-6Q4_)ZEcQpjmE~5Jpa{Py$vAibp2szfY2A+v zTCNCjHgA2oc+j@15i>32p(-$AL2~x$!6cVka{r}p4u4$HlgG=}9ccsPf|f4Qs>LR6 zsfY6BW!FRf7IyozSAbbO+#``SpykHB1)~D-<&uAjm3~m8KB{4k!UYM3?(?DnGM84t z<@aZ&HYtQpH<36#BAj>`(}rjYd}52amb|BipdEj*324a$y`{_op@qI4#-vn|zB5fF zvNai$GzDNk-a2DF4!Iri!Ydild5O@6;Ne@@tYwcg@DZA0=K0xFZJ6k1&YX@>lULVK zRj84R78N53r|eHWwsvD{h$19o)tiQ^ZG1>~ZJm;xLdY7((4Q9Y3n#g#bqZl1Fa^jT<1rW2x|%ia z^ZmSpUA$%4kCKO1ygW(yuj?iDuZFWqK;Fsa#3*n3M}4SKl*wja-1EqEZS2{FHvWC- z$FJD@N}o=mEibfX$-iz0>hwi%(ooC0jkjJOe;3wN6uFnWMsFt4l-28gg|)6DzH#ez z@Y}g(GatRm%d7|QvMqGC~s(u`V73YzvjF7U|Gqab#6JjIc9@PpGV80 z(zC*{qL{->i1m5T36`@HNLs1IyWX>3_KNy~l3^-$IP9*2Icv_e**~c?J`6-+b6@c_ z(^2$B)}HX{o^BoJo_;<6*Q0KW(7gGwYpjm&@*eIRGxlYcSyD_+1$B*?o%tlZkWS$O z{anfMbe?XLWtRxtmY>+XSoePO`kqjGa@8t{Y-Y>0f8_0#r(U0bpib@Hx1}HOR#bK< zyGUsm=W1tizT6U0Q!dLWm(BiKHoCO8nKn1WIC z`;Krtv7JrCkAuqW`HsWmJ7V{{PuuAERItsvIqVw`jb-T_-xCK{bj=xib&K24gJQe= z_S7VEhZ%}I^_?!bzQ+@Y 
zEaM0Fb=yabkqh5nDNRciayPHyB-Any`S`J@1?%cSk_3t{AwBzg#^5e|_Xb?OTIu1C z^&vN6flGZyGOryLZfNFmEUFvKfFezX%kJu)CaB>hjc=Z^Hfkf9QA8CttQ6e5YJWqh zco5Rg!neDGmpNim@KW4k5}Gu_Bg2ge(8uO3=M8QSnaOfpk2|I4CN~vFaxoNm-p;vf z&o=??u%}Kh&t#}|*}@qetS5>&o=*>piFqz#GhccSIyPmsDUu&4aHc}piv^6CoXP3r zr-N>$S}=#iC8V5w9mZjhMzPVLyCFbeX$fM~1Ebkfom*EmIBxG|x_fZu$tz^>r1dW{ zGq>d!IJ`g!_$?VO&aEW@{;ZMCnb+^K^&v|de`m zwijIL^>vrAOQ9@fB_48-pLQz!`6*)AQ6)leO zrakgTts&k@O`;~WG-6sUXwvyx;x1%8TZ35%?)fjd{VE3)|;4LV+Zvp!*d}+Q0N4A83*U5xQf91T42>ofk@InaR`pa3O ziI=5q2+a105#u<1?v;h>_$rHvAH`6U1HNM zX&>!v&rGRQ?gXj~yF|OVl7~zuitQ&;KUz)A!EsRK`Ml6#)Q+-2w(ZS_lmbLRj zk4=XbQ;Ee2gL#yLOUWaxNrSUhG@^UdKMb22O}3ZTf-8rXBW>1H`bIUxo@>p zJl&&jqc3p1*u0;cYbI=NUf!S@e9~`Oe<2a*3V4g9unuWZSH7x}sp8g2Gz1KSUVYbv z(P+vYRVJ}OwFyf!yY0*huHNEs{gzwWHFoQdCVL415%@-Tt#kJ({n>)wptq z|8dQ!GMj~Z>wqiP-xb@%hLa}-Y)c3dG~bt zN4oH7b8?ll9Ec?mEn%mb)H0WMFI~_2@Kf+lZ)RD;S?nClRWJ>iEV*dS#7I3O@Qm{` zU)q};R#CH8N_}|3Rk1T}0yNOu&4Ib`A}gT~Oz?Bv7Qh__4{gjIaDmIw07Hgb7(Lsu)aB%Z%KJ-YVF{H4=!H4A zYx@?~;VBPimUUwj7*aSF9%cn07zCxiJ6+G8tRXNimLa%H$28bV;W1?XHWwicIp9V5 zWixp!RL`Y2YMqz5%@B^l%Gm?Sa9FI39~0i-%AA#Yu{`5!pOS)Z?+N=2RXpv$t`2^a zWa6d>L7qj13|yG!BN~6_1`7q0L6EIMS=7Zq;WYt+^H*r=y2}nIDAl878W!_a%ujgY zT1bU?xrGED%c`g9FvZ*QRc6qu)C6*S*f3VO#>(VD6UL1PvHdmUPH9+3As*w8cCk6_ zE~A-1@ikvIN?X=`*fBA98^Ln>mz*jiVF2nOi+G%{&5TaZg z92q7LN$YUd^UyRnUcT~~_`cCavxx6mLB>*@SqA82S-eXEuja_YN~$}uVlrv*^KZ6v z1pRpaMMt~yQtDr<)8?|q`c7Y*7E@+RQ!1#t@>%?1M`JjJ+KkFPc^a(bUrX_6jZ&O7 zZ?Ats@Fb8iJ}on8JCC0BYy0bGZb50y<`!Up!9XF?sI0`%V~;4cdbO|9UY$nkHcrFHC9Nm)xDahz^mUHVr*(~{HPtc&xTm)g zL)5-VO4hQ4(^EAmx2dZ!&GNcXkuLO<42yDg^e1K+1b8Peqp@m(a1?QkqNl=jLW8lT zKzze_4ZD0MpgET5M`Z0PcrRS zP)OLP>pj#eVYpL0>n|h3BKPMF(_PyeqJhD#TaotV-SfgfNxciS?j$U0HU@KK;>^3Y zA_mSsv$tX2IT+7G#k9H^1T@?_z$)kDNGk2$xv74*#c97$;2A2qrMRlrO}{=A2{!N7 zsL!xGM*vxpeYdY#IUN$}6bcgewmZe1W4Cxx{Jboy{S+DL+=PEl4z}F(sV8bO&qqKP?ydB8kEZxBmQZDW@dU|7!_zT!_YP{CDX3!HS^$NxIG(;x18x38z@JBI34k!el1c zgjv#(6>}taQ%$5Q9d2qiuML=G&ek+8bWFb;TrZAIs0`GLR$2;qgB8@Xr`xQM6iKa@ 
z2xK2hAeQM`-@2|%uCmsrn<%s56=+S`XxjZ0+eyH>tgk7+)MP?RCiI|Cy&>QzTM}p7IYGId}I|dOwOXkgyko^xY zZ{EoKNT)caEgk<>R%G_hva3+#x?_5)`k>-@On=WNkL*6{wKRrly26o%w3r_In*^P4 z2P4_@{_h)12bW(X4QA?7DpztXc3hKNqqE*BEQ-_*4IE3`RB3&SOVCoLrtj-b-5GiV z__<5RmV}FazS4P^v(%%1wo*MnY|9%xl`T!Zk<43(3!*nO*9^q;jK~aHPPcAaxRhga zTrQXct!EXX?v70_mL48E)7p69sH(nCD5ot*blK2E4lg`8HB!#Kmt=u;{7!XM39p-9 zuU4>UnJNGAZ_)Q--7~|=H|VeX5?>ovVe+zgeaE@11ZNPeRkm>Pu&B`M;&5ZQ#n{*I zGV?3T9%~P(|1`LnZ<5UU37lFEwLI1zmuT9Ip*BE-w}>uW(U(vjzRT#~fe4Q+v+aO>6MS zBAvmBqP6YBQs1+GJ<0vNkVj|NSFPzZ8_(C@>DAA+!5zXbx(TDnho~IkC ze%8xA;`#PSS)Po{=CgxF&E180h0qSBNZzX5w2mLphG6nYHxtn`u=)ACwf2Iw)Z)+3 zrCVqlXkQxfF-qIpl^);Pa6M(#Fn4dtg|dL%h43E6fHv?+Tx*9EZ=_uBbiRx@TdKl& zX{XGYPX#rUc}xxagYtf#4L{B8pXsI~f0z*NNoKnFnOwLM^%-4Nv7kY+R$@JX5 zEO4s_R~%1f$<=myZs80f%w_(IP7%yzlF{Bz3p9a#Tuqyj?pJNAN0mH| zZ0XwuJbD{_C-fG157R}>_-vkY*LNvOcHQ82IhhwPvR`pv%W}KrgbzjYyiQQ9Ep^?- z6RUB$kD`|-zvey;n9J1zq+9!HlU9qH-e>?zVX?n|MP}<%5||6i0Zi(bg^McINReeiuE=YXIPtPN(DkI}UuUH*Tbn~HlM&pzP`L)P1 zt+xHx49?w=Fm5ZmQzG^k<2;L&M5wiF-cuhdU}Rt9jntPDpO&16?zc9;DoHq$$t?HvC0Yo;1QisH_- zAFiT1KXvS%`l825T3@5`ORR35KXpLyh_yYe8IyrJ-LUs+dZgGoxp;52u^A526#YxE zWlFHkSsi`;*h^*x+K~bW8O^SF#H?2FX>VQkee$(DGz@Cz^Or4!l9NWyka(03G8-pV z&eFI&9KTviESVyeto;RB+?HiA@ABzGn0mAYnyN(Un0aNnt*q+2wKkrfTj%uin%jz1 zpZS$+tp)*?G`%Rkn3&@dNRpKBAw*BDc)7B&0CuEPB^4b?-`thLtc&zP;FL#M$Y~nt$Uw7 zCM^(%9@x(1)?TdJX!UP-ytOu0 zdS%GVKUUmiyKIhjViRg7!=E{%bZqQ1jVW8;bT|gfs@{ZDj+a@;m9^EFBnc<35Y?HJ zu*Otngm0&qlSJBd)t`)1%{yO6QsPgL^JDs8UvuU@v5WFgo+tZWX`}u%vz_%Q=Z?OX zIR81oTM|q(MIze@`UUJW=W1OmRFBRH+>>S+DYYGJ9nqSu72@~ii$C?q zO@TbN4OM*bSZclKqYI^Z9-|n}MQQuG+m#Gq+_2bY-*{P*z+t_}jlysN%arrPIWHpf zhp|f7os?`9f*g}C{7Jn<+{`u~+DF#^AHuFWuBxnS%LSwakq{|CMLMJv1TG*d9g>nt z2@=u*5?94QYNSC*Lg{W0EI>k0=~hCzL;73i;>w}+q(iB83-oE#_bDozQ1J!GuqY_#rI?`W|R4%USve`sK9B^;H1y*oCdwSTX4!iC zJDrELtTl^u`K>#%lAM=kMb--6&bZCpJdU4a`_=NE;Diq`N0Hn3JYkAyI#Q74h z^a0Kr)5f`m4|gKUly^ff7(N@xJ^#&En}%!r4UPD0#g62}ceG(hW(`}T=HX418M}l{ zxcJfhL@dN(t`$Jbc=Jpo+KwOSaJ8X&JqD7gg%?{M6fm3MRuY?)wUEwd48{*NnH|in 
ztd^-_I(qDj18eNWbBgQ=F{4j@z0N1Sr#g6q+1|yJ^&7P+ur*HW`pjO4Q+h-^VjYl? z7oj9#*X6h)i60^L($FpyM8$E|Q0-d+wQ1*sLNb*OnP=J8Y?JY4YOLDmGV8G2>de_r zOzRuA<6oM3O=D~m_`%xQvh}C85Z)wbBIP9UM*4-6#zS0>GMMniR^nX;J14Jb>pve0 zB2A}k<4m7Ed~~DJVJMhvS!Fh}FIQ!ND_J>#i7r9wwchr4$~yF7+`RXO*tzGxOTz?x z6+OK=v2!^GiXfJsCtEM?zF<$_decx1$6)OMb=wy|@{K#{;==)gRY0G0jqd0>5ihA$KSFh79D{JfHGv1Mk8CZ?1@ji&>dnlxqc7~8SsK)F-@L%16ZENH)yG$3u#-aAFl;#J z1LK?^>-0&HN^gCO0m+Z=vL!3~YusnW^ox76d%mv98HSm@s4U&7xk7yGCVR3Iu&*~= z?wrz4_Lh`__mf)KawOq3D`E8cRkjHj-{W%UfwCzJPzbn3{e>i#6BV`F&x5%rw#4 z=2>~TEF)rEgVAgU#9S116w~X5K#3Jg#<5#>@~%$0uS|`!;vPOv&d&Fuc7iK+s~(zU zcVew+wtYm!5ZIujzID^botY1qDh6QH59st6p=c z!&c1dEZ|JUgmzk1^bdFi+S}HnT-Z2u+l!|L>}b}GT-Ha^42@T3ECxNH;ae5Z>ZcqR z(;Lh+mdeG;^w`0d~>-j=Tf7X}0_4ZPz#0Y2%wlb_&103mR;=J^bn7*K zQd5r$2YFUvUcn3J>qb76%gI9_1;tbsI`WO7C2!zlMMER$$?lbL?cG!3-$l)rr@pf= z`UKn8AL(39RgCPIHLm*94uM9``qWPvUL9qo2W}b**CiJ)_4}4~*vH^nkG5^GsY@kd#UGYeEj%E$2%4SME=SV7_d5Sxa ztIcg&Z#p~p?bK`2x^90;BB5^+QhKQH#In%*yVq}-^_EhC*)JG2jurJ54SqNnx~&ylblgj1YVH$c#%Hu{e3c`ydr-D>aKbCm`slT{jb)o} zypZx$U8Ey=Gv(tLl(Q4uEkDdNU~;kehDILED>;dJNQ9V;BRxNp?1_tJJ80g~@To_Q z-`!7{+^f1KK7EkU<;{B$&%uEnZ~fIq(&HRg-b*al{Aeld&Ev{-k+E(~{2D4r)xy+4 zWk5_a!l|gkR;+dK%V3y0l*Cy#*W`^cS@PK&FI64At)-z#d-U3CPPc*EYs2ALPf87P zRGZimUS{10w^m+dv1lX8HxtkjSTNWSRHVqhGMYs#q*1N``Pql&UB`hY-RRbPshR;? 
zBzE@xp|If{X<6n_6mk6c{Q?286;U0wB4~^sv=faLj<%hY$QAerV9XUCL~Jj0r-(`C zD5>(~qF;7gD_jYy4j)IIF^`>OQ@?WbmG`TSMXsgQFQwH_qKhQL&5C=Ul#(&NKz$-U z#L=W=9=&qz`F1g__EpJg6!oqL^;R*C@ve}Bwus~N;gTkfCpuCaD$5WT=)CRy9sj-S zpx4^{`c#wLmV)A`Cly*_$>Q;LZFiPmu;$hCDmO!vmZtsjW%ER?%zkg<)~Gg*>Yg-o zg!ov=OlP*y!`WjU6;U!YFNE6MWOv;n%s4~KhMOzuBc4DsHevVs*V{DFU87sn54NWA zmW1Egw(Y9+&}<(b717csq@R5s#}>U=aSU2N(=9xdEjH9IF`9!Pt*$R->@!yv%rv(x zQMfxiIrqF*Q~qQ|`RP}}!9LwDN3k34s8@T?X;aCwiJh|37Alx#B)C(qvG)y=RH`zI zD)~twHJrF8_0jNtzG>R7Lz!siIXvg*T_S6*W)#P?O|2(btR+izCXIGj_T-WXezIz{h-j}O<$#}P5S6RFyl0I}@cSzwhZ&TA@dFxW=XN^x=xJMD%}cW*C6sueZKF_t#IpS&L@g)2w% zqH1U3rZDCkQ#12{6L`giM8owU*S|jCqYhc`KR_#K=URU5-k$8nheCj%z zu-6N>UPiaRZ(cO6o6}z5WiEUo>JRQ{l6SVu7dOGBP*B0?5^Md{?=c`;HcapwO( zWOzPDtUgE*mO7Z8#_g%uG_ufbJHAWYJE>uGRm)lERfUU2h{!Od?Y!xSN|fq299V#-rZaCmF0h z)zmh{YQ4RTWP3gkvtxtl4@`3Sdk+FVdC)qxtWtwQZ9>TB3K4?M$($H)VJ%@q>B%^r zm*&^5M7rcpgUUC9Hb{Cei3cV4tmoBpo=yv;TDV)*NUV`=5%{(bs@HNy;sv-V z;>-wpjb5#cCI_pGwCmh#V|U9Hgt7sl$>)z5-E3^2JCa&K*nXfYY6oFd8q4wxP)JW_Gm3kf4lJ{KzOiJG>rUeV62s`7C+53-Kg zl-OCQXA0)noi%2SJl@ot4b8wGXZ#4%H`AY3^$iyN=k*B_mT$}ou6Bl;xoClbsH%HY%>WvR_g<}$Xilwff~ogibVtQ|uL*rLxlML^GyYOhL&unx=K5sltbDCmd`*Sf6PCiAiss>XC8thGzDm*QyF@&j zeNLa+14>u@N@KuwkAaHh&xWbsf{$q)j6-rSuoEYU*aeZ+4>E?Q8M^HH{2ANeRXmP% z1~&u$RZ~V^c?ad(-HX}pq?#Yr&9wg<;w+6<_)5-FLX`s=E6NlK0_{Gy;x8iHc1utw zeSS7W(EX09>}-h0ELJdwIMjB-LXdb`U06@& z8XG_$z8E8uPX4h7G*0t%W;)tgsCG`$oVmAjuCl}7nG9vTubg7tjx1 zc;2M`Nl8<>bB%3=%t*ZWmo?>ZRyX71$;$8kTuM;sK672TJzVn4lcPKtURmN-Bk7y9 zPLmqyCh~uJdzIB#^xD)5q!kd#0bcj%tBi+DUdI>qnKfE8Qfp>71X-y}*vcs;`8EHG zf$ph%b+hde?2_#tN6%)k`{kQz?Em zELme;f5g<0D7Q)O+)j2M!Vt(KBNcI#SfAdV0?(a^S<^L^_+BO# zsf0UQ^+TVI<$r}7QCh7$r#Me{uSHADY4MwJV~Q{CJ86#DR;^vF4~lCZ@SI~j#-%gI zqcwhZxs}Ot?sZ;gumJjM=}v+eBOA$Hzf#B(6m-RVHv3`dBHKu>`pqmsb3%Z(<{MpQ z%o+Uq2<9oN&hx~!oy{40O&PEC#ZgwHZa3x5sO}gYWt{w}+?s*S*gMdD0Pc$p@bMCos0-a!0l+CEfHs}tV>)m=1yeJ z6$rMylBhZQbH$??B1hE;2&!(?N{7DDY7Vcp3tv^cQxbDluU+}*(K*i)p3E$#%fyFn z-q{4`X{0SIYN2&ydlBl2g38NkbLQH0j01^&B39tQuFKAWqOngSQ+0L;Sk$K+)DJiN 
zne6X(FX`MaozWK(;o{}ZoU&fBSbt~vZE;MkOC34#qWD7I0X51oLLUa-n?hDn24^?C z`=nalAU{TFkIrN%<>CpL86$O1#5qc(R_TtV0Ds1Prl!v{> zG{|)}OPhFkwkszjI^>_4^#Y)dtYsi)eXOeF4(=1;5zZ%Ryt#Tjl}jI$w_`IS>&2qO zt)I~K@-)89k;-65C>RW1pXDpGiM;ZF1j{&f03$=CHueobA&fn5GzvKj9=yr+bC&&Q zib<4hhFvl`74;(HL27N*t9n*qVlLT*9*54J_O=Oa>4|7T%epLCnwy*?SEL1Il7no2 z@;!*XFeoM9a>v!u4BV!-A440nc@I=i7c@Qf$T7PE6)3kc^_nHWaV-TgNlDIE^>{52 zIJ6sUXk2IWETzNMk=8Hecr*z34tzgn4m!Ae+tt!B9Q`=iuQT=&s>A7*j6UB7;yKT$ zTY@z%zkP>3h~}rK6;V^_4-5R0Gn#(VG%Qcm)p#FAM0WzYzZUZ-oJ{J|ve@xt{D`G-mzKeuf?nX(Bq3*AR6nmEQ_cm}nm_XZ8?=qmW{I2dKiR*tXlI zz4kS-iBegzIx`Q2WAVvFT4pw(Y*hyL_3Zf%1BdBeWhp3?FS;-oQ?Gm8yPyiIbubPXq|W>CprTw`Jj0YT!)iq zlH4964apT*C;1}#bJEQEMBU4OObfwP6LJYvLD9*Rn`M|FVD8Vs$M-2Y*+l0^10xPT zQ@@XyR?l+0r^U!eGS+f7s194I(^~xW3b3ka!_#g1${zb@)G=|Xxp(MUMqhZXLc0q! z{Q@7%HmuqB%1rKqDdC3N%={!79;@7zrc!-sIVHZ1)N5;@#*f##-(|Qe|I#+4)|Z(} zQev%xj7zs0LSADWEn=jj@h%;A*KDr3b~vZP+UCXqqwZsX)hX7QSeX0_8;2Kwv91T}w5FGQrKUKEUAus$gZcUv9D*e)PCh_u5r!ox zVf(UQ8sp$gQI$f!2TJC+Liwm#n>e}-sPbzN@rVO|3CVwz{+B>Xu zF{klVlQNBqU02>~6@^J{+AKUNp?zU~6+SP00PTD3mI~{sDGjlSO^t-jJ`f0yV4xK7U5p&>IPh--Z4RuLK*f)PX zsmMe6viO<2sH8l|et+eySlVQ$7K3@4Smo$@ zpA64mV+f+FBocOpso4ToLDkcWp51&+aI<-N!Uvca-P6`v?7b<^@oJ1;d^K}^rs%7+ z0_!Cop0@VkbiC(9SIxWpgv0Z%w4Tf0G5C3x=8)14mXMsV!#AOhIO9Wvp^1q70F_Ah zl=#liVOdh=;_cMPAb$gGY01DO>WI$;V=8*N&-`IOyZlqwYs<4f|kUOjId0m|ZKuMj9bYWvBG1R;Cp$Pgq3Rs)2zc8~f zKfbp^SY^8R&&wlf2THecM3ob)?A_N$h)&;LO$`YOGDR@b94{wkWpS(3DGU=`G{=m# z<`i;)lm?@=HyEb^qf-|Iwv~rj)4AuNo6b@*E!*kG$BV<|)&{4-7r*G1&^>Ilt=Oos z{4{b`(Y1VaW^G>b35eslUTP=L?~mC!0#5XvP2>BFYi?uR%;s}tLLC2-rdUVBke06& zEiF=NUAFDOiao zILBvH)paAQ{bl9?!J%X1x{)(C2$?Ev`vK6VnDyQ!b!k&&jYSJx?z18zC(el%&Hd1+ z$aN+G6^c~MS3CCZJSMizw8{4lbIHHSAue)<~S zK2_k$z`5TCHjtQuf;J>z+5w5(Z%Y(pP))e*IlNT2g3<5*Zj zVntO?i2I|fi2~?^Gg!-R+nF@0S4!iHgwtjuT;Nehb6dLyx@Vv>q#pGE`eqSIk$|DK zfBMztY|`i=bYjcpvOphgOO?qr-HGq<5}qCj(VU&JHdjJ~)k&J36*mO{P$7M;Lh6?- zTWC2S_*tp`+HQ9XX@U42l-)}%IV%|twR&Sf?>k?Y3cZtF)nsARjC_Jur&{p|cWP{1 z87s7EwQ+{?67$6q1X_hxa~ee~ldY5_^d0rh4oTKuk@K(zxN%+xYHhE;*A{ 
zpYL*aMbN4n*l-%Teta!_|LSCjA!e*aKMJ7Y_?@fzG}kV?PQRV{B)Q|of`=!7K}2Oe zQTxKPhfvSNT$`Ek(sw6^%4$akEG=5-P^k|W$J?i zYs{wGn0DqL3ZK5m_h{yjdzdYg8;$EgdHT7Pwk!QPVRJXL0LiLC>=X?P$COKVfjI|| z*^s=naqk*5H#|$u*P1aiA)J=)d&RcVT2&}5XTbiKRkKeXE~_LeHnEODO9!r?>K)hO za$EJ~5LFDmW7$2l9hua*X^1#bA+XhQLy1DJrx4>6>bl9sBJ}O}8%LC0q*Kj$Z_YqL zL*MeN?+8jEe{4roA*b9kyRSdU8q${x0JeDaV@kFdaETr&t8~2(DEFK#w|uky z+=4y*!^wECDb!cZGL)mAA-w9)Hhei_#7_2SS-p@+?FS>hYvxI5y{;L0tKdg3E0||e zIf>aqyZ8KRG7uF^aL2C-$Ub;BYW}94o~g&Me87|~;f`a=f|0rtMuyT&X~1~dZy=npF=&P#kV(pzYTvV0s3f?>s*Qg?F+a2 z8Ex7#ir1lE#bgI`lBc;3C>q11^&bhIrs;KDnbSG4_2;eIQZJguSW7?=`JKLeU17Tg zq@UV;M>FA`&1PAB`X)37KBGB*=jXSFfeMFez~CMD*);pJc*p$BaVDQFBBPQCIIqn} zP2~OVV&eG8H@Jbyz15y)D9_u_uY7g`bs637+S}3x&FmJ?LYx$wKZ$LOaYNsQ((=N(? zsSX#>NEi<=M%;E*5kwF&MzNoj?tp}O1Pf_*SJ~AOI`>;7)|&GUlZPaF5p>a4C1__~H5d9>Xx;8P7AQI}e@OU2slvQP#&&6%&5Ys`qs6)nXzfYX zan-{yFgM#7B9LkrcQCU~D30f=*?Ln26^Ua2EV&$y8$O`5ZrRO6`0~(@65h~aJ6p5I zOwZw}#5}KlyY7PbTzWjYPhC%^Dff!VoAW%(77u>~9j0&SgS%@2>fW{ck=Q{IjJ18A z!Azlo#FSTBZDzqcpmQJl7_+(!L6lmge;+d%D`a)uqB-if{24rv5L3oWOpN+T7ZPxx z#r;!$wV4~TAa5wuM#(*28+~JByadqDe(bgD1u}QE&#~f7s-jcF_-;GPnc(796blFv zIFl{^P$Zx&Utd(pPk<}(42m;%`ctD}o(WJ!-gSR)jvP;bEpaHZE~j0!b#_DFn*|httL65#HZHZZ9lAJ7J;jy~(-=GT;#CjSGFbzvB5j3- zxw9pwE<**57y=`-{~}?(g~h5-&HecfS~5y1;*Jk?q(^^VzGKri+gNWI=mkNvs-tS| z<~|7S!6 zw>Noad>nD39G@fbPmL7Qamt34hnA0lY6EkZvI`^~w?1XD@pl!;eh3HOUiIuZJg3+A zG*Pf1BFl+%qINbP&;5ATYUPY=U_^RFv9)SQG;R`U)Q0ld+PeV27gHnFnlQzP=jgNz z{9};7VaDHBn`1hg19S)Y8(7#RWXHFtkS3~TC{SQ@`=Khaq6wikoA97Ak~4qTQwW^L zjq_N{n6cR6+tmo*Mxs=tICQu!7QTH2(2=i+!y+RIAo67b=usrAQRmM5+O96vJdpL+ z6S5DY61JzEFaOW7Bp%0mQ`e-WJ?kEv_3~kM`yeOLNJj$CO~iEgPK?FX^#bHofJplF zRX|IGWB$`SongUUm&{$dlv^E|B zskSv5_MuU|L)@Hkyve!##6yPb3JMBU?XyK+ zRniPAUDE)mCCzJ7LZF(X<8SN?uq`K0UW&1?zda0(di<#_C@_qPVy9q5$lyMrZp9rQEf*w~;bx)z?&`jmW7oi; zVDw*p{3GuOVE?qvW(S~38{ZYTMCckhI7H7hcIzu}{g}S;HVC$bpKy6@Q0i9HPOY8E z!QU5fpRoUNsF;(`RTNE-W6`cQ5B=z-OH^2gRj;Es=xo-g+B(l ztWz7YrRwG$@drBCqE>B$F2K`5&%~DDxwR;?-A9Sj(+Jo`EJCe_`)~^`@jvD8&p8<4 
z1a_(*!z{`rLgVauLFP0Y1jiryDsVG(<)@coBW*tPTV77yYaQom)`E&_ z)OzgfJ78_RVra^4$dNAanM5kiIxFXer`!K578gBA{dAy zA9xQAJ<7}Ze^w#Gx4@AoEG46E3)v!^ILb0OLBuD1h%nmx&bH+ zl+De}m6#6y?HS~kbxhtpI&u+1iR1!XTTvU_6@Xwt=OKz!7U}SM^~aCIhl{&)j~&!G z5z;%PiX)E5DIP-1LO1>l3j+90Q-3^a^)*f1;vD1o2*4c)AbR<1acg}^+1=f}uH%D0 zA~c%#8xbOhTNCBx>Z2%Y+f%efgD{^wSOZ+1kv&JZ4amxu-JYT(DfRGI5nAMGK*-@>PAZ4b_n=F!cotpdNo z0^k-ga|5#i*ih_uBKBLJwKxQATQvh8m#4c!DH<2@qTpPS@}8M(pjc}CPRhTTZ~ax9 zk4tAXlK%OT23tax5(MX)wDajADP5QV_Z-s{i}q`7LZE)#!C4l)i|c&^8>C-9 zLB1Lt&*b^h2r**(xDuZ>Z7dw#941=>Q##`f{!jzFp)EiF_j)EUS>^iTBRhPMDo?L` z$0Vn&@pMS9uj-*##5i!&8UpM6Fc2im740TW81(zBk^sRBD7rL}UgVs9p(NbQ0(hDJ zullr#Sewnv5dGS)w{No~7|G(mpTM&Q@!vptQMa7*NV9oB-yI*hd|+%CX>9L7EvhXOteKsA^v-^A{<FLN zzzi!&_!Tk!bJmX1iS|}YwYuevjf}!lo_ghA(tHcAl|&vaPk6z66u^z{$#b{BIZE?Y za3rG@V7YY*&k2_Wd$s7%JfeBmZdv-OxAo5<_dYe4Ly$+~mNnghiA>Tg>=1AOEs**y}j>Hh`~U3JZ-pcS(_3ui#x3B+ZCuSRciP zJfhwIM#z(1QbSlq@iimz6dyd$zotNjL0|aRY_#syeBx#NuxL*t)dWQo)aeHvB9c9l zh6aDU8S*+x56KuEod~+Q{<2Oms-v38CPN>aD-p7S%owbZ?4%SYCfs_70QH_V5ztrh z0jhzcME1vRWD}lIw^|~%BhmMC8=-=q_X%{$@d+F!Q&ur~&{>{jNJFc=Y7#BAC1d zwD8yuND=x6-W`@>&hc&J;rn%q;JFMd@FRZX%UEC1fmw*cT0l0!1vKwoRl%Qg3`s^Q zirVpCp|L%Kz+D}5zG-ej26)__3e2SgH}nv+O0``SaoeZG+kdEJlItwocUO4|G6ux?CkCMSFR7l2COveNA>oQ?{V-C9zJe^@E8;lgtDoQU9b)yJWT#m z1#LP$`S5=OO?#`J!l_oEI2;Al#1%+z8(hB^6&WdGgOcgxQF~j;hJP`Lfwl{-XU^n& zX{&vk_2;(jeF`~;krh9PTk>_M@PrfneD-5>&f)tH5kY-L1k`DGuI_*Fg?|_R{jbXG zg-Pny$3NgzvdNqKL>Qh=(Si@N`h1BQ|9v7Ue66YGlvrDl zD{nO1R7`LgFti^q>mhWrtsyA0QIQnM-oJoEqYub3ND?9(75B|YRbqrVm|PRQ z{;qfam;;)b53fMaC>m{NRK?OIk32sgy}PrGE6A^Hhh)h1FWR5qf=`nyA=!(Kc#c!c zLiq^hdWiV%{#TwXNCnyd-fsI3-~x){yZc|Ltn$#+bC+!lXY&UOF2D}3Qt2TWf&O1w@t47xd0xZ^8d3AUye4^orNputG9

-o*!I4f3;(if?wt zKz0UyV`BdyMfBgkoxrr=RRFhxF7wyn5sC&<@~?>TP4%D_YKU+N1nm0C5KHP`7l1jW zq87s#TgM$X;owL#Qa8Vd49(7f`R&xD81QorlHCAY^M8nF?3)6!FlGZged|vIPKY9N zKYa$|uXYEJZR_ExB>3<7kKhM!2DUV`KaFNU)?o1yz7t~gy0alJ^A&CFqg``gQT;>Ou0d&2jL{*ayC6 zCMu|MBHt*4LyGFIZ-M1zH9~*6g`Mkl>mzu z!~eSZ`^AC|q2zumMKFzn90O{sA77H}*o2LXUNZ*l=jqJW2^|5n~DM&P|S)>z4uvk@|9s633O(^W2<31t2p$lOg(y32~ zxr)gmLBg4NEfZf~5d0B_{iiQt9#D48#>RBqinVOQ?gHtF))%Tw$w~<AG?sc@EC~ z?terJ_ixkh`zMX*J(?}rBx)zjC2Gq*=T-Hdp(npQy0wz!tSPaC*+`o2d#t;WL2xlT1@4;cF<7b~D!8|=Z+J6b= z{TiwOc2%@;zy`?;Hqc@~@LIDF(D>12$%0=2-G35@kqUHVNI+jZlYRD*9c4qd;0Z3| z*e~jsZ2eqF5|^@Wd)ey!KWT^knt{FbmE5q2C8H-mZXOAFkL>TAqX?}rL|l}H$RQjd z`*`#}@7Vh!Vbc`!4Ti;!1Nv@pc)$oacu$WaF#9(ik3wXdwVKT0-*30XDE!r~RQaL# zAH2N`F{WswZLR(#C z?u>rcVJPrBnmhu+eZ3qaVYhBU>hIsf!N2A|iLirgmlYreBeOknKbat?_g%Z-2K_(B zbmItk15!Cz)QIri!~n6ASl%u?^Z%7v0X~r0rL-(;D&G_G6ugi%VW@{P*?)){?t63e z7%(ze41@_I*9iWJGu~T2TZfMU4Ac#&xW6sT{{M!Y11%I?Xr>H5L`xGP78Drqthbb$ z#+sAfIimgk!x*}buWi>u(fix?Q>b!|yZqvyJ=K&DV%0-D`R84yyRKg!(#4AIKCSoX zIccZ>2O=x9P#Rg>8oW0O+_{F89ZJv6^4E~vug3bJfBx~lPt3s(kQ@6P`W}kNAog@3 zk)jC^MjH=sxx#eGUjE%Z|8tGh0#KeRx+!!3YY6@$wI>;QlbPPqJ^H_Ze|$ZxT) z7!{Fc%biv;GCF?sdO2LR6isp~3PUFUx>^M%&=>7Q!nzS&Of@tLZ~*ur%|y!Z8Sd18 zyVd^_Y1+5qFDt?6a}hNoMRr)7$iQo*J*i$CxM2SuE#?A(ksh0mhdi+jwV=Qq47D~d zZO`6Dtb{)g`2J4sv1lB? 
zsi}pc^_jWF52>v`wmM?KuV`+K+eGPud`GqUgkZ0 zs+2w9HW_wfTyW}|T2?F{ZU2#{{Bt$-J~f2ZlANF|ha^n;(j?%}Tgqr-_nn-z_ICL- z&gFmA9LyolK+BChi`t9NlOkfIt*xDwfAQ#k?o~v_{#+R5Qao+j9=bwVsHyOGe>MwE zy%UZ3;GMw%)Y@=s)b31{cf=*}D3f_H@0At^m?-@t7VFb_(Y)K=>Jh80FlcgM^_3 z!8I}AKdy;I=^?)mwB4KrkV(z=dfmfz>Pjn)yyb#L+w9@a5SuylFV5x zg_1DZZd(CI!8Z*G*jImg1b9&|-ay#UAh;^v+x$n;_3siIB8X%t$)=lp?Xpo`6A)ll z)(P4(D85iUqG<+#;_FwZ%uf7eC;qae1w45+!yUD(;w24AB01Vh%%Zi5*rSN{A_1OZ zNoPs`;t+)k;U7Yte+~q}&v-Igrc$S-I@(@2R5@+3nH+XSK42*}m7)bsJFkM33Ru12 zN&iA01qvw=&xnv{WR=%8BHxfDB$i)68>=Nl{2?iS?scDLE!N)KCsg}p@B&Z4mB?by z)hfFDOJlFsm9LJIkGg4*7souOP~$K+|F)qI3I~hu8&<9OucrcurXm^IF#zR$*<4Up zAJ|ND+qVFK|xFXp(9O0x$g3q6m8S)E6Hb5eV-gl=lW|ttqm`11Chv&J0BOfY}VpC z-D^}o#yk#*6%3=o!y!ZiH4jA;uuFg5DDw`D5(hF%AAy7u2e7{(_dS5e{68P5Ref6< zseK+zFvBk~U)XLzD@qhxGU_`5zBq7FbIG&`iOeRWL~9t#_U8ASQ#n^<0Tvzu25T&^{QcGkGol z)}41vR?FYq7+EKHTn`>}Y>kcD_7tUeLq&Df8jvQ50!^D@Q=pf)|q~7yu^U@t$r^QwKR77pMTBo6W^5TGU0TgpIcuI_f02IgWFIit_ng4u=zUsTw#`vj1 zhvAEJ)vPZ+7~TGcKPTK>1G{4kr|{AL#2f z&gIcB$sePd3y2j9&kBL5)7#JH?xJFohPF7wTy9XhvC_2LN&RpXg)PeM( zBJg%3{THvVFo#TU<>9G_&j^8DWCC6x!Ps|PdrO*_LU?(^3#vJr0r0{SXjmw^Mc(yY z7P5gu<9`7?SWm-iaHAvF*#0P=kl(xYNhya5w83edLci7Nf=9)j4c zpH^$gYn&vKvGWdQOO?e6IvJd+DRL(#!iqyrG|rB`e0`Fr?Crx~_P0i#&9H0!G6#=k zd}n*kM-2bH!o-m9YXZs>#V2>)shPCo1alqMGVNINS3=Nk- zwBU!Y9F(3r0sTkfMk`knmrE}(-RPOo77vMkf_J~qKrQ7E_8IKQa7d|;GiuXQ#G%G^ z-R^*V@#R(zjQY**Mn2ms`O)e3m2!@>yt2pTw0A800Q9&U( zO6JHhan~#)w??AHxUzSY58^EwI`kA--8cH^ORIMxgbX9|P)j@3I_2w`MeJW3z*F$^ z?bwMDbIF*1;;KY>3O7hGTLE1FdDlf5bBfmdi|?|VO%-#oQd4`o3Fk7q)!-|y1NA^{ z6j*W%*;`^|bm&d$BLqiETSw9lBN}7_c`PElp@aN{Xl6IOc;juo#Jt(;se@k}qt%us zqectRjd%AXdL$MNDxaoRU`C`^&O-2ep_!-aGrN)8wW;q&%Jx>{dvTVwJ@w|!+*)K+czLc@$H@Kk zVPOpxWN(;p+`k*Sv}?U4P1)NRc}g`F!Shav8|0tM1WqL8HG0#-xsl@dk#NM(DJTqu zy_*t4@SZ+KNOCVU^NhS^zh#IZPu>WW+Hn|cb*A`reT^!iuLF7&1LgP|AgIT+VnH6z zK!9oDtLbCcWIKpetD~3DgS2}7SzAH$JiI)pwW=djj!TT%dU)iHic{wkh4+pqK8BrN zM^3&ukYcOgrN5!VBZPF9?uK&L0XJsKUzZ(5;Pe@@h7feq>5Mb%G_^7ydLgE$4Q1-1 
zq-^!=Y7Bdt=&|oB!2zhPcjt_|enM>>FaBzIJNMN>9zd%W6F!tODu#1HwDST7A}ny2 zWtgL*Jo{Zb=Ld-*ba%~5G6 zr*gK@;V~5f_8t9L6CJ2n4;+mVThSuMSo!RpZ4H;Xuce(zSdO!dT@_z+ImKnxF{HiC z0)V#eb9%YzlAw->&n>9B9@D2E=bCQ5(C6%&Sw z+yG^G^Nl>bKGuB3sKN+5zrLv;uI95WGIs0nqD2hYSCS#T+&t4?>DF*`*bg-Y>CwW& zvJ+uG*P(H#v4(OP(LNyjA$QSGGmCpp#IEnyr`L*tT6gMCMBJ+vRJ;wLmVR%n6q8mJNv;61q6x5NWQ1GpVvUCkpb|Miu z`(e~#o#EO94W@3@TA%Olj}*h$^;ZBq!5CtV-3R|=jI&MPgt^Q@nBjHZmmdnI@JJu* zDV7xJTD}o$npdAvaDuqunRR&cJ&ng`#SxKWcSivl5bwIk7c^lQF~)O%@PStR9mlD= zB8N|uCYGfi!tM~Ua-w-t1Z#IiIdSp%>+c9VT6mT*>DRsJMet-}$gKLgG5#j{_V&6$^hS0$A!)c4gFpi|{lBqEY>vL3xAMbx% zWSjQ603ntr!gBY6N_#7<)`JJPOoaB!QRoATaJ_Vc@h^Y8?|X>#L4|X_&tL4B^WI7n zoG_8)aT{+v;q2!A11ToEI8crRLC;e3&)KZX?IQOAS2bvNHGfbb&4&^k7_j z-qE5J8^LR64Tsut{v%Jvk>^JLz8LE#^5#;^En2g*#?r&}m##=3O+7hcffH!BH>4xc zjs8C5EY$_lUvkwLD2DLd-G{s~cJ9xl ziuR<1_%b>v;zvttWSh9ow}?wY43FLHaDqv+u;%+;m-^T0L6sFz3B@hLuRp?1lUviC z=#=2BLSzGeeHJgv&91GdywTq+2RESf-?yzXpf;p?&||owW%f%(&_sJle5_^|*r0N+ zRZ{olfERBhF4lU+pvtLf^u`yJe}BV4%2toECmlAMlHV%bhJ$U7Ugo+8N0tBO6*lag zz|ny*WR+r0sRVRYHq2z&ScHyAnLQXOM! zzO;N+;LXR(BUdS^etC5Q%*=BJ^mgUY=kc2-BCLcJzd3j9p9xmy{ILUx^N5Rf+SwMl z0RxL2c#hmRP`s{t^iH1|xA+(-@xO}>B~yc&Kq(`$We@FZJg%W%a(s~1 zxXS4Q#jRZE1GJh`yokJHxXch7s+O$NOav8z>mwbQB8|&k`BZe$^uGoTzknVn--w04 zgFE8K0JG)EN;YX7AK8awE31#28~$>}QshlW8^gzd+;QHK3HmA1baIUrBx(R4%xNY! 
zXGIc}>HFoGUeQS3SE!qN&Z_}yr0qpo!^s__6O=^8L zZcx~=^P?y(shnT9u=F)yr>*G5=aWS)2p5psfYeg6;24np*)s>f%Y@SB`ma*k%U7Hl zIbPf!3$$FUw(87@i3fp<_>Y7Y%`hiC=fz03j3>fvC~P6H?9 zJEb$drSz%SMy1iAWLe)b`!7a1_q9fiA1QI~c^@DWXoggvwN#xk1lXR0 z(7$+14@;+p$8tk&s;|7zYpR;psOs!axzmexwhN@^?i5~sPt2##a_u3b1r#dIqdx$H zTA({r0aq_0-VvaDaD_a)KdFp9VUt@qD^>`|nr!$Et?2{iEE;u1u7+NS`b>9}W3=M&3N(fKEQua50j0go@Mjm`j6oSV_VYS7}S9k){&eRclN$x`9?b-UJlO#d$I{K&tpI1C3u2gMDTGlt#V^{?3`p(+NR`I$4A9d>; z7P{ou^{$m(eA8NDlyM{*q^E;Bl$5UL$UJEhw`D+v%K9>O^GA9 za!opSe%4FTDIZX@h*4SqUdS4Ri$<%!X2F(}PG~-dUguq){fM$W`Oo>chbfOg9l`ioGWXvplo2j6k; zMTo8&&X@Ph11g}q9Iz4FrDr-8C-qo`EbS zVwfzAJ74s{T~IBo6h7urpb{&>Bi+!xrkwp@ zI>(-_{X0T!@mh8jVKa7zX36ITY$*sI`54rV3kzC)RJy$86LQ-V_>DZK4s4b#lsgk5 z7&b@`Hb>mS&lK40zbua^QdwEfDNn8R-nG{oiq)w_d(I)V+O@rV;~|gciO@kHyPO*ag?l5^J^LiaCBgd|V$WE4HEAT{d{7Y17F>T~ymd0kGiIDE zSod*0Xl2YgK()_o%WK9$uibu)t4?|WwYCW_tDQ{-`{`#e#qggbI#yn z66p&~tO8tus?61b!=UV<&n>sgo+@m>UH?C_&O4myH~#-0Dn*Hm1|emXk;o`BdmfT; zY#CX{sLW#~D=L&tj#bv-9D5#4q3rB)Y?6I&>KG@E@wvDOo`+mJ% z_w)I979&w&z;yM$E3JwfPd8eTCuDq_jS7VL+JC%bzvuN&0|kaG+Se=@EGGm6S6yZw z0NzcC@D;1>q|YE`7&wh54N$!WQ-3eFU@-x%!)?rlc>NOKzDMbs3;`d{<6?(zoMBmUN@MDmrt}bec%Vt>e)V=9=%=lS@M%H4`qsKIj3@fUzMiii2ECLKOdpKvW=uD<(n%) zfBYplpbWjf0Y-@|>U&*+=u|+2x#N7Q72EnPdr0?C!hmLw-sBIdhvZQ?eej9V&u>sU>NXUl+?4aKQ zK~nUweEL0U4McpmYzKqt;>whiH@aV1}aK~&8j*u0B5#iXN-<%kpSD}pJxLHng?cePjfCEQUJwE4{0HX9+oVXLM<%< z?36E&vmlzW^ZV~Ay10(v8aMMB=uP0K$*?34U98Oa(|0Fk4i}#pHj*bj^=`!Z!2EAF z5~`7<8}&zka22qQ3o>0wLu5%W?OXNx^Lek2kH~n;r=dN~6JFKq+r2ie^m%(}MYH`8 z_kL@SH5x(m=Zl^I`~Hv2OU2fcUguZS_1Xhw0DQoKFjl&$?KX0f1)?%gxda%7;?^Mt z)ft&&(8?puIKg8=IoN4%?FU0eZX`oH_t&Z&y~280f47TZ7!A2*pa^Bv>>1(8NvZty zJ@)wHpgzemXAmY8T-g$%klDw0lb<@6p*bllP=8^O>|`$Wd`g!-z2wb7J#ME_LOu_- zw?BKp&{JAFwMrioMc=IT@i_TRF6WytK1gK1IBY8>I9JgCt7&pY&flx$0VYG85(k(d z+R}uQ$~WOL+9FNE%n?(H_?g3<+C|`$mSj3(KkfsThztO&8D$P_Y9k)5H0SQyT(82= z9#2g2sI2LOweJ}PF{$kHQP}~O>Tkx1!}a5xRqB@rm1)=yl=A~?zR(io_NQotXbUrI z!xr5{P1h?SBY!@#HChMlCf&zO{II%dPM>c+RGtc=pvWL`-Q!Kvql;*;>Gv^f^GvJI 
z*4N7-n97bLr#%;pp#EXYOax1V%gchOx|WPPv-v5j6fG7n-tTqHTs*Vg1`;D?RnltG z1&L66JkIevGgkb*5;G@{d?%0nG2et1hVW)GoRh)`WQxwr4hWAau=YvsW5QDL{<@u?gUP=?6f;J zN=h0oQs}yaMJcRk!=+sa>^Z><=P?2BGegV4bJKU-f*~g5WXLDYe(C*Rt-T;o23F~Z zPCt1={}I`4iv5W?hj-#G7~2z@YLABbL=zcHC2FTAalLtVt`qxTFIoHboe|v$a6<}h z{{ZhS8Pmdhau0&V9t%+Qa6vrg+|YKV0|S}KXej}UmO?&}0|2QJ2N)sm!R-&*uJs!Y z1nNGuZFTD3d)w?ieZf9HnBIE7JWq3i0t1`)&76CsSd_S2UsT-X4aoav@RdEU2n*Vu z8$NO&#w8{;0E(i}#y|Zh%kgifS2Q~}c~I@?3tNY^do_-^t{0}ROY zXDsVei9%sLPWmnvMq|VyCOw|#$Je+;vvm;BEvRjqBUqvF0XAItj(-~PT{@jVn}-q2 zqJ}(BOU0|nse&RF(RrYddKFMoBm_Uh@$@s*^!NswYl@#xj-i!4$)!36D$yY#I>dF` z*t>hvh>v&;ehv1}`a~m%?!8pJ><~|j%dE-t^KlgM6_bNSf*&_p+SKHpT`l5{qA5jF zw#NH)6Xt_qUprUG8eE|B?h3$d+__{K_>TWiU4lh&Q@zl%b<#pQtMO!i1zRC>#$Nxr zJ1VXq{GI1_xpqQ=WE`O38r4NPJOvh!&|=($2ll(0GZ z^V!>VpBqKAvM*NJ^D($Yci8aVW{MrTos_XQ+J!XeqobYeEOB`2aY}=Od~@i)w~ND| zC*qQ2Dr1F=KuFEX&-OT}D$~(&l%gQ}>bVxj+hJRIMektu86I)f*&e_}ZCcwdO zx+0XiLZg4vG{K9fL_O`KCC6(o^d<9H#QpR?_IdoHfLaUK`y1^YWa*`^B_T(L2)wWT zw^16%)ywjMev_UuLg?>Ca37dvw*6A|cXNm#>2;lZ8tEiHNW**F@nhrP>-YxkmWz2` z4|{W58kTg|)2hHDPkg>Pc{1fyd`3Wi;+cQu4IQ+h;E>q=ZW?cggW&2oSL>^W`}cWR zsQ>xxO&B$fxyDe2+aqtR(lMYnK^<{YFPdLcs7O-7KB2If$|9QfkZK;VC;fAyX;gT( zy6n{9JYAhU;aZpXN6^6waI*7X)u2kIz5bR|4|6K!jJ{^p8U*TtAqRAD^j#8TLVr%D zZwN@3#@y);Xaq?Vh<@%k8G2i7@%NqPJtFg3mq&c%(rCUde9GV3Xd*YKKEQ-nOuln0 zuhHxs-)Yd<7>NgGRqmx>t)9$(vyk}L!nCVh^QWEZO zI`nL4wo%7wh^dG7Q6~YySFWfOE&;uRe^E0-dM4{F#KSbn!Q8^jIU(P5R(n11vB+=B zb}c3I`ZU31tMMjNQ~F0{IBBv)LahU89W-w{vX3QbjroW&Q-UmJBObvXt$;n)c6MZ= z@6xs@v)b(|p3(dcD^ds@Z_U}^14Yh;jgRMOQKbI9q?Kl|A9!Xa6^ki;FAgkl`YvW2 z?HBN;-uh4;E|jM9Me8Bn`!OUzn&)2Qb<{@4Yio1Yi~DY&{D+*LJ)*Suy1iS9Q1XC1 zAGdZstsn`LSWvK`nw-LCsUe<7u4;i0{EaR@nHp;FB_f_OT!I3p=!v14oW)p)w<;EL zf-Fj=S6Md9o@oB6oKEd3&{SZ0HPxwD^P#x8YZK40N*^APtK7THHt%-%1~*}T-fRvsGrPJXYde9+!?r^NBg+wo2u9M zZt-$;e7%wb!Qx>R;%A(K{2?a(F>0wCM~D@xwoX8Km3F>{F4dfzWu|1NGWIGx;Q$aB ztINxU6&hfo`CJ=}gx0U~|m!7Tou@grJ>6LZ0f{H&5_IW z567T{A|zMXo=t2Le<+}W`Os%__!R0RI100k2_`J7_gE@!A8`vfOdZzzx&qEMAk=QY 
zNTY?wDStxgvkOqpdrY?$R0&d_oZYnP!D3YbV;fGngQRkej-YG0MbyUgJQ!8>H^YR=IT!7 zr=Y=PChG=}r&Jg`XzTY3{IsI$#LAOQE;!UMkR(07(k=AjeBBaaKRMQ|? zGZ!Gjjds5;5~~H4>iP6lBAB2*=oAsKglw7BcO#4VK94&Ke9Nns>yy8ym%1N%O>27% zZWou8;ds_|BP=YVb`Uv%0IM@&;)C_FlCV);N>&n^fSD(_FHAYADkt6e-5bfRS1{>C zDm4WEa3H5pMb#9frNK&znmPJ)QrSk3SZGj7hvcta7RW|v{tPR2Y`J0bm!+l^Zn(^WZKj-rOkDGX19}>|~ zfgn55MlCq`IuoJpzewmym&JWG#QP-Pl74|Y4fRX$t@zICt3$&r6?1Kc@x=??7x6I7 zUv&Sj$tS3d`$u#JbueBd5>opW@;BCElzy+LIWD$0J)OjT@EX(<_4we?igBIVE8?Gp zm6^2b^XYNW?>VVeJ2dk%vjBU{&vo>dbL%4Lzvy2%g&h1bdR1jlXQwb(G!c7* zkZQERXX3oxFTJGsYk@c4E@wMQ+>gg2*UMB*;;@wg?;pm=_TZuneXKKw2z}~uopZLS z!oI{%U^@pkvxw6uSpfj6YcW?2551ykPG5bRowC1>a4)Ti}@}^pX?M;ZB8K*CPY>a-RoDXpu66>oq{?)RK9AT2uMzn zWtblr_^4@6yZn&|Ej4pS#KFV6-D|B3B5`ON*7n?a4=S4b~>21MWjXeexMz)6Tu7P)ynG{7A9m3s%u@u^buK=Eeg_ z%d6wn$V<;nPmm9r>!-17k5k$UX$Y;ZadPSKX|?WT;mz^F!v?sUwfNJZSzofZd8~Py z^?Wsa5YB21^U0nFu0f-+Kj?@XE7&(+8xA_~zlYh{m?#I=QEs){heM9+VrEK(Dg(tP zDaPJCFE2zV&`pwdGPi_JO9?}X-TM6_{-rptSV90)UI&rN^tA+!<>RP|jTo}xIQzpL zf=ETxmLQ(t!loBB$wqx%kOuqmy)}fwClh9bmE=R;#Uo;7hBw&uUfFvbs;PI8aRX&-AEV-ItGju~v5%)6GUrx3~GdFP|_w-lCBk9@S9L}HO z30(jj#8;)=`0u?;d-BCiY!BkItAZ#YJ-t#huHr}qmux#g6}0-(*-!UDg??k4$|^$$ zU8qFsTx_6J(I!rRGCYmu;a08K#b9k$9ut9iM=A}H%!PO|WjD6%6y9WF>bCfw zees8A{Ca3oDeb+=Cp_VvSO6Wp_L|1n>(KQTZS7Cg)k`K(x^vZs!;)~hj=$`qWO-@NEZknhGTFm^3yYYR>ntbnI9CiWY*y_KW)LT{Lyr9K)r9!%@V|sZ2vqJJvBk^gt_glXBI!U%&MGLd=!?5d8nWO zv>9^-hWnX{+h&IBjV*o~_30~g_mPB;KI@qWrcHh6u1w#o*(FSKEgPGZ$*;8%YgI^FafVM{%F3!*ur+}U}qYHZ=N z3lZwUId`kdn^oW|W{QedNu4Sd>&0K-kXP|_MVjgj(`zr9+8$P``|Pb01oUYVl{*wD zfw&hKB@wkWRu1t$?oDB?R)Wo|l_z5iPpmNoczYO3HPl%A9zcZ(>e-A<@iyYc==tJZocb^PZ~3CxT1?j4lVMnWCQ$8)bd&9iqcG4`O#k3_k~h z**SB##qXh}4v)Q{oEe#&$+3{56Z@vrS*&llySC)C6W+KQo|ZG%l4;W=MURUlcm+jy zx_xxOy0rOA@r~lN5%=M?2dlA*337wXfmBIk4ns#Y7tani&1m=%P_GZYaE=J@`aa zPOgHk5>l3&4Z|`xQPYAhd_MVewkZ-bBg^a`9s=!kbK4c6=axEpQ}nNpG~Som zc(^(zMZ(YFG-l$?)V8rgEjUxZa3x_AhmH=2eR6uj z^y&7tw%OHG-UqiPO-jvv*hZV@pY)~oxwU1&m;c2J{$68vqyRidMEbB{i7?6$>{sjM(+x+YSnVNwn>svY 
zlUDw~enIiC+8ROh%W0p1>`_bKDOR>UUBq14nR)=IojPlwE%DO-)q>%H2apFOn! zWB~)#iJJShO8K%iQ`-i4JHHDEY$;O7I@7r;s3XUwuq_u=i=-d^2@M3SO9l*31gJ~d z{YQH3W+uLbluj+BqkN-JiNWJn_Vu2iE+z6YnkF!Q)HD;r-VqRyg;P3N8l=;l2jl%* zDpF8J(8(?0lK)b}M2S-gvCwQntjFRzD(b0xTmcAMgT)Pshjxrpoi>Clmn|fYBTrhw zuLv4?Rlq7!*Ns&eY2(Ep8V{FEWb|*~*9XV(>B~>hf|i?fO{Fk%Y?#Fl>)ba@vBiVu zqpgmSv=-Tk*?F2*ZK$6ZtqwFF!oh?0A1=cd8T+tS(~z<_P3_&OvGSAS>mK8^1hctv-IF9nF`)mS%7h^OHsAU&`5 zZZGr^8dlN+^@7Ou+KQ12d6_Ikn()+?5ZKo|ya!W}ZKGL^>azqIj?u4f{4G z+i5lOJ;S}B{JM$-Z8{i{pa&nPV@``_m?KViDhX@<;ZQg%uEPC_nL|_gMEjXl7U^X< zd6X3%B;Dp#Y*$EnMM&oEGoSK|y`LYT6CNxpd;!0K{&Ke0DJ6TLev=HV8Cx&1THx?8 zdSjj+*1$G9WagJX|FnZU^O2E3|2j~i9&j?3cMi8HJ;QiOnU!-$g7%buWcR#qdaP-jNko8;vV7x-{==u=x~DBJqT6R^%nLT& znHlI?t>FY8+J@4!^Iq4urXR#)A#?*Cu?wM$f7!6lEoXI~x6CJ|;z-`ItejFw_h9_2 zg^=LLnxKXKaZIqVr{fWhS8&#>hV-pc2{j0m$_c;k&Lk(T8qIuVvGVQDRvnN68q>i!isAPzJIDmRbJ~<2VBpx(Mjmsg_-@RPB zdmXVHkmuQ4?9hJNFt*Kyeer&=1@;u)?2R8DiDM{bO^6;F`(Z;_QU7qb z_Be(9LPSRf?T^P+aOzRT;aS`-7Wx-XFEn9r-m|P1n`?j57>mE4V2=V!4U;>-w7tNQ z=c@We!+H9nXjHoZ}rAQGA# zzqB$ch*aoyX_|h1;i>Qmjwm3*84uo4A~oW*0+nN@mnrE=0u!3ofckO=At9MqT09W+ zB@?4RQtbWK$(dz)6U?rPT%Nt7Q21ANPmOhO1hMZgXvhxTl~3Cx#4tTeNYd||*jK7( z{`pZRN6$7_!j~(vb}A;HmhFE+JjYZ6C1L(oJM zzocXM83Kf(?INB)EC%NXEKIpa7mG_1PlT>&%}z!^qkNh^s9Jd6tZa0;N&TO|obl#q zYE+rvg;xJHm;S!r|^?jOq{Rjz{!;1o?*7GO zI_z?K3%baPp5#$d~x3p>T>UFwf`o?RG)eNVQ znty=Zj%n`P%r7>Vl6~_D%SjoNo8h5>DMcQ#Q0rM4NzcslZb)V7<*8ZD_UhD5h(AR2 zCrN)}f{e5SK117SLs$7G7u#q)j;p?tzuljAkx@?zceDbR!EwCn>?(z74{Bw)9NlKK zj+>1=IX{pmqG<|kzI(H(23LXmy_tcsj^<%#vI$plm@(6D(t+er; z*Tb(*I>g$`dN94y#F??oCSZ#3OIl4&P~=BJnxki{TtEA?AXm%sf5q=uyd_yw!`FSY zyiQrudr`W*QgE|u5$qES-#l9Uoe63)ECy>@%J)Exgh^6}7iG4UKJ()f%< zDI6J4Gh@)R2(_ZDjRWkoa)drvMlsc?q+)BboX|%(wGzmo7a~`qw`0`DbI`da2PK{r z{gvY0;aN(q*=T$i>S8V+C`EJ%^dN<)dlAo{X8i${VaM4p6M(oCUL-5Iz@6u9bYP1L&jDb-TQ*rbCN9^4fmlzo6}PiIFtps4j>8X| zr3EMSivgL?;XEcN7?g%h+iI->_dsl(?6l3C35cfa1*ixJu<_V&)=2S`c;9oMOv<^w z(UruNdYaSkT8V(iUuroa2Z(CA6BE@0n{sdGK5?m;z{=W1B=#@*r!LtbE=v0#V{lJM 
zREawohTA`Me0fp={`%%*6)W{*%j70xX#td)$=3=&IslG2!n-+=-sfg`zIB^tx89D` z%eN-OXH6ZAO2j+-Rz9VC`$}Ca6dg*knt`Ex+@F7$f!!HV=qOgx0KqSD(yPnik^r+a zM%f2C4LD(C@9QLk!u}n;H}dS}H?}6eEQe=q{!z@~_MUFAZ73e9_gXDFfeDD+W0s^< z4ErhFzK>6#*#K)fT;+f=LpnLe>l{}V>6N;@F(uRgy)5t}YvhY)x=xfhUNhfn6`W@OGN@hi#_Jkez_|aI+(6KODu78p-GJQH)^v zQGT_dl8?bJLMSDIHo1nAmN{K$K7Av~*sy7}7YOs z6ae9=B8y=0?xL{;XOUDjVxdLTz|2_?Emcz@Z!r;wJy>J7!}SP zi^HbH?$G@t219AP(c;m~#vLP$e^?p96EDoNF^FJj~mi0fH(aHm@oQB zxKFlaM7t_PHEEsnQMHiLYmYq*pGLT+2W7d9-h&fw>bMk@%DGUIRwV>ZB4uB*x{r&9 z?RF21w@FR;83>iEBM|DvC%@ffxI6OXblKE%@rQ7rQ=?G1GW9Yd+1J$ zTULGJ$L(goWn3JGp&*RU)?$n`*{lNYa^OlF7r+5}Vd3IjV7NoIK?;O)Dw< z(@J0h**WzUY?D}_&7qf?{9sP%cb1d~LZN;(tCfZ^PsO748Iw!(?F#F7eFX5mP4}jg z$xNQNvHPTcI}gE>Y;@-h8$Ly$wdSFw0J5Kxfv-XYQ^-dB(&Wd_6k+p28(@!P(SRaE-Uk1`5&J}lR`7GCM9x-fVm)D147mL>=i7-l4DCoMSiqUYV~-Gg?TD6$pQ zxl_is(M(U5&QDF0a1Q0HofZaE^u%A5niurNZNB^eh~QwhtlcZUb^*%A4WhwpY)nr$ zGxf7HDD+f$DUOnB$}pdj^C?}RkYe*lPL}WYHJmzX(IMYD;M|IOjrSbi?i#6(PRCqw z*t=a~w=fPMl~ZxNzk^kTarV(<2 zx2VmX{&OU4F&+9Au}cS%TQ1vUkk+C@ImOV}*Aj7deIgg5ci&S7s#Us*ZLwB3m}I zm1asp^hVkrqQ{A>O})=_4s1P#>mDKOra3hI$7k&R`dn3rPcYQ!w=MZm@FPL&$&sPH zdvd{8?Ci_4)G&-0(KJHBx2bb-Z)=+Foxc_T+0r$C#9o97o&sHhRUE=Nwr2JhrL6Aq zd_&BuOgzjLRBDi|f=q|wTyN@t=Wa%{~>etmMwsvK{rZ47O`c3;3>SwhnB#IW(Q(;H=I|9ZRGN1pC|5d9e`#W5rzC>OxOJ0ss%!(e$ zat;zXB;k5Wl=kf$m{A)Nr4Y#@Px*b8IVAvfCp;K7xD1h+nvV|K`(6@|5X>(y7%9lp zW&UVDNvoGRtOjgLRuD3ti#n76 znSzA{A(U;A-Hk7n?P@dh67STQ|8kobX#5Ev^Gvc&y|F{#G)F|0ZEbC#g%*~~FRY>x zUzC=jPClJd{%Sb0E8a;4p%vY#Nx#L!b3Sk1!;#M%@$kxpxC0jM!)N!c9(J%p;)g47 zLFaE}Wb(}UuOjJfTCS1?MoFpss8 zSPq#D<(T~Vk?4Ju7z-y;nx@3>Q4T3YR$&YY$Y!tB9S*Wr!rzXKU>Du`)#w_+H?_HZ+u$k%ZU|2rATxu$ZoLwsU=28I6q@wT~2GqPaA6 zPM_IiQWhg3L`9$6MbKRO9mXKP9I-y4!YY?nwK4TxDmgWxTmro=-R-}T`LuJ6a|`ei*$IJq zw${SlhpC9r-ZysAZ{$gjCKEl5nl4#>@h>W(Cz0?k>QqkAJ7{Oew`W@KKRmJBq_jeZ z6)tcs)ZVc^ zHlT6!LsEog*j{ta6ONWMb>! 
ztTfN2t7sgN&#ER4c$^c4*g(3kEsJz{Vf&wzwF?MX=L~IA)b$Z8Kh&@`UJ6#tf%OAq zHas(QeX!;@CHyBJNZibzefO8RrY}Ga)9QPRn|>)ZAe*p%Q6bO~AKIPvsCa2!AI!g+ zYBp3pXb+42g%b~b#EESPA=DQv=&gwwC(=Zh8dX~#tU}dhl(#X-E0p@T{5i`V2kzHNpQF4uU>$Y11MWhAEM&2)C=DvutlBZE(aqiqzY z_JMA{igLtr+}&~cOKw)esjFPDL!t2WY@UCDjqPr1~8 z<}p!fwmZYtsRc>a^CN#DDXl~Py`#_dUmP;tMGZtfl*I^hnL{`wdUJy()Im_D1i zm~Y`R)GK%q1Rr`QV91Btl_#z|Pn`8n{%G)#20vf@6H9c8*T0@_o$3Zc#H6k*Sqnl> zl+p3};Y^jOM_iiv${6>V-tUYR1YJ|0{mPrI2!m9`7ZwxrUw%4FLY>GArthF&{< z%*y1j*>95B(C4~oK?wN_Ijuyp^R=#00jz(ktDeOebLM#IRH;@j%^eV{hqcIC&Z{O;cVeQuMpMrTXl zi$zPAP%&is8`Cayg&Os>#4fk`nRls0@Yq5q?KvB2)Xu@N=(zB#I-D8eX1an|HG6;A zq3PnbP^a2w?E;k2(trdy;G^ur_3A=Nr;67zo(4Iitnc{N$Z6rykk1KAZUPDFRZ0Vq zL4CT_9?i7``P(Ay@PqCX*34%632wtQstgZD9|weVe6(z^7JM0=g*J~2cZaX8e%L%Z zd>un~Vdva%_&wUs(_*(~N+Ci&1}w>@+pE(8`az({biZ5Sk0w5+J6-CS9Gp<+pbCUq zz}$J|GJhGYw2@*@vPxjunGVProp1M$`(Tt6tX+>S*wO($^jxy}t*Huc`Q@>~rA)*a z%EYz1zdoCAO@O7bw;k|4;zOvDns1}D8M*PFz6AVJ9|5hD?zOZIcB3>wug)QSdJtNo zvUu-jU$SX2I^8)lHlj4Hh^u0Kd_D8~Mh?NwnNiSSv(KW>>&SKq{|1KbM0Sb@>~7j` z-Y>bGlu&f9D2PSUl4^2_mcG@!2=}_~^%7g6wINH$FFRB+=CuD=rnrV{yvHk`GpXLL;=->x|FlvJkaT$Te2lQ(<` zuM)!Va9B%S{Un%5X@wSJD=awml2X?ETp9`YGuKiDJVZ;44#ub1>YB!Jf)01-tZKJw zD#=ut1~&NtQA)EeQ;uX#rerArNVh@9U2EI4mPv=X0d^yYqK|??=#3ivNHVf_Yb>{oBhfRUx^SSo7w5^De|Eh@AvU_{h7GQ zAo1V7d+zp$gP7i;CFeC#|GZc!Ysuyzt~Fi7k` zkWs>&Ha-0y3xh1AEz`S@oyy9$7~82c%mN08`Mp>_V3zUk>(f~sAU=+;!^=HT!XmI6 zyTS~PNw7RE`$v*+&<;#86`m)|HMF&;KbpMypR*ZQNKf%)D=Nb_0y6TwQv@tKyhHAc z2d9vbA*K0`++ijYEi}ygoO(8tCkYq91l}!(iDzF4HO~8`4wA7IK3Rj4ABz)3bJ~3GLiwxJTACXR+%^#c6Y{i=x>K;M z%#qlvKNVgpwb!$I?W`sIil3Ja@?K*QC8#3pghdVJ@|7eTa_uTOZ*HM!=AWeZ=Th0z zzGQo#>o^#SNpsccw%^tesz%gwl2)c&PU(lp(?6YvHZZ<7{P8BOI@^Qf4rEQO%UIWb z;g`7mM*cwDUk%$EkkQ$|{?9=bUjm}*>FjPZ$R>S)P3V5B{Hk%r7td9_atNiLhh@uj zfyjuGQJy!IxsvTeXT4>Oa>CCXBsD;m2TF!BQfqfQl`XlOc`KI9-t`OU`KF<+xTU;$ zf=0Ts=XO2`7Wt;r_5KA5U)z&Mkr`61&m$YGAKsKTFOoTA+TE_Py~~bR4qxz1T7Ny2uK=kSMcv5dXbRowkPb5=Z8gqs%7} z)D$1e%=4~U!{m)9kKFJdc@I?68mB8re?l7VJ*Sof4Z>;VU<*d2hs8%5Vv7F81)d-F 
z*OO;U6@*!{X^{=LdG-tTf;kqy5JV<|H$q%OD9NA}xTZ`^462ygYLeMU7-)i*%Y^Z{ zCENdcw-`b6&!{xql+`divw5F{%{d{lyK0aar6Kt;_317iR&mB{REtvIfK`Lf)`!VO zI3#X7c9vX^Aq9WWFR>spFT7*oEfzc%NZy!I#&;zxB?`{FFpBq7(M-q6Y_QZsS26Ki zv-zyBoZ)=eTMsZwVEK)#LfGv?qX^@U>LhKJ0c%m6>yfCGN0pBh5TCvw-%r>nXQ;z@ zji1kWj!8u)#`xIZ;Cy0U6RxIus^9v42H#&!)NkJ=KJ00BvNa!`uko7T{UVyQoa%0B z)Qbl)ToDVbu=-0hX36tOx_CU=j=$W*Ru+gQ5-$ReB{V{^a1i&^TV{8?_@_G^XuyNiyUq*7bjz%3CI2fj^{h#p_!F?{D=br7>)R(i@|SJK5WHV%0bMB5&Hr4UsB4xs1rdwUcwD zHffMFAFhZZ;qYB_hS%yF5(eV)y#|~qm>1~$pt4Nh?*|QkFF3GSi1>W=>_VZ$J$A{- zsj0VQ#+w3E1pPv_kL+Sqh4lH)|D9xcbss=4=>beBc&HHy7UGc*59u%KJVUL9`N~Z!v`3f4~+e6u*3MLbfNpe z#Mjmm;M1+iQ4@cYn+(QrlKda~yB)L%+dwXM?o9u<{@pp>(VXXDL?P>DSz&qi{6g=> z+%`ksnZTyFV)N^I34C8^k?GFX-sgQ*-kbh0JF0ddyqUA9C9e#%n=kbOc_D}}jjPUi zt)IGZM`j_>pyj=0*n@#H%MH80u!Y$JcBI+d>adu9UnV2&r^EW{ylb}^EK6-T9F7gs z$7ZD@eEE673~^AMlW1kDpCf^U;pYQ&-nrYC2jt_eoN0k2+(%`F;lB!BiUL4CXzOt_q8djNc8pYaWLdnu00Os@>=tIxLor{I}pq>#asr# zxA_LscaZ;jZlg%iF{VyG7@zq~nU&^3tG>XKdWPwxgCVR4bmY?<5$<>ik z$KcIp)Ng${%jf`eqz?W_a(!yV0&sx17aqv zW>p6&dmIyta_HCLp^Y;>0x|XHN6P>ZTr}Ja~)L~BoIEBe&YPS&sEOM z^6X8juaiL@?2afAv#G0;Mr!#W?)<*~aF3m7Fi#$HxMzDjsttDsxrDyQH!&s&r?x(Q zFk02{k?Lbvb-4G&ipfIDq@rLgrmz~GQ~>_{eDJMPp6OOP=1Z_!^g)gyPt3tQUrMtq zaYADP+!ed=4dV%m=U#X3{{Bi&dv9ZHM*4>*Z%CnjXJ($qKj%z;-?>kEkAWHWjN3f0 z{oCg7*G9Q_d*{l^@GeGO5_xe*Sh!oUGVA%?H!Vmn^;BnKp>uMgIkFTt&y{|0+8Uv~p!? 
zbG(JLY~B~{6TBgmcXHfIh}p)tooCfdyWsYJV7lYi!Cq~4NKfG{gfwA6y6*1KbRG1$ zV3A$7RoV#W?3ES8*Vt|JH$l)f+iUGHA*vmvf0T7j|IgC@z?pf}8$b1?rFah#_LtPO zU!G|_>BJ_09Pdd1Bg1jnK$HI$u4BWr1a^NxM9yZ1fNpJ_*J?|@_xs`|@VVm5$<83y zewqSk2mahI4&JU0KI+rorJ#La6XxzNmd zvIA>%c6S~Q0%YzY?Z-;TJ~6Hk3~!Yy@9GD?tpW9(Lj6mrKu>{}-L>C3NxfiQKIi5+ z?yNw&Z|T`PGX5dymLUlM>XiGu&9nKGk3rsx1aw#e3!AvpW0vQ#95B#Z9|QcPP2Vcv za3~H;?Z@i7Y5()-{R^SVwm)Mc3Z1SW(QF%`Ium<2vBxHd`+vP@BN2b6QcZYZIhIj4 ziIFT7sc6ggtDbM_=iiHl2G(mhjXw1Sz|+f~paDPG#upvQ}lOB zEbL`Xxh-mrKr$&Wz>Dl|QuKQAw$mK5M9t2wgVplNLa!;5;n_(famQ_8QRj1aVDG#D?gjHz3EPC`+3LArJ}u}fN#J#c-55VKQ7Gw+(|%m)95LUjf^+vh?5TbmsmP#^9JIc^8i|czPnoz;7J@TJ!4JPZ)96%9Q-zW?&|swzkh7jRa=7 zuXTZ}8T$|Kn$~=9n>ZdJcAgV>*sj9NxmB=<1rsqe$Z zNu$0j+IB}WrpwIErQ;z0Iq>#BmdeBG(JfX z%J*?$v-Q*^i^nXXJ8ayeAyqKBy6 zlE=+*V^-|_Gxc8;XsZ5&{vBWTHMLpbDcV$;7vOOzU!PCV6Rn+m{@NJ;`o2Nl)h>e_ z(+(EQiNH0}MeH|rV(;qbz!O-nmzyF!{cN{(^Fifa{!oH=H0kr_$CRw1q>I~lnIxB2 zzapZxPz4{Ds&w}FQ!`~P{pq`q1ch6TjOkvF*C>?)q7eb4qmGoxZ1R4Zr}R@R)n%r+2_(xEtbibmZ-{czaLrOat z^CQ9?*YLTsMMDY@O3ay!VG-b|i4>o`uZOQ+P?#}{-ph(rUC-*gPr%*KbEn;q)}@17 z^$8r7q^&b+toB?|o|^JJY*q}5v?^RH$O;~Z^#SSs<73Qg0HH6kTDXLYKhx{^KARsH zAsG1MV=~vR3v@GEV}vx!jt&d_nYpTNKN}wJCuQTL@h51`y^8KFMGoJ#JVCNgSwhcv zS$1?^`PVh^KftSF#v)MvAzmSqQIxH-U?2BrdX6Nd*B#!dQ5IS=`Sa4^@I&DL`u>$3 zD2)M3#?Mmc(Xt%yG_U;b!-RtLvSYtgElnY=WMsF1u|}HgGZW;Sx9#k?vQY0f;b>*a zF6F+p`rhP|Kd4gd(B8=uhoZGV9ajfEr1MuN2 z9^+e_`?EvawWf>c**6gJwIAdGkanQN8aqu$*{cbn_z*4PYyB$odv)@@y?xHfC;g_+ z2x{pJNUbBE`AVZ2`{dV|2am;AweF~KBteC^XvqtA zjkfgwa$}C%F?bNr4aZcU(FvF7F#qll9y+03`F}TkZ@yn0H$p^1C=)ZqZywAor;1`V zkRO0RoE`pRRb+Wg8?RTMK2$AQF+Xl|4nWjGb&TEhrIX2@#dON%nmyitPJ3_I(>=m+d(>hSBf; zf8IWC9LB|-3W4#gZIVyH=yT_RO3!#3mZ>3rKDZIz-&Ydj%R5O4pGg0^fW5NsDY z(}nnlzEQ@|cF|pn0{!BQ0UjkCRwfrpXri2HO@Q>zPhKeCKaRe~j`Q%NAnyzONJSxy zI>(qvz~jJEXb!&%4Wm>*s}s4<%noheCiA8XBe_rj%sq(2`js^R^(OMHh@PX9$nLu4J9@2OGm^H zk9HU2L9N1~g2BsE05&u0)6-vr&joJWfwGF$EcoaOX_DpL8C@Xh2`%@GD5I+Aguqmp2o(Jwgaa%@LlOdMwBQG)%tjOE9S_dX9FzLOMS 
zd@1#R1$k`t_GnR=1Sr$z$Axq^0qU3Glwy-#E2d9KoEAl^vEv@oO!;*Gw0QttJdsp1&-<7LSh}y;By~u#uq2g(6EWnQv4_8 z+SXl_0x}(|ZzRo(L2 z2_$3lBXXVt`EY35v1&{R43h)jPtKr{6Y=prr%Z3|Kv?MTycuWu0U8nFD{`#Vpecpy zraa=+fM75+0hkEZN@jjsuwaupXt?tt&Vn>a2YnzDp4WuR>M-v9?bdI|&yH%Qq}_Y! z_s?)VVIKR}%X}F%>)k%tLf`R(B0=}qs;6X!d2)Q_Fr^^6r>DspKd_%IYn+^W`=ug6 z9&d7AY=PVdvJ$iN2KERgJw2*e1t4JEIPGHM-H_#cJdb#`u{sHsv&9=H z>TGarqY7L%toWF+jShMZ05Vkp5PZ^vsvH2zbsd8Z3$fuo7f|f?oOOBoUHJbR3?xDx z$rcQ7{iiJ60<=ECecYRiRyhMdqU;3TDd%itIezo@X3v0^&i691C~@OL%sff9oGpyv zHj0xTbxgZvdyiZNo*{mo13Fm=(UNT;?VsWUc>L;f@XOc+QU0K&u#{PmhwNYersEm~ zs=!-WJP9tGt@L0_oPQmF7{|MUy<-Xks7ZwrxAGh?q36wlzsY2!zoN$g`@!?x1C9lq z4}NlRsXcl%c1H-hIr<%f#*QApt$0cJi6Qsd1L;@gM`Tb?3kXk+nE!bKWK;*u*S_GP z7~UI>;El;)0D*IsQP}+VEpu#ky?J+jPKR}L3B;foDWoChGe}>%%|H-y4ZK&al!fZb zuU&nDEF{fe{N->KsEmiV*@T&TQKnm^nI9RLmf_To|3!_xjCMqGYSNTzye*V6(G_VCs|K0?Hj6JyO}LW2M17SZal8)A6gZ0S!6ynE$d z-mx<-K@eJ{iR4KTaBMc+5T>0~Bw`aOa^=SN96^gygSwjp3_u9)_>bnN9xZ-l_*HGI z?L8SJJFf6euq;m0akLviJ~{_to1ge| zFo3BcQWpb~NRkE!=$&wI{};uoyjgNDvY`eZ-q{JH!|r0zsP$v2`|yulJV||Wkf&-8 zY*3B0(l$0cHPC6Og+r87gqS1c&RqDx5yt>NKe(*eRd<@!TJzPI2N&v8#COnhvpL!8<{VyA(>t!3&x`ze`jaqE6!-IB9ynMI;W(>pPl zIfJ7HcJG0!e8`SMhe@611_~ZaRJ!916y6jskGy9!VbI&fO(&swD`Ke^B?4}L{tr|$ z!z3q~isiF2grn~61G4I}63|ME0JyoZ1L%`@)dMY|<9}CNmt3+KCXFSLPoj@q-K!kn=cybX zS5WQDXO3^Th5;}VRwWc7ZCPZ=n^0gNfssDWx=q@UJR;Ov3^Z)BnWVMY&6OZtACxzk zT&0k6zvV<5?_^(9kv5io93O^_D@TE9ZmiUk{lftIi#+NUxB&#p`%}Z*1bWoD?D>r;10xTD39CHRlWKji=8~ zg#$o_2OuDAGH$>Ru_>Nt(`yhoCKN_0A)@T9Mksp9OmuZVy8`hEu1)!{1#Jzeyi5(b zn=FT35P);oFmOmM0%bi9bpZsX$)~Dp|F)q?Ar)U=(t(f$!+|`Xu=oyK@ipKRUJu{l z*dqlh5@e@d&Xm%Z$Z!LHFA*y<4_A#X0CtQp-;zIp5LyH`;Y1We_?58KGgm)+xt0ud zc=0`c{tQ4OCi``jc576`A{^X49;HhplZW~fJndBL$2io@jsv7p+0yDYw&*>W?tv1- z&u+5dt&;wU{_ggo49LBJjs}WvK!Eswg&dCyeY9F;hpjqoab?k;a?fO&N`vBO?#XsHoDKvA!n*Wp3|U$2v#t0z%tV zQlVBG3w3on&3qE2LEjGOm<<%qj3ZrVekC6HQMfaO3d0bH64-l>z7wY-?70*rPD-ti zBkrfFuKnC!TmA7Q72e1IWF6Tqkf?Q;C`oU1FF>d$z)xT+s4W%8;_HqQ5*m1tSeiRz z*!tI%Bx;*ksh~4>i34pBXdR6vS}?Qgb3ke4oxg4Co)aek26K4}M9anAZlZ-R+5W}_ 
zLUtllZo^4qbZ(cORkx(JQKd25>tzUR%kIY)@*XM`vQPC(6CF<*cByS4edF1VH($Gt zLzJ9gL!KHsVa4NmSQ_<=)gX5(_xZp4MkR9RaJ79YM`lqNTvYMF)_BgGRQZ@6etiPJ z@NG{3V(^1;&+rx>KR0q7VRbLbDj?HoO>20n{s4Qw#?k{|xc*Cl+EH|n5L?zrx?{ew zy@hV8%M^=?MV>a_qhigL=va||2(79WJufJLX%FiS5(5_eEa-eNsc3tsS&UABGSO4) zxPAT@-`?#ahGSX(gxXzUw@lMD3q6|GB0SR{#5Q9r?VtY-l`mwVpy=AGOnW7O%uXLi z+@y5`@Ux(t9B#m-Mm$M(mxEQ$MdjmMoh2z`}$MQ*aIkP_H(fZQ^ zYwgjgWKHptxOy={5e54nbQL5}HnaQm&3NZcwUf`V)${)I1m`u#?CX^e%9>?{9tbuU z0=b0NcTCKK9|*8LJ81novV>-$Rx!ZaNpx)g_0*UQ(1U6p0c!)75}O*`cIu24h9wp^+=LeeDSs^$)cdQ z=98Kk>~nxNhDtI5AEj-@>O5#D?g}e(EDCqfJSQ*s^{z}QHvM_PUMJ~+9EJ}*>jZB$ z%dx%rIJuGKD9Pra=Ct8ayTbnKq@2uR;&Q>KM*o-m=Qt!zdCMDD`9C{kecbN+HE-{G zebUdej~N@h^?hvJ$L@X4$|?vcr*n_3k&^cETfx2z`{;oWjd!GzMNQVt&G&cfyIYEf zj)!a8Pb|E5vFutoa$?b;IU&(Da{QBJYh2Am+XGw;p*e0w;TTq1Vkz8P_kAXph+%M2 zY6Q}Qh#2;fjatTTzJXUni&?@}(!CE|PgHks_iH#Qd4Pvrwv3(7aUD7tUXi8#1W|%t z!&8f=-7ht(CS%(K7E-o{+#0*Mg{;*3s(@RzTKl2JX0EuG{^|CE9BwD1aPs+C`+;J76Y|xq1 z@}Y+Wb79s4z2CBm#@mW#uTZ78l)dRoIbE<|HPNiUHOTg8UoWCZpRLTT^k}eL7Qia7 zFe<{~Ih=wThj+(9Mm>kszR!DC!>6OIIX~*=VvN!^(|uZyGTD=sHs3LpENUb=b)Oke z9W?MyrQW8Ppt+eViZRve?u%3M)0f07E`Peo#+V2v$LPe|Wh8w1_{ra8`qwarYzAVb zD6Kzos}rM8bSOv}Gfi9y9HcUoj1Y09W2`jtL)%elH z5o2a5uEN;ZneX(_w98qadQ=_h-t>kKzkttIL{MajDOR%ck!rCQD!&rws@0#b64^-g0D#2*eQ5 zQb&q@MQ=IhI)M1z@rJ%)Uni;8hVvv>hUa}0{IvqX9wVbXk*kl+rwRYP(X4j<&q|X| z+TkE0({~CqunDk<6Bk^ic4dYL{pV=>;o&eb$NZuz>5)6kEY2n^jh8K2t{iHaJG-S9 z8*};0^!uimK6@Y>zpmFP6Alu6*gKpKNM1{ZBdV&Xv$C#Qrd5TGonWoV1L)MA=Z4+d zMf>cD(o8!6C7p5;#f1Db$t+G#`d@c7>jm+-Ki(^Up9V5i4 zP|e?7%BjWN?Q|d`waHR!u_oL(K|`4Hh+i8?Gt;iIxO-YkA9DuIw(22yHPJOVRXAVy?xUt2q$KL%CYS;tXbPK> zc~kIrP|bPvcT@a%RTsQ51)Mf{i(HxJiAc0$!7B5#P&@&0#qi~^D1rVV4dCz@tIGTI z8VwC*RIiXe=g>QACqkb*^e#93wcff(LOR*-H@=vZJMuuPWNg^fp6osa%r-kU8n-SX zrWii^r{8z3!Ao(h>wa8gKxkLYkU61=C^m*C#y%E$QD=N_s#Abi3RUz8I5a|TYQY*U z-9o?U>01=M7rPL!zT}fo(M!L_XbJPwb@R50XEO$+_aFc{PgER1r^Ve>zf3^JEhmuQ z7mn>A4^!GK|bekN$?IudXHDKxYnluspRrO^-&YG6*%zrXT{ 
zrMr5^BQ%QJmx5sUbh6TEdZ38G%6hkyg&JysJG0SMQ!6fR!>&J=-1@+kHvEAW`cBINWr?I@g5D5;&kHCG`0atTKp&p1#9Xw>n=&vNLP=+e zKo-4;H?DgRW7yESXcT$K!ifIVS?mm{u8Y#Ht*PTj6kE{$na@5*YEw2}VYWx{JXH{| zuH4y?<0vOmfheBc{cC%Q6d?XW<04FK(fX5=)P~VEK`oF_$_6-89M^S3EPsx;NGt9r zlbj|3hD|%fJ2pPwTp<@`6Z5+4C(RM+LBC$v<`aa=e)$*~(HC6HD&g-}HOca9*py%! zQVx%D7Q5kk@15bPZFBQ^>Gp&q=k5j{(Jos6Atih&M@MUz-^t$}UZ8LsYWCYD4($hf z^?v=NZoW;@KxY0K=iY->X`Zdt5E%pNt;U380K?_Bj!o8gIyD8t0W{wYBZ6-YMT0^l zwZ=)=il@pAhZqiG&w7G8pac&9;}C^>nh9)P;aqo&*@K=)<{&m)nU6>Vmrqs$vJ?w>sS;e_9hSGPc@{!SS-C>+RiorX%$R_EgP$(xS`1D&gXOO zc%H9rFd(nBd1|T#HdRRJt3&!EIl&n(HqEDSe9=`cGmzFgYKjE z?hBG!GUSAIIW^UJRAZ(M6jZl$IZ6$PVb8k7#R%XYFrvf*sg3?HiqB;EF8uphj=oB?ic8>htEhYxdZmBza!1kq`z(Xv5%{H+v&7;&1Qnk!d1yEa_!SboSU0i38nvc6>qe!b@wlU2jb* z&Z~nPvReigJw;F5iz-SvOI;51!k8?1XGfq9f-SHdFO*PSGo$0@BF=MonGp0lwoO?bufREDsW?}x#KfqUvY@;-u4;gAMuz~O0< z7wKfW=0D_+SXr$7g*@ax-4#k8|w&?4ItDYXF z^I^OP!bZNjsZ&Or_;nO(kPUWc0Rg?)KGM{m^WmC!l*#DehZl=*Peq{TeYq^A1c58i z3<}3_M@|R| z0uC0BM!dz_giU>tlif`1zR~kKR)=;bxWT$WOt&MZb39gtk1jS(xiOY4Tuj%zGekzN z2TbddS=9cW$u{MX9cDkwUa3?q(lGb5oE;dcZ3~@CX8vh$3j6K3-DCin%qho`jEG`t z;CH=#e~72WvtW}T;Pyip&1pt23As9rJ#EbJs%P1N4CK~#ln>aDM7dJ7+! 
z@G!rRnAFk$ba?86Wv#Z#Q|wR|z}ztYmVvexGNa*^<46+qV7`I~){u$JvNx zSxtRW1^X8}neFrQAk3GZdQf`LNNIM6!ZcM90^YxmUc16c;Qm6oUFohzKQ>iCvN@FAkpIBv!O3slJ0XJwT`+$ zM-YN6z*POouF)bWkpL1Y=(0x~H%7H)v5Kv$Cb|hn@G-!gQdG$A%v%YX@82n0YNeCS z=MpT0MkrX&B1nC}2r~TCA^bC3qvB+9y4SKsq@H7Pn~3%u#rR=&IJkm}9T zcJEiDw34tAIoWd}?5e?Gurj6I_C3`TV4)Qdi+@KmLLiIW@TdKklzOKVo?;Hp-%Deb zkl5vBUVYdZBoCxr6vVG$QMY9A9(Bu0hd~L~YJc?{Fa{YYcDcM7RtbZp!dM-H`R(e5 zrxrp^(P{Z(jGui~AI8a2Z06Qg4Jmg`xjX+mo-&((CKga<^qA!l) zqY;L;$aS6h_8`WwMalO(ZlwGb5=#r36XVC;XT5x!>Q@RFWUc~OC3PWh#}IAW6Z9Rr z`0*I178E4IdnK7xMu2J}NL^q{B@M(#Ui4X5XAeS2_xqyX$gzaG4~K@PH5)-07CG^ykTf-kB z4ZImSe$ro(SGKm(@S(Q3CymkzDE_Ov4wxXB5#h6UU~+i`>L&k}wTqB{-H)%jWl5lP zux`k;@x!^jUl{b=`kv#G>W5f(KHP1fH|jyw-o48*I0v7ps|4D44U|lpQBAGM;(-g(?YHytA8$X1R)#J2rSi7A z%e(6X1SAl^M7+poiFm88JVg~vW~rD1$(=VCT*lQ}&G#Sq_=u#m&#R6TbF^ZQ2<;Sk zsieZS#il4?V7I5vb(JiCJfIL~Q(nS*$db`2Ks^9lH3Xst>Nb1n1oupl>hX|nj4e&x z8^b{b4=9%)H@l^yam$=$D{j>I$@x`+v{2?_N;WAw^X5Uciuexbs>Q8SCGy_&I z0n#B*v*}WC8JPVJ^0E`aE!oPfbUp{06^$kEwx;Q`U4t+=fm3S0APoWd)Q>f1hCX{zE^6*zJSqAZ-cFn(D5|Uz1zPyq}$#${Greb!(X=JL7OY0g-KaVo>T}=0pN9bsX5N z8%5tc2#b7R*~byo(Rg#sNNEqsB}VZ-_Uo9bLxuodPhC!RPu2`7CnaCHsD8XquKhsI zdg9#@iMP!GKNJAh(&4ziEeZ%$&X8UV;;zQ^_6Q6@*ee&v{bz|+-M|DqK|P+`Omooz zN6RJO3%F;6)$o7Do|lz=`8D9?h+g_1+jS5P;KV-%m2`qi(oOR;NUM?ei6v~r@@^^a zbv2~qXk_679j>Oezd>FKmix>6{Zg1h+a=(bntlFr4oEy{vW1Z?mG@)h6P{}EeihR?`9 z#iMj$hM*NCId4Kzo$GnuNbieyGza#Ty*465YRqn6UgNDS0B@+ z-6~kcsGiOWEx|klx$fk+oY&mKlw}=RtN$=+VC=&rD0MblYwLx&GIiO^@F2l8g8i{I+7d=n zN$(?#Bx-Bof9J1rX~+5WwuomL&9ng3F!%}*bjp^Vc6{WuJs+Bl_SF77^%B9_t=e2r zi|b#EfOaQKc%jG-|3H*~$j$amJ!FRWkKDjnxH#P5ls%OfFa(m0F<_j1Crw!)onU|g zRr2a(kB9v$G)%C1ABT;}8cL?2s0uQBgTyO~Q*E2^asF zv+fEy`&Xn_#^*4x-`hun8Ky@?+HJpzrQP?^Q_W~P0+O=gQL<7QByiSKvB!@}sz!=E zbjcpJ*ha%?;UPlJ*;Z_X@xBaCQQs z>X^{jMq$%@%PDk3YCXdTEL2g?yYon|DfT9~=MM8d3S7hAnFR5FNES5TtTkv*`5r59 zNE#kw7Tz}xN=3!lWS6NK6(d_S8*9<{*pa&$PvOI8B~!%e>-%Z8hDyh5B6Ezg zz?ZfWcU(^>C1$?+d=Dxw3Y5PPUxmypj*>~4*&grivM?W-Qe45WizCdU1JVHuL08KwKq z|JeX9@`TIc)^YD>3R0ud{E47eyyC>fFkUcyN%Y 
zL{m=nQ%7%tgweJ=lj^|+D2j|NyJF7K~yq|~Fbi1k-KMnK$Nuw>GF=LbjY{ZfIx{3-73 zv{d!txsI9>OwyfaI3Xv2G{_?%$K-c?_uDDF;U$d09|>pm!AK@0w0eL$vrD%hVB&iI z&V2cO!a4p2?X1+#*D`t~KA9I*Wj%^dlCVgwDVfzwE3piJ=}KBp&P)1Ax5=NVT3{v? zj$z04G7D_g8m%{rk(0^rKw6d;G3GE1oKM7mHjHDja_VYk^)k7%IAF$fO1c}e+Z#FV zBM}~&0^JIRjASoABVB4cOiXaPc_5(f?8dEb)E{+df(or z<0n*(;^Yn+mN$&%> z(8J|aFI`g~CaLEzyjTNjR+n*Q3Y=OfjD-94o?oX*$V)$vIh=~&jbnoJlKc&bIo^Kp z%-tgzkjm~y_`6Ku-gmX1zvlbZF)7$kpc~a^eQ6qm1u;^!8qJl~q}1_8%c<-JmrXy~ z;nc%7bTwI{7cna>>r;+e>|cGor204fPYrB%D=TbP;gG68N|Xw0#&?e#w`~K12XZb< zLfm#**LE80zH@7SPJvqT=}*t^UcLq0)R>^8?^8PMR6cCzeYb7cUab&O_XbJ%;BMTi z|CJSDL)=tc&qk)(d~A^)OTN*p=@lenNg@3PCM2lKPL^qF4)W(1DB$%P_;`=K^cJv8! zcM<+x;(9tOB0(FvLw8rtT>n`z zsd*kUOE3Q-9H$mglsrG#%fgamz@6|bd&j>dK*n@G)pVP zFk+RN=xDyOXx8nXr&qZ?bz~z#JjKy*V(xnNn#?N$KWWhr*19 zsRvP2)p~nphxp+}8-LLIg-nHfMxEN)c3R%nH#NSKTH(aJ_*uaR=%j0)Qi~f?vJskvZTDVO+#Ei^|tc-7xmi@nZEsTNKZ*e zn=vMSpH5s8PMfNmE5LNwGO*4z{Jc5b&@qZHfsF!VA>;mB4GM$yGA!ynjOK4-Xea2* zm2>3*u8;AVVSLx<5Tl~{Z!vF=Wc37^4c_p@v{0wWPUVr)6A9P^b8Fo=ryyHF#vU>C zX!E(tmeam^;+czE!y?aunX2FCXYgcFVw-%l8(oTUxZ&MLO&`LfGbBR0@pd$cRU_D#wdxYik!u zf1indH+Th&YavL^-5Mj3uU8559P<=SKJ6Lfq}yyJq~dE+pO<0U)@O+5=2J-)O$Pmr zHnkc32q^dpAI^2}u53}UtM8-izk75uJForw|9`mQ1|r=5(cA7By$;eGrE*Z>*ke)t z8GG(FcgWdlx8?bUii4 zZ^HjPx864tx1?*lUMms3nw1`+tX NDyw|?i?qSx{{xbW_yqs} literal 0 HcmV?d00001 From 174fc9a7cacd8fa8fc64c2e5aeb76193eb684c75 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 16 Feb 2022 11:32:38 +0800 Subject: [PATCH 110/120] [reconstruct] metric_v2; [bug fix] split batch,init logger [enhancement] generate V3_results.rst --- docs/mrt/V3_results.rst | 83 ++++++- python/mrt/V3/evaluate.py | 68 +++--- python/mrt/V3/execute.py | 2 + python/mrt/dataset.py | 1 - python/mrt/transformer.py | 6 +- tests/mrt/model_zoo/alexnet.yaml | 4 +- .../model_zoo/ssd_512_mobilenet1.0_voc.yaml | 2 +- .../mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml | 6 +- tests/mrt/test_V3.py | 114 ++++++--- tests/mrt/yolov5s/metric.py | 40 +-- tests/mrt/yolov5s/metric_v2.py | 228 
++++-------------- tests/mrt/yolov5s/test_yolov5s.py | 57 ++++- tests/mrt/yolov5s/utils.py | 4 +- tests/mrt/yolov5s/yolov5s-0040.yaml | 4 +- 14 files changed, 305 insertions(+), 314 deletions(-) diff --git a/docs/mrt/V3_results.rst b/docs/mrt/V3_results.rst index 3549ca70..88312145 100644 --- a/docs/mrt/V3_results.rst +++ b/docs/mrt/V3_results.rst @@ -5,7 +5,86 @@ MRT Quantization Results .. _mrt_quantization_results: -**alexnet**: -Iteration: 312 | evalfunc: top1=55.91% top5=78.75% | quantize: top1=51.54% top5=77.40% | Total Sample: 50080 +**alexnet**:Iteration: 312 | evalfunc: top1=55.90% top5=78.74% | quantize: top1=51.44% top5=77.23% | Total Sample: 50080 + + + +**vgg19**:Iteration: 781 | evalfunc: top1=74.13% top5=91.77% | quantize: top1=73.29% top5=91.52% | Total Sample: 50048 + + + +**densenet161**:Iteration: 312 | evalfunc: top1=77.62% top5=93.82% | quantize: top1=77.26% top5=93.67% | Total Sample: 50080 + + + +**mobilenetv2_1.0**:Iteration: 312 | evalfunc: top1=72.05% top5=90.58% | quantize: top1=69.79% top5=89.14% | Total Sample: 50080 + + + +**resnet18_v1**:Iteration: 312 | evalfunc: top1=70.96% top5=89.93% | quantize: top1=70.11% top5=89.61% | Total Sample: 50080 + + + +**resnet18_v1b_0.89**:Iteration: 312 | evalfunc: top1=67.20% top5=87.45% | quantize: top1=63.79% top5=85.62% | Total Sample: 50080 + + + +**resnet50_v1**:Iteration: 312 | evalfunc: top1=77.39% top5=93.59% | quantize: top1=76.45% top5=93.29% | Total Sample: 50080 + + + +**resnet50_v2**:Iteration: 312 | evalfunc: top1=77.15% top5=93.44% | quantize: top1=74.15% top5=91.74% | Total Sample: 50080 + + + +**squeezenet1.0**:Iteration: 312 | evalfunc: top1=57.19% top5=80.04% | quantize: top1=54.91% top5=78.64% | Total Sample: 50080 + + + +**mobilenet1_0**:Iteration: 312 | evalfunc: top1=70.77% top5=89.97% | quantize: top1=66.11% top5=87.35% | Total Sample: 50080 + + + +**shufflenet_v1**:Iteration: 312 | evalfunc: top1=63.48% top5=85.12% | quantize: top1=60.45% top5=82.95% | Total Sample: 50080 + 
+ + +**tf_inception_v3**:Iteration: 312 | evalfunc: top1=45.16% top5=67.93% | quantize: top1=49.62% top5=74.71% | Total Sample: 50080 + + + +**cifar_resnet20_v1**:Iteration: 62 | evalfunc: top1=92.88% top5=99.78% | quantize: top1=92.82% top5=99.75% | Total Sample: 10080 + + + +**mnist**:Iteration: 62 | evalfunc: top1=99.00% top5=100.00% | quantize: top1=98.96% top5=100.00% | Total Sample: 10080 + + + +**qd10_resnetv1_20**:Iteration: 1732 | evalfunc: top1=85.72% top5=98.71% | quantize: top1=85.73% top5=98.70% | Total Sample: 277280 + + + +**quickdraw**:Iteration: 1732 | evalfunc: top1=81.66% top5=98.22% | quantize: top1=81.57% top5=98.20% | Total Sample: 277280 + + + +**ssd**:Iteration: 76 | evalfunc: 80.30% | quantize: 80.05% | Total Sample: 4928 + + + +**ssd_512_mobilenet1.0_voc**:Iteration: 308 | evalfunc: 75.51% | quantize: 71.26% | Total Sample: 4944 + + +**yolo3_darknet53_voc**:Iteration: 102 | evalfunc: 81.51% | quantize: 81.51% | Total Sample: 4944 + + + +**yolo3_mobilenet1.0_voc**:Iteration: 76 | evalfunc: 76.03% | quantize: 71.56% | Total Sample: 4928 + + + +**trec**:Iteration: 3 | evalfunc: 98.19% | quantize: 97.99% | Total Sample: 640 + diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index 44ab76b8..349ae31a 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -33,6 +33,20 @@ MRT_CFG.EVALUATE.DEVICE_IDS = None MRT_CFG.EVALUATE.ITER_NUM = 10 +def forward(net, data, ctx, baxis, olen): + #TODO(ryt.dev) documentation + """ Multiple xpu run support. 
+ """ + data = gluon.utils.split_and_load( + data, ctx_list=ctx, batch_axis=baxis, even_split=False) + outs = [net(d) for d in data] + if olen == 1: + outs = nd.concatenate(outs) + else: + outs = [nd.concatenate([outs[i][j] \ + for i in range(len(outs))]) for j in range(olen)] + return outs + def get_evaluation_info(cm_cfg, pass_cfg, logger=None): model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME @@ -60,7 +74,6 @@ def get_evaluation_info(cm_cfg, pass_cfg, logger=None): # forward function for the orginal model model_prefix_fixed = model_prefix + ".fixed" omodel = Model.load(*load_fname(model_prefix_fixed)) - #TODO(ryt.dev) [bug fix] load revised model graph = omodel.to_graph(ctx=ctx) dataset_name = conf_map["dataset_name"] input_shape = conf_map["input_shape"] @@ -70,30 +83,26 @@ def get_evaluation_info(cm_cfg, pass_cfg, logger=None): baxis = get_batch_axis(input_shape) olen = len(omodel.symbol) - def forward(net, data, ctx): - """ Multiple xpu run support. - """ - data = gluon.utils.split_and_load( - data, ctx_list=ctx, batch_axis=baxis, even_split=False) - outs = [net(d) for d in data] - if olen == 1: - outs = nd.concatenate(outs) - else: - outs = [nd.concatenate([outs[i][j] \ - for i in range(len(outs))]) for j in range(olen)] - return outs + # def forward(net, data, ctx): + # """ Multiple xpu run support. 
+ # """ + # data = gluon.utils.split_and_load( + # data, ctx_list=ctx, batch_axis=baxis, even_split=False) + # outs = [net(d) for d in data] + # if olen == 1: + # outs = nd.concatenate(outs) + # else: + # outs = [nd.concatenate([outs[i][j] \ + # for i in range(len(outs))]) for j in range(olen)] + # return outs def evalfunc(data, label): - outs = forward(graph, data, ctx=ctx) + # outs = forward(graph, data, ctx=ctx) + outs = forward(graph, data, ctx, baxis, olen) acc = dataset.validate(metric, outs, label) return acc # forward function for the quantized model - # TODO(ryt.dev) [bug fix] remove split batch check - num_xpus = len(ctx) - if batch % num_xpus: - raise RuntimeError("Batch must be divisible by the number of xpus") - split_batch = batch // num_xpus if conf_map.get("split_keys", "") != "": sym_all_file, prm_all_file, ext_all_file = load_fname( model_prefix, suffix="all.quantize", with_ext=True) @@ -110,14 +119,13 @@ def evalfunc(data, label): oscales = mrt.get_output_scales() inputs_ext = mrt.get_inputs_ext() qmodel = mrt.current_model - rqmodel = reduce_graph(qmodel, { - 'data': set_batch(input_shape, split_batch)}) - qgraph = rqmodel.to_graph(ctx=ctx) + qgraph = qmodel.to_graph(ctx=ctx) qmetric = dataset.metrics() def quantize(data, label): data = sim.load_real_data(data, 'data', inputs_ext) - outs = forward(qgraph, data, ctx) + # outs = forward(qgraph, data, ctx) + outs = forward(qgraph, data, ctx, baxis, olen) outs = outs / oscales[0] if olen == 1 \ else [(t / oscales[i]) for i, t in enumerate(outs)] acc = dataset.validate(qmetric, outs, label) @@ -143,20 +151,6 @@ def evaluate(cm_cfg, pass_cfg, logger=None): else: logger.info("evaluatation stage skipped") -def forward(net, data, ctx, baxis, olen): - #TODO(ryt.dev) documentation - """ Multiple xpu run support. 
- """ - data = gluon.utils.split_and_load( - data, ctx_list=ctx, batch_axis=baxis, even_split=False) - outs = [net(d) for d in data] - if olen == 1: - outs = nd.concatenate(outs) - else: - outs = [nd.concatenate([outs[i][j] \ - for i in range(len(outs))]) for j in range(olen)] - return outs - def get_ctx_eval(ctx): #TODO(ryt.dev) documentation if isinstance(ctx, mx.Context): diff --git a/python/mrt/V3/execute.py b/python/mrt/V3/execute.py index b0a2d5b4..762c731a 100644 --- a/python/mrt/V3/execute.py +++ b/python/mrt/V3/execute.py @@ -12,6 +12,7 @@ from mrt.V3.quantize import quantize from mrt.V3.evaluate import evaluate from mrt.V3.mrt_compile import mrt_compile +from mrt.V3.utils import get_logger thismodule = sys.modules[__name__] @@ -68,6 +69,7 @@ def run(cfg, logger=None): Console logger. """ pass_name = cfg.COMMON.PASS_NAME + logger = get_logger(cfg.COMMON.VERBOSITY) if pass_name == "all": yaml_main(cfg, logger=logger) else: diff --git a/python/mrt/dataset.py b/python/mrt/dataset.py index 75006f6b..7f1dc8e2 100644 --- a/python/mrt/dataset.py +++ b/python/mrt/dataset.py @@ -611,7 +611,6 @@ def data_loader(): yield nd.transpose(nd.array(data)), nd.array(label) data, label = [], [] yield nd.transpose(nd.array(data)), nd.array(label) - raise RuntimeError("Data loader have been the end") self.data = data_loader() diff --git a/python/mrt/transformer.py b/python/mrt/transformer.py index 61c3a7c5..7173de28 100644 --- a/python/mrt/transformer.py +++ b/python/mrt/transformer.py @@ -109,6 +109,7 @@ def to_cvm(self, model_name, datadir="/data/stdout", device_ids=device_ids) def fix_original_model(self, model_dir, model_name): + # unify graph names and check graph params _sym, _prm = tpass.unify_name_json(self.symbol, self.params) self.symbol, self.params = tpass.remove_params_prefix(_sym, _prm) model_prefix = path.join(model_dir, model_name+".fixed") @@ -122,11 +123,6 @@ def init(model, input_shape=None): logger.info("Model initializing...") _sym, _prm = model.symbol, 
model.params - - # unify graph names and check graph params - # TODO(ryt.dev) [bug fix, reconstruct] write fixed model in conf_map, move to fix_orginal_model - # _sym, _prm = fix_original_model(sym, parmas) - tpass.name_duplicate_check(_sym, _prm) if isinstance(input_shape, dict): diff --git a/tests/mrt/model_zoo/alexnet.yaml b/tests/mrt/model_zoo/alexnet.yaml index 59ee4e13..a4d31ea9 100644 --- a/tests/mrt/model_zoo/alexnet.yaml +++ b/tests/mrt/model_zoo/alexnet.yaml @@ -8,12 +8,12 @@ CALIBRATE: LAMBD: 16 DATASET_NAME: imagenet DEVICE_TYPE: gpu - DEVICE_IDS: [2] + DEVICE_IDS: [0] QUANTIZE: INPUT_PRECISION: 8 OUTPUT_PRECISION: 8 DEVICE_TYPE: gpu - DEVICE_IDS: [2] + DEVICE_IDS: [0] EVALUATE: BATCH: 160 DEVICE_TYPE: gpu diff --git a/tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml b/tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml index 4676e3ad..a3ccfff9 100644 --- a/tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml +++ b/tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml @@ -42,7 +42,7 @@ QUANTIZE: ["ssd0_slice_axis43", "ssd0_normalizedboxcenterdecoder0_concat0"], ] EVALUATE: - BATCH: 64 + BATCH: 16 DEVICE_TYPE: gpu DEVICE_IDS: [0] ITER_NUM: 2000 diff --git a/tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml b/tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml index 33d8fa4f..87d51645 100644 --- a/tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml +++ b/tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml @@ -16,11 +16,11 @@ CALIBRATE: NUM_CALIB: 1 DATASET_NAME: voc DEVICE_TYPE: gpu - DEVICE_IDS: [2] + DEVICE_IDS: [0] QUANTIZE: OUTPUT_PRECISION: 30 DEVICE_TYPE: gpu - DEVICE_IDS: [2] + DEVICE_IDS: [0] THRESHOLDS: [ ["mrt_rewrite_mobilenet0_conv23_weight_0", 11], ["yolov30_yolooutputv30_tile0", 416], @@ -43,5 +43,5 @@ QUANTIZE: EVALUATE: BATCH: 64 DEVICE_TYPE: gpu - DEVICE_IDS: [2] + DEVICE_IDS: [0] ITER_NUM: 2000 diff --git a/tests/mrt/test_V3.py b/tests/mrt/test_V3.py index 02a66994..c66879e1 100644 --- a/tests/mrt/test_V3.py +++ b/tests/mrt/test_V3.py @@ -2,30 +2,28 @@ import 
logging import os from os import path -from io import StringIO import sys +import json from mrt.utils import log_init from mrt.V3.execute import run -from mrt.V3.utils import merge_cfg, override_cfg_args +from mrt.V3.utils import merge_cfg, override_cfg_args, get_logger from mrt.V3.evaluate import get_evaluation_info -# old_stdout = sys.stdout -# sys.stdout = StringIO() -# sys.stdout = old_stdout - log_init() yaml_files = set() results = {} base_dir = path.join(path.dirname(path.realpath(__file__)), "..", "..") def _multi_validate( - messages, base_func, data_iter, *comp_funcs, iter_num=10, batch_size=16): + messages, base_func, data_iter, *comp_funcs, + iter_num=10, logger=logging.getLogger(""), batch_size=16): log_str = "Iteration: {:3d} | " + base_func.__name__ + ": {} | " for func in comp_funcs: log_str += func.__name__ + ": {} | " log_str += "Total Sample: {:5d}" total = 0 + for i in range(iter_num): data, label = data_iter() base_acc = base_func(data, label) @@ -33,8 +31,39 @@ def _multi_validate( total += batch_size msg = log_str.format(i, base_acc, *comp_acc, total) + logger.info(msg) messages.append(msg) +def output_results(): + rfile_path = path.join(base_dir, "docs", "mrt", "V3_results.rst") + cur_results = {} + with open(rfile_path, "r") as f: + for line in f: + if not line.startswith("**") or ":" not in line: + continue + _, model_name, result = line.split("**") + result = result[1:] + cur_results[model_name] = result + for model_name, result in results.items(): + cur_results[model_name] = result + lines = [ + "", + "************************", + "MRT Quantization Results", + "************************", + "", + ".. 
_mrt_quantization_results:", + "", + ] + for model_name, result in cur_results.items(): + line = "**{0}**:{1}".format(model_name,result) + lines.append(line) + lines.append("") + lines.append("") + lines = [line+"\n" for line in lines] + with open(rfile_path, "w") as f: + f.writelines(lines) + def register_test_case(yaml_file_name): yaml_dir = path.join(base_dir, "tests", "mrt", "model_zoo") yaml_file_name_ext = "{}.yaml".format(yaml_file_name) @@ -46,32 +75,37 @@ def register_test_case(yaml_file_name): yaml_files.add(yaml_file) def test_func(self): - # test preparation, calibration, quantization, compilation base_cfg = merge_cfg(yaml_file) + + # test preparation, calibration, quantization argv = [ "--common.run_evaluate", "False", - "--common.run_compile", "True", - "--common.verbosity", "error", + "--common.run_compile", "False", ] cfg = override_cfg_args(base_cfg, argv) run(cfg) # test evaluation - argv = [ - "--common.run_evaluate", "True", - "--common.run_compile", "False", - "--common.verbosity", "error", - "--common.start_after", "quantize", - ] - cfg = override_cfg_args(base_cfg, argv) evalfunc, data_iter_func, quantfunc = get_evaluation_info( cfg.COMMON, cfg.EVALUATE) + logger = get_logger(cfg.COMMON.VERBOSITY) messages = [] with self.assertRaises(StopIteration): _multi_validate( messages, evalfunc, data_iter_func, quantfunc, - iter_num=cfg.EVALUATE.ITER_NUM, batch_size=cfg.EVALUATE.BATCH) + iter_num=cfg.EVALUATE.ITER_NUM, logger=logger, + batch_size=cfg.EVALUATE.BATCH) results[yaml_file_name] = messages[-1] + output_results() + + # test compilation + argv = [ + "--common.run_evaluate", "False", + "--common.run_compile", "True", + "--common.start_after", "quantize", + ] + cfg = override_cfg_args(base_cfg, argv) + run(cfg) def wrapper(cls): func_name = "test_case_{}".format(yaml_file_name) @@ -81,29 +115,29 @@ def wrapper(cls): return wrapper -@register_test_case("alexnet") +# @register_test_case("alexnet") +# @register_test_case("densenet161") +# 
@register_test_case("mobilenet1_0") +# @register_test_case("mobilenetv2_1.0") +# @register_test_case("resnet18_v1") +# @register_test_case("resnet18_v1b_0.89") +# @register_test_case("resnet50_v1") +# @register_test_case("resnet50_v2") +# @register_test_case("shufflenet_v1") +# @register_test_case("squeezenet1.0") +# @register_test_case("tf_inception_v3") +# @register_test_case("vgg19") +# @register_test_case("cifar_resnet20_v1") +# @register_test_case("mnist") +# @register_test_case("qd10_resnetv1_20") +# @register_test_case("quickdraw") +# @register_test_case("ssd") +# @register_test_case("ssd_512_mobilenet1.0_voc") +# @register_test_case("trec") +# @register_test_case("yolo3_darknet53_voc") +# @register_test_case("yolo3_mobilenet1.0_voc") class TestV3(unittest.TestCase): - def test_output_results(self): - lines = [ - "", - "************************", - "MRT Quantization Results", - "************************", - "", - ".. _mrt_quantization_results:", - "", - ] - for k, v in results.items(): - line = "**{}**:".format(k) - lines.append(line) - line = "{}".format(v) - lines.append(line) - lines.append("") - lines.append("") - lines = [line+"\n" for line in lines] - rfile_path = path.join(base_dir, "docs", "mrt", "V3_results.rst") - with open(rfile_path, "w") as f: - f.writelines(lines) + pass if __name__ == "__main__": unittest.main() diff --git a/tests/mrt/yolov5s/metric.py b/tests/mrt/yolov5s/metric.py index 72b9c75e..4697dcb6 100644 --- a/tests/mrt/yolov5s/metric.py +++ b/tests/mrt/yolov5s/metric.py @@ -8,7 +8,8 @@ from mrt import dataset as ds from utils import ( - non_max_suppression, scale_coords, xywh2xyxy, process_batch, ap_per_class) + non_max_suppression, scale_coords, xywh2xyxy, process_batch, ap_per_class, + concat_out) class Yolov5Metric: @@ -93,42 +94,7 @@ def update(self, labels, predict, input_shape): outs = [] for i in range(batch_size): x, y, z = [o.slice_axis(axis=0, begin=i, end=i+1) for o in predict] - out = [] - - bs, _, ny, nx, _ = x.shape - 
grid, anchor_grid = self._make_grid(nx, ny, 0, ctx=x.ctx) - tmp = x.sigmoid() - # xy - xy = (tmp[..., 0:2]*2-0.5+grid) * \ - self.stride[0].as_in_context(x.ctx) - # wh - wh = (tmp[..., 2:4]*2)**2 * anchor_grid - tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) - out.append(tmp.reshape(bs, -1, self.no)) - - bs, _, ny, nx, _ = y.shape - grid, anchor_grid = self._make_grid(nx, ny, 1, ctx=y.ctx) - tmp = y.sigmoid() - # xy - xy = (tmp[..., 0:2]*2-0.5+grid) * \ - self.stride[1].as_in_context(y.ctx) - # wh - wh = (tmp[..., 2:4]*2)*2 * anchor_grid - tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) - out.append(tmp.reshape(bs, -1, self.no)) - - bs, _, ny, nx, _ = z.shape - grid, anchor_grid = self._make_grid(nx, ny, 2, ctx=z.ctx) - tmp = z.sigmoid() - # xy - xy = (tmp[..., 0:2]*2-0.5+grid) * \ - self.stride[2].as_in_context(z.ctx) - # wh - wh = (tmp[..., 2:4]*2)**2 * anchor_grid - tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) - out.append(tmp.reshape(bs, -1, self.no)) - - out = nd.concat(*out, dim=1) + out = concat_out(x, y, z) outs.append(out) for i in range(batch_size): label = labels[i] diff --git a/tests/mrt/yolov5s/metric_v2.py b/tests/mrt/yolov5s/metric_v2.py index 993c1206..c81d5c23 100644 --- a/tests/mrt/yolov5s/metric_v2.py +++ b/tests/mrt/yolov5s/metric_v2.py @@ -8,174 +8,63 @@ from mrt import dataset as ds from utils import ( - non_max_suppression, scale_coords, xywh2xyxy, process_batch, ap_per_class, - Annotator, concat_out) + non_max_suppression, scale_coords, Annotator, concat_out, make_squre, Colors) class Yolov5MetricV2: - def __init__( - self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10), - nc=80, anchors=()): - - # metric parameters - self.conf_thres = conf_thres - self.iou_thres = iou_thres - self.iouv = iouv - self.niou = iouv.shape[0] - self.names = { - 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', - 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', - 10: 'fire hydrant', 11: 'stop sign', 12: 'parking 
meter', - 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', - 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', - 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', - 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', - 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', - 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', - 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', - 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', - 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', - 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', - 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', - 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', - 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', - 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', - 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', - 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', - 79: 'toothbrush', - } - - # detect parameters - self.no = nc + 5 - self.na = len(anchors[0]) // 2 - self.stride = nd.array([8., 16., 32.]) - self.anchors = nd.array( - [ - [ - [ 1.25000, 1.62500], - [ 2.00000, 3.75000], - [ 4.12500, 2.87500] - ], - [ - [ 1.87500, 3.81250], - [ 3.87500, 2.81250], - [ 3.68750, 7.43750] - ], - [ - [ 3.62500, 2.81250], - [ 4.87500, 6.18750], - [11.65625, 10.18750] - ] - ] - ) - - # status variable - self.stats = [] + def __init__(self): + self.conf_thres = 0.25 + self.iou_thres = 0.45 + self.names = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', + 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', + 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', + 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', + 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', + 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', + 'bottle', 
'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', + 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', + 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', + 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] + self.results = [] def reset(self): - self.stats.clear() - - def _make_grid(self, nx=20, ny=20, i=0, ctx=mx.cpu(0)): - yv = nd.array(range(ny))[:,None].repeat(nx,axis=1) - xv = nd.array(range(nx))[None,:].repeat(ny,axis=0) - grid = nd.concat( - xv[...,None], yv[...,None], dim=2)[None,None,...].repeat( - self.na, axis=1) - grid = nd.Cast(grid, dtype="float32") - - anchor_grid = (self.anchors[i].copy()*self.stride[i]) - anchor_grid = anchor_grid[None,:, None, None,:] - anchor_grid = anchor_grid.repeat(ny, axis=-3) - anchor_grid = anchor_grid.repeat(nx, axis=-2) - return grid.as_in_context(ctx), anchor_grid.as_in_context(ctx) + self.results.clear() def update(self, labels, predict, input_shape): batch_size, _, H, W = input_shape outs = [] for i in range(batch_size): x, y, z = [o.slice_axis(axis=0, begin=i, end=i+1) for o in predict] - out = [] - - bs, _, ny, nx, _ = x.shape - grid, anchor_grid = self._make_grid(nx, ny, 0, ctx=x.ctx) - tmp = x.sigmoid() - # xy - xy = (tmp[..., 0:2]*2-0.5+grid) * \ - self.stride[0].as_in_context(x.ctx) - # wh - wh = (tmp[..., 2:4]*2)**2 * anchor_grid - tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) - out.append(tmp.reshape(bs, -1, self.no)) - - bs, _, ny, nx, _ = y.shape - grid, anchor_grid = self._make_grid(nx, ny, 1, ctx=y.ctx) - tmp = y.sigmoid() - # xy - xy = (tmp[..., 0:2]*2-0.5+grid) * \ - self.stride[1].as_in_context(y.ctx) - # wh - wh = (tmp[..., 2:4]*2)*2 * anchor_grid - tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) - out.append(tmp.reshape(bs, -1, self.no)) - - bs, _, ny, nx, _ = z.shape - grid, 
anchor_grid = self._make_grid(nx, ny, 2, ctx=z.ctx) - tmp = z.sigmoid() - # xy - xy = (tmp[..., 0:2]*2-0.5+grid) * \ - self.stride[2].as_in_context(z.ctx) - # wh - wh = (tmp[..., 2:4]*2)**2 * anchor_grid - tmp = nd.concat(xy, wh, tmp[..., 4:], dim=-1) - out.append(tmp.reshape(bs, -1, self.no)) - - out = nd.concat(*out, dim=1) + out = concat_out(x, y, z).asnumpy() outs.append(out) for i in range(batch_size): - label = labels[i] - nl = label.shape[0] out = non_max_suppression( - outs[i].asnumpy(), self.conf_thres, self.iou_thres, labels=[], + outs[i], self.conf_thres, self.iou_thres, labels=[], multi_label=True, agnostic=False) + f, img0s = labels[i] + + annotator = Annotator(img0s, line_width=1, example=str(self.names)) + pred = out[0] - tcls = label[:,0] if nl else [] - if pred.shape[0] == 0: - if nl: - self.stats.append( - (np.zeros((0,self.niou)), np.zeros((0)), np.zeros((0)), tcls)) - continue - predn = pred.copy() - # native-space pred - scale_coords((H,W), predn[:,:4], [H,W], [[1.0,1.0],[0.0,0.0]]) - if nl: - # target boxes - tbox = xywh2xyxy(label[:,1:5]) - # native-space label - scale_coords((H,W), tbox, [H,W], [[1.0,1.0],[0.0,0.0]]) - # native-space label - labelsn = np.concatenate((label[:,0:1],tbox), axis=1) - correct = process_batch(predn, labelsn, self.iouv) - else: - correct = np.zeros((pred.shape[0], self.niou), dtype=np.bool) - # (correct, conf, pcls, tcls) - self.stats.append((correct, pred[:, 4], pred[:, 5], tcls)) + if pred.shape[0] > 0: + pred[:, :4] = scale_coords( + (H,W), pred[:, :4], img0s.shape).round() + + for *xyxy, conf, cls in reversed(pred): + c = int(cls) + label = f'{self.names[c]} {conf:.2f}' + annotator.box_label(xyxy, label, color=Colors()(c, True)) + + img0s = annotator.result() + self.results.append((f,img0s)) def get(self): - # compute metrics - # to numpy - cur_stats = [np.concatenate(x, 0) for x in zip(*self.stats)] - if len(cur_stats) and cur_stats[0].any(): - tp, fp, p, r, f1, ap, ap_class = ap_per_class( - *cur_stats, 
plot=False, save_dir=None, names=self.names) - # AP@0.5, AP@0.5:0.95 - ap50, ap = ap[:, 0], ap.mean(1) - mp, mr, map50, map_ = p.mean(), r.mean(), ap50.mean(), ap.mean() - # number of targets per class - nt = np.bincount(cur_stats[3].astype(np.int64), minlength=80) - else: - nt = np.zeros(1) - mp = mr = map50 = map_ = 0. - return nt, mp, mr, map50, map_ + return self.results + self.results.clear() @ds.register_dataset("yolov5_dataset_v2") @@ -200,52 +89,29 @@ def data_loader(): continue l = f.replace(f.split(".")[1], "txt") file_name = os.path.join(self.image_dir, f) - label_name = os.path.join(self.label_dir, l) - img = cv2.imread(file_name) - # hack size + _, _, _, _, img = make_squre(cv2.imread(file_name)) img = cv2.resize(img, tuple(self.ishape[2:])) - try: - labels = np.loadtxt(label_name) - except: - labels = np.array([]) - labels = labels.reshape((-1, 5)) - height, width = img.shape[0:2] - scale = min(self.imgsz/height, self.imgsz/width) - h0, w0 = height*scale, width*scale - img0 = cv2.resize(img, (round(w0/32.)*32, round(h0/32.)*32)) - img = img0.astype("float32")/255. + img0s = img.copy() + img = img.astype("float32")/255. 
img = nd.array(img.transpose((2,0,1))[None]) - labels[:,1:] = labels[:,1:] * np.array([img.shape[3], img.shape[2]]*2) - # if img.shape[2] != self.ishape[2] or img.shape[3] != self.ishape[3]: - # continue if len(data) == batch_size: batch_data = nd.concatenate(data) yield batch_data, label data, label = [], [] data.append(img) - label.append(labels) + label.append((f,img0s)) if len(data) == batch_size: batch_data = nd.concatenate(data) yield batch_data, label self.data = data_loader() - def metrics( - self, conf_thres=0.001, iou_thres=0.6, iouv=np.linspace(0.5,0.95,10)): - anchors = [ - [10, 13, 16, 30, 33, 23], - [30 ,61, 62 ,45, 59, 119], - [116, 90, 156, 198, 373, 326] - ] - metric = Yolov5Metric( - conf_thres=conf_thres, iou_thres=iou_thres, iouv=iouv, - anchors=anchors, nc=80) + def metrics(self): + metric = Yolov5MetricV2() metric.reset() return metric def validate(self, metrics, out, labels): metrics.update(labels, out, self.ishape) - nt, mp, mr, map50, map_ = metrics.get() - return "#objects={}, ".format(nt.sum()) + \ - "mp={:6.2%}, mr={:6.2%}, ".format(mp, mr) + \ - "map50={:6.2%}, map={:6.2%}".format(map50, map_) + img0s_batch = metrics.get() + return img0s_batch diff --git a/tests/mrt/yolov5s/test_yolov5s.py b/tests/mrt/yolov5s/test_yolov5s.py index 5bd7d049..e5dc3cbe 100644 --- a/tests/mrt/yolov5s/test_yolov5s.py +++ b/tests/mrt/yolov5s/test_yolov5s.py @@ -1,11 +1,38 @@ +import os from os import path import sys +import logging -from mrt.V3.utils import get_cfg_defaults, merge_cfg, override_cfg_args +import cv2 + +from mrt.V3.utils import ( + get_cfg_defaults, merge_cfg, override_cfg_args, get_logger) from mrt.V3.execute import run +from mrt.V3.evaluate import get_evaluation_info import metric import metric_v2 +def validate(result_dir, func, data_iter_func, + logger=logging.getLogger(""), iter_num=10, batch_size=16): + func_name = func.__name__ + func_result_dir = path.join(result_dir, func_name) + os.makedirs(func_result_dir, exist_ok=False) + log_str = 
"Iteration: {:3d} | Total Sample: {:5d}" + + total = 0 + try: + for i in range(iter_num): + data, label = data_iter_func() + img0s_batch = evalfunc(data, label) + for f, img0s in img0s_batch: + fpath = path.join(func_result_dir, f) + cv2.imwrite(fpath, img0s) + total += batch_size + msg = log_str.format(i, total) + logger.info(msg) + except StopIteration: + logger.info("Iteration Stopped") + if __name__ == "__main__": assert len(sys.argv) >= 1 and len(sys.argv)%2 == 1, \ "invalid length: {} of sys.argv: {}".format( @@ -15,4 +42,30 @@ cfg = get_cfg_defaults() cfg = merge_cfg(yaml_file) cfg = override_cfg_args(cfg, sys.argv[1:]) - run(cfg) + dataset_name = cfg.CALIBRATE.DATASET_NAME + if dataset_name == "yolov5_dataset": + run(cfg) + elif dataset_name == "yolov5_dataset_v2": + argv = [ + "--common.run_evaluate", "False", + "--common.run_compile", "False", + ] + cfg = override_cfg_args(cfg, argv) + run(cfg) + + logger = get_logger(cfg.COMMON.VERBOSITY) + result_dir = path.expanduser("~/yolov5s_results/") + + evalfunc, data_iter_func, _ = get_evaluation_info( + cfg.COMMON, cfg.EVALUATE) + validate( + result_dir, evalfunc, data_iter_func, logger=logger, + iter_num=cfg.EVALUATE.ITER_NUM, batch_size=cfg.EVALUATE.BATCH) + + _, data_iter_func, quantfunc = get_evaluation_info( + cfg.COMMON, cfg.EVALUATE) + validate( + result_dir, quantfunc, data_iter_func, logger=logger, + iter_num=cfg.EVALUATE.ITER_NUM, batch_size=cfg.EVALUATE.BATCH) + else: + raise RuntimeError("Invalid dataset name: {}".format(dataset_name)) diff --git a/tests/mrt/yolov5s/utils.py b/tests/mrt/yolov5s/utils.py index 39c67991..a8c11e84 100644 --- a/tests/mrt/yolov5s/utils.py +++ b/tests/mrt/yolov5s/utils.py @@ -539,8 +539,8 @@ def str2bool(v): pass def get_quantized_model(model_dir, model_name, ctx): - model_dir = '/tmp/yolov5s' - model_name = 'yolov5s-0040.preprocess.unify.broadcastify' + #model_dir = '/tmp/yolov5s' + #model_name = 'yolov5s-0040.preprocess.unify.broadcastify' model_prefix = 
get_model_prefix(model_dir, model_name) sym_quant_file, prm_quant_file, ext_quant_file = load_fname( model_prefix, suffix="mrt.quantize", with_ext=True) diff --git a/tests/mrt/yolov5s/yolov5s-0040.yaml b/tests/mrt/yolov5s/yolov5s-0040.yaml index ed0e3ce8..ea98a1b2 100644 --- a/tests/mrt/yolov5s/yolov5s-0040.yaml +++ b/tests/mrt/yolov5s/yolov5s-0040.yaml @@ -1,5 +1,6 @@ COMMON: - MODEL_NAME: yolov5s-0040.preprocess.unify.broadcastify + # MODEL_NAME: yolov5s-0040.preprocess.unify.broadcastify + MODEL_NAME: yolov5s-0040 VERBOSITY: info # RUN_EVALUATE: False BATCH: 16 @@ -16,6 +17,7 @@ QUANTIZE: DEVICE_IDS: [0] EVALUATE: # in this model, the BATCH should be set as 16 + BATCH: 16 DEVICE_TYPE: gpu DEVICE_IDS: [0] ITER_NUM: 10 From a8864b70e4abdfceab215ba5053644eaed8da075 Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 25 Feb 2022 14:46:56 +0800 Subject: [PATCH 111/120] [doc] mrt.V3.evaluate; transfer mrt.frontend --- python/mrt/V3/evaluate.py | 119 ++++++++++++++- python/mrt/frontend/.gitignore | 7 - python/mrt/frontend/Makefile | 9 -- python/mrt/frontend/README.md | 1 - python/mrt/frontend/javascript/init.js | 20 --- .../frontend/javascript/model_submitter.js | 36 ----- .../mrt/frontend/javascript/mrt_executor.js | 50 ------ python/mrt/frontend/javascript/utils.js | 42 ----- .../mrt/frontend/javascript/yaml_clearer.js | 19 --- .../frontend/javascript/yaml_file_loader.js | 8 - python/mrt/frontend/manage.py | 22 --- python/mrt/frontend/python/rpc/forwarding.py | 34 ----- python/mrt/frontend/python/rpc/log.py | 20 --- python/mrt/frontend/python/rpc/service.proto | 20 --- python/mrt/frontend/python/rpc/service.py | 109 ------------- python/mrt/frontend/python/rpc/streamer.py | 47 ------ python/mrt/frontend/python/rpc/test_rpc.py | 33 ---- python/mrt/frontend/python/rpc/utils.py | 25 --- python/mrt/frontend/requirements.txt | 3 - python/mrt/frontend/web/__init__.py | 0 python/mrt/frontend/web/asgi.py | 23 --- python/mrt/frontend/web/consumers.py | 104 ------------- 
python/mrt/frontend/web/protocol.py | 57 ------- python/mrt/frontend/web/routing.py | 24 --- python/mrt/frontend/web/settings.py | 144 ------------------ python/mrt/frontend/web/templates/room.html | 144 ------------------ python/mrt/frontend/web/urls.py | 22 --- python/mrt/frontend/web/views.py | 4 - python/mrt/frontend/web/wsgi.py | 16 -- 29 files changed, 114 insertions(+), 1048 deletions(-) delete mode 100644 python/mrt/frontend/.gitignore delete mode 100644 python/mrt/frontend/Makefile delete mode 100644 python/mrt/frontend/README.md delete mode 100644 python/mrt/frontend/javascript/init.js delete mode 100644 python/mrt/frontend/javascript/model_submitter.js delete mode 100644 python/mrt/frontend/javascript/mrt_executor.js delete mode 100644 python/mrt/frontend/javascript/utils.js delete mode 100644 python/mrt/frontend/javascript/yaml_clearer.js delete mode 100644 python/mrt/frontend/javascript/yaml_file_loader.js delete mode 100755 python/mrt/frontend/manage.py delete mode 100644 python/mrt/frontend/python/rpc/forwarding.py delete mode 100644 python/mrt/frontend/python/rpc/log.py delete mode 100644 python/mrt/frontend/python/rpc/service.proto delete mode 100644 python/mrt/frontend/python/rpc/service.py delete mode 100644 python/mrt/frontend/python/rpc/streamer.py delete mode 100644 python/mrt/frontend/python/rpc/test_rpc.py delete mode 100644 python/mrt/frontend/python/rpc/utils.py delete mode 100644 python/mrt/frontend/requirements.txt delete mode 100644 python/mrt/frontend/web/__init__.py delete mode 100644 python/mrt/frontend/web/asgi.py delete mode 100644 python/mrt/frontend/web/consumers.py delete mode 100644 python/mrt/frontend/web/protocol.py delete mode 100644 python/mrt/frontend/web/routing.py delete mode 100644 python/mrt/frontend/web/settings.py delete mode 100644 python/mrt/frontend/web/templates/room.html delete mode 100644 python/mrt/frontend/web/urls.py delete mode 100644 python/mrt/frontend/web/views.py delete mode 100644 
python/mrt/frontend/web/wsgi.py diff --git a/python/mrt/V3/evaluate.py b/python/mrt/V3/evaluate.py index 349ae31a..77d18cd8 100644 --- a/python/mrt/V3/evaluate.py +++ b/python/mrt/V3/evaluate.py @@ -34,8 +34,28 @@ MRT_CFG.EVALUATE.ITER_NUM = 10 def forward(net, data, ctx, baxis, olen): - #TODO(ryt.dev) documentation - """ Multiple xpu run support. + """ + Multiple xpu run support. + + Parameters + ---------- + net : mxnet.gluon.block.SymbolBlock + Graph for inference. + data : mxnet.ndarray.ndarray.NDArray + Input data to pass into the graph. + ctx : mx.context.Context + Context for inference. + baxis : int + Axis id of batch dimension. + olen : int + Length of the output. + + Returns + ------- + outs : mxnet.ndarray.ndarray.NDArray or list + inference result of the graph with respect to the given input data, + for multiple outputs, outs will be a list the entry type of which is + mxnet.ndarray.ndarray.NDArray. """ data = gluon.utils.split_and_load( data, ctx_list=ctx, batch_axis=baxis, even_split=False) @@ -48,6 +68,19 @@ def forward(net, data, ctx, baxis, olen): return outs def get_evaluation_info(cm_cfg, pass_cfg, logger=None): + """ + YAML configuration API to get evaluation function, + quantization function and dataset iteration function + + Parameters + ---------- + cm_cfg : yacs.config.CfgNode + CfgNode of common stage. + pass_cfg : yacs.config.CfgNode + CfgNode of calibration stage. + logger : logging.RootLogger + Console logger. + """ model_dir = cm_cfg.MODEL_DIR model_name = cm_cfg.MODEL_NAME verbosity = cm_cfg.VERBOSITY @@ -134,6 +167,18 @@ def quantize(data, label): return evalfunc, data_iter_func, quantize def evaluate(cm_cfg, pass_cfg, logger=None): + """ + YAML configuration API of MRT evaluation stage. + + Parameters + ---------- + cm_cfg : yacs.config.CfgNode + CfgNode of common stage. + pass_cfg : yacs.config.CfgNode + CfgNode of calibration stage. + logger : logging.RootLogger + Console logger. 
+ """ evalfunc, data_iter_func, quantize = get_evaluation_info( cm_cfg, pass_cfg, logger=logger) @@ -152,7 +197,19 @@ def evaluate(cm_cfg, pass_cfg, logger=None): logger.info("evaluatation stage skipped") def get_ctx_eval(ctx): - #TODO(ryt.dev) documentation + """ + Get the context instance for evaluation stage + + Parameters + ---------- + ctx : mx.context.Context + The input context. + + Returns + ------- + ctx : mx.context.Context + The modified context. + """ if isinstance(ctx, mx.Context): ctx = [ctx] elif isinstance(ctx, list): @@ -166,7 +223,31 @@ def inference_original_model( symbol_file, params_file, data, batch_axis=0, device_type=MRT_CFG.EVALUATE.DEVICE_TYPE, device_ids=MRT_CFG.EVALUATE.DEVICE_IDS): - #TODO(ryt.dev) documentation + """ + MRT Inference API for original model. + + Parameters + ---------- + symbol_file : str + Path to the quantized mxnet symbol JSON file. + params_file : str + Path to the quantized mxnet parameters file. + data: mxnet.ndarray.ndarray.NDArray + Input data to pass into the graph. + batch_axis : int + Axis id of batch dimension. + device_type : str + Context type string chosen from `cpu` or `gpu`. + device_ids : list + List of context ids. + + Returns + ------- + outs : mxnet.ndarray.ndarray.NDArray or list + inference result of the graph with respect to the given input data, + for multiple outputs, outs will be a list the entry type of which is + mxnet.ndarray.ndarray.NDArray. + """ ctx = get_ctx_eval(get_ctx(device_type, device_ids)) omodel = Model.load(symbol_file, params_file) @@ -180,7 +261,35 @@ def inference_quantized_model( qsymbol_file, qparams_file, qext_file, data, batch_axis=0, split=False, device_type=MRT_CFG.EVALUATE.DEVICE_TYPE, device_ids=MRT_CFG.EVALUATE.DEVICE_IDS): - #TODO(ryt.dev) documentation + """ + MRT Inference API for quantized model. + + Parameters + ---------- + qsymbol_file : str + Path to the quantized mxnet symbol JSON file. + qparams_file : str + Path to the quantized mxnet parameters file. 
+ qext_file : str + Path to the quantized extension file which store intermediate results. + data: mxnet.ndarray.ndarray.NDArray + Input data to pass into the graph. + batch_axis : int + Axis id of batch dimension. + split: bool + Flag indicating whether the model is split before quantization. + device_type : str + Context type string chosen from `cpu` or `gpu`. + device_ids : list + List of context ids. + + Returns + ------- + outs : mxnet.ndarray.ndarray.NDArray or list + inference result of the graph with respect to the given input data, + for multiple outputs, outs will be a list the entry type of which is + mxnet.ndarray.ndarray.NDArray. + """ ctx = get_ctx_eval(get_ctx(device_type, device_ids)) diff --git a/python/mrt/frontend/.gitignore b/python/mrt/frontend/.gitignore deleted file mode 100644 index 4ac9261f..00000000 --- a/python/mrt/frontend/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -db.sqlite3 -__pycache__ -.DS_Store -mrt_rpc_service_pb2.py -mrt_rpc_service_pb2_grpc.py -service_pb2.py -service_pb2_grpc.py diff --git a/python/mrt/frontend/Makefile b/python/mrt/frontend/Makefile deleted file mode 100644 index f1c0561b..00000000 --- a/python/mrt/frontend/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -rpc-build: - python -m grpc_tools.protoc \ - -I. --python_out=. --grpc_python_out=. 
\ - ./python/rpc/service.proto - python ./python/rpc/service.py -rpc-test: - python ./python/rpc/test_rpc.py -web-build: - python manage.py runserver 8000 diff --git a/python/mrt/frontend/README.md b/python/mrt/frontend/README.md deleted file mode 100644 index 6ee19ee2..00000000 --- a/python/mrt/frontend/README.md +++ /dev/null @@ -1 +0,0 @@ -# mrt-web \ No newline at end of file diff --git a/python/mrt/frontend/javascript/init.js b/python/mrt/frontend/javascript/init.js deleted file mode 100644 index 5f153931..00000000 --- a/python/mrt/frontend/javascript/init.js +++ /dev/null @@ -1,20 +0,0 @@ -import { - roomName, update_yaml_configurations, update_console, - create_socket } from './utils.js'; - -const yamlInitSocket = create_socket("yaml/init/"); - -yamlInitSocket.onopen = function(e) { - yamlInitSocket.send(null); -}; - -yamlInitSocket.onmessage = function(e) { - update_yaml_configurations(e); - update_console("yaml parameters initialized."); -} - -const yamlResetter = document.querySelector('#yaml-resetter'); - -yamlResetter.onclick = function(e) { - yamlInitSocket.send(null); -}; diff --git a/python/mrt/frontend/javascript/model_submitter.js b/python/mrt/frontend/javascript/model_submitter.js deleted file mode 100644 index 20337db2..00000000 --- a/python/mrt/frontend/javascript/model_submitter.js +++ /dev/null @@ -1,36 +0,0 @@ -import { roomName, create_socket, update_console_v2 } from './utils.js'; - -const modelSubmitSocket = create_socket("model/submit/"); - -const mrtExecutor = document.querySelector('#mrt-executor'); -const modelSubmitter = document.querySelector('#model-submitter'); - -modelSubmitSocket.onmessage = function(e) { - const data = JSON.parse(e.data); - if ('activate' in data) { - mrtExecutor.disabled = false; - modelSubmitter.disabled = false; - } - if ('message' in data) { - if ('first' in data) { - update_console(data.message); - } else { - update_console_v2(data.message); - } - } -}; - -modelSubmitSocket.onclose = function(e) { - 
console.error('model submit socket closed unexpectedly'); -}; - -modelSubmitter.onclick = function(e) { - mrtExecutor.disabled = true; - modelSubmitter.disabled = true; - let text_data = new Object(); - text_data['symbol'] = document.querySelector('#symbol-locator').value; - text_data['params'] = document.querySelector('#params-locator').value; - text_data['dst'] = document.querySelector('#COMMON_MODEL_DIR').value; - text_data['host'] = document.querySelector('#host-locator').value; - modelSubmitSocket.send(JSON.stringify(text_data)); -}; diff --git a/python/mrt/frontend/javascript/mrt_executor.js b/python/mrt/frontend/javascript/mrt_executor.js deleted file mode 100644 index f5b4f155..00000000 --- a/python/mrt/frontend/javascript/mrt_executor.js +++ /dev/null @@ -1,50 +0,0 @@ -import { roomName, create_socket, update_console } from './utils.js'; - -const mrtExecuteSocket = create_socket("mrt/execute/"); - -const mrtExecutor = document.querySelector('#mrt-executor'); -const modelSubmitter = document.querySelector('#model-submitter'); - -mrtExecuteSocket.onmessage = function(e) { - const data = JSON.parse(e.data); - if ('activate' in data) { - mrtExecutor.disabled = false; - modelSubmitter.disabled = false; - } - if ('message' in data) { - update_console(data.message); - } -}; - -mrtExecuteSocket.onclose = function(e) { - console.error('mrt execute socket closed unexpectedly'); -}; - -const ConfigWrapperSocket = create_socket("config/wrapper/"); - -ConfigWrapperSocket.onmessage = function(e) { - const data = JSON.parse(e.data); - let dict = new Object(); - for (const [stage, stage_data] of Object.entries(data)) { - let subdict = new Object(); - for (const attr of Object.keys(stage_data)) { - const id = '#' + stage + '_' + attr; - let value = document.querySelector(id).value; - subdict[attr] = value; - } - dict[stage] = subdict; - } - // overide pass_name - const pass_name = document.querySelector('#mrt-stage-selector').value; - dict["COMMON"]["PASS_NAME"] = 
pass_name; - let text_data = new Object(); - text_data['yaml'] = dict; - text_data['host'] = document.querySelector('#host-locator').value; - mrtExecuteSocket.send(JSON.stringify(text_data)); -}; - -mrtExecutor.onclick = function(e) { - mrtExecutor.disabled = true; - modelSubmitter.disabled = true; - ConfigWrapperSocket.send(null); -}; diff --git a/python/mrt/frontend/javascript/utils.js b/python/mrt/frontend/javascript/utils.js deleted file mode 100644 index b3c0b97e..00000000 --- a/python/mrt/frontend/javascript/utils.js +++ /dev/null @@ -1,42 +0,0 @@ -export const roomName = JSON.parse(document.getElementById('room-name').textContent); - -export function update_yaml_configurations(e) { - const data = JSON.parse(e.data); - for (const [stage, stage_data] of Object.entries(data)) { - for (const [attr, value] of Object.entries(stage_data)) { - const id = '#' + stage + '_' + attr; - document.querySelector(id).value = value; - } - } -} - -export function create_socket(sub_path) { - const newSocket = new WebSocket( - 'ws://' - + window.location.host - + '/ws/web/' - + sub_path - + roomName - + '/' - ); - return newSocket; -} - -export function update_console(str) { - document.querySelector('#chat-log').value += (str + '\n'); -} - -export function update_console_v2(str) { - document.querySelector('#chat-log').value += (str + '\n'); - let s = document.querySelector('#chat-log').value; - let ind = s.slice(0,-1).lastIndexOf('\n'); - document.querySelector('#chat-log').value = s.slice(0,ind+1) + str + '\n'; - -} - -export const yamlUpdateSocket = create_socket("yaml/update/"); - -yamlUpdateSocket.onmessage = function(e) { - update_yaml_configurations(e); - update_console("yaml parameters updated."); -}; diff --git a/python/mrt/frontend/javascript/yaml_clearer.js b/python/mrt/frontend/javascript/yaml_clearer.js deleted file mode 100644 index 4eb864d5..00000000 --- a/python/mrt/frontend/javascript/yaml_clearer.js +++ /dev/null @@ -1,19 +0,0 @@ -import { roomName, 
create_socket, update_console } from './utils.js'; - -const yamlClearSocket = create_socket("yaml/clear/") - -yamlClearSocket.onmessage = function(e) { - const data = JSON.parse(e.data); - for (const [stage, stage_data] of Object.entries(data)) { - for (const [attr, value] of Object.entries(stage_data)) { - const id = '#' + stage + '_' + attr; - document.querySelector(id).value = ''; - } - } - update_console("yaml parameters cleared."); -}; - -const yamlClearer = document.querySelector('#yaml-clearer'); -yamlClearer.onclick = function(e) { - yamlClearSocket.send(null) -}; diff --git a/python/mrt/frontend/javascript/yaml_file_loader.js b/python/mrt/frontend/javascript/yaml_file_loader.js deleted file mode 100644 index cc5f5f5f..00000000 --- a/python/mrt/frontend/javascript/yaml_file_loader.js +++ /dev/null @@ -1,8 +0,0 @@ -import { yamlUpdateSocket } from './utils.js'; - -document.querySelector('#yaml-loader').onclick = function(e) { - const yamlFileLocator = document.querySelector('#yaml-file-locator'); - yamlUpdateSocket.send(JSON.stringify({ - 'yaml_file': yamlFileLocator.value, - })); -}; diff --git a/python/mrt/frontend/manage.py b/python/mrt/frontend/manage.py deleted file mode 100755 index 19be6dd3..00000000 --- a/python/mrt/frontend/manage.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -"""Django's command-line utility for administrative tasks.""" -import os -import sys - - -def main(): - """Run administrative tasks.""" - os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') - try: - from django.core.management import execute_from_command_line - except ImportError as exc: - raise ImportError( - "Couldn't import Django. Are you sure it's installed and " - "available on your PYTHONPATH environment variable? Did you " - "forget to activate a virtual environment?" 
- ) from exc - execute_from_command_line(sys.argv) - - -if __name__ == '__main__': - main() diff --git a/python/mrt/frontend/python/rpc/forwarding.py b/python/mrt/frontend/python/rpc/forwarding.py deleted file mode 100644 index 048161b5..00000000 --- a/python/mrt/frontend/python/rpc/forwarding.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import argparse - -default_local_port = 5001 -default_remote_port = 5000 -default_remote_user = None -default_remote_host = None - -parser = argparse.ArgumentParser() -parser.add_argument( - "--local-port", type=int, default=default_local_port) -parser.add_argument( - "--remote-port", type=int, default=default_remote_port) -parser.add_argument( - "--remote-user", type=str, default=default_remote_user) -parser.add_argument( - "--remote-host", type=str, default=default_remote_host) - -def forward( - local_port=default_local_port, remote_port=default_remote_port, - remote_user=default_remote_user, remote_host=default_remote_host): - if remote_user is None: - raise RuntimeError("remote_user should be specified") - if remote_host is None: - raise RuntimeError("remote_host should be specified") - cmd = "ssh -N -L {}:localhost:{} {}@{}".format( - local_port, remote_port, remote_user, remote_host) - os.system(cmd) - -if __name__ == "__main__": - args = parser.parse_args() - forward( - local_port=args.local_port, remote_port=args.remote_port, - remote_user=args.remote_user, remote_host=args.remote_host) diff --git a/python/mrt/frontend/python/rpc/log.py b/python/mrt/frontend/python/rpc/log.py deleted file mode 100644 index c55cb690..00000000 --- a/python/mrt/frontend/python/rpc/log.py +++ /dev/null @@ -1,20 +0,0 @@ -import logging -from mrt.common.log import ( - LOG_LEVELS, ColorFormatter, FilterList, name2level -) - -def log_init(log_level, streamer): - assert log_level in LOG_LEVELS - logging.basicConfig(level=log_level, stream=streamer) - formatter = ColorFormatter( - fmt="[ %(asctime)s %(name)10s %(levelname)5s ] %(message)s", - 
datefmt="%Y-%m-%d %H:%M:%S") - log_filter = FilterList(log_level=log_level, default=False) - for handler in logging.root.handlers: - handler.addFilter(log_filter) - handler.setFormatter(formatter) - -def get_logger(verbosity, streamer): - log_init(name2level(verbosity.upper()), streamer) - logger = logging.getLogger("log.main") - return logger diff --git a/python/mrt/frontend/python/rpc/service.proto b/python/mrt/frontend/python/rpc/service.proto deleted file mode 100644 index 63913f02..00000000 --- a/python/mrt/frontend/python/rpc/service.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package test; - -service MRTRpcSrv { - rpc execute(MRTClientReq) returns(stream MRTServerResp) {} - rpc submit(stream MRTClientReqStream) returns (stream MRTServerResp) {} -} - -message MRTClientReq { - string content = 1; -} - -message MRTClientReqStream { - bytes chunck = 1; -} - -message MRTServerResp { - string logging_str = 1; -} diff --git a/python/mrt/frontend/python/rpc/service.py b/python/mrt/frontend/python/rpc/service.py deleted file mode 100644 index f66bbc0d..00000000 --- a/python/mrt/frontend/python/rpc/service.py +++ /dev/null @@ -1,109 +0,0 @@ -from concurrent import futures -import os -from os import path -from shutil import copyfile - -import grpc - -import rpc.service_pb2 as pb2 -import rpc.service_pb2_grpc as pb2_grpc -from rpc.utils import get_streamer - -# TODO(ryt): load balancer for maxinum_workers -maximum_workers = 4 -# local_addr = "127.0.0.1:5000" -# socket host difference -local_addr = "0.0.0.0:5000" -chunk_size = 1024 * 1024 # 1MB - -def mrt_submit( - src_sym_file, src_prm_file, dst_model_dir, host_addr=None): - model_name = path.splitext(path.basename(src_sym_file))[0] - model_name_2 = path.splitext(path.basename(src_prm_file))[0] - assert model_name == model_name_2, "not compatible, " + \ - "src_sym_file: {}, src_prm_file: {}".format( - src_sym_file, src_prm_file) - if host_addr is None: - dst_sym_file = path.join(dst_model_dir, 
model_name+".json") - dst_prm_file = path.join(dst_model_dir, model_name+".params") - copyfile(src_sym_file, dst_sym_file) - copyfile(src_prm_file, dst_prm_file) - yield "src files copied" - else: - def iterator_func(src_file, file_name): - yield pb2.MRTClientReqStream(chunck=bytes(dst_model_dir, 'utf-8')) - yield pb2.MRTClientReqStream(chunck=bytes(file_name, 'utf-8')) - yield pb2.MRTClientReqStream( - chunck=bytes(str(path.getsize(src_file)), 'utf-8')) - with open(src_file, 'rb') as f: - while True: - piece = f.read(chunk_size); - if len(piece) == 0: - return - yield pb2.MRTClientReqStream(chunck=piece) - conn = grpc.insecure_channel(host_addr) - client = pb2_grpc.MRTRpcSrvStub(channel=conn) - response = client.submit( - iterator_func(src_sym_file, model_name+".json")) - next(response) - for message in response: - yield message.logging_str - response = client.submit( - iterator_func(src_prm_file, model_name+".params")) - for message in response: - yield message.logging_str - -def mrt_execute(yaml_file_str, host_addr=None): - if host_addr is None: - my_streamer = get_streamer(yaml_file_str) - for logging_str in my_streamer.start(): - yield logging_str - else: - conn = grpc.insecure_channel(host_addr) - client = pb2_grpc.MRTRpcSrvStub(channel=conn) - response = client.execute( - pb2.MRTClientReq(content=yaml_file_str)) - for message in response: - yield message.logging_str - - -class MRTRpcSrv(pb2_grpc.MRTRpcSrvServicer): - def execute(self, request, context): - yaml_file_str = request.content - my_streamer = get_streamer(yaml_file_str) - for message in my_streamer.start(): - if not context.is_active(): - raise RuntimeError("client connection lost") - yield pb2.MRTServerResp(logging_str=message) - # if context.is_active(): - # context.cancel() - - def submit(self, request_iterator, context): - dst_model_dir = str(next(request_iterator).chunck, 'utf-8') - os.makedirs(dst_model_dir, exist_ok=True) - file_name = str(next(request_iterator).chunck, 'utf-8') - size = 
eval(str(next(request_iterator).chunck, 'utf-8')) - dst_file = path.join(dst_model_dir, file_name) - with open(dst_file, 'wb') as f: - cur_size = 0 - for piece in request_iterator: - f.write(piece.chunck) - cur_size += chunk_size - cur_size = min(cur_size, size) - message = "Current: {} Bytes / Total: {} Bytes, ".format( - cur_size, size) + \ - "{} % Completed".format(round(cur_size/size*100.0, 2)) - yield pb2.MRTServerResp(logging_str=message) - -def main(): - grpc_server = grpc.server( - futures.ThreadPoolExecutor(max_workers=maximum_workers)) - pb2_grpc.add_MRTRpcSrvServicer_to_server( - MRTRpcSrv(), grpc_server) - grpc_server.add_insecure_port(local_addr) - grpc_server.start() - print("server will start at {}".format(local_addr)) - grpc_server.wait_for_termination() - -if __name__ == '__main__': - main() diff --git a/python/mrt/frontend/python/rpc/streamer.py b/python/mrt/frontend/python/rpc/streamer.py deleted file mode 100644 index 3c43f288..00000000 --- a/python/mrt/frontend/python/rpc/streamer.py +++ /dev/null @@ -1,47 +0,0 @@ -from queue import Queue, Empty -from threading import Thread, current_thread -import sys - - -class Printer: - def __init__(self): - self.queues = {} - - def write(self, value): - queue = self.queues.get(current_thread().name) - if queue: - queue.put(value) - else: - sys.__stdout__.write(value) - - def flush(self): - pass - - def register(self, thread): - queue = Queue() - self.queues[thread.name] = queue - return queue - - def clean(self, thread): - del self.queues[thread.name] - -printer = Printer() -sys.stdout = printer - - -class Streamer: - def __init__(self, target, args): - self.thread = Thread(target=target, args=args) - self.queue = printer.register(self.thread) - - def start(self): - self.thread.start() - # print('This should be stdout') - while self.thread.is_alive(): - try: - item = self.queue.get_nowait() - yield item.strip() - except Empty: - pass - yield '\n***End***' - printer.clean(self.thread) diff --git 
a/python/mrt/frontend/python/rpc/test_rpc.py b/python/mrt/frontend/python/rpc/test_rpc.py deleted file mode 100644 index bb248791..00000000 --- a/python/mrt/frontend/python/rpc/test_rpc.py +++ /dev/null @@ -1,33 +0,0 @@ -from os import path -import argparse - -from mrt.V3.utils import get_cfg_defaults -from rpc.service import local_addr, mrt_execute, mrt_submit -from rpc.utils import stringify_cfg - -parser = argparse.ArgumentParser() -parser.add_argument("--host-addr", type=str, default=local_addr) - -def test_execute(host_addr): - cfg = get_cfg_defaults() - tmp_yaml_file = path.expanduser("~/mrt_yaml_root/alexnet.yaml") - cfg.merge_from_file(tmp_yaml_file) - yaml_file_str = stringify_cfg(cfg) - for message in mrt_execute(yaml_file_str, host_addr=host_addr): - print(message) - -def test_submit(host_addr): - src_sym_file = path.expanduser("~/mrt_model/alexnet.json") - src_prm_file = path.expanduser("~/mrt_model/alexnet.params") - # dst_model_dir = path.expanduser("~/mrt_model_2") - dst_model_dir = "/home/ycmtrivial/mrt_model" - for message in mrt_submit( - src_sym_file, src_prm_file, dst_model_dir, - host_addr=host_addr): - print(message) - -if __name__ == "__main__": - args = parser.parse_args() - host_addr = args.host_addr - test_execute(host_addr=host_addr) - test_submit(host_addr=host_addr) diff --git a/python/mrt/frontend/python/rpc/utils.py b/python/mrt/frontend/python/rpc/utils.py deleted file mode 100644 index a55445ec..00000000 --- a/python/mrt/frontend/python/rpc/utils.py +++ /dev/null @@ -1,25 +0,0 @@ -import sys -import io - -from yacs.config import CfgNode as CN - -from mrt.V3.execute import run -from rpc import streamer -from rpc.log import get_logger - -def get_streamer(yaml_file_str): - cfg = CN().load_cfg(yaml_file_str) - cfg.freeze() - logger = get_logger(cfg.COMMON.VERBOSITY, streamer.printer) - my_streamer = streamer.Streamer(run, (cfg, logger)) - return my_streamer - -def stringify_cfg(cfg): - # TODO(ryt): replace by appropriately - # 
configured yacs interface cfg.dump(**kwargs) - old_stdout = sys.stdout - sys.stdout = new_stdout = io.StringIO() - print(cfg) - yaml_file_str = new_stdout.getvalue() - sys.stdout = old_stdout - return yaml_file_str diff --git a/python/mrt/frontend/requirements.txt b/python/mrt/frontend/requirements.txt deleted file mode 100644 index e2b6ba4c..00000000 --- a/python/mrt/frontend/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -grpcio -grpcio-tools -protobuf diff --git a/python/mrt/frontend/web/__init__.py b/python/mrt/frontend/web/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python/mrt/frontend/web/asgi.py b/python/mrt/frontend/web/asgi.py deleted file mode 100644 index 90e7fb23..00000000 --- a/python/mrt/frontend/web/asgi.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -ASGI config for web project. - -It exposes the ASGI callable as a module-level variable named ``application``. - -For more information on this file, see -https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ -""" - -import os - -from channels.auth import AuthMiddlewareStack -from channels.routing import ProtocolTypeRouter, URLRouter -from django.core.asgi import get_asgi_application -import web.routing - -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') - -application = ProtocolTypeRouter({ - "http": get_asgi_application(), - "websocket": AuthMiddlewareStack( - URLRouter(web.routing.websocket_urlpatterns)), -}) diff --git a/python/mrt/frontend/web/consumers.py b/python/mrt/frontend/web/consumers.py deleted file mode 100644 index 2793257f..00000000 --- a/python/mrt/frontend/web/consumers.py +++ /dev/null @@ -1,104 +0,0 @@ -import json -import yaml -import os -from os import path - -from channels.generic.websocket import WebsocketConsumer - -from mrt.V3.utils import merge_cfg, get_cfg_defaults -from mrt.V3.execute import run -from rpc.service import mrt_execute, mrt_submit -from rpc.utils import stringify_cfg -from .protocol import type_cast - - -class 
MRTExecuteConsumer(WebsocketConsumer): - def connect(self): - self.accept() - - def disconnect(self, close_code): - pass - - def receive(self, text_data): - json_from_js = json.loads(text_data) - json_data = {} - ref_cfg = get_cfg_defaults() - for stage, stage_data in json_from_js['yaml'].items(): - sub_type_cast = type_cast[stage] - sub_json_data = {} - stage_ref_data = getattr(ref_cfg, stage) - for attr, data in stage_data.items(): - if data == '': - data = getattr(stage_ref_data, attr) - elif attr in sub_type_cast: - cast_func = sub_type_cast[attr] - data = cast_func(data) - sub_json_data[attr] = data - json_data[stage] = sub_json_data - yaml_file_str = yaml.dump(json_data) - host_addr = json_from_js['host'] - for message in mrt_execute(yaml_file_str, host_addr=host_addr): - self.send(text_data=json.dumps({'message': message})) - self.send( - text_data=json.dumps({'activate': None})) - - -class ModelSubmitConsumer(WebsocketConsumer): - def connect(self): - self.accept() - - def disconnect(self, close_code): - pass - - def receive(self, text_data): - json_from_js = json.loads(text_data) - json_data = {} - host_addr = json_from_js['host'] - src_sym_file = json_from_js['symbol'] - src_prm_file = json_from_js['params'] - dst_model_dir = json_from_js['dst'] - cnt = 0 - for message in mrt_submit( - src_sym_file, src_prm_file, dst_model_dir, - host_addr=host_addr): - cnt += 1 - dct = {'message': message} - if cnt == 1: - dct['first'] = 1 - self.send(text_data=json.dumps(dct)) - self.send( - text_data=json.dumps({'activate': None})) - - -class YAMLInitConsumer(WebsocketConsumer): - def connect(self): - self.accept() - - def disconnect(self, close_code): - pass - - def receive(self, text_data): - cfg = get_cfg_defaults() - self.send(text_data=json.dumps(cfg)) - - -class YAMLUpdateConsumer(WebsocketConsumer): - def connect(self): - self.accept() - - def disconnect(self, close_code): - pass - - def receive(self, text_data): - text_data_json = json.loads(text_data) - 
yaml_file = text_data_json['yaml_file'] - cfg = merge_cfg(yaml_file) - self.send(text_data=json.dumps(cfg)) - - -class YAMLClearConsumer(YAMLInitConsumer): - pass - - -class ConfigWrapperConsumer(YAMLInitConsumer): - pass diff --git a/python/mrt/frontend/web/protocol.py b/python/mrt/frontend/web/protocol.py deleted file mode 100644 index 1f54abb7..00000000 --- a/python/mrt/frontend/web/protocol.py +++ /dev/null @@ -1,57 +0,0 @@ -import json - -str2listofeval = lambda v: [eval(s) for s in v.split(',')] -str2eval = lambda v: eval(v) -str2listofstr = lambda v: [s.strip() for s in v.split(',')] - -def str2bool(v): - if v == "true": - ret = True - elif v == "false": - ret = False - else: - raise RuntimeError("invalid v: {}".format(v)) - return ret - -# def str2attribute_deps(v): - # print(v) - # ret = json.loads(v) - # return ret - -type_cast = { - "COMMON": { - "DEVICE_IDS": str2listofeval, - "BATCH": str2eval, - "RUN_EVALUATE": str2bool, - "RUN_COMPILE": str2bool, - }, - "PREPARE": { - "DEVICE_IDS": str2listofeval, - "INPUT_SHAPE": str2listofeval, - "SPLIT_KEYS": str2listofstr, - }, - "CALIBRATE": { - "BATCH": str2eval, - "NUM_CALIB": str2eval, - "LAMBD": str2eval, - "DEVICE_IDS": str2listofeval, - }, - "QUANTIZE": { - "RESTORE_NAMES": str2listofstr, - "INPUT_PRECISION": str2eval, - "OUTPUT_PRECISION": str2eval, - "DEVICE_IDS": str2listofeval, - "SOFTMAX_LAMBD": str2eval, - "SHIFT_BITS": str2eval, - # TODO ATTRIBUTE_DEPS, OSCALE_MAPS, THRESHOLDS - }, - "EVALUATE": { - "BATCH": str2eval, - "DEVICE_IDS": str2listofeval, - "ITER_NUM": str2eval, - }, - "COMPILE": { - "BATCH": str2eval, - "DEVICE_IDS": str2listofeval, - }, -} diff --git a/python/mrt/frontend/web/routing.py b/python/mrt/frontend/web/routing.py deleted file mode 100644 index 72643818..00000000 --- a/python/mrt/frontend/web/routing.py +++ /dev/null @@ -1,24 +0,0 @@ -from django.urls import re_path - -from . 
import consumers - -websocket_urlpatterns = [ - re_path( - r'ws/web/mrt/execute/(?P\w+)/$', - consumers.MRTExecuteConsumer.as_asgi()), - re_path( - r'ws/web/yaml/init/(?P\w+)/$', - consumers.YAMLInitConsumer.as_asgi()), - re_path( - r'ws/web/yaml/update/(?P\w+)/$', - consumers.YAMLUpdateConsumer.as_asgi()), - re_path( - r'ws/web/yaml/clear/(?P\w+)/$', - consumers.YAMLClearConsumer.as_asgi()), - re_path( - r'ws/web/config/wrapper/(?P\w+)/$', - consumers.ConfigWrapperConsumer.as_asgi()), - re_path( - r'ws/web/model/submit/(?P\w+)/$', - consumers.ModelSubmitConsumer.as_asgi()), -] diff --git a/python/mrt/frontend/web/settings.py b/python/mrt/frontend/web/settings.py deleted file mode 100644 index f95f5bb5..00000000 --- a/python/mrt/frontend/web/settings.py +++ /dev/null @@ -1,144 +0,0 @@ -""" -Django settings for web project. - -Generated by 'django-admin startproject' using Django 3.2.9. - -For more information on this file, see -https://docs.djangoproject.com/en/3.2/topics/settings/ - -For the full list of settings and their values, see -https://docs.djangoproject.com/en/3.2/ref/settings/ -""" - -from pathlib import Path -from os import path - -# Build paths inside the project like this: BASE_DIR / 'subdir'. -BASE_DIR = Path(__file__).resolve().parent.parent - - -# Quick-start development settings - unsuitable for production -# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ - -# SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = 'django-insecure-cpi#8nm8p_dm!)kkn+^ugib_g*=ip224p5s@5_&aj1nz!$p)uh' - -# SECURITY WARNING: don't run with debug turned on in production! 
-DEBUG = True - -# ALLOWED_HOSTS = [] -ALLOWED_HOSTS = ['*'] - - -# Application definition - -INSTALLED_APPS = [ - 'django.contrib.admin', - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.staticfiles', - 'web', - 'channels', -] - -MIDDLEWARE = [ - 'django.middleware.security.SecurityMiddleware', - 'whitenoise.middleware.WhiteNoiseMiddleware', - 'django.contrib.sessions.middleware.SessionMiddleware', - 'django.middleware.common.CommonMiddleware', - 'django.middleware.csrf.CsrfViewMiddleware', - 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.contrib.messages.middleware.MessageMiddleware', - 'django.middleware.clickjacking.XFrameOptionsMiddleware', -] - -ROOT_URLCONF = 'web.urls' - -TEMPLATES = [ - { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'DIRS': [], - 'APP_DIRS': True, - 'OPTIONS': { - 'context_processors': [ - 'django.template.context_processors.debug', - 'django.template.context_processors.request', - 'django.contrib.auth.context_processors.auth', - 'django.contrib.messages.context_processors.messages', - ], - }, - }, -] - -WSGI_APPLICATION = 'web.wsgi.application' -ASGI_APPLICATION = 'web.asgi.application' - - -# Database -# https://docs.djangoproject.com/en/3.2/ref/settings/#databases - -DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': BASE_DIR / 'db.sqlite3', - } -} - -import dj_database_url - -db_from_env = dj_database_url.config(conn_max_age=500) -DATABASES['default'].update(db_from_env) - - -# Password validation -# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators - -AUTH_PASSWORD_VALIDATORS = [ - { - 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', - }, - { - 
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', - }, -] - - -# Internationalization -# https://docs.djangoproject.com/en/3.2/topics/i18n/ - -LANGUAGE_CODE = 'en-us' - -TIME_ZONE = 'UTC' - -USE_I18N = True - -USE_L10N = True - -USE_TZ = True - - -# Static files (CSS, JavaScript, Images) -# https://docs.djangoproject.com/en/3.2/howto/static-files/ - -STATICFILES_DIRS = [ - path.join(BASE_DIR, "javascript"), -] - -STATIC_ROOT = BASE_DIR / 'staticfiles' - -STATIC_URL = '/static/' - -STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' - -# Default primary key field type -# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field - -DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' diff --git a/python/mrt/frontend/web/templates/room.html b/python/mrt/frontend/web/templates/room.html deleted file mode 100644 index 07f385b0..00000000 --- a/python/mrt/frontend/web/templates/room.html +++ /dev/null @@ -1,144 +0,0 @@ - - -{% load static %} - - - - Chat Room - - - Destination Host Address
- -
- -
- -
- -

- -
- -
- Local Symbol File Path
- Local Params File Path
-
-
- -
- -
- Local YAML File Path - - -
-
- -
- -

YAML Configuration Zone

- - -
-

COMMON

- PASS_NAME
- MODEL_DIR
- MODEL_NAME
- VERBOSITY
- START_AFTER
- DEVICE_TYPE
- DEVICE_IDS
- BATCH
- RUN_EVALUATE
- RUN_COMPILE
-
- - -
- - -
-

PREPARE

- DEVICE_TYPE
- DEVICE_IDS
- INPUT_SHAPE
- SPLIT_KEYS
-
- - -
- - -
-

CALIBRATE

- BATCH
- NUM_CALIB
- LAMBD
- DATASET_NAME
- DATASET_DIR
- DEVICE_TYPE
- DEVICE_IDS
-
- - -
- - -
-

QUANTIZATE

- RESTORE_NAMES
- INPUT_PRECISION
- OUTPUT_PRECISION
- DEVICE_TYPE
- DEVICE_IDS
- SOFTMAX_LAMBD
- SHIFT_BITS
- THRESHOLDS
- ATTRIBUTE_DEPS
- OSCALE_MAPS
-
- - -
- - -
-

EVALUATE

- BATCH
- DEVICE_TYPE
- DEVICE_IDS
- ITER_NUM
-
- - -
- - -
-

COMPLIE

- BATCH
- DUMP_DIR
- DEVICE_TYPE
- DEVICE_IDS
-
- - {{ room_name|json_script:"room-name" }} - - - - - - - - - diff --git a/python/mrt/frontend/web/urls.py b/python/mrt/frontend/web/urls.py deleted file mode 100644 index ac730d5d..00000000 --- a/python/mrt/frontend/web/urls.py +++ /dev/null @@ -1,22 +0,0 @@ -"""web URL Configuration - -The `urlpatterns` list routes URLs to views. For more information please see: - https://docs.djangoproject.com/en/3.2/topics/http/urls/ -Examples: -Function views - 1. Add an import: from my_app import views - 2. Add a URL to urlpatterns: path('', views.home, name='home') -Class-based views - 1. Add an import: from other_app.views import Home - 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') -Including another URLconf - 1. Import the include() function: from django.urls import include, path - 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) -""" -from django.urls import path - -from . import views - -urlpatterns = [ - path('/', views.room) -] diff --git a/python/mrt/frontend/web/views.py b/python/mrt/frontend/web/views.py deleted file mode 100644 index 70d876df..00000000 --- a/python/mrt/frontend/web/views.py +++ /dev/null @@ -1,4 +0,0 @@ -from django.shortcuts import render - -def room(request, room_name): - return render(request, "room.html", {"room_name": room_name}) diff --git a/python/mrt/frontend/web/wsgi.py b/python/mrt/frontend/web/wsgi.py deleted file mode 100644 index d092b9ab..00000000 --- a/python/mrt/frontend/web/wsgi.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -WSGI config for web project. - -It exposes the WSGI callable as a module-level variable named ``application``. 
- -For more information on this file, see -https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ -""" - -import os - -from django.core.wsgi import get_wsgi_application - -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.settings') - -application = get_wsgi_application() From 4afe38aa43a0e4bef0b939e52178dca75d020d5a Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 25 Feb 2022 14:48:17 +0800 Subject: [PATCH 112/120] update doc index --- docs/mrt/api/V3.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/mrt/api/V3.rst b/docs/mrt/api/V3.rst index 54709b9b..9c0183b3 100644 --- a/docs/mrt/api/V3.rst +++ b/docs/mrt/api/V3.rst @@ -41,7 +41,12 @@ _______________ mrt.V3.evaluate _______________ .. automodule:: mrt.V3.evaluate +.. autofunction:: mrt.V3.evaluate.forward +.. autofunction:: mrt.V3.evaluate.get_evaluation_info .. autofunction:: mrt.V3.evaluate.evaluate +.. autofunction:: mrt.V3.evaluate.get_ctx_eval +.. autofunction:: mrt.V3.evaluate.inference_original_model +.. autofunction:: mrt.V3.evaluate.inference_quantized_model mrt.V3.mrt_compile __________________ From abc992aaddadea9cc611ca7b7f1439bcb4770e7b Mon Sep 17 00:00:00 2001 From: ryt Date: Mon, 28 Feb 2022 18:22:48 +0800 Subject: [PATCH 113/120] [doc] tfm_ops FullyConnected.reduce ElemwiseMul.rewrite Activation.rewrite --- docs/mrt/api/operator.rst | 9 ++++++--- python/mrt/tfm_ops.py | 33 +++++++++++++++++++++++++++++---- 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/docs/mrt/api/operator.rst b/docs/mrt/api/operator.rst index 5e9dbb15..f6ea7d8b 100644 --- a/docs/mrt/api/operator.rst +++ b/docs/mrt/api/operator.rst @@ -91,6 +91,7 @@ MxNet Supported Operators are listed as below: + :py:class:`ElemwiseAdd ` + :py:class:`ElemwiseSub ` + + :py:class:`ElemwiseMul ` + :py:class:`Clip ` + :py:class:`negative ` + :py:class:`abs ` @@ -137,7 +138,7 @@ MxNet Supported Operators are listed as below: :members: rewrite .. 
autoclass:: mrt.tfm_ops.Activation - :members: validate + :members: validate, rewrite .. autoclass:: mrt.tfm_ops.Convolution :members: rewrite, quantize @@ -170,7 +171,7 @@ MxNet Supported Operators are listed as below: :members: .. autoclass:: mrt.tfm_ops.FullyConnected - :members: rewrite, quantize + :members: rewrite, reduce, quantize .. autoclass:: mrt.tfm_ops.Sigmoid :members: quantize @@ -262,10 +263,12 @@ MxNet Supported Operators are listed as below: .. autoclass:: mrt.tfm_ops.ElemwiseAdd :members: fuse_transpose, quantize - .. autoclass:: mrt.tfm_ops.ElemwiseSub :members: fuse_transpose, quantize +.. autoclass:: mrt.tfm_ops.ElemwiseMul + :members: rewrite + .. autoclass:: mrt.tfm_ops.Dropout :members: fuse_transpose diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index 54670322..9be3745c 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -234,6 +234,11 @@ def fuse_transpose(self, op, **kwargs): return op def rewrite(self, op, **kwargs): + """ Equivalent transform of rewrite operator + Only applies when the attribute act_type equals to relu or sigmoid, + which indicates that rewrite could be directly tranformed into + the corresponding operator. + """ attr = op.list_attr() if attr['act_type'] == Relu.op_name: op = Relu().rewrite(op, **kwargs) @@ -671,7 +676,26 @@ def rewrite(self, op, **kwargs): return op def reduce(self, op, **kwargs): - # TODO(ryt.dev) documentation + """ Dimension reduction function considering + both flatten cases. + + Denote the input as X and transformed operator as Y. + If flatten is true, only one reduction of the high dimension input + to 2 dimension is needed. + + .. math:: + RX = reshape(X) + Y = FullyConnected(RX) + + If flatten is false, firstly one reduction of the input to 2 + dimension is needed. After FullyConnected op, the ouput should + be reshaped to the correct output shape. + + .. 
math:: + RX = reshape(X) + out = FullyConnected(RX) + Y = reshape(out) + """ name = op.attr('name') attr, childs = op.list_attr(), sym_iter(op.get_children()) cns = [c.attr('name') for c in childs] @@ -1979,12 +2003,13 @@ def fuse_transpose(self, op, **kwargs): return _ft_multi_input(op) def rewrite(self, op, **kwargs): + """ validate the infer_shapes of lhs and rhs must be the same + thus this op could be rewrite into broadcast_mul + corresponding cvm op would be optimized at compile time + """ name, op_name = op.attr('name'), op.attr('op_name') childs = sym_iter(op.get_children()) - # validate the infer_shapes of lhs and rhs must be the same - # thus this op could be rewrite into broadcast_mul - # corresponding cvm op would be optimized at compile time ln, rn = [c.attr('name') for c in childs] infer_shapes = kwargs['infer_shapes'] lshp, rshp = infer_shapes[ln], infer_shapes[rn] From ca6a688c8a76ec67d5853a690bdcc42f28b646f8 Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 23 Apr 2022 14:19:17 +0800 Subject: [PATCH 114/120] add list_model.py; quantization resnet101_v1, resnet152_v1 resnet18_v1 resnet34_v1 resnet34_v2, resnet152_v2, resnet18_v1b, resnet34_v1b, resnet50_v1b, resnet50_v1b, resnet101_v1b, etc...; tfm_ops.mean.rewrite/fuse_transpose and corresponding unit tests --- python/mrt/cvm_op.py | 39 +++ python/mrt/tfm_ops.py | 246 ++++++++++++++++-- .../model_zoo/{ => coco}/yolov5n-train.yaml | 0 .../mrt/model_zoo/{ => imagenet}/alexnet.yaml | 0 .../{ => imagenet}/cifar_resnet20_v1.yaml | 0 tests/mrt/model_zoo/imagenet/densenet121.yaml | 17 ++ .../model_zoo/{ => imagenet}/densenet161.yaml | 0 tests/mrt/model_zoo/imagenet/densenet169.yaml | 17 ++ tests/mrt/model_zoo/imagenet/densenet201.yaml | 17 ++ .../mobilenet0.5.yaml} | 7 +- .../mrt/model_zoo/imagenet/mobilenet0.75.yaml | 18 ++ .../{ => imagenet}/mobilenet1_0.yaml | 0 .../model_zoo/imagenet/mobilenetv2_0.5.yaml | 18 ++ .../model_zoo/imagenet/mobilenetv2_0.75.yaml | 18 ++ .../{ => imagenet}/mobilenetv2_1.0.yaml | 
0 .../mrt/model_zoo/imagenet/resnet101_v1.yaml | 21 ++ .../mrt/model_zoo/imagenet/resnet101_v1b.yaml | 18 ++ .../imagenet/resnet101_v1b_kinetics400.yaml | 18 ++ .../mrt/model_zoo/imagenet/resnet101_v1c.yaml | 21 ++ .../mrt/model_zoo/imagenet/resnet101_v1d.yaml | 21 ++ .../imagenet/resnet101_v1d_0.73.yaml | 21 ++ .../imagenet/resnet101_v1d_0.76.yaml | 21 ++ .../mrt/model_zoo/imagenet/resnet101_v1s.yaml | 21 ++ .../mrt/model_zoo/imagenet/resnet152_v1.yaml | 21 ++ .../mrt/model_zoo/imagenet/resnet152_v1b.yaml | 21 ++ .../imagenet/resnet152_v1b_kinetics400.yaml | 21 ++ .../mrt/model_zoo/imagenet/resnet152_v1c.yaml | 21 ++ .../mrt/model_zoo/imagenet/resnet152_v1d.yaml | 21 ++ .../mrt/model_zoo/imagenet/resnet152_v1s.yaml | 21 ++ .../mrt/model_zoo/imagenet/resnet152_v2.yaml | 21 ++ .../model_zoo/{ => imagenet}/resnet18_v1.yaml | 0 .../mrt/model_zoo/imagenet/resnet18_v1b.yaml | 21 ++ .../{ => imagenet}/resnet18_v1b_0.89.yaml | 0 tests/mrt/model_zoo/imagenet/resnet34_v1.yaml | 18 ++ .../mrt/model_zoo/imagenet/resnet34_v1b.yaml | 18 ++ tests/mrt/model_zoo/imagenet/resnet34_v2.yaml | 18 ++ .../model_zoo/{ => imagenet}/resnet50_v1.yaml | 0 .../mrt/model_zoo/imagenet/resnet50_v1b.yaml | 18 ++ .../imagenet/resnet50_v1b_kinetics400.yaml | 18 ++ .../imagenet/resnet50_v1b_sthsthv2.yaml | 18 ++ .../mrt/model_zoo/imagenet/resnet50_v1c.yaml | 18 ++ .../mrt/model_zoo/imagenet/resnet50_v1d.yaml | 18 ++ .../model_zoo/imagenet/resnet50_v1d_0.11.yaml | 18 ++ .../model_zoo/imagenet/resnet50_v1d_0.37.yaml | 18 ++ .../model_zoo/imagenet/resnet50_v1d_0.48.yaml | 18 ++ .../model_zoo/imagenet/resnet50_v1d_0.86.yaml | 18 ++ .../mrt/model_zoo/imagenet/resnet50_v1s.yaml | 18 ++ .../model_zoo/{ => imagenet}/resnet50_v2.yaml | 0 .../{ => imagenet}/shufflenet_v1.yaml | 0 .../{ => imagenet}/squeezenet1.0.yaml | 0 .../mrt/model_zoo/imagenet/squeezenet1.1.yaml | 18 ++ .../{ => imagenet}/tf_inception_v3.yaml | 0 .../tf_mobilenet_v1_0.25_224_lite.yaml | 0 tests/mrt/model_zoo/imagenet/vgg11.yaml | 18 ++ 
tests/mrt/model_zoo/imagenet/vgg11_bn.yaml | 17 ++ tests/mrt/model_zoo/imagenet/vgg13.yaml | 17 ++ tests/mrt/model_zoo/imagenet/vgg13_bn.yaml | 17 ++ tests/mrt/model_zoo/imagenet/vgg16.yaml | 17 ++ tests/mrt/model_zoo/imagenet/vgg16_bn.yaml | 17 ++ tests/mrt/model_zoo/{ => imagenet}/vgg19.yaml | 0 tests/mrt/model_zoo/imagenet/vgg19_bn.yaml | 17 ++ tests/mrt/model_zoo/imagenet/xception.yaml | 20 ++ .../inception30_pool2_fwd/inceptionv3.yaml | 20 ++ .../mean/resnet18_v1b_kinetics400.yaml | 20 ++ .../mean/resnet34_v1b_kinetics400.yaml | 18 ++ .../model_zoo/mean/resnet50_v1b_hmdb51.yaml | 18 ++ tests/mrt/model_zoo/mean/vgg16_ucf101.yaml | 17 ++ tests/mrt/model_zoo/new.yaml | 21 ++ ..._raw_qd_animal10_2_cifar_resnet20_v2.yaml} | 0 ...mentation_epoch-4-0.8164531394275162.yaml} | 0 .../model_zoo/reshape/resnet50_v1b_gn.yaml | 18 ++ tests/mrt/model_zoo/{ => trec}/trec.yaml | 0 tests/mrt/model_zoo/tuning/mobilenet0.25.yaml | 18 ++ tests/mrt/model_zoo/tuning/mobilenet1.0.yaml | 17 ++ .../model_zoo/tuning/mobilenetv2_0.25.yaml | 18 ++ .../model_zoo/tuning/mobilenetv3_large.yaml | 17 ++ .../model_zoo/tuning/mobilenetv3_small.yaml | 18 ++ tests/mrt/model_zoo/tuning/resnet101_v2.yaml | 21 ++ tests/mrt/model_zoo/tuning/resnet18_v2.yaml | 18 ++ .../{ => voc}/ssd_512_mobilenet1.0_voc.yaml | 0 .../ssd_512_resnet50_v1_voc.yaml} | 0 .../{ => voc}/yolo3_darknet53_voc.yaml | 0 .../{ => voc}/yolo3_mobilenet1.0_voc.yaml | 0 83 files changed, 1348 insertions(+), 30 deletions(-) rename tests/mrt/model_zoo/{ => coco}/yolov5n-train.yaml (100%) rename tests/mrt/model_zoo/{ => imagenet}/alexnet.yaml (100%) rename tests/mrt/model_zoo/{ => imagenet}/cifar_resnet20_v1.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/densenet121.yaml rename tests/mrt/model_zoo/{ => imagenet}/densenet161.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/densenet169.yaml create mode 100644 tests/mrt/model_zoo/imagenet/densenet201.yaml rename tests/mrt/model_zoo/{mnist.yaml => 
imagenet/mobilenet0.5.yaml} (73%) create mode 100644 tests/mrt/model_zoo/imagenet/mobilenet0.75.yaml rename tests/mrt/model_zoo/{ => imagenet}/mobilenet1_0.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/mobilenetv2_0.5.yaml create mode 100644 tests/mrt/model_zoo/imagenet/mobilenetv2_0.75.yaml rename tests/mrt/model_zoo/{ => imagenet}/mobilenetv2_1.0.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/resnet101_v1.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet101_v1b.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet101_v1b_kinetics400.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet101_v1c.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet101_v1d.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet101_v1d_0.73.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet101_v1d_0.76.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet101_v1s.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet152_v1.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet152_v1b.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet152_v1b_kinetics400.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet152_v1c.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet152_v1d.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet152_v1s.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet152_v2.yaml rename tests/mrt/model_zoo/{ => imagenet}/resnet18_v1.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/resnet18_v1b.yaml rename tests/mrt/model_zoo/{ => imagenet}/resnet18_v1b_0.89.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/resnet34_v1.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet34_v1b.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet34_v2.yaml rename tests/mrt/model_zoo/{ => imagenet}/resnet50_v1.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/resnet50_v1b.yaml create mode 100644 
tests/mrt/model_zoo/imagenet/resnet50_v1b_kinetics400.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet50_v1b_sthsthv2.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet50_v1c.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet50_v1d.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet50_v1d_0.11.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet50_v1d_0.37.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet50_v1d_0.48.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet50_v1d_0.86.yaml create mode 100644 tests/mrt/model_zoo/imagenet/resnet50_v1s.yaml rename tests/mrt/model_zoo/{ => imagenet}/resnet50_v2.yaml (100%) rename tests/mrt/model_zoo/{ => imagenet}/shufflenet_v1.yaml (100%) rename tests/mrt/model_zoo/{ => imagenet}/squeezenet1.0.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/squeezenet1.1.yaml rename tests/mrt/model_zoo/{ => imagenet}/tf_inception_v3.yaml (100%) rename tests/mrt/model_zoo/{ => imagenet}/tf_mobilenet_v1_0.25_224_lite.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/vgg11.yaml create mode 100644 tests/mrt/model_zoo/imagenet/vgg11_bn.yaml create mode 100644 tests/mrt/model_zoo/imagenet/vgg13.yaml create mode 100644 tests/mrt/model_zoo/imagenet/vgg13_bn.yaml create mode 100644 tests/mrt/model_zoo/imagenet/vgg16.yaml create mode 100644 tests/mrt/model_zoo/imagenet/vgg16_bn.yaml rename tests/mrt/model_zoo/{ => imagenet}/vgg19.yaml (100%) create mode 100644 tests/mrt/model_zoo/imagenet/vgg19_bn.yaml create mode 100644 tests/mrt/model_zoo/imagenet/xception.yaml create mode 100644 tests/mrt/model_zoo/inception30_pool2_fwd/inceptionv3.yaml create mode 100644 tests/mrt/model_zoo/mean/resnet18_v1b_kinetics400.yaml create mode 100644 tests/mrt/model_zoo/mean/resnet34_v1b_kinetics400.yaml create mode 100644 tests/mrt/model_zoo/mean/resnet50_v1b_hmdb51.yaml create mode 100644 tests/mrt/model_zoo/mean/vgg16_ucf101.yaml create mode 100644 tests/mrt/model_zoo/new.yaml rename 
tests/mrt/model_zoo/{qd10_resnetv1_20.yaml => quickdraw/quick_raw_qd_animal10_2_cifar_resnet20_v2.yaml} (100%) rename tests/mrt/model_zoo/{quickdraw.yaml => quickdraw/quickdraw_wlt_augmentation_epoch-4-0.8164531394275162.yaml} (100%) create mode 100644 tests/mrt/model_zoo/reshape/resnet50_v1b_gn.yaml rename tests/mrt/model_zoo/{ => trec}/trec.yaml (100%) create mode 100644 tests/mrt/model_zoo/tuning/mobilenet0.25.yaml create mode 100644 tests/mrt/model_zoo/tuning/mobilenet1.0.yaml create mode 100644 tests/mrt/model_zoo/tuning/mobilenetv2_0.25.yaml create mode 100644 tests/mrt/model_zoo/tuning/mobilenetv3_large.yaml create mode 100644 tests/mrt/model_zoo/tuning/mobilenetv3_small.yaml create mode 100644 tests/mrt/model_zoo/tuning/resnet101_v2.yaml create mode 100644 tests/mrt/model_zoo/tuning/resnet18_v2.yaml rename tests/mrt/model_zoo/{ => voc}/ssd_512_mobilenet1.0_voc.yaml (100%) rename tests/mrt/model_zoo/{ssd.yaml => voc/ssd_512_resnet50_v1_voc.yaml} (100%) rename tests/mrt/model_zoo/{ => voc}/yolo3_darknet53_voc.yaml (100%) rename tests/mrt/model_zoo/{ => voc}/yolo3_mobilenet1.0_voc.yaml (100%) diff --git a/python/mrt/cvm_op.py b/python/mrt/cvm_op.py index 9398a71d..9ccb7ce8 100644 --- a/python/mrt/cvm_op.py +++ b/python/mrt/cvm_op.py @@ -157,6 +157,24 @@ def forward(self, is_train, req, in_data, out_data, aux): def backward(self, req, out_grad, in_data, out_data, in_grad, aux): assert False +class RightShiftV2(mx.operator.CustomOp): + def __init__(self, shift_bit, **kwargs): + super(RightShiftV2, self).__init__(**kwargs) + self.sb = int(shift_bit) + assert self.sb > 0 + + def forward(self, is_train, req, in_data, out_data, aux): + assert is_train == False + X = in_data[0] + out = X.round() + if self.sb > 1: + out = out / 2**self.sb + out = out.round() + self.assign(out_data[0], req[0], out) + + def backward(self, req, out_grad, in_data, out_data, in_grad, aux): + assert False + class Annotate(mx.operator.CustomOp): def __init__(self, in_prec, out_prec, 
anno_type): super(Annotate, self).__init__() @@ -270,6 +288,27 @@ def infer_type(self, in_type): def create_operator(self, ctx, shapes, dtypes): return RightShift(self.precision, self.shift_bit) +@mx.operator.register("right_shift") +class RightShiftV2Prop(mx.operator.CustomOpProp): + """ MxNet right_shift operator property class. + """ + def __init__(self, shift_bit=0): + self.shift_bit = shift_bit + super(RightShiftV2Prop, self).__init__(need_top_grad=False) + def list_arguments(self): + return ['data'] + def list_outputs(self): + return ['output'] + def infer_shape(self, in_shape): + X_shape = in_shape[0] + out_shape = in_shape[0] + return [X_shape], [out_shape], [] + def infer_type(self, in_type): + X_type = in_type[0] + return [X_type], [X_type], [] + def create_operator(self, ctx, shapes, dtypes): + return RightShiftV2(self.shift_bit) + @mx.operator.register("cvm_lut") class LUTProp(mx.operator.CustomOpProp): """ MxNet cvm_lut operator property class. diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index 9be3745c..bbe74b95 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -1449,41 +1449,42 @@ def validate(self, op, **kwargs): return op def fuse_transpose(self, op, **kwargs): - """ Customized fuse_transpose pass Introduction. + return fuse_transpose_reduce(op, kwargs["infer_shapes"]) + # """ Customized fuse_transpose pass Introduction. - Suppose 'keepdims' is True and the input is 'Transpose'. + # Suppose 'keepdims' is True and the input is 'Transpose'. - .. code-block:: none + # .. code-block:: none - cX - | - Transpose(axis) - | - sum(dims1) + # cX + # | + # Transpose(axis) + # | + # sum(dims1) - then, the graph can be transformed into: + # then, the graph can be transformed into: - .. code-block:: none + # .. code-block:: none - cX - | - Sum(dims2) + # cX + # | + # Sum(dims2) - where: + # where: - .. code-block:: python + # .. 
code-block:: python - dims2 = [axis[i] for i in dims1] - """ - name, attr, X = op.attr('name'), op.list_attr(), op.get_children()[0] - xshp = kwargs['infer_shapes'][X.attr('name')][get_entry_id(X)] - axis = get_attr(attr, 'axis', [i for i in range(len(xshp))]) - keepdims = get_attr(attr, 'keepdims', False) - if X.attr('op_name') == Transpose.op_name and not keepdims: - axes, op = get_attr(X.list_attr(), 'axes'), X.get_children()[0] - axis = [axes[i] for i in axis] - op = mx.sym.sum(op, axis=axis, keepdims=keepdims, name=name) - return op + # dims2 = [axis[i] for i in dims1] + # """ + # name, attr, X = op.attr('name'), op.list_attr(), op.get_children()[0] + # xshp = kwargs['infer_shapes'][X.attr('name')][get_entry_id(X)] + # axis = get_attr(attr, 'axis', [i for i in range(len(xshp))]) + # keepdims = get_attr(attr, 'keepdims', False) + # if X.attr('op_name') == Transpose.op_name and not keepdims: + # axes, op = get_attr(X.list_attr(), 'axes'), X.get_children()[0] + # axis = [axes[i] for i in axis] + # op = mx.sym.sum(op, axis=axis, keepdims=keepdims, name=name) + # return op def calculate_ops(self, op, **kwargs): infer_shapes = kwargs['infer_shapes'] @@ -3103,3 +3104,196 @@ def fusable_cvm_precision_attr(op): assert is_fusable_cvm_precision(op) attr = op.list_attr() return get_attr(attr, 'precision'), get_attr(attr, 'shift_bit', 0) + +def sum_and_rightshift(ops, axis, shift_bit): + nops = [] + while ops: + cop = ops.pop() + cop = mx.sym.sum( + cop, axis=axis, keepdims=True, name=N.n('sum')) + cop = mx.sym.Custom( + cop, shift_bit=shift_bit, name=N.n('custom'), + op_type='right_shift') + nops.append(cop) + ops = nops + return ops + + +@register_transformer("mean") +class Mean(Transformer): + def fuse_transpose(self, op, **kwargs): + return fuse_transpose_reduce(op, kwargs["infer_shapes"]) + def rewrite(self, op, **kwargs): + name = op.attr('name') + return self._decompose_axis(op, kwargs['infer_shapes']) + + def _decompose_axis(self, op, infer_shapes): + name = 
op.attr('name') + attr, childs = op.list_attr(), sym_iter(op.get_children()) + + axis = eval(attr['axis']) + if isinstance(axis, int): + axis = (axis,) + else: + assert isinstance(axis, tuple), (axis, type(axis)) + + keepdims = eval(attr.get('keepdims', 'False')) + + exclude = eval(attr.get('exclude', 'False')) + if exclude: + raise NotImplementedError + + op = childs[0] + shp = infer_shapes[op.attr('name')][get_entry_id(op)] + prod = int(nd.prod(nd.array([shp[ax] for ax in axis])).asscalar()) + power_value = int(math.log2(prod)) + assert 1< 1 + if prod <= MAXIMUM_SIZE: + op = mx.sym.sum( + op, axis=axis, keepdims=keepdims, name=N.n('sum')) + if prod == 1: + return op + op = mx.sym.Custom( + op, shift_bit=int(math.log2(prod)), name=name, + op_type='right_shift') + return op + + # TODO(ryt): select batch_axis + axis_set = set(axis) + oaxes = list(range(len(shp))) + naxes = sorted(axis) + axes = [ax for ax in oaxes if ax not in axis_set] + naxes + nshp = tuple( + [sz for ax,sz in enumerate(shp) if ax not in axis_set]) + \ + (prod,) + transposed = False + if axes != oaxes: + transposed = True + op = mx.sym.transpose(op, axes=axes, name=N.n('transpose')) + reshaped = False + if len(naxes) > 1: + reshaped = True + op = mx.sym.reshape(op, shape=nshp, name=N.n('reshape')) + ops = [] + eax = len(nshp) - 1 + for i in range(0, prod, MAXIMUM_SIZE): + cop = mx.sym.slice_axis( + op, axis=eax, begin=i, end=i+MAXIMUM_SIZE, + name=N.n('slice_axis')) + ops.append(cop) + sb = int(math.log2(MAXIMUM_SIZE)) + ops = sum_and_rightshift(ops, eax, sb) + while len(ops) > MAXIMUM_SIZE: + assert MAXIMUM_SIZE % len(ops) == 0 + nops = [] + for i in range(0, len(ops), MAXIMUM_SIZE): + cop = mx.sym.concat( + *ops[i:i+MAXIMUM_SIZE], dim=eax, name=N.n('concat')) + nops.append(cop) + ops = sum_and_rightshift(nops, eax, sb) + res_sz = len(ops) + assert res_sz > 1 + sb = int(math.log2(res_sz)) + op = mx.sym.concat(*ops, dim=eax, name=N.n('concat')) + if keepdims: + op = mx.sym.sum(op, axis=eax, 
keepdims=True, name=N.n('sum')) + if reshaped: + for i in range(1, len(naxes)): + op = mx.sym.expand_dims( + op, axis=i+len(nshp)-1, name=N.n('expand_dims')) + if transposed: + raxes = [0] * len(axes) + for i, ax in enumerate(axes): + raxes[ax] = i + op = mx.sym.tranpose(op, axes=raxes, name=N.n('transpose')) + else: + op = mx.sym.sum(op, axis=eax, name=N.n('sum')) + op = mx.sym.Custom( + op, shift_bit=sb, name=name, op_type='right_shift') + return op + +def fuse_transpose_reduce(op, infer_shapes): + """ fuse_tranpose for reduce op, with fuse_transpose as the only child op. + currently support `sum` and `mean`. + + .. code-block:: none + + cX + | + Transpose(axis) + | + op(dims1) + + then, the graph can be transformed into: + + .. code-block:: none + + cX + | + op(dims2) + + where: + + .. code-block:: python + + dims2 = [axis[i] for i in dims1] + + if keepdims is true, we switch the order of reduce op and + transpose in an equivalent way, which could be an optimization for + cases like: + + .. code-block:: none + + cX + | + Transpose1 + | + reduce + | + Transpose2 + + which in the visit of Transpose2, could be further fused into: + + .. 
code-block:: none + cX + | + reduce + | + Transpose + """ + name, op_name = op.attr('name'), op.attr('op_name') + shp = infer_shapes[name][get_entry_id(op)] + if op_name not in [Sum.op_name, Mean.op_name]: + return op + attr, X = op.list_attr(), op.get_children()[0] + xopn = X.attr('op_name') + if xopn != Transpose.op_name: + return op + xshp = infer_shapes[X.attr('name')][get_entry_id(X)] + axis = get_attr(attr, 'axis', [i for i in range(len(xshp))]) + axes, cX = get_attr(X.list_attr(), 'axes'), X.get_children()[0] + naxis = [axes[i] for i in axis] + naxis_sorted = sorted(naxis) + keepdims = get_attr(attr, 'keepdims', False) + if keepdims: + op = get_mxnet_op(op_name)( + cX, axis=naxis_sorted, keepdims=True, name=N.n("reduce")) + op = mx.sym.transpose(op, axes=axes, name=name) + else: + naxis_set = set(naxis) + naxes = [ax for ax in axes if ax not in naxis_set] + naxes_dict = {ax:i for i,ax in enumerate(sorted(naxes))} + naxes = [naxes_dict[ax] for ax in naxes] + axes_ref = [i for i in range(len(shp))] + if naxes != axes_ref: + op = get_mxnet_op(op_name)( + cX, axis=naxis_sorted, keepdims=False, name=N.n("reduce")) + op = mx.sym.transpose(op, axes=naxes, name=name) + else: + op = get_mxnet_op(op_name)( + cX, axis=naxis_sorted, keepdims=False, name=name) + return op diff --git a/tests/mrt/model_zoo/yolov5n-train.yaml b/tests/mrt/model_zoo/coco/yolov5n-train.yaml similarity index 100% rename from tests/mrt/model_zoo/yolov5n-train.yaml rename to tests/mrt/model_zoo/coco/yolov5n-train.yaml diff --git a/tests/mrt/model_zoo/alexnet.yaml b/tests/mrt/model_zoo/imagenet/alexnet.yaml similarity index 100% rename from tests/mrt/model_zoo/alexnet.yaml rename to tests/mrt/model_zoo/imagenet/alexnet.yaml diff --git a/tests/mrt/model_zoo/cifar_resnet20_v1.yaml b/tests/mrt/model_zoo/imagenet/cifar_resnet20_v1.yaml similarity index 100% rename from tests/mrt/model_zoo/cifar_resnet20_v1.yaml rename to tests/mrt/model_zoo/imagenet/cifar_resnet20_v1.yaml diff --git 
a/tests/mrt/model_zoo/imagenet/densenet121.yaml b/tests/mrt/model_zoo/imagenet/densenet121.yaml new file mode 100644 index 00000000..a69d3ad9 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/densenet121.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: densenet121 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/densenet161.yaml b/tests/mrt/model_zoo/imagenet/densenet161.yaml similarity index 100% rename from tests/mrt/model_zoo/densenet161.yaml rename to tests/mrt/model_zoo/imagenet/densenet161.yaml diff --git a/tests/mrt/model_zoo/imagenet/densenet169.yaml b/tests/mrt/model_zoo/imagenet/densenet169.yaml new file mode 100644 index 00000000..ae58d4a8 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/densenet169.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: densenet169 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/imagenet/densenet201.yaml b/tests/mrt/model_zoo/imagenet/densenet201.yaml new file mode 100644 index 00000000..4c1f3dbf --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/densenet201.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: densenet201 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 1000 diff --git a/tests/mrt/model_zoo/mnist.yaml b/tests/mrt/model_zoo/imagenet/mobilenet0.5.yaml similarity index 73% rename from tests/mrt/model_zoo/mnist.yaml rename to 
tests/mrt/model_zoo/imagenet/mobilenet0.5.yaml index 36ea196f..eace32ae 100644 --- a/tests/mrt/model_zoo/mnist.yaml +++ b/tests/mrt/model_zoo/imagenet/mobilenet0.5.yaml @@ -1,10 +1,9 @@ COMMON: - MODEL_NAME: mnist_dapp -PREPARE: - INPUT_SHAPE: [-1,1,28,28] + MODEL_NAME: mobilenet0.5 CALIBRATE: NUM_CALIB: 1 - DATASET_NAME: mnist + DATASET_NAME: imagenet + LAMBD: 10 DEVICE_TYPE: gpu DEVICE_IDS: [0] QUANTIZE: diff --git a/tests/mrt/model_zoo/imagenet/mobilenet0.75.yaml b/tests/mrt/model_zoo/imagenet/mobilenet0.75.yaml new file mode 100644 index 00000000..4e95aba7 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/mobilenet0.75.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: mobilenet0.75 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/mobilenet1_0.yaml b/tests/mrt/model_zoo/imagenet/mobilenet1_0.yaml similarity index 100% rename from tests/mrt/model_zoo/mobilenet1_0.yaml rename to tests/mrt/model_zoo/imagenet/mobilenet1_0.yaml diff --git a/tests/mrt/model_zoo/imagenet/mobilenetv2_0.5.yaml b/tests/mrt/model_zoo/imagenet/mobilenetv2_0.5.yaml new file mode 100644 index 00000000..9fe7a385 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/mobilenetv2_0.5.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: mobilenetv2_0.5 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/mobilenetv2_0.75.yaml b/tests/mrt/model_zoo/imagenet/mobilenetv2_0.75.yaml new file mode 100644 index 00000000..fe2fccbb --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/mobilenetv2_0.75.yaml @@ -0,0 +1,18 @@ 
+COMMON: + MODEL_NAME: mobilenetv2_0.75 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/mobilenetv2_1.0.yaml b/tests/mrt/model_zoo/imagenet/mobilenetv2_1.0.yaml similarity index 100% rename from tests/mrt/model_zoo/mobilenetv2_1.0.yaml rename to tests/mrt/model_zoo/imagenet/mobilenetv2_1.0.yaml diff --git a/tests/mrt/model_zoo/imagenet/resnet101_v1.yaml b/tests/mrt/model_zoo/imagenet/resnet101_v1.yaml new file mode 100644 index 00000000..4d38721c --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet101_v1.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet101_v1 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet101_v1b.yaml b/tests/mrt/model_zoo/imagenet/resnet101_v1b.yaml new file mode 100644 index 00000000..35376af5 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet101_v1b.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet101_v1b +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet101_v1b_kinetics400.yaml b/tests/mrt/model_zoo/imagenet/resnet101_v1b_kinetics400.yaml new file mode 100644 index 00000000..d6e91d47 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet101_v1b_kinetics400.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: 
resnet101_v1b_kinetics400 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet101_v1c.yaml b/tests/mrt/model_zoo/imagenet/resnet101_v1c.yaml new file mode 100644 index 00000000..ab956d72 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet101_v1c.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet101_v1c + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet101_v1d.yaml b/tests/mrt/model_zoo/imagenet/resnet101_v1d.yaml new file mode 100644 index 00000000..a6b80236 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet101_v1d.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet101_v1d + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet101_v1d_0.73.yaml b/tests/mrt/model_zoo/imagenet/resnet101_v1d_0.73.yaml new file mode 100644 index 00000000..dad850f9 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet101_v1d_0.73.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet101_v1d_0.73 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 
8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet101_v1d_0.76.yaml b/tests/mrt/model_zoo/imagenet/resnet101_v1d_0.76.yaml new file mode 100644 index 00000000..76baf78f --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet101_v1d_0.76.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet101_v1d_0.76 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet101_v1s.yaml b/tests/mrt/model_zoo/imagenet/resnet101_v1s.yaml new file mode 100644 index 00000000..bf52def1 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet101_v1s.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet101_v1s + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet152_v1.yaml b/tests/mrt/model_zoo/imagenet/resnet152_v1.yaml new file mode 100644 index 00000000..0572e2c5 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet152_v1.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet152_v1 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [3] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [3] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [3] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet152_v1b.yaml 
b/tests/mrt/model_zoo/imagenet/resnet152_v1b.yaml new file mode 100644 index 00000000..91fd34d6 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet152_v1b.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet152_v1b + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet152_v1b_kinetics400.yaml b/tests/mrt/model_zoo/imagenet/resnet152_v1b_kinetics400.yaml new file mode 100644 index 00000000..b48f5080 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet152_v1b_kinetics400.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet152_v1b_kinetics400 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet152_v1c.yaml b/tests/mrt/model_zoo/imagenet/resnet152_v1c.yaml new file mode 100644 index 00000000..fcc454ce --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet152_v1c.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet152_v1c + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet152_v1d.yaml b/tests/mrt/model_zoo/imagenet/resnet152_v1d.yaml new file mode 100644 index 00000000..52f7f3bd --- /dev/null +++ 
b/tests/mrt/model_zoo/imagenet/resnet152_v1d.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet152_v1d + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet152_v1s.yaml b/tests/mrt/model_zoo/imagenet/resnet152_v1s.yaml new file mode 100644 index 00000000..22577836 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet152_v1s.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet152_v1s + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet152_v2.yaml b/tests/mrt/model_zoo/imagenet/resnet152_v2.yaml new file mode 100644 index 00000000..c96fc118 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet152_v2.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet152_v1 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/resnet18_v1.yaml b/tests/mrt/model_zoo/imagenet/resnet18_v1.yaml similarity index 100% rename from tests/mrt/model_zoo/resnet18_v1.yaml rename to tests/mrt/model_zoo/imagenet/resnet18_v1.yaml diff --git a/tests/mrt/model_zoo/imagenet/resnet18_v1b.yaml b/tests/mrt/model_zoo/imagenet/resnet18_v1b.yaml new file mode 100644 index 
00000000..232ab1f5 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet18_v1b.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet18_v1b + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/resnet18_v1b_0.89.yaml b/tests/mrt/model_zoo/imagenet/resnet18_v1b_0.89.yaml similarity index 100% rename from tests/mrt/model_zoo/resnet18_v1b_0.89.yaml rename to tests/mrt/model_zoo/imagenet/resnet18_v1b_0.89.yaml diff --git a/tests/mrt/model_zoo/imagenet/resnet34_v1.yaml b/tests/mrt/model_zoo/imagenet/resnet34_v1.yaml new file mode 100644 index 00000000..8744886b --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet34_v1.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet34_v1 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet34_v1b.yaml b/tests/mrt/model_zoo/imagenet/resnet34_v1b.yaml new file mode 100644 index 00000000..4d547a69 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet34_v1b.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet34_v1b +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet34_v2.yaml b/tests/mrt/model_zoo/imagenet/resnet34_v2.yaml new file mode 100644 index 00000000..69caaaa6 --- /dev/null +++ 
b/tests/mrt/model_zoo/imagenet/resnet34_v2.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet34_v2 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/resnet50_v1.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1.yaml similarity index 100% rename from tests/mrt/model_zoo/resnet50_v1.yaml rename to tests/mrt/model_zoo/imagenet/resnet50_v1.yaml diff --git a/tests/mrt/model_zoo/imagenet/resnet50_v1b.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1b.yaml new file mode 100644 index 00000000..c30b43ae --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1b.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1b +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet50_v1b_kinetics400.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1b_kinetics400.yaml new file mode 100644 index 00000000..6390fcbd --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1b_kinetics400.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1b_kinetics400 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet50_v1b_sthsthv2.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1b_sthsthv2.yaml new file mode 100644 index 00000000..b450515c --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1b_sthsthv2.yaml @@ -0,0 +1,18 @@ 
+COMMON: + MODEL_NAME: resnet50_v1b_sthsthv2 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet50_v1c.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1c.yaml new file mode 100644 index 00000000..5e6b0f8f --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1c.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1c +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 25 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet50_v1d.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1d.yaml new file mode 100644 index 00000000..7a55254c --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1d.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1d +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 25 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.11.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.11.yaml new file mode 100644 index 00000000..bcc25389 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.11.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1d_0.11 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 25 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git 
a/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.37.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.37.yaml new file mode 100644 index 00000000..b095acc9 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.37.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1d_0.37 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 25 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.48.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.48.yaml new file mode 100644 index 00000000..0566d6e9 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.48.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1d_0.48 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 25 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.86.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.86.yaml new file mode 100644 index 00000000..0f7938bb --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1d_0.86.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1d_0.86 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 25 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/imagenet/resnet50_v1s.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v1s.yaml new file mode 100644 index 00000000..b9c0d6d7 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/resnet50_v1s.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1s +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 25 + 
DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/resnet50_v2.yaml b/tests/mrt/model_zoo/imagenet/resnet50_v2.yaml similarity index 100% rename from tests/mrt/model_zoo/resnet50_v2.yaml rename to tests/mrt/model_zoo/imagenet/resnet50_v2.yaml diff --git a/tests/mrt/model_zoo/shufflenet_v1.yaml b/tests/mrt/model_zoo/imagenet/shufflenet_v1.yaml similarity index 100% rename from tests/mrt/model_zoo/shufflenet_v1.yaml rename to tests/mrt/model_zoo/imagenet/shufflenet_v1.yaml diff --git a/tests/mrt/model_zoo/squeezenet1.0.yaml b/tests/mrt/model_zoo/imagenet/squeezenet1.0.yaml similarity index 100% rename from tests/mrt/model_zoo/squeezenet1.0.yaml rename to tests/mrt/model_zoo/imagenet/squeezenet1.0.yaml diff --git a/tests/mrt/model_zoo/imagenet/squeezenet1.1.yaml b/tests/mrt/model_zoo/imagenet/squeezenet1.1.yaml new file mode 100644 index 00000000..5101ea6e --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/squeezenet1.1.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: squeezenet1.1 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 13 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tf_inception_v3.yaml b/tests/mrt/model_zoo/imagenet/tf_inception_v3.yaml similarity index 100% rename from tests/mrt/model_zoo/tf_inception_v3.yaml rename to tests/mrt/model_zoo/imagenet/tf_inception_v3.yaml diff --git a/tests/mrt/model_zoo/tf_mobilenet_v1_0.25_224_lite.yaml b/tests/mrt/model_zoo/imagenet/tf_mobilenet_v1_0.25_224_lite.yaml similarity index 100% rename from tests/mrt/model_zoo/tf_mobilenet_v1_0.25_224_lite.yaml rename to 
tests/mrt/model_zoo/imagenet/tf_mobilenet_v1_0.25_224_lite.yaml diff --git a/tests/mrt/model_zoo/imagenet/vgg11.yaml b/tests/mrt/model_zoo/imagenet/vgg11.yaml new file mode 100644 index 00000000..66931157 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/vgg11.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: vgg11 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/imagenet/vgg11_bn.yaml b/tests/mrt/model_zoo/imagenet/vgg11_bn.yaml new file mode 100644 index 00000000..7e8376aa --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/vgg11_bn.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: vgg11_bn +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/imagenet/vgg13.yaml b/tests/mrt/model_zoo/imagenet/vgg13.yaml new file mode 100644 index 00000000..35e1b3b2 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/vgg13.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: vgg13 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/imagenet/vgg13_bn.yaml b/tests/mrt/model_zoo/imagenet/vgg13_bn.yaml new file mode 100644 index 00000000..5e744aa0 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/vgg13_bn.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: vgg13_bn +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + 
OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/imagenet/vgg16.yaml b/tests/mrt/model_zoo/imagenet/vgg16.yaml new file mode 100644 index 00000000..186442d1 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/vgg16.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: vgg16 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/imagenet/vgg16_bn.yaml b/tests/mrt/model_zoo/imagenet/vgg16_bn.yaml new file mode 100644 index 00000000..3b4f8e22 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/vgg16_bn.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: vgg16_bn +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/vgg19.yaml b/tests/mrt/model_zoo/imagenet/vgg19.yaml similarity index 100% rename from tests/mrt/model_zoo/vgg19.yaml rename to tests/mrt/model_zoo/imagenet/vgg19.yaml diff --git a/tests/mrt/model_zoo/imagenet/vgg19_bn.yaml b/tests/mrt/model_zoo/imagenet/vgg19_bn.yaml new file mode 100644 index 00000000..181bb278 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/vgg19_bn.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: vgg19_bn +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 64 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 2000 diff --git a/tests/mrt/model_zoo/imagenet/xception.yaml b/tests/mrt/model_zoo/imagenet/xception.yaml new file mode 100644 
index 00000000..bfc954d3 --- /dev/null +++ b/tests/mrt/model_zoo/imagenet/xception.yaml @@ -0,0 +1,20 @@ +COMMON: + MODEL_NAME: xception + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/inception30_pool2_fwd/inceptionv3.yaml b/tests/mrt/model_zoo/inception30_pool2_fwd/inceptionv3.yaml new file mode 100644 index 00000000..141aaa38 --- /dev/null +++ b/tests/mrt/model_zoo/inception30_pool2_fwd/inceptionv3.yaml @@ -0,0 +1,20 @@ +COMMON: + MODEL_NAME: inceptionv3 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/mean/resnet18_v1b_kinetics400.yaml b/tests/mrt/model_zoo/mean/resnet18_v1b_kinetics400.yaml new file mode 100644 index 00000000..d6f5c356 --- /dev/null +++ b/tests/mrt/model_zoo/mean/resnet18_v1b_kinetics400.yaml @@ -0,0 +1,20 @@ +COMMON: + MODEL_NAME: resnet18_v1b_kinetics400 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/mean/resnet34_v1b_kinetics400.yaml b/tests/mrt/model_zoo/mean/resnet34_v1b_kinetics400.yaml new file mode 100644 index 00000000..57783cac --- /dev/null +++ b/tests/mrt/model_zoo/mean/resnet34_v1b_kinetics400.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet34_v1b_kinetics400 
+CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/mean/resnet50_v1b_hmdb51.yaml b/tests/mrt/model_zoo/mean/resnet50_v1b_hmdb51.yaml new file mode 100644 index 00000000..3f95b407 --- /dev/null +++ b/tests/mrt/model_zoo/mean/resnet50_v1b_hmdb51.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1b_hmdb51 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/mean/vgg16_ucf101.yaml b/tests/mrt/model_zoo/mean/vgg16_ucf101.yaml new file mode 100644 index 00000000..76d2e7ba --- /dev/null +++ b/tests/mrt/model_zoo/mean/vgg16_ucf101.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: vgg16_ucf101 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/new.yaml b/tests/mrt/model_zoo/new.yaml new file mode 100644 index 00000000..4d38721c --- /dev/null +++ b/tests/mrt/model_zoo/new.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet101_v1 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/qd10_resnetv1_20.yaml 
b/tests/mrt/model_zoo/quickdraw/quick_raw_qd_animal10_2_cifar_resnet20_v2.yaml similarity index 100% rename from tests/mrt/model_zoo/qd10_resnetv1_20.yaml rename to tests/mrt/model_zoo/quickdraw/quick_raw_qd_animal10_2_cifar_resnet20_v2.yaml diff --git a/tests/mrt/model_zoo/quickdraw.yaml b/tests/mrt/model_zoo/quickdraw/quickdraw_wlt_augmentation_epoch-4-0.8164531394275162.yaml similarity index 100% rename from tests/mrt/model_zoo/quickdraw.yaml rename to tests/mrt/model_zoo/quickdraw/quickdraw_wlt_augmentation_epoch-4-0.8164531394275162.yaml diff --git a/tests/mrt/model_zoo/reshape/resnet50_v1b_gn.yaml b/tests/mrt/model_zoo/reshape/resnet50_v1b_gn.yaml new file mode 100644 index 00000000..51453e27 --- /dev/null +++ b/tests/mrt/model_zoo/reshape/resnet50_v1b_gn.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1b_gn +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/trec.yaml b/tests/mrt/model_zoo/trec/trec.yaml similarity index 100% rename from tests/mrt/model_zoo/trec.yaml rename to tests/mrt/model_zoo/trec/trec.yaml diff --git a/tests/mrt/model_zoo/tuning/mobilenet0.25.yaml b/tests/mrt/model_zoo/tuning/mobilenet0.25.yaml new file mode 100644 index 00000000..69ff23c7 --- /dev/null +++ b/tests/mrt/model_zoo/tuning/mobilenet0.25.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: mobilenet0.25 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tuning/mobilenet1.0.yaml b/tests/mrt/model_zoo/tuning/mobilenet1.0.yaml new file mode 100644 index 00000000..48617dca --- /dev/null 
+++ b/tests/mrt/model_zoo/tuning/mobilenet1.0.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: mobilenet1.0 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 diff --git a/tests/mrt/model_zoo/tuning/mobilenetv2_0.25.yaml b/tests/mrt/model_zoo/tuning/mobilenetv2_0.25.yaml new file mode 100644 index 00000000..062a09a3 --- /dev/null +++ b/tests/mrt/model_zoo/tuning/mobilenetv2_0.25.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: mobilenetv2_0.25 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tuning/mobilenetv3_large.yaml b/tests/mrt/model_zoo/tuning/mobilenetv3_large.yaml new file mode 100644 index 00000000..6f799df3 --- /dev/null +++ b/tests/mrt/model_zoo/tuning/mobilenetv3_large.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: mobilenetv3_large +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tuning/mobilenetv3_small.yaml b/tests/mrt/model_zoo/tuning/mobilenetv3_small.yaml new file mode 100644 index 00000000..b39d62a3 --- /dev/null +++ b/tests/mrt/model_zoo/tuning/mobilenetv3_small.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: mobilenetv3_small +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + LAMBD: 10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: 
[0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tuning/resnet101_v2.yaml b/tests/mrt/model_zoo/tuning/resnet101_v2.yaml new file mode 100644 index 00000000..75ea4b63 --- /dev/null +++ b/tests/mrt/model_zoo/tuning/resnet101_v2.yaml @@ -0,0 +1,21 @@ +COMMON: + MODEL_NAME: resnet101_v2 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tuning/resnet18_v2.yaml b/tests/mrt/model_zoo/tuning/resnet18_v2.yaml new file mode 100644 index 00000000..647fed32 --- /dev/null +++ b/tests/mrt/model_zoo/tuning/resnet18_v2.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet18_v2 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml b/tests/mrt/model_zoo/voc/ssd_512_mobilenet1.0_voc.yaml similarity index 100% rename from tests/mrt/model_zoo/ssd_512_mobilenet1.0_voc.yaml rename to tests/mrt/model_zoo/voc/ssd_512_mobilenet1.0_voc.yaml diff --git a/tests/mrt/model_zoo/ssd.yaml b/tests/mrt/model_zoo/voc/ssd_512_resnet50_v1_voc.yaml similarity index 100% rename from tests/mrt/model_zoo/ssd.yaml rename to tests/mrt/model_zoo/voc/ssd_512_resnet50_v1_voc.yaml diff --git a/tests/mrt/model_zoo/yolo3_darknet53_voc.yaml b/tests/mrt/model_zoo/voc/yolo3_darknet53_voc.yaml similarity index 100% rename from tests/mrt/model_zoo/yolo3_darknet53_voc.yaml rename to tests/mrt/model_zoo/voc/yolo3_darknet53_voc.yaml diff --git a/tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml b/tests/mrt/model_zoo/voc/yolo3_mobilenet1.0_voc.yaml 
similarity index 100% rename from tests/mrt/model_zoo/yolo3_mobilenet1.0_voc.yaml rename to tests/mrt/model_zoo/voc/yolo3_mobilenet1.0_voc.yaml From fa3292a5e561103019a2b178a3e25ece87302f5e Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 23 Apr 2022 14:21:25 +0800 Subject: [PATCH 115/120] upt unit tests --- tests/mrt/fuse_transpose/test_reduce.py | 159 ++++++++++++++++++++++++ tests/mrt/rewrite/test_mean.py | 116 +++++++++++++++++ 2 files changed, 275 insertions(+) create mode 100644 tests/mrt/fuse_transpose/test_reduce.py create mode 100644 tests/mrt/rewrite/test_mean.py diff --git a/tests/mrt/fuse_transpose/test_reduce.py b/tests/mrt/fuse_transpose/test_reduce.py new file mode 100644 index 00000000..72da6a94 --- /dev/null +++ b/tests/mrt/fuse_transpose/test_reduce.py @@ -0,0 +1,159 @@ +import unittest + +import mxnet as mx +from mxnet import ndarray as nd +import numpy as np + +from mrt import tfm_ops +from mrt.tfm_pass import fuse_transpose, attach_input_shape +from mrt.sym_utils import get_mxnet_op, sym_iter + +ctx = mx.gpu(0) + + +class TestFuseTransposeReduce(unittest.TestCase): + def assert_equal(self, a, b, places=10): + self.assertEqual(a.shape, b.shape) + nentry = int(nd.prod(nd.array(a.shape)).asscalar()) + res = (a-b).reshape(shape=(nentry,)).asnumpy() + norm = np.linalg.norm(res) + self.assertAlmostEqual(norm, 0.0, places=places) + + def cmp_sym(self, sym_1, sym_2): + op_name_1 = sym_1.attr("op_name") + op_name_2 = sym_2.attr("op_name") + if op_name_1 != op_name_2: + return False + attr_1 = sym_1.list_attr() + attr_2 = sym_2.list_attr() + if attr_1 != attr_2: + return False + childs_1 = sym_iter(sym_1.get_children()) + childs_2 = sym_iter(sym_2.get_children()) + if childs_1 is None and childs_2 is not None or \ + childs_1 is not None and childs_2 is None: + return False + if childs_1 is None and childs_2 is None: + return True + if len(childs_1) != len(childs_2): + return False + for i in range(len(childs_1)): + if not self.cmp_sym(childs_1[i], 
childs_2[i]): + return False + return True + + def get_random_data(self, shp, low=-1000.0, high=1000.0): + data = nd.random.uniform(low=low, high=high, shape=shp) + data = data.as_in_context(ctx) + return data + + def test_fuse_transpose_reduce_keepdims(self): + configs = [ + { + "shp_cx": (2,3,4,2,5), + "axes_1": [4,3,1,0,2], + "axis": [0,3,4], + "op_name": "sum", + "axes_2": [2,4,0,1,3], + "sym_ref": \ + mx.sym.transpose( + mx.sym.sum( + mx.sym.var("cx", shape=(2,3,4,2,5)), + axis=[0,2,4], keepdims=True), + axes=[1,2,4,3,0]), + }, + { + "shp_cx": (4,2,4,2,5,7), + "axes_1": [4,3,1,5,0,2], + "axis": [0,2,5], + "op_name": "mean", + "axes_2": [2,5,4,0,1,3], + "sym_ref": \ + mx.sym.transpose( + mx.sym.mean( + mx.sym.var("cx", shape=(4,2,4,2,5,7)), + axis=[1,2,4], keepdims=True), + axes=[1,2,0,4,3,5]), + }, + ] + for config in configs: + # generate data + shp_cx = config["shp_cx"] + data = self.get_random_data(shp_cx) + # original graph + cx = mx.sym.var("cx") + axes_1 = config["axes_1"] + tp1 = mx.sym.transpose(cx, axes=axes_1, name="var_tp1") + axis = config["axis"] + op_name = config["op_name"] + op = get_mxnet_op(op_name)( + tp1, axis=axis, name="var_"+op_name, keepdims=True) + axes_2 = config["axes_2"] + tp2 = mx.sym.transpose(op, axes=axes_2, name="var_tp2") + # original output + ex_1 = tp2.bind(ctx, {'cx': data}) + out_1 = ex_1.forward() + # fused graph + tp2n, _ = attach_input_shape(tp2, {}, {"cx":data.shape}) + tp2n, _ = fuse_transpose(tp2n, {}) + # fused output + ex_2 = tp2n.bind(ctx, {'cx': data}) + out_2 = ex_2.forward() + assert len(out_1) == len(out_2) == 1 + self.assert_equal(out_1[0], out_2[0], places=4) + sym_ref = config["sym_ref"] + self.assertEqual(self.cmp_sym(tp2n,sym_ref), True) + + def test_fuse_transpose_reduce(self): + configs = [ + { + "shp_cx": (2,3,4,2,5), + "axes": [4,3,1,0,2], + "axis": [0,3,4], + "op_name": "sum", + "sym_ref": \ + mx.sym.transpose( + mx.sym.sum( + mx.sym.var("cx", shape=(2,3,4,2,5)), + axis=[0,2,4], keepdims=False), + 
axes=[1,0]), + }, + { + "shp_cx": (4,2,4,2,5,7), + "axes": [4,0,1,3,5,2], + "axis": [0,2,5], + "op_name": "mean", + "sym_ref": \ + mx.sym.mean( + mx.sym.var("cx", shape=(4,2,4,2,5,7)), + axis=[1,2,4], keepdims=False), + }, + ] + for config in configs: + # generate data + shp_cx = config["shp_cx"] + data = self.get_random_data(shp_cx) + # original graph + cx = mx.sym.var("cx") + axes = config["axes"] + tp = mx.sym.transpose(cx, axes=axes, name="var_tp") + axis = config["axis"] + op_name = config["op_name"] + op = get_mxnet_op(op_name)( + tp, axis=axis, name="var_"+op_name, keepdims=False) + # original output + ex_1 = op.bind(ctx, {'cx': data}) + out_1 = ex_1.forward() + # fused graph + op, _ = attach_input_shape(op, {}, {"cx":data.shape}) + opn, _ = fuse_transpose(op, {}) + # fused output + ex_2 = opn.bind(ctx, {'cx': data}) + out_2 = ex_2.forward() + assert len(out_1) == len(out_2) == 1 + self.assert_equal(out_1[0], out_2[0], places=4) + sym_ref = config["sym_ref"] + self.assertEqual(self.cmp_sym(opn,sym_ref), True) + +if __name__ == "__main__": + unittest.main() diff --git a/tests/mrt/rewrite/test_mean.py b/tests/mrt/rewrite/test_mean.py new file mode 100644 index 00000000..58ed0c5c --- /dev/null +++ b/tests/mrt/rewrite/test_mean.py @@ -0,0 +1,116 @@ +import unittest + +import mxnet as mx +from mxnet import ndarray as nd +import numpy as np + +from mrt import tfm_ops, cvm_op +from mrt.tfm_pass import rewrite, attach_input_shape +from mrt.sym_utils import get_mxnet_op, sym_iter + +ctx = mx.gpu(0) + + +class TestRewriteMean(unittest.TestCase): + def cal_err_rel(self, a, b): + self.assertEqual(a.shape, b.shape) + nentry = int(nd.prod(nd.array(a.shape)).asscalar()) + numerator = np.linalg.norm((a-b).reshape(shape=(nentry,)).asnumpy()) + denominator = max( + np.linalg.norm(a.reshape(shape=(nentry,)).asnumpy()), + np.linalg.norm(a.reshape(shape=(nentry,)).asnumpy()) + ) + err = numerator / denominator + return err + + def assert_equal_rel_places(self, a, b, 
min_places=1, max_places=20): + err = self.cal_err_rel(a, b) + self.assertAlmostEqual(err, 0.0, places=min_places) + # search + places = min_places + while True: + places <<= 1 + try: + self.assertAlmostEqual(err, 0.0, places=places) + except AssertionError: + break + if places > max_places: + return max_places + l, r = min_places, places + while l < r-1: + m = (l+r) >> 1 + flag = True + try: + self.assertAlmostEqual(err, 0.0, places=m) + except AssertionError: + flag = False + if flag: + l = m + else: + r = m + return l + + def get_random_data_round(self, shp, low=-10000.0, high=10000.0): + data = nd.random.uniform( + low=low, high=high, shape=shp).round().astype('int') + data = data.as_in_context(ctx) + return data + + def test_rewrite(self): + configs = [ + { + "shp_cx": (10,4,512,5,2), + "low": -1000.0, + "high": 1000.0, + "axis": (1,2,4), + "keepdims": False, + }, + { + "shp_cx": (10,4,512,5,2), + "low": -10000.0, + "high": 10000.0, + "axis": (1,2,4), + "keepdims": False, + }, + { + "shp_cx": (10,4,512,5,2), + "low": -100000.0, + "high": 100000.0, + "axis": (1,2,4), + "keepdims": False, + }, + { + "shp_cx": (10,4,512,5,2), + "low": -1000000.0, + "high": 1000000.0, + "axis": (1,2,4), + "keepdims": False, + }, + ] + for config in configs: + # generate data + shp_cx = config["shp_cx"] + low = config["low"] + high = config["high"] + data = self.get_random_data_round(shp_cx, low=low, high=high) + # original graph + cx = mx.sym.var("cx") + axis = config["axis"] + keepdims = config["keepdims"] + op = mx.sym.mean( + cx, axis=axis, name="var_mean", keepdims=keepdims) + # original output + ex_1 = op.bind(ctx, {'cx': data}) + out_1 = [o.round() for o in ex_1.forward()] + # rewritten graph + nop, _ = attach_input_shape(op, {}, {"cx":data.shape}) + nop, _ = rewrite(nop, {}) + # fused output + ex_2 = nop.bind(ctx, {'cx': data}) + out_2 = ex_2.forward() + assert len(out_1) == len(out_2) == 1 + places = self.assert_equal_rel_places(out_1[0], out_2[0]) + print("config: {}, 
places: {}".format(config, places)) + +if __name__ == "__main__": + unittest.main() From caebd9003ef8fa3c1724bfbdc72268c1981725f2 Mon Sep 17 00:00:00 2001 From: ryt Date: Sat, 23 Apr 2022 14:22:32 +0800 Subject: [PATCH 116/120] list model --- tests/mrt/list_model.py | 46 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 tests/mrt/list_model.py diff --git a/tests/mrt/list_model.py b/tests/mrt/list_model.py new file mode 100644 index 00000000..2e1fb42b --- /dev/null +++ b/tests/mrt/list_model.py @@ -0,0 +1,46 @@ +import argparse +from os import path +import os + +import gluoncv as cv + +model_names_quantized_default = [] + +def get_model_names_quantized(fpath, model_names_quantized): + for fname in os.listdir(fpath): + nfpath = path.join(fpath, fname) + if path.isdir(nfpath): + get_model_names_quantized(nfpath, model_names_quantized) + else: + model_names_quantized.append(path.splitext(fname)[0]) + +dir_path = os.path.dirname(os.path.realpath(__file__)) +model_zoo = path.join(dir_path, "model_zoo") +get_model_names_quantized(model_zoo, model_names_quantized_default) +parser = argparse.ArgumentParser("") +parser.add_argument("-p", "--prefixes", nargs="*", type=str, default=[]) +parser.add_argument("-sq", "--show-quantized", action="store_true") +parser.add_argument("-sqo", "--show-quantized-only", action="store_true") +parser.add_argument("-mnq", "--model-names-quantized", nargs="*", type=str, default=model_names_quantized_default) + +if __name__ == "__main__": + args = parser.parse_args() + model_names_quantized = args.model_names_quantized + if args.show_quantized_only: + for model_name in model_names_quantized: + print(model_name) + else: + prefixes = set(args.prefixes) + show_quantized = args.show_quantized + supported_models = set(cv.model_zoo.get_model_list()) + for model_name in cv.model_zoo.pretrained_model_list(): + if model_name not in supported_models: + continue + if not show_quantized and model_name in 
model_names_quantized: + continue + if prefixes: + for prefix in prefixes: + if model_name.startswith(prefix): + print(model_name) + else: + print(model_name) From 5d5825479b4e3b0d96d0066a418832ce7f08e90c Mon Sep 17 00:00:00 2001 From: ryt Date: Fri, 6 May 2022 11:07:16 +0800 Subject: [PATCH 117/120] add new models using cifar10 as dataset --- python/mrt/tfm_ops.py | 8 +++----- .../model_zoo/cifar10/cifar_resnet110_v1.yaml | 19 ++++++++++++++++++ .../model_zoo/cifar10/cifar_resnet110_v2.yaml | 19 ++++++++++++++++++ .../model_zoo/cifar10/cifar_resnet20_v1.yaml | 19 ++++++++++++++++++ .../model_zoo/cifar10/cifar_resnet20_v2.yaml | 19 ++++++++++++++++++ .../model_zoo/cifar10/cifar_resnet56_v1.yaml | 19 ++++++++++++++++++ .../model_zoo/cifar10/cifar_resnet56_v2.yaml | 19 ++++++++++++++++++ .../cifar10/cifar_resnext29_16x64d.yaml | 19 ++++++++++++++++++ .../cifar10/cifar_wideresnet16_10.yaml | 19 ++++++++++++++++++ .../cifar10/cifar_wideresnet28_10.yaml | 19 ++++++++++++++++++ .../cifar10/cifar_wideresnet40_8.yaml | 19 ++++++++++++++++++ .../tune_org/resnet18_v1b_kinetics400.yaml | 20 +++++++++++++++++++ .../tune_org/resnet34_v1b_kinetics400.yaml | 18 +++++++++++++++++ .../tune_org/resnet50_v1b_hmdb51.yaml | 18 +++++++++++++++++ .../mrt/model_zoo/tune_org/vgg16_ucf101.yaml | 17 ++++++++++++++++ 15 files changed, 266 insertions(+), 5 deletions(-) create mode 100644 tests/mrt/model_zoo/cifar10/cifar_resnet110_v1.yaml create mode 100644 tests/mrt/model_zoo/cifar10/cifar_resnet110_v2.yaml create mode 100644 tests/mrt/model_zoo/cifar10/cifar_resnet20_v1.yaml create mode 100644 tests/mrt/model_zoo/cifar10/cifar_resnet20_v2.yaml create mode 100644 tests/mrt/model_zoo/cifar10/cifar_resnet56_v1.yaml create mode 100644 tests/mrt/model_zoo/cifar10/cifar_resnet56_v2.yaml create mode 100644 tests/mrt/model_zoo/cifar10/cifar_resnext29_16x64d.yaml create mode 100644 tests/mrt/model_zoo/cifar10/cifar_wideresnet16_10.yaml create mode 100644 
tests/mrt/model_zoo/cifar10/cifar_wideresnet28_10.yaml create mode 100644 tests/mrt/model_zoo/cifar10/cifar_wideresnet40_8.yaml create mode 100644 tests/mrt/model_zoo/tune_org/resnet18_v1b_kinetics400.yaml create mode 100644 tests/mrt/model_zoo/tune_org/resnet34_v1b_kinetics400.yaml create mode 100644 tests/mrt/model_zoo/tune_org/resnet50_v1b_hmdb51.yaml create mode 100644 tests/mrt/model_zoo/tune_org/vgg16_ucf101.yaml diff --git a/python/mrt/tfm_ops.py b/python/mrt/tfm_ops.py index bbe74b95..689c5ae8 100644 --- a/python/mrt/tfm_ops.py +++ b/python/mrt/tfm_ops.py @@ -722,6 +722,7 @@ def reduce(self, op, **kwargs): op = self._matrix_decomposition(op, params, infer_shapes) else: fc_name = N.n("reduced_fc") + # TODO(ryt): apply infer_batch_axis default_batch_axis = 0 batch_axis = \ kwargs.get("batch_axes", {}).get(name, default_batch_axis) @@ -3124,12 +3125,9 @@ class Mean(Transformer): def fuse_transpose(self, op, **kwargs): return fuse_transpose_reduce(op, kwargs["infer_shapes"]) def rewrite(self, op, **kwargs): - name = op.attr('name') - return self._decompose_axis(op, kwargs['infer_shapes']) - - def _decompose_axis(self, op, infer_shapes): name = op.attr('name') attr, childs = op.list_attr(), sym_iter(op.get_children()) + infer_shapes = kwargs['infer_shapes'] axis = eval(attr['axis']) if isinstance(axis, int): @@ -3162,7 +3160,7 @@ def _decompose_axis(self, op, infer_shapes): op_type='right_shift') return op - # TODO(ryt): select batch_axis + # TODO(ryt): apply infer_batch_axis axis_set = set(axis) oaxes = list(range(len(shp))) naxes = sorted(axis) diff --git a/tests/mrt/model_zoo/cifar10/cifar_resnet110_v1.yaml b/tests/mrt/model_zoo/cifar10/cifar_resnet110_v1.yaml new file mode 100644 index 00000000..8cfbd55b --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_resnet110_v1.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_resnet110_v1 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] 
+QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar10/cifar_resnet110_v2.yaml b/tests/mrt/model_zoo/cifar10/cifar_resnet110_v2.yaml new file mode 100644 index 00000000..7efe1e0c --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_resnet110_v2.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_resnet20_v2 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar10/cifar_resnet20_v1.yaml b/tests/mrt/model_zoo/cifar10/cifar_resnet20_v1.yaml new file mode 100644 index 00000000..b2eddb4a --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_resnet20_v1.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_resnet20_v1 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar10/cifar_resnet20_v2.yaml b/tests/mrt/model_zoo/cifar10/cifar_resnet20_v2.yaml new file mode 100644 index 00000000..7efe1e0c --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_resnet20_v2.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_resnet20_v2 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar10/cifar_resnet56_v1.yaml 
b/tests/mrt/model_zoo/cifar10/cifar_resnet56_v1.yaml new file mode 100644 index 00000000..11e69725 --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_resnet56_v1.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_resnet56_v1 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar10/cifar_resnet56_v2.yaml b/tests/mrt/model_zoo/cifar10/cifar_resnet56_v2.yaml new file mode 100644 index 00000000..5a4ca836 --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_resnet56_v2.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_resnet56_v2 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar10/cifar_resnext29_16x64d.yaml b/tests/mrt/model_zoo/cifar10/cifar_resnext29_16x64d.yaml new file mode 100644 index 00000000..f9267fe5 --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_resnext29_16x64d.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_resnext29_16x64d +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar10/cifar_wideresnet16_10.yaml b/tests/mrt/model_zoo/cifar10/cifar_wideresnet16_10.yaml new file mode 100644 index 00000000..8ee9a01a --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_wideresnet16_10.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: 
cifar_wideresnet16_10 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar10/cifar_wideresnet28_10.yaml b/tests/mrt/model_zoo/cifar10/cifar_wideresnet28_10.yaml new file mode 100644 index 00000000..4a9d4ca7 --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_wideresnet28_10.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_wideresnet28_10 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/cifar10/cifar_wideresnet40_8.yaml b/tests/mrt/model_zoo/cifar10/cifar_wideresnet40_8.yaml new file mode 100644 index 00000000..42d17d65 --- /dev/null +++ b/tests/mrt/model_zoo/cifar10/cifar_wideresnet40_8.yaml @@ -0,0 +1,19 @@ +COMMON: + MODEL_NAME: cifar_wideresnet40_8 +PREPARE: + INPUT_SHAPE: [-1,3,32,32] +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: cifar10 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tune_org/resnet18_v1b_kinetics400.yaml b/tests/mrt/model_zoo/tune_org/resnet18_v1b_kinetics400.yaml new file mode 100644 index 00000000..d6f5c356 --- /dev/null +++ b/tests/mrt/model_zoo/tune_org/resnet18_v1b_kinetics400.yaml @@ -0,0 +1,20 @@ +COMMON: + MODEL_NAME: resnet18_v1b_kinetics400 + VERBOSITY: debug + RUN_EVALUATE: True +CALIBRATE: + BATCH: 16 + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + 
INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tune_org/resnet34_v1b_kinetics400.yaml b/tests/mrt/model_zoo/tune_org/resnet34_v1b_kinetics400.yaml new file mode 100644 index 00000000..57783cac --- /dev/null +++ b/tests/mrt/model_zoo/tune_org/resnet34_v1b_kinetics400.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet34_v1b_kinetics400 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tune_org/resnet50_v1b_hmdb51.yaml b/tests/mrt/model_zoo/tune_org/resnet50_v1b_hmdb51.yaml new file mode 100644 index 00000000..3f95b407 --- /dev/null +++ b/tests/mrt/model_zoo/tune_org/resnet50_v1b_hmdb51.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: resnet50_v1b_hmdb51 +CALIBRATE: + NUM_CALIB: 1 + LAMBD: 16 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 160 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 500 diff --git a/tests/mrt/model_zoo/tune_org/vgg16_ucf101.yaml b/tests/mrt/model_zoo/tune_org/vgg16_ucf101.yaml new file mode 100644 index 00000000..76d2e7ba --- /dev/null +++ b/tests/mrt/model_zoo/tune_org/vgg16_ucf101.yaml @@ -0,0 +1,17 @@ +COMMON: + MODEL_NAME: vgg16_ucf101 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 10000 From acc5f12be5825fa6d0d35c045b894fac01fe2597 Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 25 May 2022 14:23:41 +0800 Subject: [PATCH 118/120] 
quantize inception model --- .../pooling/inceptionv3_kinetics400.yaml | 18 ++++++++++++++++++ .../inceptionv3_ucf101.yaml} | 7 +++---- .../tune_org/inceptionv1_kinetics400.yaml | 18 ++++++++++++++++++ 3 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 tests/mrt/model_zoo/pooling/inceptionv3_kinetics400.yaml rename tests/mrt/model_zoo/{imagenet/cifar_resnet20_v1.yaml => pooling/inceptionv3_ucf101.yaml} (71%) create mode 100644 tests/mrt/model_zoo/tune_org/inceptionv1_kinetics400.yaml diff --git a/tests/mrt/model_zoo/pooling/inceptionv3_kinetics400.yaml b/tests/mrt/model_zoo/pooling/inceptionv3_kinetics400.yaml new file mode 100644 index 00000000..92cf982f --- /dev/null +++ b/tests/mrt/model_zoo/pooling/inceptionv3_kinetics400.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: inceptionv3_kinetics400 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + # LAMBD: 12 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 50 diff --git a/tests/mrt/model_zoo/imagenet/cifar_resnet20_v1.yaml b/tests/mrt/model_zoo/pooling/inceptionv3_ucf101.yaml similarity index 71% rename from tests/mrt/model_zoo/imagenet/cifar_resnet20_v1.yaml rename to tests/mrt/model_zoo/pooling/inceptionv3_ucf101.yaml index b2eddb4a..58c55dbe 100644 --- a/tests/mrt/model_zoo/imagenet/cifar_resnet20_v1.yaml +++ b/tests/mrt/model_zoo/pooling/inceptionv3_ucf101.yaml @@ -1,10 +1,9 @@ COMMON: - MODEL_NAME: cifar_resnet20_v1 -PREPARE: - INPUT_SHAPE: [-1,3,32,32] + MODEL_NAME: inceptionv3_ucf101 CALIBRATE: NUM_CALIB: 1 - DATASET_NAME: cifar10 + DATASET_NAME: imagenet + # LAMBD: 12 DEVICE_TYPE: gpu DEVICE_IDS: [0] QUANTIZE: diff --git a/tests/mrt/model_zoo/tune_org/inceptionv1_kinetics400.yaml b/tests/mrt/model_zoo/tune_org/inceptionv1_kinetics400.yaml new file mode 100644 index 00000000..ebf9689d --- /dev/null +++ 
b/tests/mrt/model_zoo/tune_org/inceptionv1_kinetics400.yaml @@ -0,0 +1,18 @@ +COMMON: + MODEL_NAME: inceptionv1_kinetics400 +CALIBRATE: + NUM_CALIB: 1 + DATASET_NAME: imagenet + # LAMBD: 12 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +QUANTIZE: + INPUT_PRECISION: 8 + OUTPUT_PRECISION: 8 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] +EVALUATE: + BATCH: 16 + DEVICE_TYPE: gpu + DEVICE_IDS: [0] + ITER_NUM: 50 From caeb9612c665abcc1df72f1ff1abf0b4876dcbcc Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 25 May 2022 14:25:00 +0800 Subject: [PATCH 119/120] git repo initialize for yamrt --- python/mrt/yamrt/__init__.py | 4 + python/mrt/yamrt/autoquanter.py | 65 +++++++ python/mrt/yamrt/fquant/__init__.py | 2 + python/mrt/yamrt/fquant/common.py | 40 ++++ python/mrt/yamrt/fquant/proxy.py | 50 +++++ .../yamrt/fquant/uniform_affine_quantizer.py | 179 ++++++++++++++++++ python/mrt/yamrt/model/__init__.py | 0 python/mrt/yamrt/model/block/__init__.py | 0 python/mrt/yamrt/model/block/sym_model.py | 104 ++++++++++ python/mrt/yamrt/model/model.py | 70 +++++++ python/mrt/yamrt/modelhandler.py | 108 +++++++++++ python/mrt/yamrt/optimizer/__init__.py | 0 python/mrt/yamrt/optimizer/base.py | 69 +++++++ .../fquant/t_uniform_affine_quantizer.py | 99 ++++++++++ tests/yamrt/model/t_prediction_SCTF.py | 7 + tests/yamrt/utest.py | 17 ++ 16 files changed, 814 insertions(+) create mode 100644 python/mrt/yamrt/__init__.py create mode 100644 python/mrt/yamrt/autoquanter.py create mode 100644 python/mrt/yamrt/fquant/__init__.py create mode 100644 python/mrt/yamrt/fquant/common.py create mode 100644 python/mrt/yamrt/fquant/proxy.py create mode 100644 python/mrt/yamrt/fquant/uniform_affine_quantizer.py create mode 100644 python/mrt/yamrt/model/__init__.py create mode 100644 python/mrt/yamrt/model/block/__init__.py create mode 100644 python/mrt/yamrt/model/block/sym_model.py create mode 100644 python/mrt/yamrt/model/model.py create mode 100644 python/mrt/yamrt/modelhandler.py create mode 100644 
python/mrt/yamrt/optimizer/__init__.py create mode 100644 python/mrt/yamrt/optimizer/base.py create mode 100644 tests/yamrt/fquant/t_uniform_affine_quantizer.py create mode 100644 tests/yamrt/model/t_prediction_SCTF.py create mode 100644 tests/yamrt/utest.py diff --git a/python/mrt/yamrt/__init__.py b/python/mrt/yamrt/__init__.py new file mode 100644 index 00000000..b2c89f34 --- /dev/null +++ b/python/mrt/yamrt/__init__.py @@ -0,0 +1,4 @@ +from .fquant import * +from .modelhandler import ModelHandler, MxnetModelHandler +from mrt.transformer import * +from .autoquanter import * diff --git a/python/mrt/yamrt/autoquanter.py b/python/mrt/yamrt/autoquanter.py new file mode 100644 index 00000000..626c104a --- /dev/null +++ b/python/mrt/yamrt/autoquanter.py @@ -0,0 +1,65 @@ +# General +from .modelhandler import ModelHandler + +# Mxnet +from mrt.transformer import * +from mrt import tfm_pass as tpass + + +class AutoQuanter(object): + def __init__(self, model:ModelHandler): + self._model = model + + def prepare(self, *args, **kwargs): + raise NotImplementedError + + def ptq_pre(self, *args, **kwargs): + raise NotImplementedError + + def ptq_pre_param(self, *args, **kwargs): + raise NotImplementedError + + def ptq(self, *args, **kwargs): + raise NotImplementedError + + def ptq_collect(self, *args, **kwargs): + raise NotImplementedError + + #TODO: Add full APIs. + +class MxnetAutoQuanter(AutoQuanter): + def __init__(self, model:ModelHandler): + super(MxnetAutoQuanter, self).__init__(model) + + def prepare(self, input_shape:dict=None): #TODO: Turn configurable like ptq_pre. 
+ assert(input_shape is not None) + self._model.visit_model(tpass.name_duplicate_check) + if isinstance(input_shape, dict): + self._model.update_model(tpass.attach_input_shape, input_shape=input_shape) + self._model.update_model(tpass.fuse_multiple_inputs) + elif input_shape is not None: + model_inputs = self._model.visit_model(tpass.model_inputs) + assert model_inputs == 1, "Multiple inputs non-known shape" + self._model.update_model(tpass.input_name_replace) + self._model.update_model(tpass.attach_input_shape, {"data": input_shape}) + self._model.visit_model(tpass.infer_shape) + + self._model.update_model(tpass.fuse_multiple_outputs) + self._model.update_model(tpass.fuse_constant) + self._model.update_model(tpass.fuse_transpose) + self._model.update_model(tpass.rewrite) + self._model.update_model(tpass.fuse_constant) + self._model.update_model(tpass.params_unique) + + def ptq_pre(self, rule_list): + self._model.update_model(tpass.ptq_pre, rule_list=rule_list) + + def ptq_pre_param(self, config): + pass + + def ptq(self, ): + + raise NotImplementedError + + def ptq_collect(self): + raise NotImplementedError diff --git a/python/mrt/yamrt/fquant/__init__.py b/python/mrt/yamrt/fquant/__init__.py new file mode 100644 index 00000000..a8a51024 --- /dev/null +++ b/python/mrt/yamrt/fquant/__init__.py @@ -0,0 +1,2 @@ +from .uniform_affine_quantizer import * +from .proxy import * \ No newline at end of file diff --git a/python/mrt/yamrt/fquant/common.py b/python/mrt/yamrt/fquant/common.py new file mode 100644 index 00000000..3b216ab1 --- /dev/null +++ b/python/mrt/yamrt/fquant/common.py @@ -0,0 +1,40 @@ +import mxnet as mx + +QUANT_OP_PREFIX = "MRT_" + + + +class Wrapper(object): + """Basic Class for Quantization Info, Factory Functions, etc. 
+ """ + def __init__(self, op:mx.sym.Symbol, config:dict): + self._ori_op = op + self._config = config + self._attr_dict = {} + self._build_attr_dict() + self._op = None + self._param = None + + def _build_attr_dict(self): + raise NotImplementedError + + def new_op(self): + self._op = mx.sym.Custom(**self._attr_dict) + return self._op + + def op(self): + return self._op + + def attr(self, key:str): + if key in self._attr_dict: + return self._attr_dict[key] + return 'null' + + def key(self): + return self._attr_dict[name] + + def init_param(self, *args, **kwargs): + raise NotImplementedError + + def param(self)->dict: + return self._param diff --git a/python/mrt/yamrt/fquant/proxy.py b/python/mrt/yamrt/fquant/proxy.py new file mode 100644 index 00000000..a2e46f2b --- /dev/null +++ b/python/mrt/yamrt/fquant/proxy.py @@ -0,0 +1,50 @@ +from .common import * +import mxnet as mx + + +class ProxyWrapper(Wrapper): + def __init__(self, op, config): + super(ProxyWrapper, self).__init__(op, config) + + def _build_attr_dict(self): + # None Symble + self._attr_dict['op_type'] = self._config['q_op_name'] + self._attr_dict['name'] = f"{self._attr_dict['op_type']}_{self._ori_op.attr('name')}" + # Symbles + self._attr_dict['data'] = self._ori_op + self._attr_dict['qbias'] = mx.sym.Variable(**self._ori_op.list_attr(), name=f"{self._attr_dict['name']}_qbias") + + +class Proxy(mx.operator.CustomOp): + def __init__(self): + super(Proxy, self).__init__() + + def forward(self, is_train, req, in_data, out_data, aux): + self.assign(out_data[0], req[0], in_data[1]) + + def backward(self, req, out_grad, in_data, out_data, in_grad, aux): # Seems like checkpoint techs in pytorch + assert(req[0] == req[1]) + self.assign(in_grad[1], req[0], out_grad[0]) + + +@mx.operator.register(QUANT_OP_PREFIX + "Proxy") +class ProxyProp(mx.operator.CustomOpProp): + def __init__(self): + super(ProxyProp, self).__init__() + + def list_arguments(self): + return ['data', 'qbias'] + + def list_outputs(self): + 
return ['output'] + + def infer_shape(self, in_shape): + assert(len(in_shape)==2) + return [*in_shape], [in_shape[0]], [] + + def infer_type(self, in_type): + return [*in_type], [in_type[0]], [] + + def create_operator(self, ctx, shapes, dtypes): + return Proxy() + diff --git a/python/mrt/yamrt/fquant/uniform_affine_quantizer.py b/python/mrt/yamrt/fquant/uniform_affine_quantizer.py new file mode 100644 index 00000000..5e7335a4 --- /dev/null +++ b/python/mrt/yamrt/fquant/uniform_affine_quantizer.py @@ -0,0 +1,179 @@ +from .common import * +import mxnet as mx +import mxnet.ndarray as nd + +def _round_ste(x): + return mx.nd.stop_gradient(mx.nd.round(x) - x) + x + + +def _new_detached_nd(*args): + res = [] + for item in args: + res.append(item.detach()) + return res + + +class UniformAffineQuantizerWrapper(Wrapper): + _scale_methods = ['max_scale', 'max', 'mse'] + def __init__(self, op, config): + self.channel_wise = False + self.scale_method = config['scale_method'] if 'scale_method' in config else _scale_methods[0] + super(UniformAffineQuantizerWrapper, self).__init__(op, config) + self.delta_nd = None + self.delta_op = None + self.zero_point_nd = None + self.zero_point_op = None + + def _build_attr_dict(self): + assert(self._config['q_op_name'] not in self._ori_op.attr('name')) + # None Symble + self._attr_dict['op_type'] = self._config['q_op_name'] + self._attr_dict['name'] = f"{self._attr_dict['op_type']}_{self._ori_op.attr('name')}" + self._attr_dict['n_bits'] = self._config['n_bits'] + self.channel_wise = self._config['channel_wise'] + # Symbles + self._attr_dict['data'] = self._ori_op + if not self.channel_wise: + self.delta_op = mx.sym.Variable(f"{self._attr_dict['name']}_delta", shape=(1)) + self.zero_point_op = mx.sym.Variable(f"{self._attr_dict['name']}_zero_point", shape=(1)) + self._attr_dict['delta'] = self.delta_op + self._attr_dict['zero_point'] = self.zero_point_op + elif self.channel_wise: + # Assume the the fisrt dim of input data is channel + 
assert(len(self._ori_op.infer_shape()[1]) == 1) + ori_op_shape = self._ori_op.infer_shape()[1][0] + channel_wise_shape = (ori_op_shape[0], * ([1] * (len(ori_op_shape) - 1))) + self.delta_op = mx.sym.Variable( + f"{self._attr_dict['name']}_delta", + shape=channel_wise_shape) + self.zero_point_op = mx.sym.Variable( + f"{self._attr_dict['name']}_zero_point", + shape=channel_wise_shape) + self._attr_dict['delta'] = self.delta_op + self._attr_dict['zero_point'] = self.zero_point_op + else: + raise TypeError + + def init_param(self, data: nd.NDArray): + pass + + def _init_param_impl(self, input_data: nd.NDArray, channel_wise:bool=False): + delta, zero_point = None, None + if channel_wise: + x_clone = input_data.copy().detach() + n_channels = x_clone.shape[0] + if len(x.shape) == 4: + x_max = x_clone.abs().max(dim=-1)[0].max(dim=-1)[0].max(dim=-1)[0] + else: + x_max = x_clone.abs().max(dim=-1)[0] + delta = x_max.clone() + zero_point = x_max.clone() + # determine the scale and zero point channel-by-channel + for c in range(n_channels): + delta[c], zero_point[c] = self.init_quantization_scale(x_clone[c], channel_wise=False) + if len(x.shape) == 4: + delta = delta.view(-1, 1, 1, 1) + zero_point = zero_point.view(-1, 1, 1, 1) + else: + delta = delta.view(-1, 1) + zero_point = zero_point.view(-1, 1) + else: + if 'max' in self.scale_method: + x_min = min(x.min().item(), 0) + x_max = max(x.max().item(), 0) + if 'scale' in self.scale_method: + x_min = x_min * (self.n_bits + 2) / 8 + x_max = x_max * (self.n_bits + 2) / 8 + + x_absmax = max(abs(x_min), x_max) + if self.sym: + x_min, x_max = -x_absmax if x_min < 0 else 0, x_absmax + + delta = float(x_max - x_min) / (self.n_levels - 1) + if delta < 1e-8: + warnings.warn('Quantization range close to zero: [{}, {}]'.format(x_min, x_max)) + delta = 1e-8 + + zero_point = round(-x_min / delta) + delta = torch.tensor(delta).type_as(x) + + elif self.scale_method == 'mse': + # we always use symmetric quantization in mse mode + x_absmax = 
x.abs().max() + x_min = x.min().item() + best_score = 1000 + for i in range(80): + new_max = x_absmax * (1.0 - (i * 0.01)) + x_q = self.quantize(x, new_max) + # L_p norm minimization as described in LAPQ + # https://arxiv.org/abs/1911.07190 + score = lp_loss(x, x_q, p=2.4, reduction='all') + if score < best_score: + best_score = score + delta = (2 * new_max) / (2 ** self.n_bits - 1) + zero_point = (new_max / delta).round() if x_min < 0 else 0 + # re-calculate the scale delta if zero-point is not 0, + else: + raise NotImplementedError +# def init_param(self, data:nd.NDArray, scale_method:str='max'): +# assert scale_method in _scale_methods +# if self.channel_wise: +# data_abs = data.abs() +# data_max_per_channel = + + + +class UniformAffineQuantizer(mx.operator.CustomOp): + def __init__(self, n_bits): + super(UniformAffineQuantizer, self).__init__() + self.n_bits = n_bits + self.n_levels = 2 ** self.n_bits + + def forward(self, is_train, req, in_data, out_data, aux): + conv_weight, delta, zero_point = in_data[0], in_data[1], in_data[2] + x_int = _round_ste(conv_weight / delta) + zero_point #TODO: Zero point is hard to implemented in the Fully Quantized Conditions. 
+ x_quant = mx.nd.clip(x_int, 0, self.n_levels - 1) + x_dequant = (x_quant - zero_point) * delta + self.assign(out_data[0], req[0], x_dequant) + + def backward(self, req, out_grad, in_data, out_data, in_grad, aux): # Seems like checkpoint techs in pytorch + conv_weight, delta, zero_point = _new_detached_nd(*in_data[:3])# in_data[0].copy().detach(), in_data[1].copy().detach(), in_data[2].copy().detach() + conv_weight.attach_grad() + delta.attach_grad() + zero_point.attach_grad() + with mx.autograd.record(): + x_int = _round_ste(conv_weight / delta) + zero_point + x_quant = mx.nd.clip(x_int, 0, self.n_levels - 1) + x_dequant = (x_quant - zero_point) * delta + x_dequant.backward(_new_detached_nd(out_grad[0])[0]) + + self.assign(in_grad[0], req[0], conv_weight.grad) + self.assign(in_grad[1], req[1], delta.grad) + self.assign(in_grad[2], req[2], zero_point.grad) + + +@mx.operator.register(QUANT_OP_PREFIX + "UniformAffineQuantizer") +class UniformAffineQuantizerProp(mx.operator.CustomOpProp): + def __init__(self, n_bits): + super(UniformAffineQuantizerProp, self).__init__() + n_bits = n_bits if type(n_bits) is int else int(n_bits) + + assert 2 <= n_bits <= 32, 'bitwidth not supported' + self.n_bits = n_bits + + def list_arguments(self): + return ['data', 'delta', 'zero_point'] + + def list_outputs(self): + return ['output'] + + def infer_shape(self, in_shape): + assert(len(in_shape)==3) + return [*in_shape], [in_shape[0]], [] + + def infer_type(self, in_type): + return [*in_type], [in_type[0]], [] + + def create_operator(self, ctx, shapes, dtypes): + return UniformAffineQuantizer(n_bits=self.n_bits) + diff --git a/python/mrt/yamrt/model/__init__.py b/python/mrt/yamrt/model/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/yamrt/model/block/__init__.py b/python/mrt/yamrt/model/block/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/yamrt/model/block/sym_model.py b/python/mrt/yamrt/model/block/sym_model.py new 
file mode 100644 index 00000000..76ad4c15 --- /dev/null +++ b/python/mrt/yamrt/model/block/sym_model.py @@ -0,0 +1,104 @@ +# General +# None +# Mxnet Backend +import mxnet as mx +from mxnet import autograd +from mxnet import sym +from mxnet import nd +from ..model import * + +from mrt.sym_utils import * + + +class SymbolModel(Model): + def __init__(self, ctx=mx.cpu()): + super(SymbolModel, self).__init__() + self._flush_name() + self._ops = {} + self._forward_topo = [] + self._param = {} + self._ctx = ctx + + def _flush_name(self): + self._name = f"{list(self._inputs.keys())}->{list(self._outputs.keys())}" + + def attach_sym(self, op:sym.Symbol, param_dict:dict): + op_name = op.attr('name') + assert op_name not in self._forward_topo + assert op_name not in self._inputs + childs = op.get_children() + if childs is not None: + for child in op.get_children(): + child_name = child.attr("name") + child_op = child.attr("op_name") + if child_op == "null": + if child_name not in param_dict: + if child_name not in self._inputs: + self._inputs[child_name] = get_entry_id(child) + else: + self._param[child_name] = [param_dict[child_name].as_in_context(self._ctx)] + else: + if child_name not in self._param and child_name not in self._ops: + self._inputs[child_name] = get_entry_id(child) + else: + assert(op.attr('op_name') == 'null') + self._forward_topo.append(op_name) + self._ops[op_name] = op + self._flush_name() + + def _set_train(self): + self._training = True + + def _set_eval(self): + self._training = False + + def add_output(self, name): + assert name in self._forward_topo + if name not in self._outputs: + self._outputs[name] = self._ops[name] + self._flush_name() + + def _passive_update_dict(self, ori_config, to_update): + for key in to_update: + if key in ori_config: + ori_config[key] = to_update[key] + + def forward(self, data:dict): + for key in self._inputs: + assert ( key in data ) + data.update(self._param) + if self._training: + with autograd.record(): + 
self._forward_imp(data) + else: + self._forward_imp(data) + res = {} + for out in self._outputs: + res[out] = data[out] + return res + + def _forward_imp(self, intermediate): + for name in self._forward_topo: + op = self._ops[name] + op_name = op.attr('op_name') + if op.attr('op_name') == 'null': + assert name in intermediate + else: + childs, attr = sym_iter(op.get_children()), op.list_attr() + cinfos = [(c.attr('name'), get_entry_id(c)) for c in childs] + nd_inputs = [intermediate[n[0]][n[1]] for n in cinfos] + nd_out = get_nd_op(op_name) (*nd_inputs, **attr) + out = [nd_out] if not has_multi_outs(op) else nd_out + assert name not in intermediate + intermediate[name] = out + + def __call__(self, data): + if type(data) is not dict: + assert type(data) is nd.NDArray + data = {'data': [data], } + else: + # assert 'data' in data + data = data.copy() + data['data'] = [data['data']] + return self.forward(data) + diff --git a/python/mrt/yamrt/model/model.py b/python/mrt/yamrt/model/model.py new file mode 100644 index 00000000..6d0af5e7 --- /dev/null +++ b/python/mrt/yamrt/model/model.py @@ -0,0 +1,70 @@ +class Model(object): + def __init__(self): + self._training = True + self._children = [] + self._name = '' + self._param = {} + self._inputs = {} + self._outputs = {} + + def parameters(self): + return self._param + + def input_names(self): + return list(self._inputs.keys()) + + def output_names(self): + return list(self._outputs.keys()) + + def name(self): + return self._name + + def named_parameters(self, recurse:bool=True): + for param_name, param in self._param: + yield (param_name, param) + if recurse: + for child_name, child in self._children: + for param_name, param in child: + yield (child_name, param_name, param) + + def add_output(self, name): + raise NotImplementedError + + def children(self): + for child in self._children: + yield child + + def named_children(self): + for child in self._children: + yield (child.name(), child) + + def train(self): + 
self._training = True + self._set_train() + for child in self._children: + child.train() + + def _set_train(self): + raise NotImplementedError + + def eval(self): + self._training = False + self._set_eval() + for child in self._children: + child.eval() + + def _set_eval(self): + raise NotImplementedError + + def __call__(self, data): + return self.forward(data) + + def forward(self, data): + raise NotImplementedError + + @staticmethod + def _input_ready(input_names:list, data_dict:dict): + for name in input_names: + if name not in data_dict: + return False + return True diff --git a/python/mrt/yamrt/modelhandler.py b/python/mrt/yamrt/modelhandler.py new file mode 100644 index 00000000..0e9750d9 --- /dev/null +++ b/python/mrt/yamrt/modelhandler.py @@ -0,0 +1,108 @@ +# General +# None +# Mxnet Backend +import mxnet as mx +from mrt.tfm_pass import convert_params_dtype, topo_sort +from .model.block.sym_model import SymbolModel + +class ModelHandler(object): + """ Wrapper of Model, design with user-friendly model API. + """ + def __init__(self): + pass + + @classmethod + def load(*args, **kwargs): + raise NotImplementedError + + def model(self): + raise NotImplementedError + + def __next__(self): + raise NotImplementedError + + def __call__(self, data): + raise NotImplementedError + + def visit_sym(self, func, *args, **kwargs): + """Visit the architecture description of model in topo order. + """ + raise NotImplementedError + + def visit_model(self, func, *args, **kwargs): + """Visit the architecture description, parameters and other data of model in topo order. + """ + raise NotImplementedError + + def update_model(self, func, *args, **kwargs): + """Update the architecture description, parameters or other data of model in topo order. + """ + raise NotImplementedError + + +class MxnetModelHandler(ModelHandler): + """ Wrapper of Mxnet Model, design with user-friendly model API. 
+ """ + def __init__(self, model_sym:mx.sym.Symbol, model_params:mx.ndarray.NDArray, dtype:str="float64"): + super(MxnetModelHandler, self).__init__() + self._sym = model_sym + self._param = convert_params_dtype(model_params, dest_dtype=dtype) + self._check() + self._extended_sym = None + self._extended_param = None + self._extended_dict = None + self._train = False + + def symbol(self): + return self._sym + + def params(self): + return self._param + + def _check(self): + for op in self._sym: + if op.attr('op_name') == 'null': + assert op.attr('name') in self._param + + @classmethod + def load(cls, symbol_filepath, params_filepath, dtype:str='float64'): + """ Model load from disk. """ + symbol = mx.sym.load(symbol_filepath) + param = mx.nd.load(params_filepath) + return cls(symbol, param, dtype) + + def model(self, config:dict={}): + """Build the runnable model based the config. + Args: + config:dict; Indicate how to build the runnable model. TODO + Returns: + model:yamrt.model.Model; The runnable model based on the config. + Notice: + For current version, the config will be ignore. A proper description of config's template will be discussed soon. + """ + # TODO Add block (a set of basic operation) support. 
+ model = SymbolModel() + for op in self: + model.attach_sym(op, self._param) + model.add_output(self._sym.attr('name')) + return model + + def __iter__(self): + return topo_sort(self._sym) + + def __next__(self): + for item in topo_sort(self._sym): + yield item + raise StopIteration + + def visit_sym(self, func, *args,**kwargs): + return func(self._sym, *args, **kwargs) + + def visit_model(self, func, *args, **kwargs): + return func(self._sym, self._param, *args, **kwargs) + + def update_model(self, func, *args, **kwargs): + self._sym, self._param = func(self._sym, self._param, *args, **kwargs) + + def extend_model(self, func, *args, **kwargs): + self._extended_sym, self._extended_param, self._extended_dict = func(self._sym, self._param, *args, **kwargs) diff --git a/python/mrt/yamrt/optimizer/__init__.py b/python/mrt/yamrt/optimizer/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/mrt/yamrt/optimizer/base.py b/python/mrt/yamrt/optimizer/base.py new file mode 100644 index 00000000..e74e1405 --- /dev/null +++ b/python/mrt/yamrt/optimizer/base.py @@ -0,0 +1,69 @@ +import mxnet as mx +from mxnet import optimizer + +class Parameter(object): + def __init__(self, param_name:str, param_nd, index:int): + self.name = param_name + self.param_nd = param_nd + self.index = index + self.id = id(param_nd) + self.state = None + + def attach_state(self, state): + self.state = state + +class ParameterDict(object): + def __init__(self): + self._index_to_parameter = {} + self._id_to_parameter = {} + self._name_to_parameter = {} + self._param_list = [] + + def add_param(self, param:Parameter): + assert param.index not in self._index_to_parameter + assert param.id not in self._id_to_parameter + assert param.name not in self._name_to_parameter + + self._index_to_parameter[param.index] = param + self._id_to_parameter [param.id ] = param + self._name_to_parameter [param.name ] = param + self._param_list.append(param) + + def __iter__(self): + return 
self._param_list.__iter__() + + def get(self, key): + if type(key) is int: + return self._index_to_parameter[key] + elif type(key) is str: + return self._name_to_parameter[key] + else: + return self._id_to_parameter[id(key)] + + @classmethod + def load_param_dict(cls, param_dict:dict): + pd = cls() + for index, name in enumerate(param_dict): + param = Parameter(name, param_dict[name], index) + pd.add_param(param) + return pd + + +class Optimizer(object): + def __init__(self, param_dict:dict, opt_name: str, opt_kwargs: dict): + self._pd = ParameterDict.load_param_dict(param_dict) + self._opt = optimizer.Optimizer.create_optimizer(opt_name, **opt_kwargs) + self._init_graph() + + def _init_graph(self): + for param in self._pd: + param.attach_state(self._opt.create_state(param.index, param.param_nd)) + param.param_nd.attach_grad('write') + + def zero_grad(self): + for param in self._pd: + param.param_nd.grad[:] = 0 + + def step(self): + for param in self._pd: + self._opt.update(param.index, param.param_nd, param.param_nd.grad, param.state) \ No newline at end of file diff --git a/tests/yamrt/fquant/t_uniform_affine_quantizer.py b/tests/yamrt/fquant/t_uniform_affine_quantizer.py new file mode 100644 index 00000000..cf74d955 --- /dev/null +++ b/tests/yamrt/fquant/t_uniform_affine_quantizer.py @@ -0,0 +1,99 @@ +import unittest +import mxnet as mx + +if mx.__version__ > '1.5.1': + print(f"[Warning] Unknow Version: {mx.__version__}") + + +class TestUniformAffineQuantizer(unittest.TestCase): + """Test qweight.py""" + + @classmethod + def setUpClass(cls): + # import site + # site.addsitedir('../') + from mrt import yamrt + pass + + @classmethod + def tearDownClass(cls): + pass + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_shape(self): + weight = mx.sym.Variable('conv', shape=(64,3,7,7)) + bias = mx.sym.Variable('bias', shape=(64,)) + input_data = mx.sym.Variable('input_data', shape=(64, 3, 224, 224)) + zero_point = mx.sym.Variable('zero_point', 
shape=(1)) + delta = mx.sym.Variable('delta', shape=(1)) + qweight = mx.symbol.Custom(data=weight, delta=delta, zero_point=zero_point, name='conv1_wq', op_type='MRT_UniformAffineQuantizer', n_bits=32) + conv = mx.sym.Convolution(input_data, weight, bias, kernel=(7,7), num_filter=64) + qconv = mx.sym.Convolution(input_data, qweight, bias, kernel=(7,7), num_filter=64) + + self.assertEqual(conv.infer_shape()[0][1], weight.infer_shape()[1][0]) + self.assertEqual(conv.infer_shape()[0][1], qweight.infer_shape()[1][0]) + + self.assertEqual(qconv.infer_shape()[0][1], qweight.infer_shape()[1][0]) + self.assertEqual(qconv.infer_shape()[0][1], qweight.infer_shape()[1][0]) + + def test_forward(self): + weight = mx.sym.Variable('weight', shape=(64,3,7,7)) + bias = mx.sym.Variable('bias', shape=(64,)) + input_data = mx.sym.Variable('input_data', shape=(64, 3, 224, 224)) + zero_point = mx.sym.Variable('zero_point', shape=(1)) + delta = mx.sym.Variable('delta', shape=(1)) + qweight = mx.symbol.Custom(data=weight, delta=delta, zero_point=zero_point, name='conv1_wq', op_type='MRT_UniformAffineQuantizer', n_bits=32) + conv = mx.sym.Convolution(input_data, weight, bias, kernel=(7,7), num_filter=64) + qconv = mx.sym.Convolution(input_data, qweight, bias, kernel=(7,7), num_filter=64) + args = {"input_data": mx.nd.ones([43,3,224,224]), "weight": mx.nd.ones([64,3,7,7]), "delta": mx.nd.ones([1]), "zero_point": mx.nd.ones([1]), "bias": mx.nd.ones([64])} + qargs = {"input_data": mx.nd.ones([43,3,224,224]), "weight": mx.nd.ones([64,3,7,7]), "delta": mx.nd.ones([1]), "zero_point": mx.nd.ones([1]), "bias": mx.nd.ones([64])} + c = conv.bind(mx.cpu(), args=args, args_grad={}) + qc = qconv.bind(mx.cpu(), args=qargs, args_grad={}) + res = c.forward() + qres = qc.forward() + self.assertEqual(len(res), len(qres)) + self.assertTrue(res[0].shape == qres[0].shape) + + def test_backward(self): # TODO: + weight = mx.sym.Variable('weight', shape=(64,3,7,7)) + bias = mx.sym.Variable('bias', shape=(64,)) + 
input_data = mx.sym.Variable('input_data', shape=(64, 3, 224, 224)) + zero_point = mx.sym.Variable('zero_point', shape=(1)) + delta = mx.sym.Variable('delta', shape=(1)) + qweight = mx.symbol.Custom(data=weight, delta=delta, zero_point=zero_point, name='conv1_wq', op_type='MRT_UniformAffineQuantizer', n_bits=32) + conv = mx.sym.Convolution(input_data, weight, bias, kernel=(7,7), num_filter=64) + qconv = mx.sym.Convolution(input_data, qweight, bias, kernel=(7,7), num_filter=64) + args = {"input_data": mx.nd.ones([43,3,224,224]), "weight": mx.nd.ones([64,3,7,7]), "bias": mx.nd.ones([64])} + qargs = {"input_data": mx.nd.ones([43,3,224,224]), "weight": mx.nd.ones([64,3,7,7]), "delta": mx.nd.ones([1]) + 1, "zero_point": mx.nd.ones([1]) + 128, "bias": mx.nd.ones([64])} + grad = {"weight": mx.nd.zeros([64,3,7,7]), "bias": mx.nd.zeros([64])} + qgrad = {"weight": mx.nd.zeros([64,3,7,7]), "delta": mx.nd.zeros([1]), "zero_point": mx.nd.zeros([1]), "bias": mx.nd.zeros([64])} + + c = conv.bind(mx.cpu(), args=args, args_grad=grad) + qc = qconv.bind(mx.cpu(), args=qargs, args_grad=qgrad) + res = c.forward(is_train=True) + qres = qc.forward(is_train=True) + self.assertEqual(len(res), len(qres)) + self.assertTrue(res[0].shape == qres[0].shape) + + c.backward(res[0].detach()) + qc.backward(qres[0].detach()) + + self.assertEqual(c.grad_dict['weight'].shape, qc.grad_dict['weight'].shape) + self.assertGreater(qc.grad_dict['delta'], 0) + + + #def test_minus(self): + # """Test method minus(a, b)""" + # self.assertEqual(1, minus(3, 2)) + # self.assertNotEqual(1, minus(3, 2)) + + #@unittest.skip("do't run as not ready") + #def test_minus_with_skip(self): + # """Test method minus(a, b)""" + # self.assertEqual(1, minus(3, 2)) + # self.assertNotEqual(1, minus(3, 2)) diff --git a/tests/yamrt/model/t_prediction_SCTF.py b/tests/yamrt/model/t_prediction_SCTF.py new file mode 100644 index 00000000..8b385ff6 --- /dev/null +++ b/tests/yamrt/model/t_prediction_SCTF.py @@ -0,0 +1,7 @@ +import unittest 
+import mxnet as mx + + +class TestPredictionSCTF(unittest.TestCase): + def test_forward(self): + pass diff --git a/tests/yamrt/utest.py b/tests/yamrt/utest.py new file mode 100644 index 00000000..6c990e0d --- /dev/null +++ b/tests/yamrt/utest.py @@ -0,0 +1,17 @@ +import unittest +from os import path + + +if __name__ == '__main__': + suite = unittest.TestSuite() + cur_dir = path.dirname(path.abspath(__file__)) + + suite.addTests( + unittest.TestLoader().discover( + path.join(cur_dir,'model'), 't_*.py', top_level_dir=None)) + suite.addTests( + unittest.TestLoader().discover( + path.join(cur_dir,'fquant'), 't_*.py', top_level_dir=None)) + + runner = unittest.TextTestRunner(verbosity=1) + runner.run(suite) From 23037a99f7e6707b07529dc85fb8352837a92edd Mon Sep 17 00:00:00 2001 From: ryt Date: Wed, 25 May 2022 14:25:53 +0800 Subject: [PATCH 120/120] add has_multi_outs --- python/mrt/sym_utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/mrt/sym_utils.py b/python/mrt/sym_utils.py index 24fdefe9..92ede7f5 100644 --- a/python/mrt/sym_utils.py +++ b/python/mrt/sym_utils.py @@ -471,6 +471,9 @@ def get_entry_id(sym): oindex = json.loads(graph.json())['heads'][0][1] return oindex +def has_multi_outs(sym): + return sym.attr('op_name') in MULTIPYE_OUTS_NODE + def get_node(sym, graph): """ Get the symbol from the provided graph which has the same name as the given symbol.