import pathlib
from setuptools import setup, find_packages
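# 'packages' starts with the top-level ml4a packages; the vendored model
# repositories under ml4a/models/submodules are appended below by expanding
# the 'submodules' mapping of repo name -> list of subpackage paths.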
packages = ['ml4a', 'ml4a.dataset', 'ml4a.utils', 'ml4a.models', 'ml4a.canvas', 'ml4a.models.submodules']
submodules_root = 'ml4a.models.submodules'
submodules = {
    'BASNet': ['model', 'pytorch_iou', 'pytorch_ssim'],
    'deepdream': [],
    'ESRGAN': ['models'],
    'face-parsing-PyTorch': ['modules', 'modules.src', 'modules.src.utils'],
    'FlowNetPytorch': ['datasets', 'models'],
    'glow': ['demo'],
    'idinvert_pytorch': ['boundaries', 'boundaries.stylegan_bedroom256', 'boundaries.stylegan_ffhq256', 'boundaries.stylegan_tower256', 'models', 'utils'],
    'neural_style': [],
    'PhotoSketch': ['data', 'models', 'options', 'scripts', 'util'],
    'Real-ESRGAN': ['options', 'realesrgan', 'realesrgan.archs', 'realesrgan.data', 'realesrgan.models', 'realesrgan.weights', 'scripts'],
    'semantic-segmentation-pytorch': ['config', 'data', 'mit_semseg', 'mit_semseg.config', 'mit_semseg.lib', 'mit_semseg.lib.nn', 'mit_semseg.lib.nn.modules', 'mit_semseg.lib.nn.modules.tests', 'mit_semseg.lib.nn.parallel', 'mit_semseg.lib.utils', 'mit_semseg.lib.utils.data', 'mit_semseg.models'],
    'SPADE': ['data', 'datasets', 'models', 'models.networks', 'models.networks.sync_batchnorm', 'options', 'trainers', 'util'],
    'stylegan2': ['dnnlib', 'dnnlib.tflib', 'dnnlib.tflib.ops', 'dnnlib.submission', 'dnnlib.submission.internal', 'metrics', 'training'],
    'stylegan2-ada-pytorch': ['dnnlib', 'metrics', 'torch_utils', 'torch_utils.ops', 'training'],
    'stylegan3': ['dnnlib', 'gui_utils', 'metrics', 'torch_utils', 'torch_utils.ops', 'training', 'viz'],
    'tacotron2': ['text', 'waveglow'],
    'taming-transformers': ['configs', 'data', 'scripts', 'taming', 'taming.data', 'taming.models', 'taming.modules', 'taming.modules.diffusionmodules', 'taming.modules.discriminator', 'taming.modules.losses', 'taming.modules.misc', 'taming.modules.transformer', 'taming.modules.vqvae'],
    'torch-dreams': ['torch_dreams'],
    'Wav2Lip': ['evaluation', 'evaluation.scores_LSE', 'face_detection', 'face_detection.detection', 'face_detection.detection.sfd', 'models'],
    'White-box-Cartoonization': ['index_files', 'test_code', 'test_code.saved_models', 'train_code', 'train_code.selective_search']
}
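# Runtime dependencies; the pinned versions (omegaconf, pytorch-lightning,
# tensorflow-gpu) are presumably fixed for compatibility with the bundled model code.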
install_requires = [
    'basicsr',
    'bs4',
    'dill',
    'imutils',
    'inflect',
    'face_recognition',
    'gdown',
    'ipython',
    'ipywidgets',
    'librosa',
    'lxml',
    'matplotlib',
    'moviepy',
    'ninja',
    'noise',
    'numba',
    'numpy',
    'omegaconf==2.0.0',
    'opencv-python',
    'Pillow',
    'pytorch-lightning==1.0.8',
    'psutil',
    'scikit-image',
    'scikit-learn',
    'tensorflow-gpu==1.15.0',
    'torch',
    'torchvision',
    'tqdm',
    'unidecode',
    'yacs'
]
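# Non-Python files that must ship with the package: CUDA/C++ extension sources
# (built on demand by some of the submodules) and the data/config files they read.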
package_data = {
    'ml4a': [
        'models/submodules/face-parsing-PyTorch/modules/src/*.cu',
        'models/submodules/face-parsing-PyTorch/modules/src/*.cpp',
        'models/submodules/face-parsing-PyTorch/modules/src/*.h',
        'models/submodules/face-parsing-PyTorch/modules/src/utils/*.h',
        'models/submodules/face-parsing-PyTorch/modules/src/utils/*.cuh',
        'models/submodules/stylegan2/dnnlib/tflib/ops/*.cu',
        'models/submodules/stylegan2-ada-pytorch/torch_utils/ops/*.cu',
        'models/submodules/stylegan2-ada-pytorch/torch_utils/ops/*.cpp',
        'models/submodules/stylegan2-ada-pytorch/torch_utils/ops/*.h',
        'models/submodules/taming-transformers/data/*.txt',
        'models/submodules/taming-transformers/configs/*.yaml'
    ]
}
readme_file = pathlib.Path(__file__).parent / "README.md"
short_description = 'A toolkit for making art with machine learning, including an API for popular deep learning models, recipes for combining them, and a suite of educational examples'
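# Expand the 'submodules' mapping into fully-qualified package names, e.g.
# 'stylegan2' -> 'ml4a.models.submodules.stylegan2' and
# 'ml4a.models.submodules.stylegan2.dnnlib', so the vendored code is installed
# alongside ml4a itself.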
for submodule, subfolders in submodules.items():
    submodule_packages = ['{}.{}'.format(submodules_root, submodule)]
    submodule_packages.extend(['{}.{}.{}'.format(submodules_root, submodule, f) for f in subfolders])
    packages.extend(submodule_packages)
setup(
    name='ml4a',
    version='0.1.4',
    description=short_description,
    long_description=readme_file.read_text(),
    long_description_content_type="text/markdown",
    url='http://github.com/ml4a/ml4a',
    author='Gene Kogan',
    author_email='[email protected]',
    license='MIT',
    packages=packages,
    package_data=package_data,
    install_requires=install_requires,
    zip_safe=False
)