diff --git a/CHANGELOG.md b/CHANGELOG.md
index 01f131063..e7d8ff52e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,9 +2,10 @@
## Update for 2024-02-07
-### Highlights
+Another big release just hit the shelves!
+
+### Highlights
-Another big release, highlights being:
- A lot more functionality in the **Control** module:
- Inpaint and outpaint support, flexible resizing options, optional hires
- Built-in support for many new processors and models, all auto-downloaded on first use
@@ -19,6 +20,7 @@ Another big release, highlights being:
**Segmind SegMoE**, **Mixture Tiling**, **InstaFlow**, **SAG**, **BlipDiffusion**
- Massive work integrating latest advances with [OpenVINO](https://github.com/vladmandic/automatic/wiki/OpenVINO), [IPEX](https://github.com/vladmandic/automatic/wiki/Intel-ARC) and [ONNX Olive](https://github.com/vladmandic/automatic/wiki/ONNX-Runtime-&-Olive)
- Full control over brightness, sharpness and color shifts and color grading during generate process directly in latent space
+- **Documentation**! This was a big one, with a lot of new content and updates in the [WiKi](https://github.com/vladmandic/automatic/wiki)
Plus welcome additions to **UI performance, usability and accessibility** and flexibility of deployment as well as **API** improvements
And it also includes fixes for all reported issues so far
diff --git a/README.md b/README.md
index 0dce172c0..7c6ca2bd3 100644
--- a/README.md
+++ b/README.md
@@ -237,7 +237,7 @@ check [ChangeLog](CHANGELOG.md) for when feature was first introduced as it will
### **Sponsors**
diff --git a/extensions-builtin/sd-webui-controlnet b/extensions-builtin/sd-webui-controlnet
index 416c34507..ecd33eb82 160000
--- a/extensions-builtin/sd-webui-controlnet
+++ b/extensions-builtin/sd-webui-controlnet
@@ -1 +1 @@
-Subproject commit 416c345072c9c2066101e225964e3986abe6945e
+Subproject commit ecd33eb82b25c77cec3185cc9647db904fcb7c03
diff --git a/extensions-builtin/stable-diffusion-webui-rembg b/extensions-builtin/stable-diffusion-webui-rembg
index 4d6b4fd70..7fd9904d9 160000
--- a/extensions-builtin/stable-diffusion-webui-rembg
+++ b/extensions-builtin/stable-diffusion-webui-rembg
@@ -1 +1 @@
-Subproject commit 4d6b4fd70b00f0ffb4a66a381c85e74e88e04752
+Subproject commit 7fd9904d9b01bdc8e2029f908ca5005aff184f61
diff --git a/html/locale_en.json b/html/locale_en.json
index eac85f8f3..af19c4804 100644
--- a/html/locale_en.json
+++ b/html/locale_en.json
@@ -297,7 +297,7 @@
{"id":"","label":"VAE tiling","localized":"","hint":"Divide large images into overlapping tiles with limited VRAM. Results in a minor increase in processing time"},
{"id":"","label":"Attention slicing","localized":"","hint":"Performs attention computation in steps instead of all at once. Slower inference times, but greatly reduced memory usage"},
{"id":"","label":"Execution Provider","localized":"","hint":"ONNX Execution Provider"},
- {"id":"","label":"ONNX show onnx-specific menu","localized":"","hint":"Enable ONNX tab in UI. Restart required"},
+ {"id":"","label":"ONNX allow fallback to CPU","localized":"","hint":"Allow fallback to CPU when selected execution provider failed"},
{"id":"","label":"ONNX cache converted models","localized":"","hint":"Save the models that are converted to ONNX format as a cache. You can manage them in ONNX tab"},
{"id":"","label":"ONNX unload base model when processing refiner","localized":"","hint":"Unload base model when the refiner is being converted/optimized/processed"},
{"id":"","label":"inference-mode","localized":"","hint":"Use torch.inference_mode"},
diff --git a/html/locale_ko.json b/html/locale_ko.json
index d586b6c73..3eeabe8c0 100644
--- a/html/locale_ko.json
+++ b/html/locale_ko.json
@@ -582,7 +582,7 @@
{"id":"","label":"Diffusers model loading variant","localized":"","hint":""},
{"id":"","label":"Diffusers VAE loading variant","localized":"","hint":""},
{"id":"","label":"Execution Provider","localized":"","hint":"ONNX Execution Provider"},
- {"id":"","label":"ONNX show onnx-specific menu","localized":"ONNX 탭 보이기","hint":"UI에서 ONNX 탭을 볼 수 있게 한다. WebUI를 재시작하면 반영된다."},
+ {"id":"","label":"ONNX allow fallback to CPU","localized":"ONNX 생성 실패 시 CPU를 사용해 다시 시도","hint":"선택한 Execution Provider에서 오류가 발생했을 경우 CPU를 사용해 다시 시도한다."},
{"id":"","label":"ONNX cache converted models","localized":"ONNX 모델 저장 (캐시)","hint":"ONNX 형식으로 변환된 모델을 캐시로 저장한다. ONNX 탭에서 관리할 수 있다."},
{"id":"","label":"ONNX unload base model when processing refiner","localized":"ONNX 리파이너를 처리하고 있을 때 base 모델을 메모리에서 해제","hint":"리파이너 모델이 변환, 최적화 또는 처리되고 있을 때 base 모델을 메모리에서 잠시 해제합니다."}
],
diff --git a/installer.py b/installer.py
index 137733ec2..a2f857cc9 100644
--- a/installer.py
+++ b/installer.py
@@ -7,7 +7,12 @@
import platform
import subprocess
import cProfile
-import pkg_resources
+
+try:
+ import pkg_resources # Python 3.12 no longer ships setuptools (which provides pkg_resources) by default
+except ImportError:
+ stdout = subprocess.run(f'"{sys.executable}" -m pip install setuptools', shell=True, check=False, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ import pkg_resources
class Dot(dict): # dot notation access to dictionary attributes
@@ -47,7 +52,7 @@ class Dot(dict): # dot notation access to dictionary attributes
})
git_commit = "unknown"
submodules_commit = {
- # 'sd-webui-controlnet': 'ecd33eb',
+ 'sd-webui-controlnet': 'ecd33eb',
# 'stable-diffusion-webui-images-browser': '27fe4a7',
}
@@ -163,8 +168,11 @@ def installed(package, friendly: str = None, reload = False, quiet = False):
ok = True
try:
if reload:
- import imp # pylint: disable=deprecated-module
- imp.reload(pkg_resources)
+ try:
+ import imp # pylint: disable=deprecated-module
+ imp.reload(pkg_resources)
+ except Exception:
+ pass
if friendly:
pkgs = friendly.split()
else:
@@ -201,10 +209,15 @@ def installed(package, friendly: str = None, reload = False, quiet = False):
return False
-def uninstall(package):
- if installed(package, package, quiet=True):
- log.warning(f'Uninstalling: {package}')
- pip(f"uninstall {package} --yes --quiet", ignore=True, quiet=True)
+def uninstall(package, quiet = False):
+ packages = package if isinstance(package, list) else [package]
+ res = ''
+ for p in packages:
+ if installed(p, p, quiet=True):
+ if not quiet:
+ log.warning(f'Uninstalling: {p}')
+ res += pip(f"uninstall {p} --yes --quiet", ignore=True, quiet=True)
+ return res
def pip(arg: str, ignore: bool = False, quiet: bool = False):
@@ -229,11 +242,18 @@ def pip(arg: str, ignore: bool = False, quiet: bool = False):
# install package using pip if not already installed
def install(package, friendly: str = None, ignore: bool = False):
+ res = ''
if args.reinstall or args.upgrade:
global quick_allowed # pylint: disable=global-statement
quick_allowed = False
if args.reinstall or not installed(package, friendly):
- pip(f"install --upgrade {package}", ignore=ignore)
+ res = pip(f"install --upgrade {package}", ignore=ignore)
+ try:
+ import imp # pylint: disable=deprecated-module
+ imp.reload(pkg_resources)
+ except Exception:
+ pass
+ return res
# execute git command
@@ -362,6 +382,12 @@ def check_python():
log.debug(f'Git {git_version.replace("git version", "").strip()}')
+# check onnx: install onnxruntime if missing (either CPU or GPU package satisfies the check)
+def check_onnx():
+ if not installed('onnxruntime', quiet=True) and not installed('onnxruntime-gpu', quiet=True): # allow either
+ install('onnxruntime', 'onnxruntime', ignore=True)
+
+
# check torch version
def check_torch():
if args.skip_torch:
@@ -379,8 +405,13 @@ def check_torch():
log.debug(f'Torch allowed: cuda={allow_cuda} rocm={allow_rocm} ipex={allow_ipex} diml={allow_directml} openvino={allow_openvino}')
torch_command = os.environ.get('TORCH_COMMAND', '')
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'none')
- if not installed('onnxruntime', quiet=True) and not installed('onnxruntime-gpu', quiet=True): # allow either
- install('onnxruntime', 'onnxruntime', ignore=True)
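+ # detect ROCm: HIP_PATH with a bin folder on Windows, rocminfo or /dev/kfd on Linux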
+ def is_rocm_available():
+ if not allow_rocm:
+ return False
+ if platform.system() == 'Windows':
+ hip_path = os.environ.get('HIP_PATH', None)
+ return hip_path is not None and os.path.exists(os.path.join(hip_path, 'bin'))
+ return shutil.which('rocminfo') is not None or os.path.exists('/opt/rocm/bin/rocminfo') or os.path.exists('/dev/kfd')
if torch_command != '':
pass
elif allow_cuda and (shutil.which('nvidia-smi') is not None or args.use_xformers or os.path.exists(os.path.join(os.environ.get('SystemRoot') or r'C:\Windows', 'System32', 'nvidia-smi.exe'))):
@@ -391,14 +422,21 @@ def check_torch():
torch_command = os.environ.get('TORCH_COMMAND', 'torch torchvision --index-url https://download.pytorch.org/whl/cu118')
xformers_package = os.environ.get('XFORMERS_PACKAGE', '--pre xformers' if opts.get('cross_attention_optimization', '') == 'xFormers' else 'none')
install('onnxruntime-gpu', 'onnxruntime-gpu', ignore=True)
- elif allow_rocm and (shutil.which('rocminfo') is not None or os.path.exists('/opt/rocm/bin/rocminfo') or os.path.exists('/dev/kfd')):
+ elif is_rocm_available():
+ is_windows = platform.system() == 'Windows' # gives clearer logs for ZLUDA users and, in the future, ROCm-for-Windows users
log.info('AMD ROCm toolkit detected')
os.environ.setdefault('PYTORCH_HIP_ALLOC_CONF', 'garbage_collection_threshold:0.8,max_split_size_mb:512')
- os.environ.setdefault('TENSORFLOW_PACKAGE', 'tensorflow-rocm')
+ if not is_windows:
+ os.environ.setdefault('TENSORFLOW_PACKAGE', 'tensorflow-rocm')
try:
- command = subprocess.run('rocm_agent_enumerator', shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- amd_gpus = command.stdout.decode(encoding="utf8", errors="ignore").split('\n')
- amd_gpus = [x for x in amd_gpus if x and x != 'gfx000']
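+ # Windows: parse gcnArchName entries from hipinfo; Linux: enumerate agents and drop the gfx000 CPU entry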
+ if is_windows:
+ command = subprocess.run('hipinfo', shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ amd_gpus = command.stdout.decode(encoding="utf8", errors="ignore").split('\n')
+ amd_gpus = [x.split(' ')[-1].strip() for x in amd_gpus if x.startswith('gcnArchName:')]
+ else:
+ command = subprocess.run('rocm_agent_enumerator', shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ amd_gpus = command.stdout.decode(encoding="utf8", errors="ignore").split('\n')
+ amd_gpus = [x for x in amd_gpus if x and x != 'gfx000']
log.debug(f'ROCm agents detected: {amd_gpus}')
except Exception as e:
log.debug(f'Run rocm_agent_enumerator failed: {e}')
@@ -433,22 +471,17 @@ def check_torch():
except Exception as e:
log.debug(f'ROCm hipconfig failed: {e}')
rocm_ver = None
- if rocm_ver in {"5.7"}:
- torch_command = os.environ.get('TORCH_COMMAND', f'torch torchvision --pre --index-url https://download.pytorch.org/whl/nightly/rocm{rocm_ver}')
- elif rocm_ver in {"5.5", "5.6"}:
- torch_command = os.environ.get('TORCH_COMMAND', f'torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm{rocm_ver}')
- else:
- # ROCm 5.5 is oldest for PyTorch 2.1
- torch_command = os.environ.get('TORCH_COMMAND', 'torch torchvision --index-url https://download.pytorch.org/whl/rocm5.5')
+ if not is_windows: # remove once PyTorch built with ROCm for Windows is released
+ if rocm_ver in {"5.7"}:
+ torch_command = os.environ.get('TORCH_COMMAND', f'torch torchvision --pre --index-url https://download.pytorch.org/whl/nightly/rocm{rocm_ver}')
+ elif rocm_ver in {"5.5", "5.6"}:
+ torch_command = os.environ.get('TORCH_COMMAND', f'torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm{rocm_ver}')
+ else:
+ # ROCm 5.5 is oldest for PyTorch 2.1
+ torch_command = os.environ.get('TORCH_COMMAND', 'torch torchvision --index-url https://download.pytorch.org/whl/rocm5.5')
+ if rocm_ver is not None:
+ install(os.environ.get('ONNXRUNTIME_PACKAGE', get_onnxruntime_source_for_rocm(arr)), "onnxruntime-training built with ROCm", ignore=True)
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'none')
- if rocm_ver is not None:
- install(os.environ.get('ONNXRUNTIME_PACKAGE', get_onnxruntime_source_for_rocm(arr)), "onnxruntime-training built with ROCm", ignore=True)
- try:
- import onnxruntime
- if "ROCMExecutionProvider" not in onnxruntime.get_available_providers():
- log.warning('Failed to automatically install onxnruntime package for ROCm. Please manually install it if you need.')
- except Exception:
- pass
elif allow_ipex and (args.use_ipex or shutil.which('sycl-ls') is not None or shutil.which('sycl-ls.exe') is not None or os.environ.get('ONEAPI_ROOT') is not None or os.path.exists('/opt/intel/oneapi') or os.path.exists("C:/Program Files (x86)/Intel/oneAPI") or os.path.exists("C:/oneAPI")):
args.use_ipex = True # pylint: disable=attribute-defined-outside-init
log.info('Intel OneAPI Toolkit detected')
@@ -461,11 +494,11 @@ def check_torch():
install(os.environ.get('MKL_PACKAGE', 'mkl==2024.0.0'), 'mkl')
install(os.environ.get('DPCPP_PACKAGE', 'mkl-dpcpp==2024.0.0'), 'mkl-dpcpp')
else:
- if sys.version_info[1] == 11:
+ if sys.version_info.minor == 11:
pytorch_pip = 'https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.1.10%2Bxpu/torch-2.1.0a0+cxx11.abi-cp311-cp311-win_amd64.whl'
torchvision_pip = 'https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.1.10%2Bxpu/torchvision-0.16.0a0+cxx11.abi-cp311-cp311-win_amd64.whl'
ipex_pip = 'https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.1.10%2Bxpu/intel_extension_for_pytorch-2.1.10+xpu-cp311-cp311-win_amd64.whl'
- elif sys.version_info[1] == 10:
+ elif sys.version_info.minor == 10:
pytorch_pip = 'https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.1.10%2Bxpu/torch-2.1.0a0+cxx11.abi-cp310-cp310-win_amd64.whl'
torchvision_pip = 'https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.1.10%2Bxpu/torchvision-0.16.0a0+cxx11.abi-cp310-cp310-win_amd64.whl'
ipex_pip = 'https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.1.10%2Bxpu/intel_extension_for_pytorch-2.1.10+xpu-cp310-cp310-win_amd64.whl'
@@ -482,10 +515,10 @@ def check_torch():
install('onnxruntime-openvino', 'onnxruntime-openvino', ignore=True)
elif allow_openvino and args.use_openvino:
log.info('Using OpenVINO')
- torch_command = os.environ.get('TORCH_COMMAND', 'torch==2.1.2 torchvision==0.16.2 --index-url https://download.pytorch.org/whl/cpu')
+ torch_command = os.environ.get('TORCH_COMMAND', 'torch==2.2.0 torchvision==0.17.0 --index-url https://download.pytorch.org/whl/cpu')
install(os.environ.get('OPENVINO_PACKAGE', 'openvino==2023.3.0'), 'openvino')
install('onnxruntime-openvino', 'onnxruntime-openvino', ignore=True) # TODO openvino: numpy version conflicts with tensorflow and doesn't support Python 3.11
- install('nncf==2.8.0', 'nncf')
+ install('nncf==2.8.1', 'nncf')
os.environ.setdefault('PYTORCH_TRACING_MODE', 'TORCHFX')
os.environ.setdefault('NEOReadDebugKeys', '1')
os.environ.setdefault('ClDeviceGlobalMemSizeAvailablePercent', '100')
@@ -555,6 +588,8 @@ def check_torch():
log.debug(f'Cannot install xformers package: {e}')
if opts.get('cuda_compile_backend', '') == 'hidet':
install('hidet', 'hidet')
+ if opts.get('cuda_compile_backend', '') == 'deep-cache':
+ install('DeepCache')
if opts.get('nncf_compress_weights', False) and not args.use_openvino:
install('nncf==2.7.0', 'nncf')
if args.profile:
@@ -613,10 +648,11 @@ def run_extension_installer(folder):
env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")
result = subprocess.run(f'"{sys.executable}" "{path_installer}"', shell=True, env=env, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=folder)
+ txt = result.stdout.decode(encoding="utf8", errors="ignore")
+ debug(f'Extension installer: file={path_installer} {txt}')
if result.returncode != 0:
global errors # pylint: disable=global-statement
errors += 1
- txt = result.stdout.decode(encoding="utf8", errors="ignore")
if len(result.stderr) > 0:
txt = txt + '\n' + result.stderr.decode(encoding="utf8", errors="ignore")
log.error(f'Error running extension installer: {path_installer}')
@@ -843,21 +879,15 @@ def get_version():
def get_onnxruntime_source_for_rocm(rocm_ver):
- ort_version = "1.16.3"
-
- try:
- import onnxruntime
- ort_version = onnxruntime.__version__
- except ImportError:
- pass
-
+ ort_version = "1.16.3" # hardcoded
cp_str = f"{sys.version_info.major}{sys.version_info.minor}"
-
if rocm_ver is None:
command = subprocess.run('hipconfig --version', shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
rocm_ver = command.stdout.decode(encoding="utf8", errors="ignore").split('.')
-
- return f"https://download.onnxruntime.ai/onnxruntime_training-{ort_version}%2Brocm{rocm_ver[0]}{rocm_ver[1]}-cp{cp_str}-cp{cp_str}-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ if "linux" in sys.platform:
+ return f"https://download.onnxruntime.ai/onnxruntime_training-{ort_version}%2Brocm{rocm_ver[0]}{rocm_ver[1]}-cp{cp_str}-cp{cp_str}-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+ else:
+ return 'onnxruntime-gpu'
# check version of the main repo and optionally upgrade it
@@ -971,6 +1001,7 @@ def add_args(parser):
group.add_argument('--skip-git', default = os.environ.get("SD_SKIPGIT",False), action='store_true', help = "Skips running all GIT operations, default: %(default)s")
group.add_argument('--skip-torch', default = os.environ.get("SD_SKIPTORCH",False), action='store_true', help = "Skips running Torch checks, default: %(default)s")
group.add_argument('--skip-all', default = os.environ.get("SD_SKIPALL",False), action='store_true', help = "Skips running all checks, default: %(default)s")
+ group.add_argument('--skip-env', default = os.environ.get("SD_SKIPENV",False), action='store_true', help = "Skips setting of env variables during startup, default: %(default)s")
group.add_argument('--experimental', default = os.environ.get("SD_EXPERIMENTAL",False), action='store_true', help = "Allow unsupported versions of libraries, default: %(default)s")
group.add_argument('--reinstall', default = os.environ.get("SD_REINSTALL",False), action='store_true', help = "Force reinstallation of all requirements, default: %(default)s")
group.add_argument('--test', default = os.environ.get("SD_TEST",False), action='store_true', help = "Run test only and exit")
diff --git a/modules/api/endpoints.py b/modules/api/endpoints.py
index 23d5e48ef..3d6f2381a 100644
--- a/modules/api/endpoints.py
+++ b/modules/api/endpoints.py
@@ -80,7 +80,7 @@ def post_interrogate(req: models.ReqInterrogate):
caption = shared.interrogator.interrogate(image)
return models.ResInterrogate(caption)
elif req.model == "deepdanbooru":
- from mobules import deepbooru
+ from modules import deepbooru
caption = deepbooru.model.tag(image)
return models.ResInterrogate(caption)
else:
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 6ee979a7c..49aa581b3 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -6,13 +6,12 @@
from PIL import Image
import gradio as gr
from modules.paths import data_path
-from modules import shared, ui_tempdir, script_callbacks, images
+from modules import shared, gr_tempdir, script_callbacks, images
re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'
re_param = re.compile(re_param_code)
re_imagesize = re.compile(r"^(\d+)x(\d+)$")
-re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$") # pylint: disable=anomalous-backslash-in-string
type_of_gr_update = type(gr.update())
paste_fields = {}
registered_param_bindings = []
@@ -58,7 +57,7 @@ def image_from_url_text(filedata):
filedata = filedata[0]
if type(filedata) == dict and filedata.get("is_file", False):
filename = filedata["name"]
- is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename)
+ is_in_right_dir = gr_tempdir.check_tmp_file(shared.demo, filename)
if is_in_right_dir:
filename = filename.rsplit('?', 1)[0]
if not os.path.exists(filename):
@@ -188,28 +187,6 @@ def send_image_and_dimensions(x):
return img, w, h
-def find_hypernetwork_key(hypernet_name, hypernet_hash=None):
- """Determines the config parameter name to use for the hypernet based on the parameters in the infotext.
- Example: an infotext provides "Hypernet: ke-ta" and "Hypernet hash: 1234abcd". For the "Hypernet" config
- parameter this means there should be an entry that looks like "ke-ta-10000(1234abcd)" to set it to.
- If the infotext has no hash, then a hypernet with the same name will be selected instead.
- """
- hypernet_name = hypernet_name.lower()
- if hypernet_hash is not None:
- # Try to match the hash in the name
- for hypernet_key in shared.hypernetworks.keys():
- result = re_hypernet_hash.search(hypernet_key)
- if result is not None and result[1] == hypernet_hash:
- return hypernet_key
- else:
- # Fall back to a hypernet with the same name
- for hypernet_key in shared.hypernetworks.keys():
- if hypernet_key.lower().startswith(hypernet_name):
- return hypernet_key
-
- return None
-
-
def parse_generation_parameters(x: str):
res = {}
if x is None:
diff --git a/modules/gr_tempdir.py b/modules/gr_tempdir.py
new file mode 100644
index 000000000..92b38a6fe
--- /dev/null
+++ b/modules/gr_tempdir.py
@@ -0,0 +1,99 @@
+import os
+import tempfile
+from collections import namedtuple
+from pathlib import Path
+from PIL import Image, PngImagePlugin
+from modules import shared, errors, paths
+
+
+Savedfile = namedtuple("Savedfile", ["name"])
+debug = errors.log.trace if os.environ.get('SD_PATH_DEBUG', None) is not None else lambda *args, **kwargs: None
+
+
+def register_tmp_file(gradio, filename):
+ if hasattr(gradio, 'temp_file_sets'):
+ gradio.temp_file_sets[0] = gradio.temp_file_sets[0] | {os.path.abspath(filename)}
+
+
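+# a temp file may be served only if gradio registered it or it resides under one of the configured output folders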
+def check_tmp_file(gradio, filename):
+ ok = False
+ if hasattr(gradio, 'temp_file_sets'):
+ ok = ok or any(filename in fileset for fileset in gradio.temp_file_sets)
+ if shared.opts.outdir_samples != '':
+ ok = ok or Path(shared.opts.outdir_samples).resolve() in Path(filename).resolve().parents
+ else:
+ ok = ok or Path(shared.opts.outdir_txt2img_samples).resolve() in Path(filename).resolve().parents
+ ok = ok or Path(shared.opts.outdir_img2img_samples).resolve() in Path(filename).resolve().parents
+ ok = ok or Path(shared.opts.outdir_extras_samples).resolve() in Path(filename).resolve().parents
+ if shared.opts.outdir_grids != '':
+ ok = ok or Path(shared.opts.outdir_grids).resolve() in Path(filename).resolve().parents
+ else:
+ ok = ok or Path(shared.opts.outdir_txt2img_grids).resolve() in Path(filename).resolve().parents
+ ok = ok or Path(shared.opts.outdir_img2img_grids).resolve() in Path(filename).resolve().parents
+ ok = ok or Path(shared.opts.outdir_save).resolve() in Path(filename).resolve().parents
+ ok = ok or Path(shared.opts.outdir_init_images).resolve() in Path(filename).resolve().parents
+ return ok
+
+
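+# replacement for gradio's pil_to_temp_file: reuses images already saved to disk and preserves PNG metadata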
+def pil_to_temp_file(self, img: Image.Image, dir: str, format="png") -> str: # pylint: disable=redefined-builtin,unused-argument
+ """
+ # original gradio implementation
+ bytes_data = gr.processing_utils.encode_pil_to_bytes(img, format)
+ temp_dir = Path(dir) / self.hash_bytes(bytes_data)
+ temp_dir.mkdir(exist_ok=True, parents=True)
+ filename = str(temp_dir / f"image.{format}")
+ img.save(filename, pnginfo=gr.processing_utils.get_pil_metadata(img))
+ """
+ folder = dir
+ already_saved_as = getattr(img, 'already_saved_as', None)
+ exists = os.path.isfile(already_saved_as) if already_saved_as is not None else False
+ debug(f'Image lookup: {already_saved_as} exists={exists}')
+ if already_saved_as and exists:
+ register_tmp_file(shared.demo, already_saved_as)
+ file_obj = Savedfile(already_saved_as)
+ name = file_obj.name
+ debug(f'Image registered: {name}')
+ return name
+ if shared.opts.temp_dir != "":
+ folder = shared.opts.temp_dir
+ use_metadata = False
+ metadata = PngImagePlugin.PngInfo()
+ for key, value in img.info.items():
+ if isinstance(key, str) and isinstance(value, str):
+ metadata.add_text(key, value)
+ use_metadata = True
+ if not os.path.exists(folder):
+ os.makedirs(folder, exist_ok=True)
+ shared.log.debug(f'Created temp folder: path="{folder}"')
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir=folder) as tmp:
+ name = tmp.name
+ img.save(name, pnginfo=(metadata if use_metadata else None))
+ img.already_saved_as = name
+ size = os.path.getsize(name)
+ shared.log.debug(f'Saving temp: image="{name}" resolution={img.width}x{img.height} size={size}')
+ params = ', '.join([f'{k}: {v}' for k, v in img.info.items()])
+ params = params[12:] if params.startswith('parameters: ') else params
+ with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
+ file.write(params)
+ return name
+
+
+# override save to file function so that it also writes PNG info
+
+def on_tmpdir_changed():
+ if shared.opts.temp_dir == "":
+ return
+ register_tmp_file(shared.demo, os.path.join(shared.opts.temp_dir, "x"))
+
+
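+# remove leftover generated images (png/jpg/webp) from the configured temp folder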
+def cleanup_tmpdr():
+ temp_dir = shared.opts.temp_dir
+ if temp_dir == "" or not os.path.isdir(temp_dir):
+ return
+ for root, _dirs, files in os.walk(temp_dir, topdown=False):
+ for name in files:
+ _, extension = os.path.splitext(name)
+ if extension != ".png" and extension != ".jpg" and extension != ".webp":
+ continue
+ filename = os.path.join(root, name)
+ os.remove(filename)
diff --git a/modules/onnx_impl/__init__.py b/modules/onnx_impl/__init__.py
index abdea8e59..a03a30e96 100644
--- a/modules/onnx_impl/__init__.py
+++ b/modules/onnx_impl/__init__.py
@@ -95,7 +95,7 @@ def to(self, *args, **kwargs):
device = extract_device(args, kwargs)
if device is not None:
self.device = device
- self.model = move_inference_session(self.model, device)
+ self.model = move_inference_session(self.model, device) # pylint: disable=attribute-defined-outside-init
return self
@@ -203,7 +203,6 @@ def ORTDiffusionModelPart_to(self, *args, **kwargs):
def initialize():
global initialized # pylint: disable=global-statement
-
if initialized:
return
@@ -211,52 +210,53 @@ def initialize():
from modules import devices
from modules.paths import models_path
from modules.shared import opts
- from .execution_providers import ExecutionProvider, TORCH_DEVICE_TO_EP, available_execution_providers
-
- onnx_dir = os.path.join(models_path, "ONNX")
- if not os.path.isdir(onnx_dir):
- os.mkdir(onnx_dir)
- if devices.backend == "rocm":
- TORCH_DEVICE_TO_EP["cuda"] = ExecutionProvider.ROCm
+ try: # may fail on onnx import
+ import onnx # pylint: disable=unused-import
+ from .execution_providers import ExecutionProvider, TORCH_DEVICE_TO_EP, available_execution_providers
- from .pipelines.onnx_stable_diffusion_pipeline import OnnxStableDiffusionPipeline
- from .pipelines.onnx_stable_diffusion_img2img_pipeline import OnnxStableDiffusionImg2ImgPipeline
- from .pipelines.onnx_stable_diffusion_inpaint_pipeline import OnnxStableDiffusionInpaintPipeline
- from .pipelines.onnx_stable_diffusion_upscale_pipeline import OnnxStableDiffusionUpscalePipeline
- from .pipelines.onnx_stable_diffusion_xl_pipeline import OnnxStableDiffusionXLPipeline
- from .pipelines.onnx_stable_diffusion_xl_img2img_pipeline import OnnxStableDiffusionXLImg2ImgPipeline
+ onnx_dir = os.path.join(models_path, "ONNX")
+ if not os.path.isdir(onnx_dir):
+ os.mkdir(onnx_dir)
+ if devices.backend == "rocm":
+ TORCH_DEVICE_TO_EP["cuda"] = ExecutionProvider.ROCm
- # OnnxRuntimeModel Hijack.
- OnnxRuntimeModel.__module__ = 'diffusers'
- diffusers.OnnxRuntimeModel = OnnxRuntimeModel
+ from .pipelines.onnx_stable_diffusion_pipeline import OnnxStableDiffusionPipeline
+ from .pipelines.onnx_stable_diffusion_img2img_pipeline import OnnxStableDiffusionImg2ImgPipeline
+ from .pipelines.onnx_stable_diffusion_inpaint_pipeline import OnnxStableDiffusionInpaintPipeline
+ from .pipelines.onnx_stable_diffusion_upscale_pipeline import OnnxStableDiffusionUpscalePipeline
+ from .pipelines.onnx_stable_diffusion_xl_pipeline import OnnxStableDiffusionXLPipeline
+ from .pipelines.onnx_stable_diffusion_xl_img2img_pipeline import OnnxStableDiffusionXLImg2ImgPipeline
- diffusers.OnnxStableDiffusionPipeline = OnnxStableDiffusionPipeline
- diffusers.pipelines.auto_pipeline.AUTO_TEXT2IMAGE_PIPELINES_MAPPING["onnx-stable-diffusion"] = diffusers.OnnxStableDiffusionPipeline
+ OnnxRuntimeModel.__module__ = 'diffusers' # OnnxRuntimeModel Hijack.
+ diffusers.OnnxRuntimeModel = OnnxRuntimeModel
- diffusers.OnnxStableDiffusionImg2ImgPipeline = OnnxStableDiffusionImg2ImgPipeline
- diffusers.pipelines.auto_pipeline.AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["onnx-stable-diffusion"] = diffusers.OnnxStableDiffusionImg2ImgPipeline
+ diffusers.OnnxStableDiffusionPipeline = OnnxStableDiffusionPipeline
+ diffusers.pipelines.auto_pipeline.AUTO_TEXT2IMAGE_PIPELINES_MAPPING["onnx-stable-diffusion"] = diffusers.OnnxStableDiffusionPipeline
- diffusers.OnnxStableDiffusionInpaintPipeline = OnnxStableDiffusionInpaintPipeline
- diffusers.pipelines.auto_pipeline.AUTO_INPAINT_PIPELINES_MAPPING["onnx-stable-diffusion"] = diffusers.OnnxStableDiffusionInpaintPipeline
+ diffusers.OnnxStableDiffusionImg2ImgPipeline = OnnxStableDiffusionImg2ImgPipeline
+ diffusers.pipelines.auto_pipeline.AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["onnx-stable-diffusion"] = diffusers.OnnxStableDiffusionImg2ImgPipeline
- diffusers.OnnxStableDiffusionUpscalePipeline = OnnxStableDiffusionUpscalePipeline
+ diffusers.OnnxStableDiffusionInpaintPipeline = OnnxStableDiffusionInpaintPipeline
+ diffusers.pipelines.auto_pipeline.AUTO_INPAINT_PIPELINES_MAPPING["onnx-stable-diffusion"] = diffusers.OnnxStableDiffusionInpaintPipeline
- diffusers.OnnxStableDiffusionXLPipeline = OnnxStableDiffusionXLPipeline
- diffusers.pipelines.auto_pipeline.AUTO_TEXT2IMAGE_PIPELINES_MAPPING["onnx-stable-diffusion-xl"] = diffusers.OnnxStableDiffusionXLPipeline
+ diffusers.OnnxStableDiffusionUpscalePipeline = OnnxStableDiffusionUpscalePipeline
- diffusers.OnnxStableDiffusionXLImg2ImgPipeline = OnnxStableDiffusionXLImg2ImgPipeline
- diffusers.pipelines.auto_pipeline.AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["onnx-stable-diffusion-xl"] = diffusers.OnnxStableDiffusionXLImg2ImgPipeline
+ diffusers.OnnxStableDiffusionXLPipeline = OnnxStableDiffusionXLPipeline
+ diffusers.pipelines.auto_pipeline.AUTO_TEXT2IMAGE_PIPELINES_MAPPING["onnx-stable-diffusion-xl"] = diffusers.OnnxStableDiffusionXLPipeline
- # Huggingface model compatibility
- diffusers.ORTStableDiffusionXLPipeline = diffusers.OnnxStableDiffusionXLPipeline
- diffusers.ORTStableDiffusionXLImg2ImgPipeline = diffusers.OnnxStableDiffusionXLImg2ImgPipeline
+ diffusers.OnnxStableDiffusionXLImg2ImgPipeline = OnnxStableDiffusionXLImg2ImgPipeline
+ diffusers.pipelines.auto_pipeline.AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["onnx-stable-diffusion-xl"] = diffusers.OnnxStableDiffusionXLImg2ImgPipeline
- optimum.onnxruntime.modeling_diffusion._ORTDiffusionModelPart.to = ORTDiffusionModelPart_to # pylint: disable=protected-access
+ diffusers.ORTStableDiffusionXLPipeline = diffusers.OnnxStableDiffusionXLPipeline # Huggingface model compatibility
+ diffusers.ORTStableDiffusionXLImg2ImgPipeline = diffusers.OnnxStableDiffusionXLImg2ImgPipeline
- log.info(f'ONNX: selected={opts.onnx_execution_provider}, available={available_execution_providers}')
+ optimum.onnxruntime.modeling_diffusion._ORTDiffusionModelPart.to = ORTDiffusionModelPart_to # pylint: disable=protected-access
- initialized = True
+ log.info(f'ONNX: selected={opts.onnx_execution_provider}, available={available_execution_providers}')
+ initialized = True
+ except Exception as e:
+ log.error(f'ONNX: failed to initialize: {e}')
def initialize_olive():
diff --git a/modules/onnx_impl/pipelines/__init__.py b/modules/onnx_impl/pipelines/__init__.py
index 919284b2d..3d2f217df 100644
--- a/modules/onnx_impl/pipelines/__init__.py
+++ b/modules/onnx_impl/pipelines/__init__.py
@@ -5,12 +5,11 @@
from abc import ABCMeta
from typing import Type, Tuple, List, Any, Dict
from packaging import version
-import onnx
import torch
import diffusers
import onnxruntime as ort
import optimum.onnxruntime
-from installer import log
+from installer import log, install
from modules import shared
from modules.paths import sd_configs_path, models_path
from modules.sd_models import CheckpointInfo
@@ -25,6 +24,8 @@
SUBMODELS_SDXL = ("text_encoder", "text_encoder_2", "unet", "vae_encoder", "vae_decoder",)
SUBMODELS_SDXL_REFINER = ("text_encoder_2", "unet", "vae_encoder", "vae_decoder",)
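+# submodels whose weights can exceed the 2GB protobuf limit and must be saved as external data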
+SUBMODELS_LARGE = ("text_encoder_2", "unet",)
+
class PipelineBase(TorchCompatibleModule, diffusers.DiffusionPipeline, metaclass=ABCMeta):
model_type: str
@@ -146,6 +147,8 @@ def derive_properties(self, pipeline: diffusers.DiffusionPipeline):
return pipeline
def convert(self, submodels: List[str], in_dir: os.PathLike, out_dir: os.PathLike):
+ install('onnx') # onnx may not be installed yet; install() checks first and installs only if missing
+ import onnx
shutil.rmtree("cache", ignore_errors=True)
shutil.rmtree("footprints", ignore_errors=True)
@@ -177,7 +180,7 @@ def convert(self, submodels: List[str], in_dir: os.PathLike, out_dir: os.PathLik
onnx.save_model(
model,
os.path.join(destination, "model.onnx"),
- save_as_external_data=submodel == "unet",
+ save_as_external_data=submodel in SUBMODELS_LARGE,
all_tensors_to_one_file=True,
location="weights.pb",
)
@@ -326,7 +329,7 @@ def preprocess(self, p: StableDiffusionProcessing):
config.vae = os.path.join(models_path, "VAE", shared.opts.sd_vae)
if not os.path.isfile(config.vae):
del config.vae
- config.vae_sdxl_fp16_fix = self._is_sdxl and not shared.opts.diffusers_vae_upcast
+ config.vae_sdxl_fp16_fix = self._is_sdxl and shared.opts.diffusers_vae_upcast == "false"
config.width = p.width
config.height = p.height
diff --git a/modules/onnx_impl/ui.py b/modules/onnx_impl/ui.py
index bcddf1b80..cfdc40f82 100644
--- a/modules/onnx_impl/ui.py
+++ b/modules/onnx_impl/ui.py
@@ -17,6 +17,7 @@ def create_ui():
from modules.shared import log, opts, cmd_opts, refresh_checkpoints
from modules.sd_models import checkpoint_tiles, get_closet_checkpoint_match
from modules.paths import sd_configs_path
+ from . import run_olive_workflow
from .execution_providers import ExecutionProvider, install_execution_provider
from .utils import check_diffusers_cache
@@ -47,7 +48,7 @@ def create_ui():
ep_install.click(fn=install_execution_provider, inputs=ep_checkbox)
- if opts.cuda_compile_backend == "olive-ai":
+ if run_olive_workflow is not None:
import olive.passes as olive_passes
from olive.hardware.accelerator import AcceleratorSpec, Device
diff --git a/modules/postprocess/gfpgan_model.py b/modules/postprocess/gfpgan_model.py
index b85c900e6..6de9b771c 100644
--- a/modules/postprocess/gfpgan_model.py
+++ b/modules/postprocess/gfpgan_model.py
@@ -104,4 +104,4 @@ def restore(self, np_image):
shared.face_restorers.append(FaceRestorerGFPGAN())
except Exception as e:
- errors.display(e, 'gfpgan')
+ errors.log.error(f'GFPGAN failed to initialize: {e}')
diff --git a/modules/shared.py b/modules/shared.py
index bd30f75c1..1bf739267 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -562,7 +562,7 @@ def temp_disable_extensions():
"send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
"keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001, "visible": False}),
"keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001, "visible": False}),
- "keyedit_delimiters": OptionInfo(".,\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters", gr.Textbox, { "visible": False }), # pylint: disable=anomalous-backslash-in-string
+ "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters", gr.Textbox, { "visible": False }),
"quicksettings_list": OptionInfo(["sd_model_checkpoint"] if backend == Backend.ORIGINAL else ["sd_model_checkpoint", "sd_model_refiner"], "Quicksettings list", gr.Dropdown, lambda: {"multiselect":True, "choices": list(opts.data_labels.keys())}),
"ui_scripts_reorder": OptionInfo("", "UI scripts order", gr.Textbox, { "visible": False }),
}))
diff --git a/modules/shared_items.py b/modules/shared_items.py
index 74c4de8ff..d4de35207 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -47,17 +47,20 @@ def get_pipelines():
'Kandinsky 2.2': getattr(diffusers, 'KandinskyV22Pipeline', None),
'Kandinsky 3': getattr(diffusers, 'Kandinsky3Pipeline', None),
'DeepFloyd IF': getattr(diffusers, 'IFPipeline', None),
- 'ONNX Stable Diffusion': getattr(diffusers, 'OnnxStableDiffusionPipeline', None),
- 'ONNX Stable Diffusion Img2Img': getattr(diffusers, 'OnnxStableDiffusionImg2ImgPipeline', None),
- 'ONNX Stable Diffusion Inpaint': getattr(diffusers, 'OnnxStableDiffusionInpaintPipeline', None),
- 'ONNX Stable Diffusion Upscale': getattr(diffusers, 'OnnxStableDiffusionUpscalePipeline', None),
- 'ONNX Stable Diffusion XL': getattr(diffusers, 'OnnxStableDiffusionXLPipeline', None),
- 'ONNX Stable Diffusion XL Img2Img': getattr(diffusers, 'OnnxStableDiffusionXLImg2ImgPipeline', None),
'Custom Diffusers Pipeline': getattr(diffusers, 'DiffusionPipeline', None),
'InstaFlow': getattr(diffusers, 'StableDiffusionPipeline', None), # dynamically redefined and loaded in sd_models.load_diffuser
'SegMoE': getattr(diffusers, 'StableDiffusionPipeline', None), # dynamically redefined and loaded in sd_models.load_diffuser
- # Segmind SSD-1B, Segmind Tiny
}
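+ # the ONNX pipeline classes are attached to diffusers by the ONNX hijack; register them only when present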
+ if hasattr(diffusers, 'OnnxStableDiffusionXLPipeline'):
+ onnx_pipelines = {
+ 'ONNX Stable Diffusion': getattr(diffusers, 'OnnxStableDiffusionPipeline', None),
+ 'ONNX Stable Diffusion Img2Img': getattr(diffusers, 'OnnxStableDiffusionImg2ImgPipeline', None),
+ 'ONNX Stable Diffusion Inpaint': getattr(diffusers, 'OnnxStableDiffusionInpaintPipeline', None),
+ 'ONNX Stable Diffusion Upscale': getattr(diffusers, 'OnnxStableDiffusionUpscalePipeline', None),
+ 'ONNX Stable Diffusion XL': getattr(diffusers, 'OnnxStableDiffusionXLPipeline', None),
+ 'ONNX Stable Diffusion XL Img2Img': getattr(diffusers, 'OnnxStableDiffusionXLImg2ImgPipeline', None),
+ }
+ pipelines.update(onnx_pipelines)
for k, v in pipelines.items():
if k != 'Autodetect' and v is None:
diff --git a/requirements.txt b/requirements.txt
index a12005c7e..ae99222f7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,5 @@
setuptools
+patch-ng
addict
aenum
aiohttp
@@ -9,22 +10,18 @@ blendmodes
clean-fid
easydev
extcolors
-facexlib
filetype
future
gdown
-gfpgan
GitPython
httpcore
inflection
jsonmerge
kornia
lark
-lmdb
lpips
omegaconf
open-clip-torch
-onnx
optimum
piexif
psutil
@@ -34,13 +31,12 @@ rich
safetensors
scipy
tb_nightly
-tensordict
+tensordict==0.2.1
toml
torchdiffeq
voluptuous
yapf
scikit-image
-basicsr
fasteners
dctorch
pymatting
@@ -60,10 +56,10 @@ diffusers==0.26.2
einops==0.4.1
gradio==3.43.2
huggingface_hub==0.20.3
-numexpr==2.8.4
-numpy==1.26.2
-numba==0.58.1
-pandas==1.5.3
+numexpr==2.8.8
+numpy==1.26.4
+numba==0.59.0
+pandas
protobuf==3.20.3
pytorch_lightning==1.9.4
tokenizers==0.15.1
diff --git a/scripts/postprocessing_gfpgan.py b/scripts/postprocessing_gfpgan.py
index a20e3542a..a69f97c9e 100644
--- a/scripts/postprocessing_gfpgan.py
+++ b/scripts/postprocessing_gfpgan.py
@@ -2,7 +2,6 @@
import numpy as np
import gradio as gr
from modules import scripts_postprocessing
-from modules.postprocess import gfpgan_model
class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
@@ -15,8 +14,12 @@ def ui(self):
return { "gfpgan_visibility": gfpgan_visibility }
def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility): # pylint: disable=arguments-differ
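+ # facexlib and gfpgan were removed from base requirements; install them lazily on first use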
+ from installer import install
+ install("facexlib")
+ install("gfpgan")
if gfpgan_visibility == 0:
return
+ from modules.postprocess import gfpgan_model
restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8))
res = Image.fromarray(restored_img)
if gfpgan_visibility < 1.0:
diff --git a/wiki b/wiki
index c44eeed91..7654276a0 160000
--- a/wiki
+++ b/wiki
@@ -1 +1 @@
-Subproject commit c44eeed913f772b8a2c9fb9ed56b42606588c425
+Subproject commit 7654276a07723988aefc17da885de8686ee0c530