From c1f99d5757adbfe517a398b70b1d65c693dd36ce Mon Sep 17 00:00:00 2001
From: mrhan1993 <50648276+mrhan1993@users.noreply.github.com>
Date: Sun, 7 Apr 2024 14:18:03 +0800
Subject: [PATCH 1/2] Lightning support for cog

---
 fooocusapi/worker.py |   2 +-
 main.py              |   5 +-
 predict.py           | 303 ++++++++++++++++++++++++++-----------------
 3 files changed, 191 insertions(+), 119 deletions(-)

diff --git a/fooocusapi/worker.py b/fooocusapi/worker.py
index c6895c0..a505b0c 100644
--- a/fooocusapi/worker.py
+++ b/fooocusapi/worker.py
@@ -684,7 +684,7 @@ def yield_result(_, imgs, tasks, extension='png'):
             )
 
         if debugging_inpaint_preprocessor:
-            yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing())
+            yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(), tasks)
            return
 
         progressbar(async_task, 13, 'VAE Inpaint encoding ...')
diff --git a/main.py b/main.py
index c8127ca..313de6a 100644
--- a/main.py
+++ b/main.py
@@ -382,13 +382,14 @@ class Args(object):
     prepare_environments(args)
 
     if load_all_models:
-        import modules.config as config
+        from modules import config
         from fooocusapi.parameters import default_inpaint_engine_version
         config.downloading_upscale_model()
         config.downloading_inpaint_models(default_inpaint_engine_version)
         config.downloading_controlnet_canny()
         config.downloading_controlnet_cpds()
-        config.downloading_ip_adapters()
+        config.downloading_ip_adapters('ip')
+        config.downloading_ip_adapters('face')
     print("[Pre Setup] Finished")
diff --git a/predict.py b/predict.py
index 3c0037d..2f73af0 100644
--- a/predict.py
+++ b/predict.py
@@ -1,109 +1,179 @@
+"""
 # Prediction interface for Cog ⚙️
 # https://github.com/replicate/cog/blob/main/docs/python.md
+"""
 import copy
 import os
+from typing import List
 
 import numpy as np
 from PIL import Image
-from typing import List
 
 from cog import BasePredictor, BaseModel, Input, Path
 from fooocusapi.file_utils import output_dir
-from fooocusapi.parameters import (GenerationFinishReason,
-                                   ImageGenerationParams,
-                                   available_aspect_ratios,
-                                   uov_methods,
-                                   outpaint_expansions,
-                                   default_styles,
-                                   default_base_model_name,
-                                   default_refiner_model_name,
-                                   default_loras,
-                                   default_refiner_switch,
-                                   default_cfg_scale,
-                                   default_prompt_negative)
+from fooocusapi.parameters import (
+    GenerationFinishReason,
+    ImageGenerationParams,
+    available_aspect_ratios,
+    uov_methods,
+    outpaint_expansions,
+    default_styles,
+    default_base_model_name,
+    default_refiner_model_name,
+    default_loras,
+    default_refiner_switch,
+    default_cfg_scale,
+    default_prompt_negative
+)
 from fooocusapi.task_queue import TaskType
 
+
 class Output(BaseModel):
     seeds: List[str]
     paths: List[Path]
 
+
 class Predictor(BasePredictor):
     def setup(self) -> None:
         """Load the model into memory to make running multiple predictions efficient"""
         from main import pre_setup
-        pre_setup(disable_image_log=True, skip_pip=True, preload_pipeline=True, preset=None)
+        pre_setup(disable_image_log=True, skip_pip=True, preload_pipeline=True, preset='default')
 
     def predict(
             self,
-            prompt: str = Input( default='', description="Prompt for image generation"),
-            negative_prompt: str = Input( default=default_prompt_negative,
-                                         description="Negtive prompt for image generation"),
-            style_selections: str = Input(default=','.join(default_styles),
-                                          description="Fooocus styles applied for image generation, seperated by comma"),
-            performance_selection: str = Input( default='Speed',
-                                               description="Performance selection", choices=['Speed', 'Quality', 'Extreme Speed']),
-            aspect_ratios_selection: str = Input(default='1152*896',
-                                                 description="The generated image's size", choices=available_aspect_ratios),
-            image_number: int = Input(default=1,
-                                      description="How many image to generate", ge=1, le=8),
-            image_seed: int = Input(default=-1,
-                                    description="Seed to generate image, -1 for random"),
-            sharpness: float = Input(default=2.0, ge=0.0, le=30.0),
-            guidance_scale: float = Input(default=default_cfg_scale, ge=1.0, le=30.0),
-            refiner_switch: float = Input(default=default_refiner_switch, ge=0.1, le=1.0),
-            uov_input_image: Path = Input(default=None,
-                                          description="Input image for upscale or variation, keep None for not upscale or variation"),
-            uov_method: str = Input(default='Disabled', choices=uov_methods),
-            uov_upscale_value: float = Input(default=0, description="Only when Upscale (Custom)"),
-            inpaint_additional_prompt: str = Input( default='', description="Prompt for image generation"),
-            inpaint_input_image: Path = Input(default=None,
-                                              description="Input image for inpaint or outpaint, keep None for not inpaint or outpaint. Please noticed, `uov_input_image` has bigger priority is not None."),
-            inpaint_input_mask: Path = Input(default=None,
-                                             description="Input mask for inpaint"),
-            outpaint_selections: str = Input(default='',
-                                             description="Outpaint expansion selections, literal 'Left', 'Right', 'Top', 'Bottom' seperated by comma"),
-            outpaint_distance_left: int = Input(default=0,
-                                                description="Outpaint expansion distance from Left of the image"),
-            outpaint_distance_top: int = Input(default=0,
-                                               description="Outpaint expansion distance from Top of the image"),
-            outpaint_distance_right: int = Input(default=0,
-                                                 description="Outpaint expansion distance from Right of the image"),
-            outpaint_distance_bottom: int = Input(default=0,
-                                                  description="Outpaint expansion distance from Bottom of the image"),
-            cn_img1: Path = Input(default=None,
-                                  description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
-            cn_stop1: float = Input(default=None, ge=0, le=1,
-                                    description="Stop at for image prompt, None for default value"),
-            cn_weight1: float = Input(default=None, ge=0, le=2,
-                                      description="Weight for image prompt, None for default value"),
-            cn_type1: str = Input(default='ImagePrompt', description="ControlNet type for image prompt", choices=[ 'ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
-            cn_img2: Path = Input(default=None,
-                                  description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
-            cn_stop2: float = Input(default=None, ge=0, le=1,
-                                    description="Stop at for image prompt, None for default value"),
-            cn_weight2: float = Input(default=None, ge=0, le=2,
-                                      description="Weight for image prompt, None for default value"),
-            cn_type2: str = Input(default='ImagePrompt', description="ControlNet type for image prompt", choices=[ 'ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
-            cn_img3: Path = Input(default=None,
-                                  description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
-            cn_stop3: float = Input(default=None, ge=0, le=1,
-                                    description="Stop at for image prompt, None for default value"),
-            cn_weight3: float = Input(default=None, ge=0, le=2,
-                                      description="Weight for image prompt, None for default value"),
-            cn_type3: str = Input(default='ImagePrompt',
-                                  description="ControlNet type for image prompt", choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
-            cn_img4: Path = Input(default=None,
-                                  description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
-            cn_stop4: float = Input(default=None, ge=0, le=1,
-                                    description="Stop at for image prompt, None for default value"),
-            cn_weight4: float = Input(default=None, ge=0, le=2,
-                                      description="Weight for image prompt, None for default value"),
-            cn_type4: str = Input(default='ImagePrompt', description="ControlNet type for image prompt", choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
+            prompt: str = Input(
+                default='',
+                description="Prompt for image generation"),
+            negative_prompt: str = Input(
+                default=default_prompt_negative,
+                description="Negtive prompt for image generation"),
+            style_selections: str = Input(
+                default=','.join(default_styles),
+                description="Fooocus styles applied for image generation, seperated by comma"),
+            performance_selection: str = Input(
+                default='Speed',
+                choices=['Speed', 'Quality', 'Extreme Speed', 'Lightning'],
+                description="Performance selection"),
+            aspect_ratios_selection: str = Input(
+                default='1152*896',
+                choices=available_aspect_ratios,
+                description="The generated image's size"),
+            image_number: int = Input(
+                default=1,
+                ge=1, le=8,
+                description="How many image to generate"),
+            save_extension: str = Input(
+                default='png',
+                choices=['png', 'jpg', 'webp'],
+                description="File extension for image generation"),
+            image_seed: int = Input(
+                default=-1,
+                description="Seed to generate image, -1 for random"),
+            sharpness: float = Input(
+                default=2.0,
+                ge=0.0, le=30.0),
+            guidance_scale: float = Input(
+                default=default_cfg_scale,
+                ge=1.0, le=30.0),
+            refiner_switch: float = Input(
+                default=default_refiner_switch,
+                ge=0.1, le=1.0),
+            uov_input_image: Path = Input(
+                default=None,
+                description="Input image for upscale or variation, keep None for not upscale or variation"),
+            uov_method: str = Input(
+                default='Disabled',
+                choices=uov_methods),
+            uov_upscale_value: float = Input(
+                default=0,
+                description="Only when Upscale (Custom)"),
+            inpaint_additional_prompt: str = Input(
+                default='',
+                description="Prompt for image generation"),
+            inpaint_input_image: Path = Input(
+                default=None,
+                description="Input image for inpaint or outpaint, keep None for not inpaint or outpaint. Please noticed, `uov_input_image` has bigger priority is not None."),
+            inpaint_input_mask: Path = Input(
+                default=None,
+                description="Input mask for inpaint"),
+            outpaint_selections: str = Input(
+                default='',
+                description="Outpaint expansion selections, literal 'Left', 'Right', 'Top', 'Bottom' seperated by comma"),
+            outpaint_distance_left: int = Input(
+                default=0,
+                description="Outpaint expansion distance from Left of the image"),
+            outpaint_distance_top: int = Input(
+                default=0,
+                description="Outpaint expansion distance from Top of the image"),
+            outpaint_distance_right: int = Input(
+                default=0,
+                description="Outpaint expansion distance from Right of the image"),
+            outpaint_distance_bottom: int = Input(
+                default=0,
+                description="Outpaint expansion distance from Bottom of the image"),
+            cn_img1: Path = Input(
+                default=None,
+                description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
+            cn_stop1: float = Input(
+                default=None,
+                ge=0, le=1,
+                description="Stop at for image prompt, None for default value"),
+            cn_weight1: float = Input(
+                default=None,
+                ge=0, le=2,
+                description="Weight for image prompt, None for default value"),
+            cn_type1: str = Input(
+                default='ImagePrompt',
+                choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS'],
+                description="ControlNet type for image prompt"),
+            cn_img2: Path = Input(
+                default=None,
+                description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
+            cn_stop2: float = Input(
+                default=None,
+                ge=0, le=1,
+                description="Stop at for image prompt, None for default value"),
+            cn_weight2: float = Input(
+                default=None,
+                ge=0, le=2,
+                description="Weight for image prompt, None for default value"),
+            cn_type2: str = Input(
+                default='ImagePrompt',
+                choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS'],
+                description="ControlNet type for image prompt"),
+            cn_img3: Path = Input(
+                default=None,
+                description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
+            cn_stop3: float = Input(
+                default=None,
+                ge=0, le=1,
+                description="Stop at for image prompt, None for default value"),
+            cn_weight3: float = Input(
+                default=None,
+                ge=0, le=2,
+                description="Weight for image prompt, None for default value"),
+            cn_type3: str = Input(
+                default='ImagePrompt',
+                choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS'],
+                description="ControlNet type for image prompt"),
+            cn_img4: Path = Input(
+                default=None,
+                description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
+            cn_stop4: float = Input(
+                default=None,
+                ge=0, le=1,
+                description="Stop at for image prompt, None for default value"),
+            cn_weight4: float = Input(
+                default=None,
+                ge=0, le=2,
+                description="Weight for image prompt, None for default value"),
+            cn_type4: str = Input(
+                default='ImagePrompt',
+                choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS'],
+                description="ControlNet type for image prompt")
     ) -> Output:
         """Run a single prediction on the model"""
-        import modules.flags as flags
+        from modules import flags
         from modules.sdxl_styles import legal_style_names
         from fooocusapi.worker import blocking_get_task_result, worker_queue
@@ -142,8 +212,11 @@ def predict(
                 outpaint_selections_arr.append(expansion)
 
         image_prompts = []
-        image_prompt_config = [(cn_img1, cn_stop1, cn_weight1, cn_type1), (cn_img2, cn_stop2, cn_weight2, cn_type2),
-                               (cn_img3, cn_stop3, cn_weight3, cn_type3), (cn_img4, cn_stop4, cn_weight4, cn_type4)]
+        image_prompt_config = [
+            (cn_img1, cn_stop1, cn_weight1, cn_type1),
+            (cn_img2, cn_stop2, cn_weight2, cn_type2),
+            (cn_img3, cn_stop3, cn_weight3, cn_type3),
+            (cn_img4, cn_stop4, cn_weight4, cn_type4)]
         for config in image_prompt_config:
             cn_img, cn_stop, cn_weight, cn_type = config
             if cn_img is not None:
@@ -157,42 +230,42 @@ def predict(
 
         advanced_params = None
 
-        params = ImageGenerationParams(prompt=prompt,
-                                       negative_prompt=negative_prompt,
-                                       style_selections=style_selections_arr,
-                                       performance_selection=performance_selection,
-                                       aspect_ratios_selection=aspect_ratios_selection,
-                                       image_number=image_number,
-                                       image_seed=image_seed,
-                                       sharpness=sharpness,
-                                       guidance_scale=guidance_scale,
-                                       base_model_name=base_model_name,
-                                       refiner_model_name=refiner_model_name,
-                                       refiner_switch=refiner_switch,
-                                       loras=loras,
-                                       uov_input_image=uov_input_image,
-                                       uov_method=uov_method,
-                                       upscale_value=uov_upscale_value,
-                                       outpaint_selections=outpaint_selections_arr,
-                                       inpaint_input_image=inpaint_input_image_dict,
-                                       image_prompts=image_prompts,
-                                       advanced_params=advanced_params,
-                                       inpaint_additional_prompt=inpaint_additional_prompt,
-                                       outpaint_distance_left=outpaint_distance_left,
-                                       outpaint_distance_top=outpaint_distance_top,
-                                       outpaint_distance_right=outpaint_distance_right,
-                                       outpaint_distance_bottom=outpaint_distance_bottom,
-                                       require_base64=False,
-                                       )
+        params = ImageGenerationParams(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            style_selections=style_selections_arr,
+            performance_selection=performance_selection,
+            aspect_ratios_selection=aspect_ratios_selection,
+            image_number=image_number,
+            image_seed=image_seed,
+            sharpness=sharpness,
+            guidance_scale=guidance_scale,
+            base_model_name=base_model_name,
+            refiner_model_name=refiner_model_name,
+            refiner_switch=refiner_switch,
+            loras=loras,
+            uov_input_image=uov_input_image,
+            uov_method=uov_method,
+            upscale_value=uov_upscale_value,
+            outpaint_selections=outpaint_selections_arr,
+            inpaint_input_image=inpaint_input_image_dict,
+            image_prompts=image_prompts,
+            advanced_params=advanced_params,
+            inpaint_additional_prompt=inpaint_additional_prompt,
+            outpaint_distance_left=outpaint_distance_left,
+            outpaint_distance_top=outpaint_distance_top,
+            outpaint_distance_right=outpaint_distance_right,
+            outpaint_distance_bottom=outpaint_distance_bottom,
+            require_base64=False,
+            save_extension=save_extension
+        )
 
         print(f"[Predictor Predict] Params: {params.__dict__}")
 
         async_task = worker_queue.add_task(TaskType.text_2_img, {'params': params.__dict__, 'require_base64': False})
         if async_task is None:
             print("[Task Queue] The task queue has reached limit")
-            raise Exception(
-                f"The task queue has reached limit."
-            )
+            raise Exception("The task queue has reached limit.")
 
         results = blocking_get_task_result(async_task.job_id)
         output_paths: List[Path] = []
@@ -205,8 +278,6 @@ def predict(
         print(f"[Predictor Predict] Finished with {len(output_paths)} images")
 
         if len(output_paths) == 0:
-            raise Exception(
-                f"Process failed."
-            )
+            raise Exception("Process failed.")
 
         return Output(seeds=output_seeds, paths=output_paths)
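With PATCH 1 applied, the Cog predictor exposes the new 'Lightning' performance choice and the new save_extension input. The sketch below shows one way the reworked interface could be exercised directly from Python; it is an illustration only and assumes a Fooocus-API checkout with the model weights already downloaded. The prompt, style name, and numeric values are arbitrary examples, not values taken from the patch.

# Illustrative sketch only: requires the Fooocus-API repository and its models.
from predict import Predictor

predictor = Predictor()
predictor.setup()  # runs pre_setup(..., preset='default') and preloads the pipeline

output = predictor.predict(
    prompt="a forest cabin at dawn, volumetric light",   # example prompt
    negative_prompt="",
    style_selections="Fooocus V2",                        # example style name
    performance_selection="Lightning",                    # new choice from this patch
    aspect_ratios_selection="1152*896",
    image_number=1,
    save_extension="webp",                                # new input from this patch
    image_seed=-1,
    sharpness=2.0,
    guidance_scale=4.0,
    refiner_switch=0.5,
    uov_input_image=None,
    uov_method="Disabled",
    uov_upscale_value=0,
    inpaint_additional_prompt="",
    inpaint_input_image=None,
    inpaint_input_mask=None,
    outpaint_selections="",
    outpaint_distance_left=0,
    outpaint_distance_top=0,
    outpaint_distance_right=0,
    outpaint_distance_bottom=0,
    cn_img1=None, cn_stop1=None, cn_weight1=None, cn_type1="ImagePrompt",
    cn_img2=None, cn_stop2=None, cn_weight2=None, cn_type2="ImagePrompt",
    cn_img3=None, cn_stop3=None, cn_weight3=None, cn_type3="ImagePrompt",
    cn_img4=None, cn_stop4=None, cn_weight4=None, cn_type4="ImagePrompt",
)
print(output.seeds, output.paths)

Every input is passed explicitly here because calling predict() outside the Cog runtime does not resolve the Input() defaults; under `cog predict` or the HTTP server, omitted inputs fall back to the defaults declared above.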
From e7556d4726a37e80668a9921a872803e63a7ce21 Mon Sep 17 00:00:00 2001
From: mrhan1993 <50648276+mrhan1993@users.noreply.github.com>
Date: Sun, 7 Apr 2024 17:01:52 +0800
Subject: [PATCH 2/2] fix issue #244

---
 fooocusapi/models.py     |   2 +-
 fooocusapi/parameters.py | 102 +++++++++++++++++----------------------
 fooocusapi/task_queue.py |   3 ++
 main.py                  |  22 ++++++---
 predict.py               |   2 +-
 5 files changed, 64 insertions(+), 67 deletions(-)

diff --git a/fooocusapi/models.py b/fooocusapi/models.py
index 1bb4b7e..0197fa6 100644
--- a/fooocusapi/models.py
+++ b/fooocusapi/models.py
@@ -105,7 +105,7 @@ class AdvancedParams(BaseModel):
     freeu_s2: float = Field(0.95, description="FreeU B4")
     debugging_inpaint_preprocessor: bool = Field(False, description="Debug Inpaint Preprocessing")
     inpaint_disable_initial_latent: bool = Field(False, description="Disable initial latent in inpaint")
-    inpaint_engine: str = Field('v1', description="Inpaint Engine")
+    inpaint_engine: str = Field('v2.6', description="Inpaint Engine")
     inpaint_strength: float = Field(1.0, description="Inpaint Denoising Strength", ge=0.0, le=1.0)
     inpaint_respective_field: float = Field(1.0, description="Inpaint Respective Field", ge=0.0, le=1.0)
     inpaint_mask_upload_checkbox: bool = Field(False, description="Upload Mask")
diff --git a/fooocusapi/parameters.py b/fooocusapi/parameters.py
index 38e3ac2..9fc5607 100644
--- a/fooocusapi/parameters.py
+++ b/fooocusapi/parameters.py
@@ -2,6 +2,8 @@ from typing import Dict, List, Tuple
 
 import numpy as np
 
+from pydantic import BaseModel, Field
+
 
 default_inpaint_engine_version = 'v2.6'
 
@@ -75,6 +77,45 @@ def __init__(self, im: str | None, seed: str, finish_reason: GenerationFinishRea
         self.finish_reason = finish_reason
 
 
+class AdvancedParams(BaseModel):
+    disable_preview: bool = Field(False, description="Disable preview during generation")
+    disable_intermediate_results: bool = Field(False, description="Disable intermediate results")
+    disable_seed_increment: bool = Field(False, description="Disable Seed Increment")
+    adm_scaler_positive: float = Field(1.5, description="Positive ADM Guidance Scaler", ge=0.1, le=3.0)
+    adm_scaler_negative: float = Field(0.8, description="Negative ADM Guidance Scaler", ge=0.1, le=3.0)
+    adm_scaler_end: float = Field(0.3, description="ADM Guidance End At Step", ge=0.0, le=1.0)
+    adaptive_cfg: float = Field(7.0, description="CFG Mimicking from TSNR", ge=1.0, le=30.0)
+    sampler_name: str = Field(default_sampler, description="Sampler")
+    scheduler_name: str = Field(default_scheduler, description="Scheduler")
+    overwrite_step: int = Field(-1, description="Forced Overwrite of Sampling Step", ge=-1, le=200)
+    overwrite_switch: float = Field(-1, description="Forced Overwrite of Refiner Switch Step", ge=-1, le=1)
+    overwrite_width: int = Field(-1, description="Forced Overwrite of Generating Width", ge=-1, le=2048)
+    overwrite_height: int = Field(-1, description="Forced Overwrite of Generating Height", ge=-1, le=2048)
+    overwrite_vary_strength: float = Field(-1, description='Forced Overwrite of Denoising Strength of "Vary"', ge=-1, le=1.0)
+    overwrite_upscale_strength: float = Field(-1, description='Forced Overwrite of Denoising Strength of "Upscale"', ge=-1, le=1.0)
+    mixing_image_prompt_and_vary_upscale: bool = Field(False, description="Mixing Image Prompt and Vary/Upscale")
+    mixing_image_prompt_and_inpaint: bool = Field(False, description="Mixing Image Prompt and Inpaint")
+    debugging_cn_preprocessor: bool = Field(False, description="Debug Preprocessors")
+    skipping_cn_preprocessor: bool = Field(False, description="Skip Preprocessors")
+    canny_low_threshold: int = Field(64, description="Canny Low Threshold", ge=1, le=255)
+    canny_high_threshold: int = Field(128, description="Canny High Threshold", ge=1, le=255)
+    refiner_swap_method: str = Field('joint', description="Refiner swap method")
+    controlnet_softness: float = Field(0.25, description="Softness of ControlNet", ge=0.0, le=1.0)
+    freeu_enabled: bool = Field(False, description="FreeU enabled")
+    freeu_b1: float = Field(1.01, description="FreeU B1")
+    freeu_b2: float = Field(1.02, description="FreeU B2")
+    freeu_s1: float = Field(0.99, description="FreeU B3")
+    freeu_s2: float = Field(0.95, description="FreeU B4")
+    debugging_inpaint_preprocessor: bool = Field(False, description="Debug Inpaint Preprocessing")
+    inpaint_disable_initial_latent: bool = Field(False, description="Disable initial latent in inpaint")
+    inpaint_engine: str = Field('v2.6', description="Inpaint Engine")
+    inpaint_strength: float = Field(1.0, description="Inpaint Denoising Strength", ge=0.0, le=1.0)
+    inpaint_respective_field: float = Field(1.0, description="Inpaint Respective Field", ge=0.0, le=1.0)
+    inpaint_mask_upload_checkbox: bool = Field(False, description="Upload Mask")
+    invert_mask_checkbox: bool = Field(False, description="Invert Mask")
+    inpaint_erode_or_dilate: int = Field(0, description="Mask Erode or Dilate", ge=-64, le=64)
+
+
 class ImageGenerationParams(object):
     def __init__(self, prompt: str,
                  negative_prompt: str,
@@ -129,60 +170,7 @@ def __init__(self, prompt: str,
         self.image_prompts = image_prompts
         self.save_extension = save_extension
         self.require_base64 = require_base64
-
-        if advanced_params is None:
-            disable_preview = False
-            adm_scaler_positive = 1.5
-            adm_scaler_negative = 0.8
-            adm_scaler_end = 0.3
-            adaptive_cfg = 7.0
-            sampler_name = default_sampler
-            scheduler_name = default_scheduler
-            generate_image_grid = False
-            overwrite_step = -1
-            overwrite_switch = -1
-            overwrite_width = -1
-            overwrite_height = -1
-            overwrite_vary_strength = -1
-            overwrite_upscale_strength = -1
-            mixing_image_prompt_and_vary_upscale = False
-            mixing_image_prompt_and_inpaint = False
-            debugging_cn_preprocessor = False
-            skipping_cn_preprocessor = False
-            controlnet_softness = 0.25
-            canny_low_threshold = 64
-            canny_high_threshold = 128
-            refiner_swap_method = 'joint'
-            freeu_enabled = False
-            freeu_b1, freeu_b2, freeu_s1, freeu_s2 = [None] * 4
-            debugging_inpaint_preprocessor = False
-            inpaint_disable_initial_latent = False
-            inpaint_engine = default_inpaint_engine_version
-            inpaint_strength = 1.0
-            inpaint_respective_field = 0.618
-            inpaint_mask_upload_checkbox = False
-            invert_mask_checkbox = False
-            inpaint_erode_or_dilate = 0
-
-
-            # Auto set mixing_image_prompt_and_inpaint to True
-            if len(self.image_prompts) > 0 and inpaint_input_image is not None:
-                print('Mixing Image Prompts and Inpaint Enabled')
-                mixing_image_prompt_and_inpaint = True
-            if len(self.image_prompts) > 0 and uov_input_image is not None:
-                print('Mixing Image Prompts and Vary Upscale Enabled')
-                mixing_image_prompt_and_vary_upscale = True
-
-            self.advanced_params = [
-                disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \
-                scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
-                overwrite_vary_strength, overwrite_upscale_strength, \
-                mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
-                debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
-                refiner_swap_method, \
-                freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
-                debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \
-                inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate
-            ]
-        else:
-            self.advanced_params = advanced_params
+        self.advanced_params = advanced_params
+
+        if self.advanced_params is None:
+            self.advanced_params = AdvancedParams()
diff --git a/fooocusapi/task_queue.py b/fooocusapi/task_queue.py
index 4209b19..9542b68 100644
--- a/fooocusapi/task_queue.py
+++ b/fooocusapi/task_queue.py
@@ -81,6 +81,9 @@ def add_task(self, type: TaskType, req_param: ImageGenerationParams, webhook_url
         if len(self.queue) >= self.queue_size:
             return None
 
+        if isinstance(req_param, dict):
+            req_param = ImageGenerationParams(**req_param)
+
         job_id = str(uuid.uuid4())
         task = QueueTask(job_id=job_id, type=type, req_param=req_param,
                          in_queue_millis=int(round(time.time() * 1000)),
diff --git a/main.py b/main.py
index 313de6a..76190fc 100644
--- a/main.py
+++ b/main.py
@@ -330,14 +330,15 @@ def prepare_environments(args) -> bool:
     return True
 
 
-def pre_setup(skip_sync_repo: bool = False,
-              disable_image_log: bool = False,
-              skip_pip=False,
-              load_all_models: bool = False,
-              preload_pipeline: bool = False,
-              always_gpu: bool = False,
-              all_in_fp16: bool = False,
-              preset: str | None = None):
+def pre_setup(
+        skip_sync_repo: bool = False,
+        disable_image_log: bool = False,
+        skip_pip=False,
+        load_all_models: bool = False,
+        preload_pipeline: bool = False,
+        always_gpu: bool = False,
+        all_in_fp16: bool = False,
+        preset: str | None = None):
     class Args(object):
         host = '127.0.0.1'
         port = 8888
@@ -381,6 +382,11 @@ class Args(object):
     import fooocusapi.args as _
     prepare_environments(args)
 
+    # Start task schedule thread
+    from fooocusapi.worker import task_schedule_loop
+    task_schedule_thread = Thread(target=task_schedule_loop, daemon=True)
+    task_schedule_thread.start()
+
    if load_all_models:
         from modules import config
         from fooocusapi.parameters import default_inpaint_engine_version
diff --git a/predict.py b/predict.py
index 2f73af0..dcb8b6b 100644
--- a/predict.py
+++ b/predict.py
@@ -262,7 +262,7 @@ def predict(
 
         print(f"[Predictor Predict] Params: {params.__dict__}")
 
-        async_task = worker_queue.add_task(TaskType.text_2_img, {'params': params.__dict__, 'require_base64': False})
+        async_task = worker_queue.add_task(TaskType.text_2_img, params.__dict__)
         if async_task is None:
             print("[Task Queue] The task queue has reached limit")
             raise Exception("The task queue has reached limit.")
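PATCH 2 swaps the hand-maintained positional list of advanced parameters for a pydantic model: an ImageGenerationParams built with advanced_params=None now falls back to AdvancedParams() defaults, and add_task can rebuild ImageGenerationParams(**req_param) when the request arrives as a plain dict, which is what the Cog predictor now sends. The standalone sketch below mirrors that pattern with only a few representative fields; it uses pydantic directly and is a simplified stand-in, not the project's actual class definitions.

from pydantic import BaseModel, Field


class AdvancedParams(BaseModel):
    # A few representative fields, copied from the patched fooocusapi/parameters.py
    adm_scaler_positive: float = Field(1.5, ge=0.1, le=3.0)
    inpaint_engine: str = Field('v2.6')
    canny_low_threshold: int = Field(64, ge=1, le=255)


class ImageGenerationParams:
    """Trimmed stand-in for the real class, showing only the new fallback."""
    def __init__(self, prompt: str = '', advanced_params: AdvancedParams | None = None):
        self.prompt = prompt
        self.advanced_params = advanced_params
        if self.advanced_params is None:
            # Same fallback the patch introduces: every field takes its declared default.
            self.advanced_params = AdvancedParams()


# A request that travelled through the queue as a plain dict (params.__dict__):
req_param = {'prompt': 'a lighthouse in a storm', 'advanced_params': None}
params = ImageGenerationParams(**req_param)   # what add_task now does for dict payloads
print(params.advanced_params.inpaint_engine)  # -> v2.6

The payoff of the change is that defaults and range validation now live in one declared model instead of a thirty-odd element positional list whose ordering had to stay in sync with the worker.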