Lightning support for cog
mrhan1993 committed Apr 7, 2024
1 parent b9fd060 commit c1f99d5
Showing 3 changed files with 191 additions and 119 deletions.
2 changes: 1 addition & 1 deletion fooocusapi/worker.py
@@ -684,7 +684,7 @@ def yield_result(_, imgs, tasks, extension='png'):
)

if debugging_inpaint_preprocessor:
yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing())
yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(), tasks)
return

progressbar(async_task, 13, 'VAE Inpaint encoding ...')
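Note: the hunk header above shows this fork's signature as def yield_result(_, imgs, tasks, extension='png'), where tasks has no default, so the mask-debugging path has to forward the task list or the call fails with a TypeError. A minimal restatement of the fixed call, for illustration only:

    if debugging_inpaint_preprocessor:
        # tasks is a required positional parameter of yield_result in this fork,
        # so the debug path must pass it through alongside the visualization images
        yield_result(async_task,
                     inpaint_worker.current_task.visualize_mask_processing(),
                     tasks)
        return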
5 changes: 3 additions & 2 deletions main.py
@@ -382,13 +382,14 @@ class Args(object):
prepare_environments(args)

if load_all_models:
import modules.config as config
from modules import config
from fooocusapi.parameters import default_inpaint_engine_version
config.downloading_upscale_model()
config.downloading_inpaint_models(default_inpaint_engine_version)
config.downloading_controlnet_canny()
config.downloading_controlnet_cpds()
config.downloading_ip_adapters()
config.downloading_ip_adapters('ip')
config.downloading_ip_adapters('face')
print("[Pre Setup] Finished")
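The updated block now also fetches the default inpaint engine and both IP-Adapter variants ('ip' and 'face'), so a Cog image built with all models baked in does not download them at prediction time. A hedged sketch of running the same downloads standalone; the config.downloading_* calls are the ones shown above, and it assumes the Fooocus environment has already been prepared so modules.config is importable:

    # Sketch only: assumes prepare_environments() (or an equivalent setup step)
    # has already run in this process so modules.config resolves its paths.
    from modules import config
    from fooocusapi.parameters import default_inpaint_engine_version

    config.downloading_upscale_model()
    config.downloading_inpaint_models(default_inpaint_engine_version)
    config.downloading_controlnet_canny()
    config.downloading_controlnet_cpds()
    config.downloading_ip_adapters('ip')    # image-prompt adapter
    config.downloading_ip_adapters('face')  # presumably the face/FaceSwap variant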


303 changes: 187 additions & 116 deletions predict.py
@@ -1,109 +1,179 @@
"""
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
"""

import copy
import os
from typing import List
import numpy as np

from PIL import Image
from typing import List
from cog import BasePredictor, BaseModel, Input, Path
from fooocusapi.file_utils import output_dir
from fooocusapi.parameters import (GenerationFinishReason,
ImageGenerationParams,
available_aspect_ratios,
uov_methods,
outpaint_expansions,
default_styles,
default_base_model_name,
default_refiner_model_name,
default_loras,
default_refiner_switch,
default_cfg_scale,
default_prompt_negative)
from fooocusapi.parameters import (
GenerationFinishReason,
ImageGenerationParams,
available_aspect_ratios,
uov_methods,
outpaint_expansions,
default_styles,
default_base_model_name,
default_refiner_model_name,
default_loras,
default_refiner_switch,
default_cfg_scale,
default_prompt_negative
)
from fooocusapi.task_queue import TaskType


class Output(BaseModel):
seeds: List[str]
paths: List[Path]


class Predictor(BasePredictor):
def setup(self) -> None:
"""Load the model into memory to make running multiple predictions efficient"""
from main import pre_setup
pre_setup(disable_image_log=True, skip_pip=True, preload_pipeline=True, preset=None)
pre_setup(disable_image_log=True, skip_pip=True, preload_pipeline=True, preset='default')

def predict(
self,
prompt: str = Input( default='', description="Prompt for image generation"),
negative_prompt: str = Input( default=default_prompt_negative,
description="Negtive prompt for image generation"),
style_selections: str = Input(default=','.join(default_styles),
description="Fooocus styles applied for image generation, seperated by comma"),
performance_selection: str = Input( default='Speed',
description="Performance selection", choices=['Speed', 'Quality', 'Extreme Speed']),
aspect_ratios_selection: str = Input(default='1152*896',
description="The generated image's size", choices=available_aspect_ratios),
image_number: int = Input(default=1,
description="How many image to generate", ge=1, le=8),
image_seed: int = Input(default=-1,
description="Seed to generate image, -1 for random"),
sharpness: float = Input(default=2.0, ge=0.0, le=30.0),
guidance_scale: float = Input(default=default_cfg_scale, ge=1.0, le=30.0),
refiner_switch: float = Input(default=default_refiner_switch, ge=0.1, le=1.0),
uov_input_image: Path = Input(default=None,
description="Input image for upscale or variation, keep None for not upscale or variation"),
uov_method: str = Input(default='Disabled', choices=uov_methods),
uov_upscale_value: float = Input(default=0, description="Only when Upscale (Custom)"),
inpaint_additional_prompt: str = Input( default='', description="Prompt for image generation"),
inpaint_input_image: Path = Input(default=None,
description="Input image for inpaint or outpaint, keep None for not inpaint or outpaint. Please noticed, `uov_input_image` has bigger priority is not None."),
inpaint_input_mask: Path = Input(default=None,
description="Input mask for inpaint"),
outpaint_selections: str = Input(default='',
description="Outpaint expansion selections, literal 'Left', 'Right', 'Top', 'Bottom' seperated by comma"),
outpaint_distance_left: int = Input(default=0,
description="Outpaint expansion distance from Left of the image"),
outpaint_distance_top: int = Input(default=0,
description="Outpaint expansion distance from Top of the image"),
outpaint_distance_right: int = Input(default=0,
description="Outpaint expansion distance from Right of the image"),
outpaint_distance_bottom: int = Input(default=0,
description="Outpaint expansion distance from Bottom of the image"),
cn_img1: Path = Input(default=None,
description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
cn_stop1: float = Input(default=None, ge=0, le=1,
description="Stop at for image prompt, None for default value"),
cn_weight1: float = Input(default=None, ge=0, le=2,
description="Weight for image prompt, None for default value"),
cn_type1: str = Input(default='ImagePrompt', description="ControlNet type for image prompt", choices=[
'ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
cn_img2: Path = Input(default=None,
description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
cn_stop2: float = Input(default=None, ge=0, le=1,
description="Stop at for image prompt, None for default value"),
cn_weight2: float = Input(default=None, ge=0, le=2,
description="Weight for image prompt, None for default value"),
cn_type2: str = Input(default='ImagePrompt', description="ControlNet type for image prompt", choices=[
'ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
cn_img3: Path = Input(default=None,
description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
cn_stop3: float = Input(default=None, ge=0, le=1,
description="Stop at for image prompt, None for default value"),
cn_weight3: float = Input(default=None, ge=0, le=2,
description="Weight for image prompt, None for default value"),
cn_type3: str = Input(default='ImagePrompt',
description="ControlNet type for image prompt", choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
cn_img4: Path = Input(default=None,
description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
cn_stop4: float = Input(default=None, ge=0, le=1,
description="Stop at for image prompt, None for default value"),
cn_weight4: float = Input(default=None, ge=0, le=2,
description="Weight for image prompt, None for default value"),
cn_type4: str = Input(default='ImagePrompt', description="ControlNet type for image prompt", choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
prompt: str = Input(
default='',
description="Prompt for image generation"),
negative_prompt: str = Input(
default=default_prompt_negative,
description="Negtive prompt for image generation"),
style_selections: str = Input(
default=','.join(default_styles),
description="Fooocus styles applied for image generation, seperated by comma"),
performance_selection: str = Input(
default='Speed',
choices=['Speed', 'Quality', 'Extreme Speed', 'Lightning'],
description="Performance selection"),
aspect_ratios_selection: str = Input(
default='1152*896',
choices=available_aspect_ratios,
description="The generated image's size"),
image_number: int = Input(
default=1,
ge=1, le=8,
description="How many image to generate"),
save_extension: str = Input(
default='png',
choices=['png', 'jpg', 'webp'],
description="File extension for image generation"),
image_seed: int = Input(
default=-1,
description="Seed to generate image, -1 for random"),
sharpness: float = Input(
default=2.0,
ge=0.0, le=30.0),
guidance_scale: float = Input(
default=default_cfg_scale,
ge=1.0, le=30.0),
refiner_switch: float = Input(
default=default_refiner_switch,
ge=0.1, le=1.0),
uov_input_image: Path = Input(
default=None,
description="Input image for upscale or variation, keep None for not upscale or variation"),
uov_method: str = Input(
default='Disabled',
choices=uov_methods),
uov_upscale_value: float = Input(
default=0,
description="Only when Upscale (Custom)"),
inpaint_additional_prompt: str = Input(
default='',
description="Prompt for image generation"),
inpaint_input_image: Path = Input(
default=None,
description="Input image for inpaint or outpaint, keep None for not inpaint or outpaint. Please noticed, `uov_input_image` has bigger priority is not None."),
inpaint_input_mask: Path = Input(
default=None,
description="Input mask for inpaint"),
outpaint_selections: str = Input(
default='',
description="Outpaint expansion selections, literal 'Left', 'Right', 'Top', 'Bottom' seperated by comma"),
outpaint_distance_left: int = Input(
default=0,
description="Outpaint expansion distance from Left of the image"),
outpaint_distance_top: int = Input(
default=0,
description="Outpaint expansion distance from Top of the image"),
outpaint_distance_right: int = Input(
default=0,
description="Outpaint expansion distance from Right of the image"),
outpaint_distance_bottom: int = Input(
default=0,
description="Outpaint expansion distance from Bottom of the image"),
cn_img1: Path = Input(
default=None,
description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
cn_stop1: float = Input(
default=None,
ge=0, le=1,
description="Stop at for image prompt, None for default value"),
cn_weight1: float = Input(
default=None,
ge=0, le=2,
description="Weight for image prompt, None for default value"),
cn_type1: str = Input(
default='ImagePrompt',
choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS'],
description="ControlNet type for image prompt"),
cn_img2: Path = Input(
default=None,
description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
cn_stop2: float = Input(
default=None,
ge=0, le=1,
description="Stop at for image prompt, None for default value"),
cn_weight2: float = Input(
default=None,
ge=0, le=2,
description="Weight for image prompt, None for default value"),
cn_type2: str = Input(
default='ImagePrompt',
choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS'],
description="ControlNet type for image prompt"),
cn_img3: Path = Input(
default=None,
description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
cn_stop3: float = Input(
default=None,
ge=0, le=1,
description="Stop at for image prompt, None for default value"),
cn_weight3: float = Input(
default=None,
ge=0, le=2,
description="Weight for image prompt, None for default value"),
cn_type3: str = Input(
default='ImagePrompt',
choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS'],
description="ControlNet type for image prompt"),
cn_img4: Path = Input(
default=None,
description="Input image for image prompt. If all cn_img[n] are None, image prompt will not applied."),
cn_stop4: float = Input(
default=None,
ge=0, le=1,
description="Stop at for image prompt, None for default value"),
cn_weight4: float = Input(
default=None,
ge=0, le=2,
description="Weight for image prompt, None for default value"),
cn_type4: str = Input(
default='ImagePrompt',
choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS'],
description="ControlNet type for image prompt")
) -> Output:
"""Run a single prediction on the model"""
import modules.flags as flags
from modules import flags
from modules.sdxl_styles import legal_style_names
from fooocusapi.worker import blocking_get_task_result, worker_queue

@@ -142,8 +212,11 @@ def predict(
outpaint_selections_arr.append(expansion)

image_prompts = []
image_prompt_config = [(cn_img1, cn_stop1, cn_weight1, cn_type1), (cn_img2, cn_stop2, cn_weight2, cn_type2),
(cn_img3, cn_stop3, cn_weight3, cn_type3), (cn_img4, cn_stop4, cn_weight4, cn_type4)]
image_prompt_config = [
(cn_img1, cn_stop1, cn_weight1, cn_type1),
(cn_img2, cn_stop2, cn_weight2, cn_type2),
(cn_img3, cn_stop3, cn_weight3, cn_type3),
(cn_img4, cn_stop4, cn_weight4, cn_type4)]
for config in image_prompt_config:
cn_img, cn_stop, cn_weight, cn_type = config
if cn_img is not None:
@@ -157,42 +230,42 @@ def predict(

advanced_params = None

params = ImageGenerationParams(prompt=prompt,
negative_prompt=negative_prompt,
style_selections=style_selections_arr,
performance_selection=performance_selection,
aspect_ratios_selection=aspect_ratios_selection,
image_number=image_number,
image_seed=image_seed,
sharpness=sharpness,
guidance_scale=guidance_scale,
base_model_name=base_model_name,
refiner_model_name=refiner_model_name,
refiner_switch=refiner_switch,
loras=loras,
uov_input_image=uov_input_image,
uov_method=uov_method,
upscale_value=uov_upscale_value,
outpaint_selections=outpaint_selections_arr,
inpaint_input_image=inpaint_input_image_dict,
image_prompts=image_prompts,
advanced_params=advanced_params,
inpaint_additional_prompt=inpaint_additional_prompt,
outpaint_distance_left=outpaint_distance_left,
outpaint_distance_top=outpaint_distance_top,
outpaint_distance_right=outpaint_distance_right,
outpaint_distance_bottom=outpaint_distance_bottom,
require_base64=False,
)
params = ImageGenerationParams(
prompt=prompt,
negative_prompt=negative_prompt,
style_selections=style_selections_arr,
performance_selection=performance_selection,
aspect_ratios_selection=aspect_ratios_selection,
image_number=image_number,
image_seed=image_seed,
sharpness=sharpness,
guidance_scale=guidance_scale,
base_model_name=base_model_name,
refiner_model_name=refiner_model_name,
refiner_switch=refiner_switch,
loras=loras,
uov_input_image=uov_input_image,
uov_method=uov_method,
upscale_value=uov_upscale_value,
outpaint_selections=outpaint_selections_arr,
inpaint_input_image=inpaint_input_image_dict,
image_prompts=image_prompts,
advanced_params=advanced_params,
inpaint_additional_prompt=inpaint_additional_prompt,
outpaint_distance_left=outpaint_distance_left,
outpaint_distance_top=outpaint_distance_top,
outpaint_distance_right=outpaint_distance_right,
outpaint_distance_bottom=outpaint_distance_bottom,
require_base64=False,
save_extension=save_extension
)

print(f"[Predictor Predict] Params: {params.__dict__}")

async_task = worker_queue.add_task(TaskType.text_2_img, {'params': params.__dict__, 'require_base64': False})
if async_task is None:
print("[Task Queue] The task queue has reached limit")
raise Exception(
f"The task queue has reached limit."
)
raise Exception("The task queue has reached limit.")
results = blocking_get_task_result(async_task.job_id)

output_paths: List[Path] = []
@@ -205,8 +278,6 @@ def predict(
print(f"[Predictor Predict] Finished with {len(output_paths)} images")

if len(output_paths) == 0:
raise Exception(
f"Process failed."
)
raise Exception("Process failed.")

return Output(seeds=output_seeds, paths=output_paths)
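For reference, a hedged end-to-end sketch of exercising the new inputs against a locally served container. Only the input names and choices come from the predict() signature above; the serve command, port, and example values are assumptions about a local setup, and the request shape follows Cog's standard HTTP API (POST /predictions with the inputs under an "input" key):

    # Assumes the container is already serving predictions locally, e.g. via
    # `cog run -p 5000 python -m cog.server.http` (command and port are assumptions).
    import requests

    resp = requests.post(
        'http://localhost:5000/predictions',
        json={
            'input': {
                'prompt': 'a forest cabin at dawn',
                'performance_selection': 'Lightning',  # new choice in this commit
                'save_extension': 'webp',              # new input in this commit
                'image_number': 2,
            }
        },
        timeout=600,
    )
    resp.raise_for_status()
    # expected shape per the Output model above: {'seeds': [...], 'paths': [...]}
    print(resp.json()['output'])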
