diff --git a/README.md b/README.md
index 1f04efcb..616a0d70 100644
--- a/README.md
+++ b/README.md
@@ -220,6 +220,51 @@ Differences may be less or more pronounced for different inputs. Please see the
+
+## Play with simple Web UI
+
+
+<details><summary>Click to expand</summary>
+
+
+
+After you have completed the model conversion according to the above process, you can use the following command to start a simple Web UI:
+
+```bash
+python -m python_coreml_stable_diffusion.web -i --compute-unit ALL
+```
+
+After the command is executed, we will get a log similar to the following:
+
+```bash
+WARNING:coremltools:Torch version 1.13.0 has not been tested with coremltools. You may run into unexpected errors. Torch 1.12.1 is the most recent version that has been tested.
+INFO:python_coreml_stable_diffusion.pipeline:Initializing PyTorch pipe for reference configuration
+...
+...
+INFO:python_coreml_stable_diffusion.pipeline:Done.
+INFO:python_coreml_stable_diffusion.pipeline:Initializing Core ML pipe for image generation
+INFO:python_coreml_stable_diffusion.pipeline:Stable Diffusion configured to generate 512x512 images
+INFO:python_coreml_stable_diffusion.pipeline:Done.
+Running on local URL: http://0.0.0.0:7860
+
+To create a public link, set `share=True` in `launch()`.
+```
+
+Open `http://localhost:7860` in your browser to start your Core ML Stable Diffusion adventure (the server listens on `0.0.0.0:7860`).
+
+![Web UI screenshot](assets/webui.jpg)
+The Web UI relies on gradio, a great interface framework. If you have not installed it, the program will try to install it automatically when you execute the above command.
+
+If the automatic installation fails, you can manually run the following command to install the dependency:
+
+```bash
+pip install gradio
+```
+
+When the installation is complete, re-execute the above command to start the Web UI.
+
+</details>
+
## FAQ
diff --git a/assets/webui.jpg b/assets/webui.jpg
new file mode 100644
index 00000000..ea68ea09
Binary files /dev/null and b/assets/webui.jpg differ
diff --git a/python_coreml_stable_diffusion/web.py b/python_coreml_stable_diffusion/web.py
new file mode 100644
index 00000000..b001e13e
--- /dev/null
+++ b/python_coreml_stable_diffusion/web.py
@@ -0,0 +1,108 @@
+try:
+ import gradio as gr
+ import python_coreml_stable_diffusion.pipeline as pipeline
+ from diffusers import StableDiffusionPipeline
+
+ def init(args):
+ pipeline.logger.info("Initializing PyTorch pipe for reference configuration")
+ pytorch_pipe = StableDiffusionPipeline.from_pretrained(args.model_version,
+ use_auth_token=True)
+
+ user_specified_scheduler = None
+ if args.scheduler is not None:
+ user_specified_scheduler = pipeline.SCHEDULER_MAP[
+ args.scheduler].from_config(pytorch_pipe.scheduler.config)
+
+ coreml_pipe = pipeline.get_coreml_pipe(pytorch_pipe=pytorch_pipe,
+ mlpackages_dir=args.i,
+ model_version=args.model_version,
+ compute_unit=args.compute_unit,
+ scheduler_override=user_specified_scheduler)
+
+
+ def infer(prompt, steps):
+ pipeline.logger.info("Beginning image generation.")
+ image = coreml_pipe(
+ prompt=prompt,
+ height=coreml_pipe.height,
+ width=coreml_pipe.width,
+ num_inference_steps=steps,
+ )
+ images = []
+ images.append(image["images"][0])
+ return images
+
+
+ demo = gr.Blocks()
+
+ with demo:
+ gr.Markdown(
+                "Core ML Stable Diffusion<br>"
+                "Run Stable Diffusion on Apple Silicon with Core ML")
+ with gr.Group():
+ with gr.Box():
+ with gr.Row():
+ with gr.Column():
+ with gr.Row():
+ text = gr.Textbox(
+ label="Prompt",
+ lines=11,
+ placeholder="Enter your prompt",
+ )
+ with gr.Row():
+ btn = gr.Button("Generate image")
+ with gr.Row():
+ steps = gr.Slider(label="Steps", minimum=1,
+ maximum=50, value=10, step=1)
+ with gr.Column():
+ gallery = gr.Gallery(
+ label="Generated image", elem_id="gallery"
+ )
+
+ text.submit(infer, inputs=[text, steps], outputs=gallery)
+ btn.click(infer, inputs=[text, steps], outputs=gallery)
+
+ demo.launch(debug=True, server_name="0.0.0.0")
+
+
+ if __name__ == "__main__":
+ parser = pipeline.argparse.ArgumentParser()
+
+ parser.add_argument(
+ "-i",
+ required=True,
+ help=("Path to input directory with the .mlpackage files generated by "
+ "python_coreml_stable_diffusion.torch2coreml"))
+ parser.add_argument(
+ "--model-version",
+ default="CompVis/stable-diffusion-v1-4",
+ help=
+ ("The pre-trained model checkpoint and configuration to restore. "
+ "For available versions: https://huggingface.co/models?search=stable-diffusion"
+ ))
+ parser.add_argument(
+ "--compute-unit",
+ choices=pipeline.get_available_compute_units(),
+ default="ALL",
+ help=("The compute units to be used when executing Core ML models. "
+ f"Options: {pipeline.get_available_compute_units()}"))
+ parser.add_argument(
+ "--scheduler",
+ choices=tuple(pipeline.SCHEDULER_MAP.keys()),
+ default=None,
+ help=("The scheduler to use for running the reverse diffusion process. "
+ "If not specified, the default scheduler from the diffusers pipeline is utilized"))
+
+ args = parser.parse_args()
+ init(args)
+
+except ModuleNotFoundError as moduleNotFound:
+ print(f'Found that `gradio` is not installed, try to install it automatically')
+ try:
+ import subprocess
+ import sys
+
+ subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'gradio'])
+ print(f'Successfully installed missing package `gradio`.')
+ print(f'Now re-execute the command :D')
+ except subprocess.CalledProcessError:
+ print(f'Automatic package installation failed, try manually executing `pip install gradio`, then retry the command again.')
\ No newline at end of file