Example usage with `diffusers`:

```python
import torch
import requests
from PIL import Image
from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler

# Load the pipeline.
pipeline = DiffusionPipeline.from_pretrained(
    "sudo-ai/zero123plus-v1.1", custom_pipeline="sudo-ai/zero123plus-pipeline",
    torch_dtype=torch.float16
)

# Feel free to tune the scheduler. The `timestep_spacing` parameter is not
# supported in older versions of `diffusers`, which may degrade results;
# we recommend `diffusers==0.20.2`.
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
    pipeline.scheduler.config, timestep_spacing='trailing'
)
pipeline.to('cuda:0')

# Download an example conditioning image.
cond = Image.open(requests.get("https://d.skis.ltd/nrp/sample-data/lysol.png", stream=True).raw)

# Run the pipeline. For general real and synthetic images of everyday
# objects, around 28 inference steps is usually enough; images with
# delicate details such as faces (real or anime) may need 75-100 steps
# for the details to emerge.
result = pipeline(cond, num_inference_steps=75).images[0]
result.show()
result.save("output.png")
```
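Zero123++ returns its novel views tiled into a single image, so `output.png` usually needs to be split into per-view images afterwards. Below is a minimal sketch of that post-processing step; the 3-row × 2-column grid of 320×320 tiles is an assumption about the v1.1 output layout, so verify it against `result.size` before relying on these numbers:

```python
from PIL import Image

# Split the tiled multi-view output into separate per-view images.
# ASSUMPTION: the output is a grid of square 320x320 tiles (for v1.1,
# typically 6 views in 3 rows x 2 columns, i.e. a 640x960 image).
result = Image.open("output.png")
tile = 320
cols, rows = result.width // tile, result.height // tile
views = [
    result.crop((c * tile, r * tile, (c + 1) * tile, (r + 1) * tile))
    for r in range(rows)
    for c in range(cols)
]
for i, view in enumerate(views):
    view.save(f"view_{i}.png")
```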
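If the full pipeline does not fit in GPU memory with `pipeline.to('cuda:0')`, the standard `diffusers` CPU-offload helper is an alternative. This is a sketch, not part of the Zero123++ instructions: it assumes a `diffusers` version that provides `enable_model_cpu_offload()` (which requires the `accelerate` package), and compatibility with this custom pipeline has not been verified here:

```python
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "sudo-ai/zero123plus-v1.1", custom_pipeline="sudo-ai/zero123plus-pipeline",
    torch_dtype=torch.float16
)
# Stream submodules to the GPU only while they run, instead of keeping
# the whole pipeline resident; this replaces pipeline.to('cuda:0').
pipeline.enable_model_cpu_offload()
```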