Update app.py
app.py CHANGED
@@ -64,18 +64,19 @@ if is_colab:
     pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))

 else: # download all models
-    print(f"{datetime.datetime.now()} Downloading vae...")
-    vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
-    for model in models:
-        try:
-            print(f"{datetime.datetime.now()} Downloading {model.name} model...")
-            unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
-            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
-            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
-        except Exception as e:
-            print(f"{datetime.datetime.now()} Failed to load model " + model.name + ": " + str(e))
-            models.remove(model)
-    pipe = models[0].pipe_t2i
+    pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
+    # print(f"{datetime.datetime.now()} Downloading vae...")
+    # vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
+    # for model in models:
+    #     try:
+    #         print(f"{datetime.datetime.now()} Downloading {model.name} model...")
+    #         unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
+    #         model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
+    #         model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
+    #     except Exception as e:
+    #         print(f"{datetime.datetime.now()} Failed to load model " + model.name + ": " + str(e))
+    #         models.remove(model)
+    # pipe = models[0].pipe_t2i

 if torch.cuda.is_available():
     pipe = pipe.to("cuda")
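Note the safety_checker=lambda images, clip_input: (images, False) argument: it replaces the pipeline's loaded safety checker with a stub that returns the images untouched and reports no NSFW content. A minimal standalone sketch of the same pattern, assuming a diffusers install of this era; the model id is only an example and dummy_safety_checker is a hypothetical name, neither taken from this Space:

import torch
from diffusers import StableDiffusionPipeline

def dummy_safety_checker(images, clip_input):
    # Mirror the real checker's (images, has_nsfw_concepts) return shape,
    # flagging every image as safe so nothing gets blacked out.
    return images, [False] * len(images)

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # example model id, not from this Space
    torch_dtype=torch.float16,
    safety_checker=dummy_safety_checker,
)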
@@ -130,8 +131,9 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
     if is_colab or current_model == custom_model:
         pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
     else:
-        pipe = pipe.to("cpu")
-        pipe = current_model.pipe_t2i
+        pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
+        # pipe = pipe.to("cpu")
+        # pipe = current_model.pipe_t2i

     if torch.cuda.is_available():
         pipe = pipe.to("cuda")
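With this hunk, every txt_to_img call that switches models rebuilds the whole pipeline with from_pretrained. After the first download the weights come from the local Hugging Face cache, so nothing is re-fetched over the network, but they are still re-read and re-instantiated on each switch. A sketch of a per-model cache that would avoid the repeated loads; get_t2i_pipe and _pipe_cache are illustrative names, not part of this app:

import torch
from diffusers import StableDiffusionPipeline

_pipe_cache = {}  # model_path -> loaded pipeline (illustrative helper)

def get_t2i_pipe(model_path, scheduler):
    # Pay the from_pretrained cost once per model, then reuse the object.
    if model_path not in _pipe_cache:
        _pipe_cache[model_path] = StableDiffusionPipeline.from_pretrained(
            model_path, torch_dtype=torch.float16, scheduler=scheduler
        )
    return _pipe_cache[model_path]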
@@ -163,8 +165,9 @@ def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, w
     if is_colab or current_model == custom_model:
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
     else:
-        pipe = pipe.to("cpu")
-        pipe = current_model.pipe_i2i
+        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
+        # pipe = pipe.to("cpu")
+        # pipe = current_model.pipe_i2i

     if torch.cuda.is_available():
         pipe = pipe.to("cuda")
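The img_to_img path mirrors the txt_to_img change, so it loads its own StableDiffusionImg2ImgPipeline even when a text-to-image pipeline for the same model is already in memory. diffusers pipelines expose their modules through a components property, which lets a second pipeline be built around the same weights; a sketch, assuming a diffusers version with that property and using an example model id:

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

pipe_t2i = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16  # example id
)
# Share the already-loaded modules instead of instantiating a second copy:
# both pipelines point at the same UNet, VAE and text encoder.
pipe_i2i = StableDiffusionImg2ImgPipeline(**pipe_t2i.components)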