Update app.py
app.py CHANGED
@@ -487,27 +487,27 @@ scheduler = DDIMScheduler.from_pretrained(
 
 vae = AutoencoderKL.from_pretrained(base_model,
                                     subfolder="vae",
-                                    torch_dtype=torch.float16,
+                                    #torch_dtype=torch.float16,
                                     )
 if vae is None:
     vae = AutoencoderKL.from_pretrained(
         "stabilityai/sd-vae-ft-mse",
-        torch_dtype=torch.float16,
+        #torch_dtype=torch.float16,
     )
 text_encoder = CLIPTextModel.from_pretrained(
     base_model,
     subfolder="text_encoder",
-    torch_dtype=torch.float16,
+    #torch_dtype=torch.float16,
 )
 tokenizer = CLIPTokenizer.from_pretrained(
     base_model,
     subfolder="tokenizer",
-    torch_dtype=torch.float16,
+    #torch_dtype=torch.float16,
 )
 unet = UNet2DConditionModel.from_pretrained(
     base_model,
     subfolder="unet",
-    torch_dtype=torch.float16,
+    #torch_dtype=torch.float16,
 )
 feature_extract = CLIPImageProcessor.from_pretrained(
     base_model,
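The hunk above comments out every hard-coded torch_dtype=torch.float16 in the module-level component loads. A plausible reading, though the commit message does not say so, is that the Space runs on CPU hardware, where half-precision weights routinely fail at inference. The sketch below shows one way to keep fp16 on GPU and fall back to fp32 otherwise; the device/dtype names and the repo id are illustrative, not taken from app.py.

import torch
from diffusers import AutoencoderKL

# Pick precision from the available hardware instead of hard-coding float16.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Same kind of VAE load as in the diff, with the dtype chosen above
# (the repo id here stands in for app.py's base_model and is an assumption).
vae = AutoencoderKL.from_pretrained(
    "stabilityai/sd-vae-ft-mse",
    torch_dtype=dtype,
).to(device)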
@@ -604,24 +604,40 @@ def setup_model(name,clip_skip, lora_group=None,diffuser_pipeline = False ,contr
     if name not in unet_cache:
         if name not in models_single_file:
             try:
-                vae_model = AutoencoderKL.from_pretrained(model,subfolder="vae",torch_dtype=torch.float16)
+                vae_model = AutoencoderKL.from_pretrained(model,subfolder="vae"
+                                                          #,torch_dtype=torch.float16
+                                                          )
             except OSError:
-                vae_model = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse",torch_dtype=torch.float16)
+                vae_model = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse",
+                                                          #torch_dtype=torch.float16
+                                                          )
 
             try:
-                unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet",torch_dtype=torch.float16)
+                unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet",
+                                                            #torch_dtype=torch.float16
+                                                            )
             except OSError:
-                unet = UNet2DConditionModel.from_pretrained(base_model, subfolder="unet",torch_dtype=torch.float16)
+                unet = UNet2DConditionModel.from_pretrained(base_model, subfolder="unet",
+                                                            #torch_dtype=torch.float16
+                                                            )
 
             try:
-                text_encoder = CLIPTextModel.from_pretrained(model, subfolder="text_encoder",torch_dtype=torch.float16)
+                text_encoder = CLIPTextModel.from_pretrained(model, subfolder="text_encoder",
+                                                             #torch_dtype=torch.float16
+                                                             )
             except OSError:
-                text_encoder = CLIPTextModel.from_pretrained(base_model, subfolder="text_encoder",torch_dtype=torch.float16)
+                text_encoder = CLIPTextModel.from_pretrained(base_model, subfolder="text_encoder",
+                                                             #torch_dtype=torch.float16
+                                                             )
 
             try:
-                tokenizer = CLIPTokenizer.from_pretrained(model,subfolder="tokenizer",torch_dtype=torch.float16)
+                tokenizer = CLIPTokenizer.from_pretrained(model,subfolder="tokenizer",
+                                                          #torch_dtype=torch.float16
+                                                          )
             except OSError:
-                tokenizer = CLIPTokenizer.from_pretrained(base_model,subfolder="tokenizer",torch_dtype=torch.float16)
+                tokenizer = CLIPTokenizer.from_pretrained(base_model,subfolder="tokenizer",
+                                                          #torch_dtype=torch.float16
+                                                          )
 
             try:
                 scheduler = DDIMScheduler.from_pretrained(model,subfolder="scheduler")
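Inside setup_model the same edit is applied to each try/except pair: every component is loaded from the selected model repo first and, on OSError, from a fallback repo, with the torch_dtype keyword commented out in all of the calls. Below is a minimal sketch of that fallback pattern with the precision factored into one place; load_component and both repo ids are illustrative, not part of app.py.

import torch
from diffusers import UNet2DConditionModel

dtype = torch.float16 if torch.cuda.is_available() else torch.float32

def load_component(cls, primary_repo, fallback_repo, subfolder):
    # Try the user-selected repo first; fall back if the subfolder is missing
    # there (from_pretrained raises OSError in that case).
    try:
        return cls.from_pretrained(primary_repo, subfolder=subfolder, torch_dtype=dtype)
    except OSError:
        return cls.from_pretrained(fallback_repo, subfolder=subfolder, torch_dtype=dtype)

# Mirrors the unet branch of the diff; both repo ids below are assumptions.
unet = load_component(UNet2DConditionModel,
                      "some-user/finetuned-model",
                      "runwayml/stable-diffusion-v1-5",
                      "unet")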
@@ -633,7 +649,9 @@ def setup_model(name,clip_skip, lora_group=None,diffuser_pipeline = False ,contr
             except OSError:
                 feature_extract = CLIPImageProcessor.from_pretrained(base_model,subfolder="feature_extractor")
         else:
-            pipe_get = StableDiffusionPipeline_finetune.from_single_file(model,safety_checker= None,requires_safety_checker = False,torch_dtype=torch.float16).to(device)
+            pipe_get = StableDiffusionPipeline_finetune.from_single_file(model,safety_checker= None,requires_safety_checker = False,
+                                                                         #torch_dtype=torch.float16
+                                                                         ).to(device)
             vae_model = pipe_get.vae
             unet = pipe_get.unet
             text_encoder = pipe_get.text_encoder
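For single-file checkpoints the commit makes the matching change in the from_single_file branch, commenting out torch_dtype while keeping the explicit .to(device) move. A minimal sketch of that flow using the stock diffusers StableDiffusionPipeline as a stand-in for the app's StableDiffusionPipeline_finetune subclass; the checkpoint path is an assumption.

import torch
from diffusers import StableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Hypothetical checkpoint path; app.py passes its own `model` variable here.
pipe = StableDiffusionPipeline.from_single_file(
    "./checkpoints/finetuned.safetensors",
    torch_dtype=dtype,
).to(device)

# Individual components can then be reused, as the diff does with pipe_get.
vae_model, unet, text_encoder = pipe.vae, pipe.unet, pipe.text_encoder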
@@ -2989,7 +3007,7 @@ with gr.Blocks(css=css) as demo:
 
             prompt = gr.Textbox(
                 label="Prompt",
-                value="
+                value="An adorable girl is sitting on the park",
                 show_label=True,
                 #max_lines=4,
                 placeholder="Enter prompt.",