Roll back to before we wasted our money with Zero
app.py CHANGED

@@ -10,9 +10,6 @@ import os
 from PIL import Image
 import spaces
 
-print(f"Is CUDA available: {torch.cuda.is_available()}")
-print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
-
 from diffusers import (
     StableDiffusionPipeline,
     StableDiffusionControlNetImg2ImgPipeline,
@@ -41,7 +38,7 @@ pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
     safety_checker=None,
     torch_dtype=torch.float16,
 ).to("cuda")
-
+pipe.enable_xformers_memory_efficient_attention()
 # pipe.controlnet = torch.nn.DataParallel(pipe.controlnet)
 # pipe.unet = torch.nn.DataParallel(pipe.unet)
 
@@ -83,7 +80,7 @@ SAMPLER_MAP = {
     "DEIS": lambda config: DEISMultistepScheduler.from_config(config),
 }
 
-@spaces.GPU(
+@spaces.GPU()
 def inference(
     qr_code_content: str,
     prompt: str,
@@ -327,4 +324,3 @@ model: https://huggingface.co/DionTimmer/controlnet_qrcode-control_v1p_sd15
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 blocks.queue(max_size=20,api_open=False)
 blocks.launch(share=bool(os.environ.get("SHARE", False)), show_api=False)
-
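For reference, here is a minimal sketch of the Spaces GPU-decorator pattern the post-rollback app.py uses: load the ControlNet pipeline once at import time, re-enable xformers attention, and wrap the GPU-bound function in @spaces.GPU so ZeroGPU attaches a device only while it runs. The base-model ID, the duration value, and the simplified inference signature are illustrative assumptions; only the QR-code ControlNet repo and the two re-added calls come from the diff above.

# Minimal sketch of the restored ZeroGPU setup (assumptions noted inline).
import torch
import spaces  # Hugging Face `spaces` package, needed for ZeroGPU
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline

controlnet = ControlNetModel.from_pretrained(
    "DionTimmer/controlnet_qrcode-control_v1p_sd15",  # model linked in the diff context
    torch_dtype=torch.float16,
)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed base checkpoint, for illustration only
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")  # mirrors the .to("cuda") call in the diff above

# Same memory optimization the diff re-adds (requires the xformers package).
pipe.enable_xformers_memory_efficient_attention()

# On ZeroGPU Spaces, hardware is attached only while a @spaces.GPU-decorated
# function runs; the duration hint (seconds) is optional and illustrative here.
@spaces.GPU(duration=120)
def inference(prompt: str, init_image, control_image):
    return pipe(
        prompt=prompt,
        image=init_image,
        control_image=control_image,
        strength=0.8,
    ).images[0]

On non-ZeroGPU hardware the decorator is documented as a no-op, which is why `import spaces` and the decorator can stay in place regardless of which hardware the Space runs on.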