Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,3 +1,46 @@
+import subprocess
+import sys
+
+# Install/upgrade required packages with specific versions
+def install_packages():
+    packages = [
+        "transformers>=4.46.0",
+        "diffusers>=0.31.0",
+        "accelerate>=0.26.0",
+        "huggingface-hub>=0.23.0"
+    ]
+
+    for package in packages:
+        subprocess.run([sys.executable, "-m", "pip", "install", "--upgrade", package], check=True)
+
+# Run installation before other imports
+try:
+    install_packages()
+except Exception as e:
+    print(f"Warning: Could not auto-install packages: {e}")
+
+# Try to install flash-attn with a timeout
+try:
+    print("Attempting to install flash-attn...")
+    result = subprocess.run(
+        'pip install flash-attn --no-build-isolation',
+        env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
+        shell=True,
+        timeout=120, # 2 minute timeout
+        capture_output=True,
+        text=True
+    )
+    if result.returncode == 0:
+        print("Flash-attn installed successfully")
+    else:
+        print(f"Flash-attn installation failed: {result.stderr}")
+        print("Continuing without flash-attn...")
+except subprocess.TimeoutExpired:
+    print("Flash-attn installation timed out - continuing without it")
+except Exception as e:
+    print(f"Flash-attn installation error: {e}")
+    print("Continuing without flash-attn...")
+
 import spaces
 import argparse
 import os
@@ -13,12 +56,21 @@ from diffusers import FluxPipeline
 from diffusers.pipelines.stable_diffusion import safety_checker
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForCausalLM
-import subprocess
 
-#
-
-
-
+# Try to use efficient attention mechanisms
+ATTN_METHOD = None
+try:
+    import xformers
+    print("Using xformers for efficient attention")
+    ATTN_METHOD = "xformers"
+except ImportError:
+    try:
+        import flash_attn
+        print("Using flash attention")
+        ATTN_METHOD = "flash_attn"
+    except ImportError:
+        print("No efficient attention method available, using default")
+        ATTN_METHOD = "default"
 
 # Setup and initialization code
 cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
@@ -31,6 +83,7 @@ os.environ["HF_HOME"] = cache_path
 torch.backends.cuda.matmul.allow_tf32 = True
 
 # Initialize Florence models
+print("Initializing Florence models...")
 florence_models = {
     'gokaygokay/Florence-2-Flux-Large': AutoModelForCausalLM.from_pretrained(
         'gokaygokay/Florence-2-Flux-Large',
@@ -80,10 +133,13 @@ class timer:
 if not path.exists(cache_path):
     os.makedirs(cache_path, exist_ok=True)
 
+print("Loading FLUX pipeline...")
 pipe = FluxPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-dev",
     torch_dtype=torch.bfloat16
 )
+
+print("Loading LoRA weights...")
 pipe.load_lora_weights(
     hf_hub_download(
         "ByteDance/Hyper-SD",
@@ -92,9 +148,15 @@ pipe.load_lora_weights(
 )
 pipe.fuse_lora(lora_scale=0.125)
 pipe.to(device="cuda", dtype=torch.bfloat16)
-
-
-
+
+# Safety checker initialization
+try:
+    pipe.safety_checker = safety_checker.StableDiffusionSafetyChecker.from_pretrained(
+        "CompVis/stable-diffusion-safety-checker"
+    )
+except Exception as e:
+    print(f"Warning: Could not load safety checker: {e}")
+    pipe.safety_checker = None
 
 @spaces.GPU
 def generate_caption(image, model_name='gokaygokay/Florence-2-Flux-Large'):
@@ -142,6 +204,7 @@ def process_and_save_image(height, width, steps, scales, prompt, seed):
         return generated_image
     except Exception as e:
         print(f"Error in image generation: {str(e)}")
+        gr.Warning(f"Error generating image: {str(e)}")
         return None
 
 def get_random_seed():
|