Update app.py
app.py CHANGED
@@ -5,7 +5,12 @@ import torch
 import spaces
 
 from PIL import Image
-from diffusers import
+from diffusers import FlowMatchEulerDiscreteScheduler
+from optimization import optimize_pipeline_
+from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
+from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
+from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
+
 from huggingface_hub import InferenceClient
 import math
 
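For context, a minimal sketch of how these new imports are typically wired up elsewhere in app.py, assuming the Space's local QwenImageEditPlusPipeline follows the standard diffusers from_pretrained interface; the checkpoint id, dtype, and scheduler swap below are placeholders, not taken from this diff:

import torch
from diffusers import FlowMatchEulerDiscreteScheduler
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline

# Placeholder checkpoint id and dtype (assumptions, not shown in this diff).
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    torch_dtype=torch.bfloat16,
).to("cuda")

# Possible reason for the FlowMatchEulerDiscreteScheduler import: swapping in a
# flow-match Euler scheduler while keeping the existing config (assumption; the
# diff only adds the import).
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)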
@@ -193,6 +198,12 @@ pipe.load_lora_weights(
 )
 pipe.fuse_lora()
 
+# Apply the same optimizations from the first version
+pipe.transformer.__class__ = QwenImageTransformer2DModel
+pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
+
+# --- Ahead-of-time compilation ---
+
 # --- UI Constants and Helpers ---
 MAX_SEED = np.iinfo(np.int32).max
 
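The new "# --- Ahead-of-time compilation ---" marker is left empty in this hunk; the optimize_pipeline_ helper imported above is the Space's local entry point for that step. A minimal sketch of a warm-up call on the pipe object built earlier in app.py, assuming the helper compiles the transformer from one set of dummy inputs (its real signature is not shown in this diff, so the keyword names below are guesses):

from PIL import Image
from optimization import optimize_pipeline_  # local module in this Space

# Assumed call shape: run one dummy edit so the transformer is compiled
# ahead of time and later requests skip the compilation latency.
optimize_pipeline_(
    pipe,
    image=[Image.new("RGB", (1024, 1024))],  # dummy input image (assumption)
    prompt="prompt",                          # dummy prompt (assumption)
)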