Update app.py
app.py CHANGED
@@ -17,7 +17,6 @@ subprocess.run(
     shell=True,
 )
 
-
 os.makedirs("/home/user/app/checkpoints", exist_ok=True)
 from huggingface_hub import snapshot_download
 snapshot_download(
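This first hunk only drops a stray blank line in the startup block that creates the checkpoint directory and downloads weights with `snapshot_download`. For context, a minimal standalone sketch of that download pattern; the `repo_id` is an assumption for illustration, since the actual arguments fall outside this hunk:

```python
import os
from huggingface_hub import snapshot_download

ckpt_dir = "/home/user/app/checkpoints"
os.makedirs(ckpt_dir, exist_ok=True)  # no error if the directory already exists

# Fetch the full model repository into ckpt_dir at Space startup.
# The repo_id below is a placeholder; the real one is not shown in this diff.
snapshot_download(repo_id="Alpha-VLLM/Lumina-Image-2.0", local_dir=ckpt_dir)
```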
@@ -51,7 +50,6 @@ import models
 from transport import Sampler, create_transport
 
 from multiprocessing import Process,Queue,set_start_method,get_context
-#set_start_method('fork')
 
 class ModelFailure:
     pass
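The deleted comment was a leftover toggle for the `fork` start method. Worth noting: `fork` is unsafe once CUDA has been initialized in the parent process, which is why GPU code that does spawn workers generally uses the `spawn` context instead. A hedged sketch of that pattern (not code from this app):

```python
import multiprocessing as mp

def worker(q):
    q.put("ready")  # stand-in for per-GPU model work

if __name__ == "__main__":
    # "spawn" starts a fresh interpreter per child, avoiding the
    # CUDA-after-fork re-initialization error that "fork" can trigger.
    ctx = mp.get_context("spawn")
    q = ctx.Queue()
    p = ctx.Process(target=worker, args=(q,))
    p.start()
    print(q.get())
    p.join()
```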
@@ -110,11 +108,6 @@ def model_main(args, master_port, rank):
     # Override the built-in print with the new version
     builtins.print = print
 
-    # os.environ["MASTER_PORT"] = str(master_port)
-    # os.environ["MASTER_ADDR"] = "127.0.0.1"
-    # os.environ["RANK"] = str(rank)
-    # os.environ["WORLD_SIZE"] = str(args.num_gpus)
-
 
     train_args = torch.load(os.path.join(args.ckpt, "model_args.pth"))
     print("Loaded model arguments:", json.dumps(train_args.__dict__, indent=2))
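The four deleted environment variables are the ones `torch.distributed`'s `env://` rendezvous reads; with a single in-process model they serve no purpose. For reference, a sketch of how they would be used if multi-process inference were reinstated (values here are assumptions):

```python
import os
import torch.distributed as dist

def init_distributed(rank: int, world_size: int, master_port: int) -> None:
    # The env:// init method reads these four variables to form the process group.
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = str(master_port)
    os.environ["RANK"] = str(rank)
    os.environ["WORLD_SIZE"] = str(world_size)
    dist.init_process_group(backend="nccl", init_method="env://")
```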
@@ -307,16 +300,13 @@ def inference(args, infer_args, text_encoder, tokenizer, vae, model):
         samples = (samples + 1.0) / 2.0
         samples.clamp_(0.0, 1.0)
 
-        img = to_pil_image(samples[0])
+        img = to_pil_image(samples[0].float())
         print("> generated image, done.")
 
-        # if response_queue is not None:
-        #     response_queue.put((img, metadata))
         return img, metadata
     except Exception:
         print(traceback.format_exc())
         return ModelFailure()
-        # response_queue.put(ModelFailure())
 
 
 def none_or_str(value):
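The substantive fix here is the added `.float()` cast. Recent `to_pil_image` implementations convert the tensor through a NumPy array, and NumPy has no bfloat16 dtype, so reduced-precision model outputs must be upcast first. A self-contained sketch of the failure mode and the fix (shape and dtype are assumptions):

```python
import torch
from torchvision.transforms.functional import to_pil_image

# Stand-in for model output: bfloat16 samples already scaled into [0, 1].
samples = torch.rand(1, 3, 64, 64).to(torch.bfloat16)

# to_pil_image(samples[0]) can raise "Got unsupported ScalarType BFloat16"
# because the tensor is converted via NumPy, which lacks bfloat16.
img = to_pil_image(samples[0].float())  # upcast to float32 first
img.save("out.png")
```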
@@ -402,38 +392,11 @@ def main():
         raise NotImplementedError("Multi-GPU Inference is not yet supported")
 
     master_port = find_free_port()
-    #mp.set_start_method("fork")
-    # processes = []
-    # request_queues = []
-    # response_queue = mp.Queue()
-    # mp_barrier = mp.Barrier(args.num_gpus + 1)
-    # for i in range(args.num_gpus):
-    #     request_queues.append(mp.Queue())
-    #     p = mp.Process(
-    #         target=model_main,
-    #         args=(
-    #             args,
-    #             master_port,
-    #             i,
-    #             request_queues[i],
-    #             response_queue if i == 0 else None,
-    #             mp_barrier,
-    #         ),
-    #     )
-    #     p.start()
-    #     processes.append(p)
 
     text_encoder, tokenizer, vae, model = model_main(args, master_port, 0)
 
-    description =
-    #"""
-    # Lumina Next Text-to-Image
-
-    #Lumina-Next-T2I is a 2B Next-DiT model with 2B text encoder.
-
-    #Demo current model: `Lumina-Next-T2I`
+    description = "Lumina-Image 2.0 ([Github](https://github.com/Alpha-VLLM/Lumina-Image-2.0/tree/main))"
 
-    #"""
     with gr.Blocks() as demo:
         with gr.Row():
             gr.Markdown(description)
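Net effect of this hunk: the dead multi-process scaffolding and the stale Lumina-Next blurb are gone, leaving the usual single-process Gradio pattern of loading heavy objects once and reusing them in handlers. A generic sketch of that shape (stand-in model, not this app's real pipeline):

```python
import gradio as gr

# Loaded once at startup and shared by every request.
def fake_pipeline(prompt):
    return f"generated: {prompt}"  # stand-in for the real text-to-image model

def on_submit(prompt):
    return fake_pipeline(prompt)

description = "Lumina-Image 2.0 ([Github](https://github.com/Alpha-VLLM/Lumina-Image-2.0/tree/main))"
with gr.Blocks() as demo:
    gr.Markdown(description)
    box = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Result")
    box.submit(on_submit, inputs=box, outputs=out)

if __name__ == "__main__":
    demo.launch()
```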
@@ -568,13 +531,7 @@ def main():
         ) # noqa
 
         @spaces.GPU(duration=200)
-        def on_submit(*infer_args):
-            # for q in request_queues:
-            #     q.put(args)
-            # result = response_queue.get()
-            # if isinstance(result, ModelFailure):
-            #     raise RuntimeError
-            # img, metadata = result
+        def on_submit(*infer_args, progress=gr.Progress(track_tqdm=True),):
             result = inference(args, infer_args, text_encoder, tokenizer, vae, model)
             if isinstance(result, ModelFailure):
                 raise RuntimeError("Model failed to generate the image.")
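The rewritten signature combines two pieces: `@spaces.GPU(duration=200)` asks ZeroGPU for up to 200 seconds of GPU time per call, and `gr.Progress(track_tqdm=True)` mirrors tqdm bars from inside `inference` into the Gradio UI. A minimal sketch of the combination (handler body is a stand-in):

```python
import time
import gradio as gr
import spaces  # provided on Hugging Face ZeroGPU Spaces

@spaces.GPU(duration=200)  # hold the GPU for at most ~200 s per call
def on_submit(prompt, progress=gr.Progress(track_tqdm=True)):
    # Any tqdm loop executed during this call shows up as UI progress.
    for _ in progress.tqdm(range(10), desc="denoising (stand-in)"):
        time.sleep(0.1)
    return f"done: {prompt}"

with gr.Blocks() as demo:
    box = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Result")
    box.submit(on_submit, inputs=box, outputs=out)
```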