Update app.py
app.py
CHANGED
@@ -1,8 +1,8 @@
 import os
 huggingface_token = os.getenv("HF_TOKEN")
 if not huggingface_token:
-    print("
-
+    print("Warning: Hugging Face token is not set.")
+
 import gradio as gr
 import json
 import logging
@@ -23,19 +23,15 @@ from gradio_imageslider import ImageSlider
 import numpy as np
 import warnings
 
-
-
-
-
 try:
     translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device="cpu", token=huggingface_token)
 except Exception as e:
-    print(f"
-    #
+    print(f"Translation model load failed: {str(e)}")
+    # If the translation model fails to load, return the original text
     def translator(text, max_length=512):
         return [{'translation_text': text}]
 
-#Load prompts for randomization
+# Load prompts for randomization
 df = pd.read_csv('prompts.csv', header=None)
 prompt_values = df.values.flatten()
 
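Note that the fallback above works because the stub keeps the same call signature and return shape as the transformers pipeline, so downstream calls like `translator(prompt, max_length=512)[0]['translation_text']` run unchanged either way. A minimal sketch of the pattern, assuming only that transformers is installed:

```python
# Minimal sketch of the load-or-stub fallback pattern used above.
from transformers import pipeline

try:
    translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device="cpu")
except Exception as e:
    print(f"Translation model load failed: {str(e)}")
    # Stub with the same signature and return shape as the real pipeline,
    # so callers can keep using translator(text)[0]['translation_text'].
    def translator(text, max_length=512):
        return [{'translation_text': text}]

print(translator("고양이", max_length=512)[0]['translation_text'])
```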
@@ -43,20 +39,18 @@ prompt_values = df.values.flatten()
 with open('loras.json', 'r') as f:
     loras = json.load(f)
 
-#
+# Load base FLUX model
 dtype = torch.bfloat16
-
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# 공통 FLUX 모델 로드
 base_model = "black-forest-labs/FLUX.1-dev"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
 
-#
+# Settings for LoRA
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
 
-#
+# Set up image-to-image pipeline
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     base_model,
     vae=good_vae,
@@ -276,14 +270,15 @@ def add_custom_lora(custom_lora, selected_indices, current_loras):
             lora_scale_3,
             lora_image_1,
             lora_image_2,
-            lora_image_3
+            lora_image_3,
+            gr.update(visible=True)  # Make "Remove Custom LoRA" button visible
         )
     except Exception as e:
        print(e)
        gr.Warning(str(e))
-       return current_loras, gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
+       return current_loras, gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
    else:
-       return current_loras, gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
+       return current_loras, gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
 
 def remove_custom_lora(selected_indices, current_loras):
     if current_loras:
@@ -318,6 +313,8 @@ def remove_custom_lora(selected_indices, current_loras):
            lora3 = current_loras[selected_indices[2]]
            selected_info_3 = f"### LoRA 3 Selected: [{lora3['title']}]({lora3['repo']}) ✨"
            lora_image_3 = lora3['image']
+    # If no custom LoRA remains, hide the "Remove Custom LoRA" button
+    remove_button_visibility = gr.update(visible=False) if not any("custom" in lora['repo'] for lora in current_loras) else gr.update(visible=True)
     return (
         current_loras,
         gr.update(value=gallery_items),
@@ -330,7 +327,8 @@ def remove_custom_lora(selected_indices, current_loras):
         lora_scale_3,
         lora_image_1,
         lora_image_2,
-        lora_image_3
+        lora_image_3,
+        remove_button_visibility
     )
 
 @spaces.GPU(duration=75)
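The two hunks above thread the new `remove_button_visibility` value through the handler's return tuple. In Gradio, a handler's returned values are matched positionally to its `outputs` list, and `gr.update(visible=...)` changes only that property of the target component. A standalone sketch of the pattern, with illustrative component names that are not from the app:

```python
import gradio as gr

def toggle(show):
    # The returned update is matched positionally to `outputs` below.
    return gr.update(visible=show)

with gr.Blocks() as demo:
    show = gr.Checkbox(True, label="Show the remove button")
    btn = gr.Button("Remove Custom LoRA")
    show.change(toggle, inputs=[show], outputs=[btn])
```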
@@ -339,7 +337,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
     with calculateDuration("Generating image"):
-        # Generate image
+        # Generate image iteratively
         for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
             prompt=prompt_mash,
             num_inference_steps=steps,
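`flux_pipe_call_that_returns_an_iterable_of_images` is a custom helper defined elsewhere in the app; the loop works because the helper yields an intermediate preview image per denoising step instead of returning a single final image. An illustrative stand-in for that generator pattern (not the app's actual implementation):

```python
def fake_step_images(prompt, num_inference_steps):
    # Yield one preview per denoising step, like the app's helper does.
    for step in range(num_inference_steps):
        yield f"<preview of {prompt!r} at step {step}>"

for img in fake_step_images("a cat", 4):
    print(img)  # the app forwards each preview to the UI instead
```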
@@ -374,25 +372,23 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
 
 def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
     try:
-        #
+        # Detect and translate Korean text if present
         if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
             try:
                 translated = translator(prompt, max_length=512)[0]['translation_text']
-                print(f"
-                print(f"
+                print(f"Original prompt: {prompt}")
+                print(f"Translated prompt: {translated}")
                 prompt = translated
             except Exception as e:
-                print(f"
-                #
-
-
+                print(f"Translation failed: {str(e)}")
+                # Use the original prompt if translation fails
 
         if not selected_indices:
             raise gr.Error("You must select at least one LoRA before proceeding.")
 
         selected_loras = [loras_state[idx] for idx in selected_indices]
 
-        # Build the prompt with trigger words
+        # Build the prompt with trigger words
         prepends = []
         appends = []
         for lora in selected_loras:
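The `any(...)` test above covers two Unicode blocks: Hangul Compatibility Jamo (U+3131–U+318E) and Hangul Syllables (U+AC00–U+D7A3), which is enough to detect typical Korean input. The same check, pulled out as a standalone, testable function:

```python
def contains_korean(text: str) -> bool:
    # Hangul Compatibility Jamo (U+3131-U+318E) or Hangul Syllables (U+AC00-U+D7A3)
    return any('\u3131' <= ch <= '\u318E' or '\uAC00' <= ch <= '\uD7A3' for ch in text)

assert contains_korean("고양이")      # Hangul syllables
assert not contains_korean("a cat")  # ASCII only
```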
@@ -451,7 +447,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
 
         print(f"Active adapters after loading: {pipe.get_active_adapters()}")
 
-        #
+        # Randomize seed if requested
         with calculateDuration("Randomizing seed"):
             if randomize_seed:
                 seed = random.randint(0, MAX_SEED)
@@ -536,6 +532,7 @@ def update_history(new_image, history):
     history.insert(0, new_image)
     return history
 
+# Custom theme configuration
 custom_theme = gr.themes.Base(
     primary_hue="blue",
     secondary_hue="purple",
@@ -574,7 +571,7 @@ custom_theme = gr.themes.Base(
 )
 
 css = '''
-/*
+/* Basic button and component styles */
 #gen_btn {
     height: 100%
 }
@@ -596,11 +593,11 @@ css = '''
 
 #lora_list {
     background: var(--block-background-fill);
-    padding: 0 1em .3em;
+    padding: 0 1em 0.3em;
     font-size: 90%
 }
 
-/*
+/* Custom LoRA card styles */
 .custom_lora_card {
     margin-bottom: 1em
 }
@@ -608,19 +605,19 @@ css = '''
 .card_internal {
     display: flex;
     height: 100px;
-    margin-top: .5em
+    margin-top: 0.5em
 }
 
 .card_internal img {
     margin-right: 1em
 }
 
-/*
+/* Utility classes */
 .styler {
     --form-gap-width: 0px !important
 }
 
-/*
+/* Progress bar styles */
 #progress {
     height: 30px;
     width: 90% !important;
@@ -647,7 +644,7 @@ css = '''
     transition: width 0.5s ease-in-out
 }
 
-/*
+/* Component-specific styles */
 #component-8, .button_total {
     height: 100%;
     align-self: stretch;
@@ -674,7 +671,7 @@ css = '''
     align-self: stretch;
 }
 
-/*
+/* Gallery main styles */
 #lora_gallery {
     margin: 20px 0;
     padding: 10px;
@@ -687,7 +684,7 @@ css = '''
     display: block !important;
 }
 
-/*
+/* Gallery grid styles */
 #gallery {
     display: grid !important;
     grid-template-columns: repeat(10, 1fr) !important;
@@ -699,7 +696,7 @@ css = '''
     max-width: 100% !important;
 }
 
-/*
+/* Gallery item styles */
 .gallery-item {
     position: relative !important;
     width: 100% !important;
@@ -718,7 +715,7 @@ css = '''
     border-radius: 12px !important;
 }
 
-/*
+/* Gallery grid wrapper */
 .wrap, .svelte-w6dy5e {
     display: grid !important;
     grid-template-columns: repeat(10, 1fr) !important;
@@ -727,7 +724,7 @@ css = '''
     max-width: 100% !important;
 }
 
-/*
+/* Common container styles */
 .container, .content, .block, .contain {
     width: 100% !important;
     max-width: 100% !important;
@@ -741,7 +738,7 @@ css = '''
     padding: 0 !important;
 }
 
-/*
+/* Button styles */
 .button_total {
     box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
     transition: all 0.3s ease;
@@ -752,7 +749,7 @@ css = '''
     box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
 }
 
-/*
+/* Input field styles */
 input, textarea {
     box-shadow: inset 0 2px 4px 0 rgba(0, 0, 0, 0.06);
     transition: all 0.3s ease;
@@ -762,14 +759,14 @@ input:focus, textarea:focus {
     box-shadow: 0 0 0 3px rgba(66, 153, 225, 0.5);
 }
 
-/*
+/* Component border-radius */
 .gradio-container .input,
 .gradio-container .button,
 .gradio-container .block {
     border-radius: 12px;
 }
 
-/*
+/* Scrollbar styles */
 #gallery::-webkit-scrollbar {
     width: 8px;
 }
@@ -788,57 +785,57 @@ input:focus, textarea:focus {
     background: #555;
 }
 
-/* Flex
+/* Flex container */
 .flex {
     width: 100% !important;
     max-width: 100% !important;
     display: flex !important;
 }
 
-/* Svelte
+/* Svelte specific classes */
 .svelte-1p9xokt {
     width: 100% !important;
     max-width: 100% !important;
 }
 
-/* Footer
+/* Hide Footer */
 #footer {
     visibility: hidden;
 }
 
-/*
+/* Generated image and container styles */
 #result_column, #result_column > div {
     display: flex !important;
     flex-direction: column !important;
-    align-items: flex-start !important;
+    align-items: flex-start !important;
     width: 100% !important;
-    margin: 0 !important;
+    margin: 0 !important;
 }
 
 .generated-image, .generated-image > div {
     display: flex !important;
-    justify-content: flex-start !important;
-    align-items: flex-start !important;
+    justify-content: flex-start !important;
+    align-items: flex-start !important;
     width: 90% !important;
     max-width: 768px !important;
-    margin: 0 !important;
-    margin-left: 20px !important;
+    margin: 0 !important;
+    margin-left: 20px !important;
 }
 
 .generated-image img {
-    margin: 0 !important;
+    margin: 0 !important;
     display: block !important;
     max-width: 100% !important;
 }
 
-/*
+/* History gallery left alignment */
 .history-gallery {
     display: flex !important;
-    justify-content: flex-start !important;
+    justify-content: flex-start !important;
     width: 90% !important;
     max-width: 90% !important;
-    margin: 0 !important;
-    margin-left: 20px !important;
+    margin: 0 !important;
+    margin-left: 20px !important;
 }
 '''
 
@@ -848,9 +845,11 @@ with gr.Blocks(theme=custom_theme, css=css, delete_cache=(60, 3600)) as app:
 
     gr.Markdown(
         """
-        # GiniGen:
-        ###
-
+        # GiniGen: Multi-LoRA (Image Training) Integrated Generation Model
+        ### Instructions:
+        Select a model from the gallery (up to 3 models) |
+        Enter your prompt in Korean or English |
+        Click the **Generate** button
         """
     )
 
@@ -922,7 +921,7 @@ with gr.Blocks(theme=custom_theme, css=css, delete_cache=(60, 3600)) as app:
         # Result and Progress Area
         with gr.Column(elem_id="result_column"):
             progress_bar = gr.Markdown(elem_id="progress", visible=False)
-            with gr.Column(elem_id="result_box"):
+            with gr.Column(elem_id="result_box"):
                 result = gr.Image(
                     label="Generated Image",
                     interactive=False,
@@ -940,12 +939,11 @@ with gr.Blocks(theme=custom_theme, css=css, delete_cache=(60, 3600)) as app:
                 elem_classes=["history-gallery"]
             )
 
-
     # Advanced Settings
     with gr.Row():
         with gr.Accordion("Advanced Settings", open=False):
             with gr.Row():
-                input_image = gr.Image(label="Input
+                input_image = gr.Image(label="Input Image", type="filepath")
                 image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
             with gr.Column():
                 with gr.Row():
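The slider's hint ("lower means more image influence") follows the usual diffusers image-to-image convention: strength scales how much of the denoising schedule actually runs, so low values preserve most of the input image. A sketch of that relationship, assuming standard diffusers img2img behavior:

```python
def effective_steps(num_inference_steps: int, strength: float) -> int:
    # diffusers-style img2img runs roughly int(steps * strength) denoising steps.
    return int(num_inference_steps * strength)

print(effective_steps(28, 0.75))  # -> 21 steps; at strength 0.1 only 2 steps run
```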
@@ -955,7 +953,7 @@ with gr.Blocks(theme=custom_theme, css=css, delete_cache=(60, 3600)) as app:
                     width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
                     height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
                 with gr.Row():
-                    randomize_seed = gr.Checkbox(True, label="Randomize
+                    randomize_seed = gr.Checkbox(True, label="Randomize Seed")
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
 
     # Custom LoRA Section
@@ -1013,7 +1011,7 @@ with gr.Blocks(theme=custom_theme, css=css, delete_cache=(60, 3600)) as app:
         inputs=[custom_lora, selected_indices, loras_state],
         outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_info_3,
                  selected_indices, lora_scale_1, lora_scale_2, lora_scale_3,
-                 lora_image_1, lora_image_2, lora_image_3]
+                 lora_image_1, lora_image_2, lora_image_3, remove_custom_lora_button]
     )
 
     remove_custom_lora_button.click(
@@ -1021,7 +1019,7 @@ with gr.Blocks(theme=custom_theme, css=css, delete_cache=(60, 3600)) as app:
         inputs=[selected_indices, loras_state],
         outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_info_3,
                  selected_indices, lora_scale_1, lora_scale_2, lora_scale_3,
-                 lora_image_1, lora_image_2, lora_image_3]
+                 lora_image_1, lora_image_2, lora_image_3, remove_custom_lora_button]
     )
 
     gr.on(
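Both `outputs` lists above grow by one entry (`remove_custom_lora_button`) to match the extra `gr.update(visible=...)` the handlers now return: the number of returned values must line up with the registered outputs. A minimal sketch of that contract, with illustrative components that are not from the app:

```python
import gradio as gr

def handler():
    # Three outputs are registered below, so return exactly three values;
    # a bare gr.update() leaves its component unchanged.
    return "removed", gr.update(), gr.update(visible=False)

with gr.Blocks() as demo:
    status = gr.Textbox(label="Status")
    image = gr.Image()
    remove_btn = gr.Button("Remove")
    remove_btn.click(handler, inputs=[], outputs=[status, image, remove_btn])
```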
@@ -1039,4 +1037,4 @@ with gr.Blocks(theme=custom_theme, css=css, delete_cache=(60, 3600)) as app:
 
 if __name__ == "__main__":
     app.queue(max_size=20)
-    app.launch(debug=True)
+    app.launch(debug=True)