Commit 0ea1b17
root committed
1 Parent(s): aadbd6e
update ui
Files changed:
- app.py (+30 -9)
- model.README.md (+24 -0)
- token_identifier.txt (+1 -0)
- train_dreambooth.py (+2 -0)
app.py
CHANGED
@@ -34,13 +34,12 @@ if(is_gpu_associated):
     # model_v1 = snapshot_download(repo_id="multimodalart/sd-fine-tunable")
     # model_v2 = snapshot_download(repo_id="stabilityai/stable-diffusion-2")
     # model_v2_512 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-base")
-    model_alt = snapshot_download(repo_id="BAAI/AltDiffusion")
-    model_alt_m9 = snapshot_download(repo_id="BAAI/AltDiffusion-m9")
-    safety_checker = snapshot_download(repo_id="multimodalart/sd-sc")
-    model_to_load = model_alt_m9
-
-    with zipfile.ZipFile("mix.zip", 'r') as zip_ref:
-        zip_ref.extractall(".")
+    # model_alt = snapshot_download(repo_id="BAAI/AltDiffusion")
+    # model_alt_m9 = snapshot_download(repo_id="BAAI/AltDiffusion-m9")
+    # safety_checker = snapshot_download(repo_id="multimodalart/sd-sc")
+    model_to_load = None # model_alt_m9
+    # with zipfile.ZipFile("mix.zip", 'r') as zip_ref:
+    #     zip_ref.extractall(".")
 
 def swap_text(option, base):
     resize_width = 768 if base == "v2-768" else 512
@@ -450,6 +449,27 @@ def checkbox_swap(checkbox):
     return [gr.update(visible=checkbox), gr.update(visible=checkbox), gr.update(visible=checkbox), gr.update(visible=checkbox)]
 
 with gr.Blocks(css=css) as demo:
+    gr.HTML(f'''
+        <div style="text-align: center; max-width: 650px; margin: 0 auto;">
+            <div
+            style="
+                display: inline-flex;
+                gap: 1.2rem;
+                font-size: 1.75rem;
+                margin-bottom: 10px;
+                width: 700px;
+                height: 100px;
+                margin: 0 auto;
+                /* border: 1px solid red; */
+                justify-content: center;
+            ">
+            <a href="https://github.com/FlagAI-Open/FlagAI"><img src="https://raw.githubusercontent.com/920232796/test/master/WechatIMG6906.png" alt="FlagAI" width="80%" height="80%" style="margin: 0 auto;"></a>
+            </div>
+            <p style="margin-bottom: 10px; font-size: 94%">
+            This is a dreambooth Training UI for <a href="https://huggingface.co/BAAI/AltDiffusion-m9" style="text-decoration: underline;">AltDiffusion-m9 model</a>,which is a multilingual image-to-text model supported 9 languages.
+            </p>
+        </div>
+    ''')
     with gr.Box():
         if is_shared_ui:
             top_description = gr.HTML(f'''
@@ -482,12 +502,13 @@ with gr.Blocks(css=css) as demo:
                 <p>Do a <code>pip install requirements-local.txt</code></p>
             </div>
             ''')
+
     gr.Markdown("# Dreambooth Training UI 💭")
-    gr.Markdown("Customize
+    gr.Markdown("Customize AltDiffusion and AltDiffusion-m9(ⁿᵉʷ!) by giving it a few examples of a concept. Based on the [🧨 diffusers](https://github.com/huggingface/diffusers) implementation, additional techniques from [TheLastBen](https://github.com/TheLastBen/diffusers) and [ShivamShrirao](https://github.com/ShivamShrirao/diffusers)")
 
     with gr.Row() as what_are_you_training:
         type_of_thing = gr.Dropdown(label="What would you like to train?", choices=["object", "person", "style"], value="object", interactive=True)
-        base_model_to_use = gr.Dropdown(label="Which base model would you like to use?", choices=["
+        base_model_to_use = gr.Dropdown(label="Which base model would you like to use?", choices=["alt", "alt_m9"], value="alt_m9", interactive=True)
 
     #Very hacky approach to emulate dynamically created Gradio components
     with gr.Row() as upload_your_concept:
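Note on the app.py change: the removed block downloaded both AltDiffusion checkpoints eagerly at startup, while the new code only records the dropdown choice ("alt" / "alt_m9") and leaves model_to_load as None. A minimal sketch of resolving that choice lazily at training time follows; the ALT_REPOS mapping and resolve_base_model helper are illustrative assumptions, not code from this commit.

from huggingface_hub import snapshot_download

# Assumed mapping from the new dropdown values to their Hub repositories.
ALT_REPOS = {
    "alt": "BAAI/AltDiffusion",
    "alt_m9": "BAAI/AltDiffusion-m9",
}

def resolve_base_model(base_model_to_use: str) -> str:
    """Download the selected AltDiffusion variant on demand and return its local path."""
    return snapshot_download(repo_id=ALT_REPOS[base_model_to_use])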
model.README.md
ADDED
@@ -0,0 +1,24 @@
+---
+license: creativeml-openrail-m
+tags:
+- text-to-image
+widget:
+- text: sks
+---
+### test-m9 Dreambooth model trained by Alon77777 with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the alt_m9 base model
+
+You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts!
+
+Sample pictures of:
+
+
+
+
+
+
+
+
+
+
+   sks (use that on your prompt)
+
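The generated README points at the diffusers Dreambooth inference notebook and at the sks token stored in token_identifier.txt. A rough inference sketch under those assumptions follows; the repo id "Alon77777/test-m9" is inferred from the README heading and may differ from the actual pushed model name.

import torch
from diffusers import DiffusionPipeline

# Illustrative only: load the pushed Dreambooth weights (repo id assumed from the
# README heading) and prompt with the "sks" identifier from token_identifier.txt.
pipe = DiffusionPipeline.from_pretrained(
    "Alon77777/test-m9", torch_dtype=torch.float16
).to("cuda")

image = pipe("a photo of sks", num_inference_steps=50).images[0]
image.save("sks.png")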
token_identifier.txt
ADDED
@@ -0,0 +1 @@
+sks
train_dreambooth.py
CHANGED
@@ -728,6 +728,8 @@ def run_training(args_imported):
         if args.train_text_encoder:
             text_encoder.train()
         for step, batch in enumerate(train_dataloader):
+            import pdb
+            pdb.set_trace()
             with accelerator.accumulate(unet):
                 # Convert images to latent space
                 with torch.no_grad():
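The two added lines open an interactive pdb session at the top of every training step, which will stall a headless Space, so they look like temporary debugging. For reference, a minimal sketch of the built-in equivalent (Python 3.7+), which can be switched off via PYTHONBREAKPOINT=0 without editing the file; the dummy loop below is illustrative and not from the commit.

# Minimal sketch, not from the commit: breakpoint() behaves like pdb.set_trace()
# but honors the PYTHONBREAKPOINT environment variable (set it to 0 to disable).
def train_loop(train_dataloader):
    for step, batch in enumerate(train_dataloader):
        breakpoint()  # pauses here on every step, like the added pdb.set_trace()
        _ = step, batch  # the training step would continue here once resumed

train_loop([{"pixel_values": None}])  # dummy one-item "dataloader" for illustration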