Varun258 commited on
Commit
4c35f0a
·
verified ·
1 Parent(s): 5c2e944

Upload 89 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +29 -0
  2. .gitignore +195 -0
  3. LICENSE +201 -0
  4. README.md +243 -13
  5. app.py +263 -0
  6. assets/comfyui_template.png +3 -0
  7. assets/gradio_examples/1identity/config.json +6 -0
  8. assets/gradio_examples/1identity/ref.webp +0 -0
  9. assets/gradio_examples/2identity/config.json +6 -0
  10. assets/gradio_examples/2identity/ref.jpg +0 -0
  11. assets/gradio_examples/3identity/config.json +6 -0
  12. assets/gradio_examples/3identity/ref.webp +0 -0
  13. assets/gradio_examples/4subject/config.json +6 -0
  14. assets/gradio_examples/4subject/ref.jpg +3 -0
  15. assets/gradio_examples/5style_subject/config.json +7 -0
  16. assets/gradio_examples/5style_subject/ref1.webp +0 -0
  17. assets/gradio_examples/5style_subject/ref2.webp +0 -0
  18. assets/gradio_examples/6style_subject/config.json +7 -0
  19. assets/gradio_examples/6style_subject/ref1.webp +0 -0
  20. assets/gradio_examples/6style_subject/ref2.webp +0 -0
  21. assets/gradio_examples/7style_subject/config.json +7 -0
  22. assets/gradio_examples/7style_subject/ref1.webp +0 -0
  23. assets/gradio_examples/7style_subject/ref2.webp +0 -0
  24. assets/gradio_examples/8style/config.json +6 -0
  25. assets/gradio_examples/8style/ref.webp +0 -0
  26. assets/gradio_examples/9style/config.json +6 -0
  27. assets/gradio_examples/9style/ref.webp +0 -0
  28. assets/gradio_examples/identity1.jpg +0 -0
  29. assets/gradio_examples/identity1_result.png +3 -0
  30. assets/gradio_examples/identity2.webp +0 -0
  31. assets/gradio_examples/identity2_style2_result.webp +0 -0
  32. assets/gradio_examples/style1.webp +0 -0
  33. assets/gradio_examples/style1_result.webp +0 -0
  34. assets/gradio_examples/style2.webp +0 -0
  35. assets/gradio_examples/style3.webp +3 -0
  36. assets/gradio_examples/style3_style4_result.webp +3 -0
  37. assets/gradio_examples/style4.webp +3 -0
  38. assets/gradio_examples/z1_mix_style/config.json +7 -0
  39. assets/gradio_examples/z1_mix_style/ref1.webp +3 -0
  40. assets/gradio_examples/z1_mix_style/ref2.webp +3 -0
  41. assets/gradio_examples/z2_mix_style/config.json +7 -0
  42. assets/gradio_examples/z2_mix_style/ref1.png +0 -0
  43. assets/gradio_examples/z2_mix_style/ref2.png +0 -0
  44. assets/gradio_examples/z3_mix_style/config.json +8 -0
  45. assets/gradio_examples/z3_mix_style/ref1.jpg +3 -0
  46. assets/gradio_examples/z3_mix_style/ref2.png +0 -0
  47. assets/gradio_examples/z3_mix_style/ref3.png +0 -0
  48. assets/gradio_examples/z4_t2i/config.json +5 -0
  49. assets/show_case1.webp +3 -0
  50. assets/show_case2.webp +3 -0
.gitattributes CHANGED
@@ -33,3 +33,32 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ assets/comfyui_template.png filter=lfs diff=lfs merge=lfs -text
37
+ assets/gradio_examples/4subject/ref.jpg filter=lfs diff=lfs merge=lfs -text
38
+ assets/gradio_examples/identity1_result.png filter=lfs diff=lfs merge=lfs -text
39
+ assets/gradio_examples/style3_style4_result.webp filter=lfs diff=lfs merge=lfs -text
40
+ assets/gradio_examples/style3.webp filter=lfs diff=lfs merge=lfs -text
41
+ assets/gradio_examples/style4.webp filter=lfs diff=lfs merge=lfs -text
42
+ assets/gradio_examples/z1_mix_style/ref1.webp filter=lfs diff=lfs merge=lfs -text
43
+ assets/gradio_examples/z1_mix_style/ref2.webp filter=lfs diff=lfs merge=lfs -text
44
+ assets/gradio_examples/z3_mix_style/ref1.jpg filter=lfs diff=lfs merge=lfs -text
45
+ assets/show_case1.webp filter=lfs diff=lfs merge=lfs -text
46
+ assets/show_case2.webp filter=lfs diff=lfs merge=lfs -text
47
+ assets/show_case3.webp filter=lfs diff=lfs merge=lfs -text
48
+ assets/show_case4.webp filter=lfs diff=lfs merge=lfs -text
49
+ assets/show_case5.webp filter=lfs diff=lfs merge=lfs -text
50
+ assets/show_case6.webp filter=lfs diff=lfs merge=lfs -text
51
+ assets/show_case7.webp filter=lfs diff=lfs merge=lfs -text
52
+ assets/show_case8.webp filter=lfs diff=lfs merge=lfs -text
53
+ assets/teaser.webp filter=lfs diff=lfs merge=lfs -text
54
+ assets/usoxcomfyui_official.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ assets/usoxcomfyui.webp filter=lfs diff=lfs merge=lfs -text
56
+ workflow/17-17-29.webp.webp filter=lfs diff=lfs merge=lfs -text
57
+ workflow/example1.png filter=lfs diff=lfs merge=lfs -text
58
+ workflow/example2.png filter=lfs diff=lfs merge=lfs -text
59
+ workflow/example3.png filter=lfs diff=lfs merge=lfs -text
60
+ workflow/example4.png filter=lfs diff=lfs merge=lfs -text
61
+ workflow/example5.png filter=lfs diff=lfs merge=lfs -text
62
+ workflow/example6.png filter=lfs diff=lfs merge=lfs -text
63
+ workflow/input.png filter=lfs diff=lfs merge=lfs -text
64
+ workflow/style5_0.webp.webp filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+
110
+ # pdm
111
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112
+ #pdm.lock
113
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114
+ # in version control.
115
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116
+ .pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121
+ __pypackages__/
122
+
123
+ # Celery stuff
124
+ celerybeat-schedule
125
+ celerybeat.pid
126
+
127
+ # SageMath parsed files
128
+ *.sage.py
129
+
130
+ # Environments
131
+ .env
132
+ .venv
133
+ env/
134
+ venv/
135
+ ENV/
136
+ env.bak/
137
+ venv.bak/
138
+
139
+ # Spyder project settings
140
+ .spyderproject
141
+ .spyproject
142
+
143
+ # Rope project settings
144
+ .ropeproject
145
+
146
+ # mkdocs documentation
147
+ /site
148
+
149
+ # mypy
150
+ .mypy_cache/
151
+ .dmypy.json
152
+ dmypy.json
153
+
154
+ # Pyre type checker
155
+ .pyre/
156
+
157
+ # pytype static type analyzer
158
+ .pytype/
159
+
160
+ # Cython debug symbols
161
+ cython_debug/
162
+
163
+ # PyCharm
164
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
167
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168
+ #.idea/
169
+
170
+ # Ruff stuff:
171
+ .ruff_cache/
172
+
173
+ # PyPI configuration file
174
+ .pypirc
175
+
176
+ # User config files
177
+ .vscode/
178
+ output/
179
+
180
+ # ckpt
181
+ *.bin
182
+ *.pt
183
+ *.pth
184
+
185
+ logs/
186
+ *.safetensors
187
+
188
+
189
+
190
+ # FOR EXCLUSION OF DOWNLOADED WEIGHTS
191
+ # Ignore everything in weights/
192
+ weights/*
193
+
194
+ # But don't ignore this file
195
+ !weights/downloader.py
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md CHANGED
@@ -1,13 +1,243 @@
1
- ---
2
- title: Video Generator
3
- emoji: 🐠
4
- colorFrom: indigo
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 5.44.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <h3 align="center">
2
+ <img src="assets/uso.webp" alt="Logo" style="vertical-align: middle; width: 95px; height: auto;">
3
+ </br>
4
+ Unified Style and Subject-Driven Generation via Disentangled and Reward Learning
5
+ </h3>
6
+
7
+ <p align="center">
8
+ <a href="https://bytedance.github.io/USO/"><img alt="Build" src="https://img.shields.io/badge/Project%20Page-USO-blue"></a>
9
+ <a href="https://arxiv.org/abs/2508.18966"><img alt="Build" src="https://img.shields.io/badge/Tech%20Report-USO-b31b1b.svg"></a>
10
+ <a href="https://huggingface.co/bytedance-research/USO"><img src="https://img.shields.io/static/v1?label=%F0%9F%A4%97%20Hugging%20Face&message=Model&color=green"></a>
11
+ <a href="https://huggingface.co/spaces/bytedance-research/USO"><img src="https://img.shields.io/static/v1?label=%F0%9F%A4%97%20Hugging%20Face&message=demo&color=orange"></a>
12
+ </p>
13
+ </p>
14
+
15
+ ><p align="center"> <span style="color:#137cf3; font-family: Gill Sans">Shaojin Wu,</span><sup></sup></a> <span style="color:#137cf3; font-family: Gill Sans">Mengqi Huang,</span></a> <span style="color:#137cf3; font-family: Gill Sans">Yufeng Cheng,</span><sup></sup></a> <span style="color:#137cf3; font-family: Gill Sans">Wenxu Wu,</span><sup></sup> </a> <span style="color:#137cf3; font-family: Gill Sans">Jiahe Tian,</span><sup></sup></a> <span style="color:#137cf3; font-family: Gill Sans">Yiming Luo,</span><sup></sup></a> <span style="color:#137cf3; font-family: Gill Sans">Fei Ding,</span></a> <span style="color:#137cf3; font-family: Gill Sans">Qian He</span></a> <br>
16
+ ><span style="font-size: 13.5px">UXO Team</span><br>
17
+ ><span style="font-size: 12px">Intelligent Creation Lab, Bytedance</span></p>
18
+
19
+ ### 🚩 Updates
20
+ * **2025.09.03** 🎉 USO is now natively supported in ComfyUI, see official tutorial [USO in ComfyUI](https://docs.comfy.org/tutorials/flux/flux-1-uso) and our provided examples in `./workflow`. More tips are available in the [README below](https://github.com/bytedance/USO#%EF%B8%8F-comfyui-examples).
21
+ <p align="center">
22
+ <img src="assets/usoxcomfyui_official.jpeg" width=1024 height="auto">
23
+ </p>
24
+
25
+ * **2025.08.28** 🔥 The [demo](https://huggingface.co/spaces/bytedance-research/USO) of USO is released. Try it Now! ⚡️
26
+ * **2025.08.28** 🔥 Update fp8 mode as a primary low vmemory usage support (please scroll down). Gift for consumer-grade GPU users. The peak Vmemory usage is ~16GB now.
27
+ * **2025.08.27** 🔥 The [inference code](https://github.com/bytedance/USO) and [model](https://huggingface.co/bytedance-research/USO) of USO are released.
28
+ * **2025.08.27** 🔥 The [project page](https://bytedance.github.io/USO) of USO is created.
29
+ * **2025.08.27** 🔥 The [technical report](https://arxiv.org/abs/2508.18966) of USO is released.
30
+
31
+ ## 📖 Introduction
32
+ Existing literature typically treats style-driven and subject-driven generation as two disjoint tasks: the former prioritizes stylistic similarity, whereas the latter insists on subject consistency, resulting in an apparent antagonism. We argue that both objectives can be unified under a single framework because they ultimately concern the disentanglement and re-composition of “content” and “style”, a long-standing theme in style-driven research. To this end, we present USO, a Unified framework for Style driven and subject-driven GeneratiOn. First, we construct a large-scale triplet dataset consisting of content images, style images, and their corresponding stylized content images. Second, we introduce a disentangled learning scheme that simultaneously aligns style features and disentangles content from style through two complementary objectives, style-alignment training and content–style disentanglement training. Third, we incorporate a style reward-learning paradigm to further enhance the model’s performance.
33
+ <p align="center">
34
+ <img src="assets/teaser.webp" width="1024"/>
35
+ </p>
36
+
37
+ ## ⚡️ Quick Start
38
+
39
+ ### 🔧 Requirements and Installation
40
+
41
+ Install the requirements
42
+ ```bash
43
+ ## create a virtual environment with python >= 3.10 <= 3.12, like
44
+ python -m venv uso_env
45
+ source uso_env/bin/activate
46
+ ## or
47
+ conda create -n uso_env python=3.10 -y
48
+ conda activate uso_env
49
+
50
+ ## install torch
51
+ ## recommended version:
52
+ pip install torch==2.4.0 torchvision==0.19.0 --index-url https://download.pytorch.org/whl/cu124
53
+
54
+ ## then install the requirements by you need
55
+ pip install -r requirements.txt # legacy installation command
56
+ ```
57
+
58
+ Then download checkpoints:
59
+ ```bash
60
+ # 1. set up .env file
61
+ cp example.env .env
62
+
63
+ # 2. set your huggingface token in .env (open the file and change this value to your token)
64
+ HF_TOKEN=your_huggingface_token_here
65
+
66
+ #3. download the necessary weights (comment any weights you don't need)
67
+ pip install huggingface_hub
68
+ python ./weights/downloader.py
69
+ ```
70
+ - **IF YOU HAVE WEIGHTS, COMMENT OUT WHAT YOU DON'T NEED IN ./weights/downloader.py**
71
+
72
+ ### ✍️ Inference
73
+ * Start from the examples below to explore and spark your creativity. ✨
74
+ ```bash
75
+ # the first image is a content reference, and the rest are style references.
76
+
77
+ # for subject-driven generation
78
+ python inference.py --prompt "The man in flower shops carefully match bouquets, conveying beautiful emotions and blessings with flowers. " --image_paths "assets/gradio_examples/identity1.jpg" --width 1024 --height 1024
79
+ # for style-driven generation
80
+ # please keep the first image path empty
81
+ python inference.py --prompt "A cat sleeping on a chair." --image_paths "" "assets/gradio_examples/style1.webp" --width 1024 --height 1024
82
+ # for style-subject driven generation (or set the prompt to empty for layout-preserved generation)
83
+ python inference.py --prompt "The woman gave an impassioned speech on the podium." --image_paths "assets/gradio_examples/identity2.webp" "assets/gradio_examples/style2.webp" --width 1024 --height 1024
84
+ # for multi-style generation
85
+ # please keep the first image path empty
86
+ python inference.py --prompt "A handsome man." --image_paths "" "assets/gradio_examples/style3.webp" "assets/gradio_examples/style4.webp" --width 1024 --height 1024
87
+
88
+ # for low vram:
89
+ python inference.py --prompt "your prompt" --image_paths "your_image.jpg" --width 1024 --height 1024 --offload --model_type flux-dev-fp8
90
+ ```
91
+ * You can also compare your results with the results in the `assets/gradio_examples` folder.
92
+
93
+ * For more examples, visit our [project page](https://bytedance.github.io/USO) or try the live [demo](https://huggingface.co/spaces/bytedance-research/USO).
94
+
95
+ ### 🌟 Gradio Demo
96
+
97
+ ```bash
98
+ python app.py
99
+ ```
100
+
101
+ **For low vmemory usage**, please pass the `--offload` and `--name flux-dev-fp8` args. The peak memory usage will be 16GB (Single reference) ~ 18GB (Multi references).
102
+
103
+ ```bash
104
+ # please use FLUX_DEV_FP8 replace FLUX_DEV
105
+ export FLUX_DEV_FP8="YOUR_FLUX_DEV_PATH"
106
+
107
+ python app.py --offload --name flux-dev-fp8
108
+ ```
109
+
110
+ ## 🌈 More examples
111
+ We provide some prompts and results to help you better understand the model. You can check our [paper](https://arxiv.org/abs/2508.18966) or [project page](https://bytedance.github.io/USO/) for more visualizations.
112
+
113
+ #### Subject/Identity-driven generation
114
+ <details>
115
+ <summary>If you want to place a subject into a new scene, please use natural language like "A dog/man/woman is doing...". If you only want to transfer the style but keep the layout, please use an instructive prompt like "Transform the style into ... style". For portraits-preserved generation, USO excels at producing high skin-detail images. A practical guideline: use half-body close-ups for half-body prompts, and full-body images when the pose or framing changes significantly. </summary>
116
+ <p align="center">
117
+ <img src="assets/show_case1.webp" width="1024"/>
118
+ <p>
119
+ <p align="center">
120
+ <img src="assets/show_case2.webp" width="1024"/>
121
+ </p>
122
+ <p align="center">
123
+ <img src="assets/show_case3.webp" width="1024"/>
124
+ </p>
125
+ <p align="center">
126
+ <img src="assets/show_case4.webp" width="1024"/>
127
+ </p>
128
+ </details>
129
+
130
+
131
+ #### Style-driven generation
132
+ <details>
133
+ <summary>Just upload one or two style images, and use natural language to create what you want. USO will generate images that follow your prompt and match the style you uploaded. </summary>
134
+ <p align="center">
135
+ <img src="assets/show_case5.webp" width="1024"/>
136
+ <p>
137
+ <p align="center">
138
+ <img src="assets/show_case6.webp" width="1024"/>
139
+ </p>
140
+ </details>
141
+
142
+ #### Style-subject driven generation
143
+ <details>
144
+ <summary>USO can stylize a single content reference with one or two style refs. For layout-preserved generation, just set the prompt to empty. </summary>
145
+ `Layout-preserved generation`
146
+ <p align="center">
147
+ <img src="assets/show_case7.webp" width="1024"/>
148
+ </p>
149
+
150
+ `Layout-shifted generation`
151
+ <p align="center">
152
+ <img src="assets/show_case8.webp" width="1024"/>
153
+ </p>
154
+ </details>
155
+
156
+ ## ⚙️ ComfyUI examples
157
+ We’re pleased that USO now has native support in ComfyUI. For a quick start, please refer to the official tutorials [USO in ComfyUI](https://docs.comfy.org/tutorials/flux/flux-1-uso). To help you reproduce and match the results, we’ve provided several examples in `./workflows`, including **workflows** and their **inputs** and outputs, so you can quickly get familiar with what USO can do. With USO now fully compatible with the ComfyUI ecosystem, you can combine it with other plugins like ControlNet and LoRA. **We welcome community contributions of more workflows and examples.**
158
+
159
+ Now you can easily run USO in ComfyUI. Just update ComfyUI to the latest version (0.3.57), and you’ll find USO in the official templates.
160
+ <p align="center">
161
+ <img src="assets/comfyui_template.png" width=1024 height="auto">
162
+ </p>
163
+
164
+ More examples are provided below:
165
+ <p align="center">
166
+ <img src="assets/usoxcomfyui.webp" width=1024 height="auto">
167
+ </p>
168
+
169
+ **Identity preserved**
170
+ <p align="center">
171
+ <img src="workflow/example1.png" width=1024 height="auto">
172
+ </p>
173
+
174
+ Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example1.json). Input images can be found in `./workflow`
175
+
176
+ **Identity stylized**
177
+ <p align="center">
178
+ <img src="workflow/example3.png" width=1024 height="auto">
179
+ </p>
180
+
181
+ Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example3.json). Input images can be found in `./workflow`
182
+
183
+ **Identity + style reference**
184
+ <p align="center">
185
+ <img src="workflow/example2.png" width=1024 height="auto">
186
+ </p>
187
+
188
+ Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example2.json). Input images can be found in `./workflow`
189
+
190
+ **Single style reference**
191
+ <p align="center">
192
+ <img src="workflow/example4.png" width=1024 height="auto">
193
+ </p>
194
+
195
+ Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example4.json). Input images can be found in `./workflow`
196
+ <p align="center">
197
+ <img src="workflow/example6.png" width=1024 height="auto">
198
+ </p>
199
+
200
+ Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example6.json). Input images can be found in `./workflow`
201
+
202
+ **Multiple style reference**
203
+ <p align="center">
204
+ <img src="workflow/example5.png" width=1024 height="auto">
205
+ </p>
206
+
207
+ Download the image above and drag it into ComfyUI to load the corresponding [workflow](workflow/example5.json). Input images can be found in `./workflow`
208
+
209
+ ## 📄 Disclaimer
210
+ <p>
211
+ We open-source this project for academic research. The vast majority of images
212
+ used in this project are either generated or from open-source datasets. If you have any concerns,
213
+ please contact us, and we will promptly remove any inappropriate content.
214
+ Our project is released under the Apache 2.0 License. If you apply to other base models,
215
+ please ensure that you comply with the original licensing terms.
216
+ <br><br>This research aims to advance the field of generative AI. Users are free to
217
+ create images using this tool, provided they comply with local laws and exercise
218
+ responsible usage. The developers are not liable for any misuse of the tool by users.</p>
219
+
220
+ ## 🚀 Updates
221
+ For the purpose of fostering research and the open-source community, we plan to open-source the entire project, encompassing training, inference, weights, dataset etc. Thank you for your patience and support! 🌟
222
+ - [x] Release technical report.
223
+ - [x] Release github repo.
224
+ - [x] Release inference code.
225
+ - [x] Release model checkpoints.
226
+ - [x] Release huggingface space demo.
227
+ - Release training code.
228
+ - Release dataset.
229
+
230
+ ## Citation
231
+ If USO is helpful, please help to ⭐ the repo.
232
+
233
+ If you find this project useful for your research, please consider citing our paper:
234
+ ```bibtex
235
+ @article{wu2025uso,
236
+ title={USO: Unified Style and Subject-Driven Generation via Disentangled and Reward Learning},
237
+ author={Shaojin Wu and Mengqi Huang and Yufeng Cheng and Wenxu Wu and Jiahe Tian and Yiming Luo and Fei Ding and Qian He},
238
+ year={2025},
239
+ eprint={2508.18966},
240
+ archivePrefix={arXiv},
241
+ primaryClass={cs.CV},
242
+ }
243
+ ```
app.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
import json
import os
from pathlib import Path

# Load .env before importing anything that reads environment variables
# (e.g. SIGLIP_PATH, FLUX weight paths).
from dotenv import load_dotenv
load_dotenv()


import gradio as gr
import torch

from uso.flux.pipeline import USOPipeline
from transformers import SiglipVisionModel, SiglipImageProcessor


# Inline SVG assets used in the demo header banner.
with open("assets/uso_text.svg", "r", encoding="utf-8") as svg_file:
    text_content = svg_file.read()

with open("assets/uso_logo.svg", "r", encoding="utf-8") as svg_file:
    logo_content = svg_file.read()

# HTML header combining the USO wordmark, team credit, and logo.
title = f"""
<div style="display: flex; align-items: center; justify-content: center;">
<span style="transform: scale(0.7);margin-right: -5px;">{text_content}</span>
<span style="font-size: 1.8em;margin-left: -10px;font-weight: bold; font-family: Gill Sans;">by UXO Team</span>
<span style="margin-left: 0px; transform: scale(0.85); display: inline-block;">{logo_content}</span>
</div>
""".strip()

# Badge row linking to code, project page, paper, and weights.
# NOTE: the arXiv badge previously pointed at 2504.02160 (the UNO paper);
# corrected to the USO paper 2508.18966, matching the README citation.
badges_text = r"""
<div style="text-align: center; display: flex; justify-content: center; gap: 5px;">
<a href="https://github.com/bytedance/USO"><img src="https://img.shields.io/static/v1?label=GitHub&message=Code&color=green&logo=github"></a>
<a href="https://bytedance.github.io/USO/"><img alt="Build" src="https://img.shields.io/badge/Project%20Page-USO-yellow"></a>
<a href="https://arxiv.org/abs/2508.18966"><img alt="Build" src="https://img.shields.io/badge/arXiv%20paper-USO-b31b1b.svg"></a>
<a href="https://huggingface.co/bytedance-research/USO"><img src="https://img.shields.io/static/v1?label=%F0%9F%A4%97%20Hugging%20Face&message=Model&color=orange"></a>
</div>
""".strip()

# Usage tips rendered above the demo controls.
tips = """
### What is USO and How to use?
🎨 USO is a unified style-subject optimized customization model and the latest addition to the UXO family (<a href='https://github.com/bytedance/USO' target='_blank'> USO</a> and <a href='https://github.com/bytedance/UNO' target='_blank'> UNO</a>).
It can freely combine any subjects with any styles in any scenarios.

💡 We provide step-by-step instructions in our <a href='https://github.com/bytedance/USO#-more-examples' target='_blank'> Github Repo</a>.
Additionally, try the examples provided below the demo to quickly get familiar with USO and inspire your creativity!

### Updates
🔥 **2025.09.04** USO now has native support in ComfyUI (see <a href='https://docs.comfy.org/tutorials/flux/flux-1-uso' target='_blank'>ComfyUI's official documentation</a> for details). For more information, please also check out our <a href='https://github.com/bytedance/USO?tab=readme-ov-file#%EF%B8%8F-comfyui-examples' target='_blank'>GitHub Repo</a>.

<details>
<summary style="cursor: pointer; color: #d34c0e; font-weight: 500;">The model is trained on 1024x1024 resolution and supports 3 types of usage. Tips:</summary>

* **Only content img**: support following types:
    * Subject/Identity-driven (supports natural prompt, e.g., *A clock on the table.* *The woman near the sea.*, excels in producing **photorealistic portraits**)
    * Style edit (layout-preserved): *Transform the image into Ghibli style/Pixel style/Retro comic style/Watercolor painting style...*.
    * Style edit (layout-shift): *Ghibli style, the man on the beach.*.
* **Only style img**: Reference input style and generate anything following prompt. Excelling in this and further support multiple style references (in beta).
* **Content img + style img**: Place the content into the desired style.
    * Layout-preserved: set prompt to **empty**.
    * Layout-shift: using natural prompt.</details>"""

# Footer call-to-action shown under the Generate button.
star = """
### If USO is helpful, please help to ⭐ our <a href='https://github.com/bytedance/USO' target='_blank'> Github Repo</a>. Thanks a lot!"""
79
+
80
def get_examples(examples_dir: str = "assets/examples") -> list:
    """Collect Gradio example rows from per-example sub-directories.

    Each non-empty sub-directory of ``examples_dir`` must contain a
    ``config.json`` with ``prompt`` and ``seed`` keys, plus optional
    ``image_ref1``/``image_ref2``/``image_ref3`` entries naming reference
    images inside that same directory.

    Returns:
        A list of rows ``[prompt, ref1_path, ref2_path, ref3_path, seed]``,
        with ``None`` for any missing reference slot.
    """
    ans = []
    # Sort directories by name so example rows appear in the order the UI
    # label describes ("row 1-4: identity..."); Path.iterdir() alone yields
    # entries in an OS/filesystem-dependent order.
    for example in sorted(Path(examples_dir).iterdir()):
        if not example.is_dir() or len(os.listdir(example)) == 0:
            continue
        with open(example / "config.json") as f:
            example_dict = json.load(f)

        example_list = [example_dict["prompt"]]  # prompt

        for key in ["image_ref1", "image_ref2", "image_ref3"]:
            if key in example_dict:
                example_list.append(str(example / example_dict[key]))
            else:
                example_list.append(None)

        example_list.append(example_dict["seed"])
        ans.append(example_list)
    return ans
102
+
103
+
104
def create_demo(
    model_type: str,
    device: str = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu"),
    offload: bool = False,
):
    """Build and return the USO Gradio demo (not yet launched).

    Loads the USO pipeline and a SigLIP vision encoder, then wires up the
    Blocks UI: prompt box, up to three reference images, size/step/guidance
    controls, and a pre-baked example gallery.

    Args:
        model_type: Flux variant name forwarded to USOPipeline
            (e.g. "flux-dev", "flux-dev-fp8").
        device: Torch device string; defaults to cuda, then mps, then cpu.
        offload: Forwarded to USOPipeline; presumably enables CPU offload of
            unused submodels — confirm against USOPipeline's signature.

    Returns:
        The ``gr.Blocks`` demo object.
    """

    # hf_download set to false to prevent download of weights
    pipeline = USOPipeline(
        model_type, device, offload, only_lora=True, lora_rank=128, hf_download=False
    )
    print("USOPipeline loaded successfully")

    # ⚠️ Weights now load from local paths via .env instead of downloading
    siglip_path = os.getenv("SIGLIP_PATH", "google/siglip-so400m-patch14-384")
    siglip_processor = SiglipImageProcessor.from_pretrained(siglip_path)
    siglip_model = SiglipVisionModel.from_pretrained(siglip_path)

    # Attach the SigLIP vision tower (eval mode, on the target device) to the
    # pipeline's model so reference images can be encoded at generation time.
    siglip_model.eval()
    siglip_model.to(device)
    pipeline.model.vision_encoder = siglip_model
    pipeline.model.vision_encoder_processor = siglip_processor
    print("SigLIP model loaded successfully")

    with gr.Blocks() as demo:
        # Header: logo banner, badge links, and usage tips (module constants).
        gr.Markdown(title)
        gr.Markdown(badges_text)
        gr.Markdown(tips)
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(label="Prompt", value="A beautiful woman.")
                # Up to three reference images: content, style, extra style.
                with gr.Row():
                    image_prompt1 = gr.Image(
                        label="Content Reference Img", visible=True, interactive=True, type="pil"
                    )
                    image_prompt2 = gr.Image(
                        label="Style Reference Img", visible=True, interactive=True, type="pil"
                    )
                    image_prompt3 = gr.Image(
                        label="Extra Style Reference Img (Beta)", visible=True, interactive=True, type="pil"
                    )

                # Output resolution controls (model trained at 1024x1024).
                with gr.Row():
                    with gr.Row():
                        width = gr.Slider(
                            512, 1536, 1024, step=16, label="Generation Width"
                        )
                        height = gr.Slider(
                            512, 1536, 1024, step=16, label="Generation Height"
                        )
                with gr.Row():
                    with gr.Row():
                        keep_size = gr.Checkbox(
                            label="Keep input size",
                            value=False,
                            interactive=True
                        )
                    with gr.Column():
                        gr.Markdown("Set it to True if you only need style editing or want to keep the layout.")

                with gr.Accordion("Advanced Options", open=True):
                    with gr.Row():
                        num_steps = gr.Slider(
                            1, 50, 25, step=1, label="Number of steps"
                        )
                        guidance = gr.Slider(
                            1.0, 5.0, 4.0, step=0.1, label="Guidance", interactive=True
                        )
                        content_long_size = gr.Slider(
                            0, 1024, 512, step=16, label="Content reference size"
                        )
                        seed = gr.Number(-1, label="Seed (-1 for random)")

                generate_btn = gr.Button("Generate")
                gr.Markdown(star)

            with gr.Column():
                output_image = gr.Image(label="Generated Image")
                download_btn = gr.File(
                    label="Download full-resolution", type="filepath", interactive=False
                )
                gr.Markdown(
                    """
            ### ❗️ Important Usage Tips:
            - **Input Prompt**: Unless you only need Style Editing ("Transform the style into..."), use natural language ("A dog/man/woman is doing...") instead of instruction descriptions of subject, identity, or style.
            - **Input Content Image**: For portrait-preserving generation, USO excels at producing images with high skin detail. A practical guideline: use half-body close-ups when your prompt specifies a half-body subject, and full-body images—especially when the pose changes significantly.
            """
                )

        # Argument order must match pipeline.gradio_generate's signature.
        inputs = [
            prompt,
            image_prompt1,
            image_prompt2,
            image_prompt3,
            seed,
            width,
            height,
            guidance,
            num_steps,
            keep_size,
            content_long_size,
        ]
        generate_btn.click(
            fn=pipeline.gradio_generate,
            inputs=inputs,
            outputs=[output_image, download_btn],
        )

        # example_text = gr.Text("", visible=False, label="Case For:")
        # Example rows fill only the first five inputs; remaining controls
        # keep their current values when an example is selected.
        examples = get_examples("./assets/gradio_examples")

        gr.Examples(
            examples=examples,
            inputs=[
                prompt,
                image_prompt1,
                image_prompt2,
                image_prompt3,
                seed,
            ],
            # cache_examples='lazy',
            outputs=[output_image, download_btn],
            fn=pipeline.gradio_generate,
            label='row 1-4: identity/subject-driven; row 5-7: style-subject-driven; row 8-9: style-driven; row 10-12: multi-style-driven task; row 13: txt2img',
            examples_per_page=15
        )

        with gr.Accordion("Local Gradio Demo for Developers", open=False):
            gr.Markdown(
                'Please refer to our GitHub repository to [run the USO gradio demo locally](https://github.com/bytedance/USO?tab=readme-ov-file#-gradio-demo).'
            )
    return demo
239
+
240
+
241
if __name__ == "__main__":
    from typing import Literal

    from transformers import HfArgumentParser

    @dataclasses.dataclass
    class AppArgs:
        """Command-line options for launching the local USO demo."""

        # Flux base model variant to load.
        name: Literal["flux-dev", "flux-dev-fp8", "flux-schnell", "flux-krea-dev"] = "flux-dev"
        # Inference device; prefers CUDA, then Apple MPS, then CPU.
        device: Literal["cuda", "cpu", "mps"] = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
        offload: bool = dataclasses.field(
            default=False,
            metadata={
                # Fixed typo ("sequantial") and grammar in the CLI help text.
                "help": "If True, sequentially offload the models (ae, dit, text encoder) to CPU when not used."
            },
        )
        # Port for the Gradio HTTP server.
        port: int = 7860

    parser = HfArgumentParser([AppArgs])
    args_tuple = parser.parse_args_into_dataclasses()  # type: tuple[AppArgs]
    args = args_tuple[0]

    demo = create_demo(args.name, args.device, args.offload)
    demo.launch(server_port=args.port)
assets/comfyui_template.png ADDED

Git LFS Details

  • SHA256: 77bcc55c997e6f08ddba58f94426152bef87a9b15fa3148194d1c6c401278430
  • Pointer size: 131 Bytes
  • Size of remote file: 759 kB
assets/gradio_examples/1identity/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "The girl is riding a bike in a street.",
3
+ "seed": 3407,
4
+ "usage": "Identity-driven",
5
+ "image_ref1": "./ref.webp"
6
+ }
assets/gradio_examples/1identity/ref.webp ADDED
assets/gradio_examples/2identity/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "The man in flower shops carefully match bouquets, conveying beautiful emotions and blessings with flowers.",
3
+ "seed": 3407,
4
+ "usage": "Identity-driven",
5
+ "image_ref1": "./ref.jpg"
6
+ }
assets/gradio_examples/2identity/ref.jpg ADDED
assets/gradio_examples/3identity/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "Transform the image into Ghibli style.",
3
+ "seed": 3407,
4
+ "usage": "Identity-driven",
5
+ "image_ref1": "./ref.webp"
6
+ }
assets/gradio_examples/3identity/ref.webp ADDED
assets/gradio_examples/4subject/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "Wool felt style, a clock in the jungle.",
3
+ "seed": 3407,
4
+ "usage": "Subject-driven",
5
+ "image_ref1": "./ref.jpg"
6
+ }
assets/gradio_examples/4subject/ref.jpg ADDED

Git LFS Details

  • SHA256: 0e1eb6ca2c944f3bfaed3ace56f5f186ed073a477e0333e0237253d98f0c9267
  • Pointer size: 131 Bytes
  • Size of remote file: 139 kB
assets/gradio_examples/5style_subject/config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "",
3
+ "seed": 321,
4
+ "usage": "Style-subject-driven (layout-preserved)",
5
+ "image_ref1": "./ref1.webp",
6
+ "image_ref2": "./ref2.webp"
7
+ }
assets/gradio_examples/5style_subject/ref1.webp ADDED
assets/gradio_examples/5style_subject/ref2.webp ADDED
assets/gradio_examples/6style_subject/config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "The woman gave an impassioned speech on the podium.",
3
+ "seed": 321,
4
+ "usage": "Style-subject-driven (layout-shifted)",
5
+ "image_ref1": "./ref1.webp",
6
+ "image_ref2": "./ref2.webp"
7
+ }
assets/gradio_examples/6style_subject/ref1.webp ADDED
assets/gradio_examples/6style_subject/ref2.webp ADDED
assets/gradio_examples/7style_subject/config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "The man gave an impassioned speech on the podium. ",
3
+ "seed": 42,
4
+ "usage": "Style-subject-driven (layout-shifted)",
5
+ "image_ref1": "./ref1.webp",
6
+ "image_ref2": "./ref2.webp"
7
+ }
assets/gradio_examples/7style_subject/ref1.webp ADDED
assets/gradio_examples/7style_subject/ref2.webp ADDED
assets/gradio_examples/8style/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "A cat sleeping on a chair.",
3
+ "seed": 3407,
4
+ "usage": "Style-driven",
5
+ "image_ref2": "./ref.webp"
6
+ }
assets/gradio_examples/8style/ref.webp ADDED
assets/gradio_examples/9style/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "A beautiful woman.",
3
+ "seed": 3407,
4
+ "usage": "Style-driven",
5
+ "image_ref2": "./ref.webp"
6
+ }
assets/gradio_examples/9style/ref.webp ADDED
assets/gradio_examples/identity1.jpg ADDED
assets/gradio_examples/identity1_result.png ADDED

Git LFS Details

  • SHA256: 7684256e44ce1bd4ada1e77a12674432eddd95b07fb388673899139afc56d864
  • Pointer size: 132 Bytes
  • Size of remote file: 1.54 MB
assets/gradio_examples/identity2.webp ADDED
assets/gradio_examples/identity2_style2_result.webp ADDED
assets/gradio_examples/style1.webp ADDED
assets/gradio_examples/style1_result.webp ADDED
assets/gradio_examples/style2.webp ADDED
assets/gradio_examples/style3.webp ADDED

Git LFS Details

  • SHA256: a1d272a0ecb03126503446b00a2152deab2045f89ac2c01f948e1099589d2862
  • Pointer size: 131 Bytes
  • Size of remote file: 142 kB
assets/gradio_examples/style3_style4_result.webp ADDED

Git LFS Details

  • SHA256: d09a5e429cc1d059aecd041e061868cd8e5b59f4718bb0f926fd84364f3794b0
  • Pointer size: 131 Bytes
  • Size of remote file: 173 kB
assets/gradio_examples/style4.webp ADDED

Git LFS Details

  • SHA256: b1ce04559726509672ce859d617a08d8dff8b2fe28f503fecbca7a5f66082882
  • Pointer size: 131 Bytes
  • Size of remote file: 290 kB
assets/gradio_examples/z1_mix_style/config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "A man.",
3
+ "seed": 321,
4
+ "usage": "Multi-style-driven",
5
+ "image_ref2": "./ref1.webp",
6
+ "image_ref3": "./ref2.webp"
7
+ }
assets/gradio_examples/z1_mix_style/ref1.webp ADDED

Git LFS Details

  • SHA256: a1d272a0ecb03126503446b00a2152deab2045f89ac2c01f948e1099589d2862
  • Pointer size: 131 Bytes
  • Size of remote file: 142 kB
assets/gradio_examples/z1_mix_style/ref2.webp ADDED

Git LFS Details

  • SHA256: b1ce04559726509672ce859d617a08d8dff8b2fe28f503fecbca7a5f66082882
  • Pointer size: 131 Bytes
  • Size of remote file: 290 kB
assets/gradio_examples/z2_mix_style/config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "Boat on water.",
3
+ "seed": 321,
4
+ "usage": "Multi-style-driven",
5
+ "image_ref2": "./ref1.png",
6
+ "image_ref3": "./ref2.png"
7
+ }
assets/gradio_examples/z2_mix_style/ref1.png ADDED
assets/gradio_examples/z2_mix_style/ref2.png ADDED
assets/gradio_examples/z3_mix_style/config.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "prompt": "",
3
+ "seed": 321,
4
+ "usage": "Multi-style-driven",
5
+ "image_ref1": "./ref1.jpg",
6
+ "image_ref2": "./ref2.png",
7
+ "image_ref3": "./ref3.png"
8
+ }
assets/gradio_examples/z3_mix_style/ref1.jpg ADDED

Git LFS Details

  • SHA256: 6b8d8b0e22c91297ed23fabf03b0a3574a717efe8e578d6a9d51f5367b7fb0ee
  • Pointer size: 131 Bytes
  • Size of remote file: 167 kB
assets/gradio_examples/z3_mix_style/ref2.png ADDED
assets/gradio_examples/z3_mix_style/ref3.png ADDED
assets/gradio_examples/z4_t2i/config.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "prompt": "A beautiful woman.",
3
+ "seed": 0,
4
+ "usage": "Text-to-image"
5
+ }
assets/show_case1.webp ADDED

Git LFS Details

  • SHA256: 61676ad26a250e7bd17d7b267d8f91c1b166fabb9893da6ec0e3fac85482aa78
  • Pointer size: 131 Bytes
  • Size of remote file: 197 kB
assets/show_case2.webp ADDED

Git LFS Details

  • SHA256: 4954c6dd760437322691f56aef532d1a1a1a9f2ebfafb1050372023d5195c849
  • Pointer size: 131 Bytes
  • Size of remote file: 204 kB