Commit 88cedf6 · Parent: 4782e1d

Update app.py

app.py CHANGED
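Summary of the change: infer() now runs the incoming prompt through getTextTrans (zh→en) and appends a random integer to the result, and two blocks of dead code are deleted — a commented-out JavaScript auto-submit experiment inside the start_work script, and the commented-out single-model diffusers demo at the end of the file.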
@@ -28,7 +28,7 @@ for model_id in model_ids.keys():
     pass

 def infer(prompt):
-
+    prompt = getTextTrans(prompt, source='zh', target='en') + f',{random.randint(0,sys.maxsize)}'
     return prompt

 start_work = """async() => {
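The only functional change is in infer(): the prompt is first passed through getTextTrans (presumably the Space's own zh→en translation helper; its definition is not shown in this diff), then salted with a random integer so the returned string differs on every call — apparently so downstream listeners fire even when the same prompt is submitted twice. A minimal sketch of the resulting behavior, with a hypothetical no-op stand-in for getTextTrans:

import random
import sys

def getTextTrans(text, source='zh', target='en'):
    # Hypothetical stand-in: the real helper presumably calls a translation
    # service; this stub just returns the input unchanged.
    return text

def infer(prompt):
    # Translate, then append a random integer so the output value always
    # changes, even for a repeated prompt.
    prompt = getTextTrans(prompt, source='zh', target='en') + f',{random.randint(0, sys.maxsize)}'
    return prompt

print(infer('一只戴着巫师帽的羊驼'))  # with the stub: '一只戴着巫师帽的羊驼,<random int>'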
@@ -122,33 +122,7 @@ start_work = """async() => {
         }
         window['checkPrompt_interval'] = window.setInterval("window.checkPrompt()", 100);
     }
-
-    /*
-    texts = gradioEl.querySelectorAll('textarea');
-    text0 = gradioEl.querySelectorAll('textarea')[0];
-    text1 = gradioEl.querySelectorAll('textarea')[0];
-
-    for (var i = 1; i < texts.length; i++) {
-        setNativeValue(texts[i], text0.value);
-        texts[i].dispatchEvent(new Event('input', { bubbles: true }));
-    }
-
-    var st = setTimeout(function() {
-        text1 = window['gradioEl'].querySelectorAll('textarea')[1];
-        console.log('do_click()_1_' + text1.value);
-
-        btns = window['gradioEl'].querySelectorAll('button');
-        for (var i = 0; i < btns.length; i++) {
-            if (btns[i].innerText == 'Submit') {
-                btns[i].focus();
-                btns[i].click();
-                //break;
-            }
-        }
-        console.log('do_click()_3_');
-    }, 10);
-    */
-
+
     return false;
}"""

@@ -173,76 +147,6 @@ with gr.Blocks(title='Text to Image') as demo:

     submit_btn.click(fn=infer, inputs=[prompt_input0], outputs=[prompt_input1])

-    # prompt_input = gr.Textbox(lines=4, label="Input prompt")
-    # tab_demo = gr.TabbedInterface([sd15_demo, sd20_demo, openjourney_demo], ["stable-diffusion-v1-5", "stable-diffusion-2", "openjourney"])
-
-    # demo = gr.Interface(fn=infer,
-    #                     inputs=[prompt_input],
-    #                     outputs=[tab_demo],
-    #                     )
-
 if __name__ == "__main__":
     demo.launch()

-
-
-# import os
-# os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
-# from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy
-
-# import gradio as gr
-# import PIL.Image
-# import numpy as np
-# import random
-# import torch
-# import subprocess
-
-# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-# # print('Using device:', device)
-
-# HF_TOKEN_SD=os.environ.get('HF_TOKEN_SD')
-
-# if 0==0:
-#     model_id = "runwayml/stable-diffusion-v1-5"
-
-#     model_id = "prompthero/openjourney"
-
-#     # pipeClass = StableDiffusionImg2ImgPipeline
-#     pipeClass = StableDiffusionPipeline
-#     className = pipeClass.__name__
-#     if className == 'StableDiffusionInpaintPipeline':
-#         model_id = "runwayml/stable-diffusion-inpainting"
-
-#     sd_pipe = pipeClass.from_pretrained(
-#         model_id,
-#         # revision="fp16",
-#         torch_dtype=torch.float16,
-#         # use_auth_token=HF_TOKEN_SD
-#     ) # .to(device)
-
-# def predict(prompt, steps=100, seed=42, guidance_scale=6.0):
-#     #torch.cuda.empty_cache()
-#     # print(subprocess.check_output(["nvidia-smi"], stderr=subprocess.STDOUT).decode("utf8"))
-#     generator = torch.manual_seed(seed)
-#     images = sd_pipe([prompt],
-#         generator=generator,
-#         num_inference_steps=steps,
-#         eta=0.3,
-#         guidance_scale=guidance_scale)["sample"]
-#     # print(subprocess.check_output(["nvidia-smi"], stderr=subprocess.STDOUT).decode("utf8"))
-#     return images[0]
-
-# random_seed = random.randint(0, 2147483647)
-# gr.Interface(
-#     predict,
-#     inputs=[
-#         gr.inputs.Textbox(label='Prompt', default='a chalk pastel drawing of a llama wearing a wizard hat'),
-#         gr.inputs.Slider(1, 100, label='Inference Steps', default=50, step=1),
-#         gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1),
-#         gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=6.0, step=0.1),
-#     ],
-#     outputs=gr.Image(shape=[256,256], type="pil", elem_id="output_image"),
-#     css="#output_image{width: 256px}",
-#     title="Text-to-Image_Latent_Diffusion",
-#     # description="This Spaces contains a text-to-image Latent Diffusion process for the <a href=\"https://huggingface.co/CompVis/ldm-text2im-large-256\">ldm-text2im-large-256</a> model by <a href=\"https://huggingface.co/CompVis\">CompVis</a> using the <a href=\"https://github.com/huggingface/diffusers\">diffusers library</a>. The goal of this demo is to showcase the diffusers library and you can check how the code works here. If you want the state-of-the-art experience with Latent Diffusion text-to-image check out the <a href=\"https://huggingface.co/spaces/multimodalart/latentdiffusion\">main Spaces</a>.",
-# ).launch()
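For context, the wiring that survives the cleanup is a gr.Blocks app whose Submit button pipes prompt_input0 through infer() into prompt_input1. A minimal, hypothetical reconstruction — only the submit_btn.click line and the Blocks title appear in the diff; the component definitions below are assumptions:

import gradio as gr

def infer(prompt):
    return prompt  # stand-in; see the hunk at line 28 for the real body

with gr.Blocks(title='Text to Image') as demo:
    prompt_input0 = gr.Textbox(lines=2, label="Input prompt")  # assumed definition
    prompt_input1 = gr.Textbox(label="Processed prompt")       # assumed definition
    submit_btn = gr.Button("Submit")                           # assumed definition
    # The only wiring visible in the diff context:
    submit_btn.click(fn=infer, inputs=[prompt_input0], outputs=[prompt_input1])

if __name__ == "__main__":
    demo.launch()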