update desc, layout
app.py CHANGED
@@ -37,28 +37,30 @@ be to the input. This pipeline requires a value of at least `1`. It's possible y
* Cropping the image so the face takes up a larger portion of the frame.
"""

-def chat(image_in,
+def chat(image_in, in_steps, in_guidance_scale, in_img_guidance_scale, prompt, history, progress=gr.Progress(track_tqdm=True)):
    progress(0, desc="Starting...")
    global counter
    #global seed
    #img_nm = f"./edited_image_{seed}.png"
-    counter += 1
-    #print(f"seed is : {seed}")
-    #print(f"image_in name is :{img_nm}")
-
+    #print(f"seed is:{seed}")
+    #print(f"image name is:{img_nm}")
+
+    counter += 1
    #if message == "revert": --to add revert functionality later
    if counter > 1:
        # Open the image
        image_in = Image.open("edited_image.png") #(img_nm)
-    prompt = message #eg - "turn him into cyborg"
-    edited_image = pipe(prompt, image=image_in, num_inference_steps=20, image_guidance_scale=1).images[0]
-    edited_image
+    #prompt = message #eg - "turn him into cyborg"
+    #edited_image = pipe(prompt, image=image_in, num_inference_steps=20, image_guidance_scale=1).images[0]
+    edited_image = pipe(prompt, image=image_in, num_inference_steps=int(in_steps), guidance_scale=float(in_guidance_scale), image_guidance_scale=float(in_img_guidance_scale)).images[0]
+    edited_image.save("edited_image.png") #("/tmp/edited_image.png") #(img_nm)
    history = history or []
-    add_text_list = ["There you go ", "Enjoy your image! ", "Nice work! Wonder what you gonna do next! ", "Way to go! ", "Does this work for you? ", "Something like this? "]
    #Resizing the image for better display
+    add_text_list = ["There you go", "Enjoy your image!", "Nice work! Wonder what you gonna do next!", "Way to go!", "Does this work for you?", "Something like this?"]
    #response = random.choice(add_text_list) + '<img src="/file=' + img_nm[2:] + '" style="width: 200px; height: 200px;">'
-    response =
-
+    #response = random.choice(add_text_list) + '<img src="/file=edited_image.png" style="width: 350px; height: 350px;">'
+    response = random.choice(add_text_list) + '<img src="/file=edited_image.png">' # style="width: 350px; height: 350px;">'
+    history.append((prompt, response))
    return history, history

with gr.Blocks() as demo:
@@ -73,9 +75,14 @@
    text_in = gr.Textbox()
    state_in = gr.State()
    b1 = gr.Button('Edit the image!')
+    with gr.Accordion("Advance settings for Training and Inference", open=False):
+        gr.Markdown("Advance settings for - Number of Inference steps, Guidanace scale, and Image guidance scale.")
+        in_steps = gr.Number(label="Enter the number of Inference steps", value = 20)
+        in_guidance_scale = gr.Slider(1,10, step=0.5, label="Set Guidance scale", value=7.5)
+        in_img_guidance_scale = gr.Slider(1,10, step=0.5, label="Set Image Guidance scale", value=1.5)
    chatbot = gr.Chatbot()
-    b1.click(chat,[image_in, text_in, state_in], [chatbot, state_in])
+    b1.click(chat,[image_in, in_steps, in_guidance_scale, in_img_guidance_scale, text_in, state_in], [chatbot, state_in]) #, queue=True)
    gr.Markdown(help_text)

demo.queue(concurrency_count=10)
-demo.launch(debug=True, width="80%", height=
+demo.launch(debug=True, width="80%", height=2000)
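For context, a minimal sketch of the pipeline call that the updated `chat()` relies on. The diff never shows how `pipe` is constructed, so the loading step below is an assumption: it presumes a diffusers `StableDiffusionInstructPix2PixPipeline` loaded from the public `timbrooks/instruct-pix2pix` checkpoint, with the three new UI controls mapped onto the corresponding call arguments (file names here are illustrative only).

import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

# Assumed pipeline setup (not part of this diff): InstructPix2Pix from diffusers.
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

image_in = Image.open("input.png").convert("RGB")  # hypothetical input image

edited = pipe(
    "turn him into a cyborg",   # text instruction, i.e. the chatbot prompt
    image=image_in,
    num_inference_steps=20,     # maps to the in_steps gr.Number control
    guidance_scale=7.5,         # maps to the in_guidance_scale slider
    image_guidance_scale=1.5,   # maps to the in_img_guidance_scale slider
).images[0]
edited.save("edited_image.png")

With this mapping, the accordion defaults (20 steps, guidance 7.5, image guidance 1.5) replace the old hard-coded call, which fixed the steps at 20 and the image guidance scale at 1 and exposed no text guidance scale at all.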