create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import PIL
|
| 3 |
+
import requests
|
| 4 |
+
import torch
|
| 5 |
+
import gradio as gr
|
| 6 |
+
import random
|
| 7 |
+
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
|
| 8 |
+
|
| 9 |
+
# Pre-trained InstructPix2Pix checkpoint on the Hugging Face Hub.
model_id = "timbrooks/instruct-pix2pix"
# fp16 weights halve GPU memory; safety checker disabled by the author.
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16", safety_checker=None)
pipe.to("cuda")  # requires a CUDA-capable GPU
pipe.enable_attention_slicing()  # lower peak VRAM at a small speed cost

# Author's scratch example of a single edit, kept commented out:
#image = PIL.Image.open("./example.png")
#image = PIL.ImageOps.exif_transpose(image)
#image = image.convert("RGB")
#image
#prompt = "turn him into cyborg"
#pipe(prompt, image=image, num_inference_steps=20, image_guidance_scale=1).images[0]

# Number of edit requests served so far in this process. chat() reads it to
# decide whether to start from the uploaded image (first turn) or from the
# previously saved edit (later turns).
counter = 0
|
| 22 |
+
|
| 23 |
+
def chat(image_in, message, history): #, progress=gr.Progress(track_tqdm=True)):
    """Run one InstructPix2Pix edit turn and append it to the chat history.

    Parameters:
        image_in: PIL image uploaded by the user (used only on the first turn).
        message: the edit instruction, e.g. "turn him into cyborg".
        history: list of (message, response) pairs, or None on the first call.

    Returns:
        (history, history) — the same updated list twice, once for the
        Chatbot display and once for the State component.
    """
    #progress(0, desc="Starting...")
    global counter
    counter += 1
    #if message == "revert": --to add revert functionality later
    if counter > 1:
        # On later turns, continue editing the previously saved result
        # instead of the original upload (simple multi-turn editing).
        # BUG FIX: the original called Image.open(...) with only
        # `import PIL` in scope, raising NameError on every second turn.
        from PIL import Image
        image_in = Image.open("./edited_image.png")
    prompt = message #eg - "turn him into cyborg"
    edited_image = pipe(prompt, image=image_in, num_inference_steps=20, image_guidance_scale=1).images[0]
    edited_image.save("edited_image.png") #("./edited_image.png")
    history = history or []  # tolerate None state on the first call
    add_text_list = ["There you go", "Enjoy your image!", "Nice work! Wonder what you gonna do next!", "Way to go!", "Does this work for you?", "Something like this?"]
    #Resizing the image for better display
    # Embed the saved file in the bot reply, scaled down via inline CSS.
    response = random.choice(add_text_list) + '<img src="/file=edited_image.png" style="width: 200px; height: 200px;">'
    history.append((message, response))
    return history, history
|
| 40 |
+
|
| 41 |
+
# UI: an image upload, an instruction box, hidden chat state, an action
# button, and a chatbot that shows each instruction with its edited result.
# Component definition order is preserved — Blocks renders in that order.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            image_in = gr.Image(type='pil', label="Original Image")
            text_in = gr.Textbox()
            state_in = gr.State()
            edit_button = gr.Button('Edit the image!')
            chatbot = gr.Chatbot()
            # Wire the button to chat(): inputs are the upload, the
            # instruction, and the running history; outputs refresh the
            # chatbot display and the stored history.
            edit_button.click(chat, [image_in, text_in, state_in], [chatbot, state_in])
|
| 50 |
+
|
| 51 |
+
#demo.queue()
# Start the Gradio server; debug=True surfaces tracebacks in the console.
# NOTE(review): width/height here only affect inline (notebook/iframe)
# rendering in older Gradio versions — confirm they are still accepted.
demo.launch(debug=True, width="80%", height=1500)
|