Update app.py
app.py
CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, VisionEncoderDecoderModel
 import torch
+import open_clip
 
 torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
 torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
@@ -22,6 +23,11 @@ vitgpt_processor = AutoImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image
 vitgpt_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 vitgpt_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 
+coca_model, _, coca_transform = open_clip.create_model_and_transforms(
+    "coca_ViT-L-14",
+    pretrained="laion2B-s13B-b90k-mscoco-2014.pt"
+)
+
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 git_model_base.to(device)
@@ -29,6 +35,7 @@ blip_model_base.to(device)
 git_model_large.to(device)
 blip_model_large.to(device)
 vitgpt_model.to(device)
+coca_model.to(device)
 
 def generate_caption(processor, model, image, tokenizer=None):
     inputs = processor(images=image, return_tensors="pt").to(device)
@@ -43,6 +50,12 @@ def generate_caption(processor, model, image, tokenizer=None):
     return generated_caption
 
 
+def generate_caption_coca(model, transform, image):
+    im = transform(image).unsqueeze(0).to(device)
+    generated = model.generate(im, seq_len=20)
+    return open_clip.decode(generated[0].detach()).split("<end_of_text>")[0].replace("<start_of_text>", "")
+
+
 def generate_captions(image):
     caption_git_base = generate_caption(git_processor_base, git_model_base, image)
 
@@ -54,11 +67,13 @@ def generate_captions(image):
 
     caption_vitgpt = generate_caption(vitgpt_processor, vitgpt_model, image, vitgpt_tokenizer)
 
-    return caption_git_base, caption_git_large, caption_blip_base, caption_blip_large, caption_vitgpt
+    caption_coca = generate_caption_coca(coca_model, coca_transform, image)
+
+    return caption_git_base, caption_git_large, caption_blip_base, caption_blip_large, caption_vitgpt, caption_coca
 
 
 examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
-outputs = [gr.outputs.Textbox(label="Caption generated by GIT-base"), gr.outputs.Textbox(label="Caption generated by GIT-large"), gr.outputs.Textbox(label="Caption generated by BLIP-base"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by ViT+GPT-2")]
+outputs = [gr.outputs.Textbox(label="Caption generated by GIT-base"), gr.outputs.Textbox(label="Caption generated by GIT-large"), gr.outputs.Textbox(label="Caption generated by BLIP-base"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by ViT+GPT-2"), gr.outputs.Textbox(label="Caption generated by CoCa")]
 
 title = "Interactive demo: comparing image captioning models"
 description = "Gradio Demo to compare GIT, BLIP and ViT+GPT2, 3 state-of-the-art vision+language models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."
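
The diff ends before the part of app.py that builds the Gradio UI from generate_captions, outputs, examples, title, and description. A minimal sketch of how that wiring typically looks with the legacy gr.inputs/gr.outputs API already used above; the Image input type and the launch() call are assumptions, not part of this commit:

interface = gr.Interface(
    fn=generate_captions,                # one caption string per model, in the same order as `outputs`
    inputs=gr.inputs.Image(type="pil"),  # assumed: PIL input, matching what the HF processors and coca_transform expect
    outputs=outputs,
    examples=examples,
    title=title,
    description=description,
)
interface.launch()  # assumed entry point; Spaces runs app.py and serves the launched interface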
|