Update app.py
app.py CHANGED
@@ -30,7 +30,11 @@ ip_model = IPAdapterXL(pipe, image_encoder_repo, image_encoder_subfolder, ip_ckp
 # Initialize CLIP model
 clip_model, _, preprocess = open_clip.create_model_and_transforms('hf-hub:laion/CLIP-ViT-H-14-laion2B-s32B-b79K')
 clip_model.to(device)
-
+
+
+CONCEPTS_MAP = {'age':'age','animal fur':'animal_fur', 'deterioration': 'deterioration', 'dogs':'dog', 'emotion':'emotion', 'floor':'floor', 'flowers':'flower', 'fruit/vegtebale':'fruit_vegtebale', 'fur':'fur', 'furniture':'furniture', 'lens':'lens', 'outfit':'outfit', 'outfit color':'outfit_color', 'pattern':'pattern', 'texture':'texture', 'times of day':'times_of_day', 'tree':'tree', 'vehicle':'vehicle', 'vehicle color':'vehicle_color'}
+
+concept_options = list(CONCEPTS_MAP.keys())
 
 def get_image_embeds(pil_image, model=clip_model, preproc=preprocess, dev=device):
     """Get CLIP image embeddings for a given PIL image"""

@@ -61,19 +65,19 @@ def process_images(
     # for demo purposes we allow for up to 3 different concepts and corresponding concept images
     if concept_image1 is not None:
         concept_images.append(concept_image1)
-        concept_descriptions.append(concept_name1)
+        concept_descriptions.append(CONCEPTS_MAP[concept_name1])
     else:
         return None, "Please upload at least one concept image"
 
     # Add second concept (optional)
     if concept_image2 is not None:
         concept_images.append(concept_image2)
-        concept_descriptions.append(concept_name2)
+        concept_descriptions.append(CONCEPTS_MAP[concept_name2])
 
     # Add third concept (optional)
     if concept_image3 is not None:
         concept_images.append(concept_image3)
-        concept_descriptions.append(concept_name3)
+        concept_descriptions.append(CONCEPTS_MAP[concept_name3])
 
     # Get all ranks
     ranks = [rank1]

@@ -150,6 +154,8 @@ def process_and_display(
 
     return modified_images
 
+
+
 with gr.Blocks(title="Image Concept Composition") as demo:
     gr.Markdown("# IP Composer")
     gr.Markdown("")

@@ -162,21 +168,21 @@ with gr.Blocks(title="Image Concept Composition") as demo:
            with gr.Column(scale=2):
                concept_image1 = gr.Image(label="Concept Image 1 (Required)", type="numpy")
                with gr.Row():
-                   concept_name1 = gr.
+                   concept_name1 = gr.Dropdown(concept_options, label="concept 1", value=None, info="concept type")
                    rank1 = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="Rank 1")
 
        with gr.Row():
            with gr.Column(scale=2):
                concept_image2 = gr.Image(label="Concept Image 2 (Optional)", type="numpy")
                with gr.Row():
-                   concept_name2 = gr.
+                   concept_name2 = gr.Dropdown(concept_options, label="concept 2", value=None, info="concept type")
                    rank2 = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="Rank 2")
 
        with gr.Row():
            with gr.Column(scale=2):
                concept_image3 = gr.Image(label="Concept Image 3 (Optional)", type="numpy")
                with gr.Row():
-                   concept_name3 = gr.
+                   concept_name3 = gr.Dropdown(concept_options, label="concept 3", value= None, info="concept type")
                    rank3 = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="Rank 3")
 
        prompt = gr.Textbox(label="Guidance Prompt (Optional)", placeholder="Optional text prompt to guide generation")
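
Note: the hunks above show only the signature and docstring of get_image_embeds; its body is not part of this commit. For readers unfamiliar with open_clip, a minimal sketch of what a helper with this exact signature typically looks like (the body below is an assumption, not the Space's actual code):

import torch

def get_image_embeds(pil_image, model=clip_model, preproc=preprocess, dev=device):
    """Get CLIP image embeddings for a given PIL image"""
    # Apply the open_clip preprocessing transform, add a batch dim, move to device
    image = preproc(pil_image).unsqueeze(0).to(dev)
    # Encode without tracking gradients; encode_image is the standard open_clip call
    with torch.no_grad():
        embeds = model.encode_image(image)
    return embeds.cpu().numpy()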
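
One thing worth flagging in the new lookup code: the dropdowns are created with value=None, so if a user uploads concept image 2 or 3 without selecting a concept type, CONCEPTS_MAP[concept_name2] raises KeyError: None. A hypothetical guard (not part of this commit) could fail gracefully instead, mirroring the existing error-return style:

# Hypothetical guard, not in this commit: validate the dropdown before the lookup.
if concept_image2 is not None:
    if concept_name2 is None:
        return None, "Please select a concept type for concept image 2"
    concept_images.append(concept_image2)
    concept_descriptions.append(CONCEPTS_MAP[concept_name2])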