Spaces: Running on Zero
update concept map
app.py CHANGED
@@ -32,9 +32,23 @@ ip_model = IPAdapterXL(pipe, image_encoder_repo, image_encoder_subfolder, ip_ckp
 clip_model, _, preprocess = open_clip.create_model_and_transforms('hf-hub:laion/CLIP-ViT-H-14-laion2B-s32B-b79K')
 clip_model.to(device)

-
-
-
+CONCEPTS_MAP={
+    "age": "age_descriptions.npy",
+    "animal_fur": "fur_descriptions.npy",
+    "dogs": "dog_descriptions.npy",
+    "emotions": "emotion_descriptions.npy",
+    "flowers": "flower_descriptions.npy",
+    "fruit/vegtable": "fruit_vegetable_descriptions.npy",
+    "outfit type": "outfit_descriptions.npy",
+    "outfit pattern (including color)": "outfit_pattern_descriptions.npy",
+    "patterns": "pattern_descriptions.npy",
+    "patterns (including color)": "pattern_descriptions_with_colors.npy",
+    "vehicle": "vehicle_descriptions.npy",
+    "daytime": "times_of_day_descriptions.npy",
+    "pose": "person_poses_descriptions.npy",
+    "season": "season_descriptions.npy",
+    "material": "material_descriptions_with_gems.npy"
+}
 concept_options = list(CONCEPTS_MAP.keys())

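For context: CONCEPTS_MAP maps each user-facing concept name to a precomputed bank of CLIP text embeddings stored as a .npy file, and its keys populate the concept dropdown via concept_options. A minimal sketch of resolving a chosen concept to its embedding array, assuming the files live under ./IP_Composer/text_embeddings/ as in the second hunk (load_concept_embeds is a hypothetical helper, not part of the Space's code):

import numpy as np

EMBEDS_DIR = "./IP_Composer/text_embeddings"  # directory used in the second hunk

def load_concept_embeds(concept_name: str) -> np.ndarray:
    # Hypothetical helper: look up the .npy filename for a concept via the
    # CONCEPTS_MAP defined in the hunk above, then load the embedding bank.
    filename = CONCEPTS_MAP[concept_name]  # e.g. "season" -> "season_descriptions.npy"
    with open(f"{EMBEDS_DIR}/{filename}", "rb") as f:
        return np.load(f)

# Usage: embeds = load_concept_embeds("season")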
@@ -104,7 +118,7 @@ def process_images(
     for i, concept_name in enumerate(concept_descriptions):
         img_pil = Image.fromarray(concept_images[i]).convert("RGB")
         concept_embeds.append(get_image_embeds(img_pil, clip_model, preprocess, device))
-        embeds_path = f"./IP_Composer/text_embeddings/{concept_name}
+        embeds_path = f"./IP_Composer/text_embeddings/{concept_name}"
         with open(embeds_path, "rb") as f:
            all_embeds_in = np.load(f)

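The get_image_embeds call in this hunk is not shown in the diff. A minimal sketch of what such a helper might do with the open_clip model loaded above, assuming the usual CLIP encode-and-normalize pattern (this is an assumed reconstruction, not the Space's actual implementation):

import torch

def get_image_embeds(img_pil, clip_model, preprocess, device):
    # Assumed reconstruction for illustration only.
    image = preprocess(img_pil).unsqueeze(0).to(device)  # PIL image -> preprocessed batch of 1
    with torch.no_grad():
        embeds = clip_model.encode_image(image)          # CLIP image features
    embeds = embeds / embeds.norm(dim=-1, keepdim=True)  # unit-normalize, as is typical for CLIP
    return embeds.cpu().numpy()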