Upload fusion_t2i_CLIP_interrogator.ipynb
Google Colab Notebooks/fusion_t2i_CLIP_interrogator.ipynb
CHANGED
@@ -47,9 +47,12 @@
 "\n",
 "def fix_bad_symbols(txt):\n",
 " result = txt\n",
-" for symbol in ['
+" for symbol in ['}', '{' , ')', '(', '[' , ']' , ':' , '=' , '^']:\n",
 " result = result.replace(symbol,'\\\\' + symbol)\n",
+" for symbol in ['^']:\n",
+" result = result.replace(symbol,'')\n",
 " #------#\n",
+" result = result.replace('\\\\|','|')\n",
 " return result;\n",
 "\n",
 "\n",
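The updated fix_bad_symbols escapes a wider set of symbols, drops '^' entirely, and un-escapes '|' so it can still act as a separator. A minimal standalone sketch of that behaviour (illustrative only; escape_prompt_symbols is a hypothetical name, not the notebook's function):

def escape_prompt_symbols(txt):
    # Backslash-escape characters that prompt parsers treat as syntax
    for symbol in ['}', '{', ')', '(', '[', ']', ':', '=']:
        txt = txt.replace(symbol, '\\' + symbol)
    txt = txt.replace('^', '')      # drop '^' entirely
    txt = txt.replace('\\|', '|')   # keep '|' usable as a plain separator
    return txt

print(escape_prompt_symbols('a (photo) of [a cat]^'))  # -> a \(photo\) of \[a cat\]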
@@ -390,19 +393,27 @@
 "# @markdown 🖼️ Choose a pre-encoded reference\n",
 "index = 708 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
 "PROMPT_INDEX = index\n",
-"
-"
+"\n",
+"import math\n",
 "# @markdown -----------\n",
-"# @markdown
-"POS = '' # @param {type:'string'}\n",
-"
-"pos_strength =
+"# @markdown 📈➕ Enhance similarity to prompt(s)\n",
+"POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
+"log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+"pos_strength = math.pow(10 ,log_strength-1)\n",
 "# @markdown -----------\n",
 "\n",
-"# @markdown 🚫 Penalize similarity to
-"NEG = '' # @param {type:'string'}\n",
-"
-"neg_strength =
+"# @markdown 🚫 Penalize similarity to prompt(s)\n",
+"NEG = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
+"log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+"neg_strength = math.pow(10 ,log_strength-1)\n",
+"\n",
+"# @markdown ⏩ Skip item(s) containing the word\n",
+"SKIP = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
+"\n",
+"# @markdown ⚖️ sim_ref = C* text_encoding + image_encoding*(1-C) <br>\n",
+"C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
+"\n",
+"blacklist = SKIP\n",
 "# @markdown -----------\n",
 "# @title ⚙️📜 Print the results (Advanced)\n",
 "list_size = 1000 # param {type:'number'}\n",
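The new POS/NEG sliders work on a log scale: the chosen log_strength is turned into a multiplicative weight via math.pow(10, log_strength - 1), so the default of 1 leaves the extra similarity terms unscaled and each step of +1 multiplies them by ten. A quick illustrative check:

import math

for log_strength in (-5, 0, 1, 2, 5):
    strength = math.pow(10, log_strength - 1)
    print(f'log_strength={log_strength:>3} -> strength={strength:g}')
# log_strength=1 gives strength=1, i.e. the POS/NEG terms are added as-is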
@@ -436,6 +447,14 @@
 "enable = run_script\n",
 "\n",
 "\n",
+"\n",
+"def isBlacklisted(txt):\n",
+" if blacklist.strip() == '': return False\n",
+" for item in list(blacklist.split(',')):\n",
+" if txt.find(item.strip())> -1 : return True\n",
+" #------#\n",
+" return False\n",
+"\n",
 "# Load the data if not already loaded\n",
 "try:\n",
 " loaded2\n",
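isBlacklisted does a plain substring check against every comma-separated entry of the SKIP field. A self-contained sketch (here the blacklist is passed as a parameter instead of the notebook's global):

def is_blacklisted(txt, blacklist):
    # True if txt contains any comma-separated skip word
    if blacklist.strip() == '':
        return False
    return any(txt.find(item.strip()) > -1 for item in blacklist.split(','))

print(is_blacklisted('a watercolor painting of a dog', 'anime, watercolor'))  # True
print(is_blacklisted('a photo of a dog', 'anime, watercolor'))                # False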
@@ -471,28 +490,30 @@
 " # text-similarity\n",
 " sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
 "\n",
-"
+" # Calculate negatives\n",
+" neg_sims = {}\n",
+" neg_sims[f'{0}'] = 0*sims\n",
 " if(NEG != ''):\n",
-"\n",
-"
-"
-"
-"
-"\n",
-"
-" neg_sims = torch.matmul(text_tensor, text_features_NEG.t())\n",
+" _index = 0\n",
+" for _NEG in NEG.split(','):\n",
+" inputs = tokenizer(text = _NEG, truncation = True , padding=True, return_tensors=\"pt\")\n",
+" text_features_NEG = model.get_text_features(**inputs)\n",
+" text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
+" # text-similarity\n",
+" neg_sims[f'{_index}'] = torch.matmul(text_tensor, text_features_NEG.t())\n",
 " #------#\n",
 "\n",
-"
+" # Calculate positives\n",
+" pos_sims = {}\n",
+" pos_sims[f'{0}'] = 0*sims\n",
 " if(POS != ''):\n",
-"\n",
-"
-"
-"
-"
-"\n",
-"
-" pos_sims = torch.matmul(text_tensor, text_features_POS.t())\n",
+" _index = 0\n",
+" for _POS in POS.split(','):\n",
+" inputs = tokenizer(text = _POS, truncation = True , padding=True, return_tensors=\"pt\")\n",
+" text_features_POS = model.get_text_features(**inputs)\n",
+" text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
+" # text-similarity\n",
+" pos_sims[f'{_index}'] = torch.matmul(text_tensor, text_features_POS.t())\n",
 " #------#\n",
 "\n",
 " # plus image-similarity\n",
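Each comma-separated POS/NEG item is handled the same way in the new loops: tokenize, take CLIP text features, L2-normalize, and dot against the pre-encoded vocabulary tensor. A hedged sketch of that step using the Hugging Face CLIP API (the checkpoint name and the text_tensor shape are assumptions, not taken from the notebook):

import torch
from transformers import CLIPModel, CLIPTokenizer

checkpoint = 'openai/clip-vit-large-patch14'   # assumed; the notebook defines its own model/tokenizer
model = CLIPModel.from_pretrained(checkpoint)
tokenizer = CLIPTokenizer.from_pretrained(checkpoint)

def encode_text(prompt):
    # L2-normalized CLIP text embedding for a single prompt
    inputs = tokenizer(text=prompt, truncation=True, padding=True, return_tensors='pt')
    with torch.no_grad():
        feats = model.get_text_features(**inputs)
    return feats / feats.norm(p=2, dim=-1, keepdim=True)

# text_tensor: (N, 768) normalized vocabulary embeddings, already loaded by the notebook
# neg_sims_item = torch.matmul(text_tensor, encode_text('watercolor').t())   # (N, 1) similarities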
@@ -501,11 +522,14 @@
 "\n",
 "\n",
 " # plus POS-similarity\n",
-"
-"\n",
+" for key in pos_sims:\n",
+" sims = sims + pos_strength*pos_sims[key]\n",
+" #------#\n",
 "\n",
 " # minus NEG-similarity\n",
-"
+" for key in neg_sims:\n",
+" sims = sims - neg_strength*neg_sims[key]\n",
+" #-------#\n",
 "\n",
 "\n",
 " # Sort the items\n",
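Taken together, the ranking score now follows the formula advertised by the ⚖️ slider: C times the text similarity plus (1 - C) times the image similarity, then plus pos_strength times each POS term and minus neg_strength times each NEG term. An illustrative restatement with dummy tensors (shapes and the (1 - C) image weighting are assumptions read off the markdown, since that part of the cell is unchanged in this commit):

import torch

N = 5                                    # dummy vocabulary size
text_sims = torch.rand(N, 1)             # similarity to the reference text encoding
image_sims = torch.rand(N, 1)            # similarity to the reference image encoding
pos_sims = {'0': torch.rand(N, 1)}       # one entry per POS item
neg_sims = {'0': torch.rand(N, 1)}       # one entry per NEG item
C, pos_strength, neg_strength = 0.5, 1.0, 1.0

sims = C * text_sims + (1 - C) * image_sims
for key in pos_sims:
    sims = sims + pos_strength * pos_sims[key]
for key in neg_sims:
    sims = sims - neg_strength * neg_sims[key]

sorted_sims, indices = torch.sort(sims, dim=0, descending=True)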
@@ -518,13 +542,28 @@
 "\n",
 " _prompts = ''\n",
 " _sims = ''\n",
+" offset = 0\n",
 " for _index in range(start_at_index + RANGE):\n",
 " if _index < start_at_index : continue\n",
-" index = indices[_index].item()\n",
 "\n",
+" for iters in range(10000):\n",
+" found = True\n",
+" index = indices[_index + offset].item()\n",
+" if isBlacklisted(prompts[f'{index}'].lower()):\n",
+" offset = offset + 1\n",
+" found = False\n",
+" if (_index + offset)>NUM_VOCAB_ITEMS : found = True\n",
+" if found : break\n",
+" #-------#\n",
+"\n",
+" index = indices[_index + offset].item()\n",
 " prompt = prompts[f'{index}']\n",
+"\n",
 " if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
 "\n",
+"\n",
+" #---------#\n",
+"\n",
 " #Remove duplicates\n",
 " if _prompts.find(prompt + separator)<=-1:\n",
 " _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
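The new inner loop walks further down the sorted indices whenever a candidate prompt is blacklisted, carrying an offset so later iterations keep their place, and gives up once it runs past NUM_VOCAB_ITEMS. A simplified, bounds-checked sketch of the same idea (the notebook's globals are passed in as parameters here):

def skip_blacklisted(_index, offset, indices, prompts, is_blacklisted, num_vocab_items):
    # Advance offset past blacklisted prompts, starting at sorted position _index + offset
    while _index + offset < num_vocab_items:
        index = indices[_index + offset].item()
        if not is_blacklisted(prompts[f'{index}'].lower()):
            break
        offset += 1
    # If everything remaining was blacklisted, fall back to the last in-range item
    index = indices[min(_index + offset, num_vocab_items - 1)].item()
    return index, offset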
@@ -554,7 +593,10 @@
 " #-------#\n",
 " #-------#\n",
 "#-------#\n",
-"image\n"
+"image or print('No image found')\n",
+"\n",
+"\n",
+"#------#"
 ],
 "metadata": {
 "id": "XW3914T8O2uf"