Upload sd_token_similarity_calculator.ipynb
Google Colab Notebooks/sd_token_similarity_calculator.ipynb
CHANGED
@@ -271,7 +271,7 @@
 {
 "cell_type": "code",
 "source": [
-"# @title
+"# @title 📝🚫 Penalize similarity to Prompt text_encoding (optional)\n",
 "neg_prompt = \"\" # @param {\"type\":\"string\",\"placeholder\":\"Write something to avoid\"}\n",
 "\n",
 "neg_strength = 1 # @param {type:\"slider\", min:0, max:5, step:0.01}\n",
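The renamed cell exposes a neg_prompt text field and a neg_strength slider (0-5). As a rough illustration of what such a penalty could look like, here is a hypothetical Python sketch that subtracts neg_strength times the cosine similarity to the negative-prompt encoding from each candidate score; the mechanism and the names sims, candidate_features and neg_features are assumptions, not taken from the notebook:

import torch
import torch.nn.functional as F

def penalize_scores(sims: torch.Tensor,
                    candidate_features: torch.Tensor,
                    neg_features: torch.Tensor,
                    neg_strength: float = 1.0) -> torch.Tensor:
    # Hypothetical: lower the score of candidates that resemble the negative prompt.
    # candidate_features: (N, D) CLIP text encodings, neg_features: (1, D).
    neg_sims = F.cosine_similarity(candidate_features, neg_features, dim=-1)
    return sims - neg_strength * neg_sims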
@@ -377,8 +377,6 @@
 "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
 "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
 "\n",
-"if image_NEG != \"\":\n",
-"\n",
 "# Get text features for user input\n",
 "inputs = tokenizer(text = prompt, padding=True, return_tensors=\"pt\")\n",
 "text_features_A = model.get_text_features(**inputs)\n",
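The context lines around this hunk load openai/clip-vit-large-patch14 and call model.get_text_features on the tokenized prompt. A minimal self-contained sketch of that encoding step (the prompt string is only an example; the notebook's own prompt variable comes from an earlier cell):

import torch
from transformers import CLIPModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")

prompt = "a photo of a cat"  # example input
inputs = tokenizer(text=prompt, padding=True, return_tensors="pt")
with torch.no_grad():
    text_features_A = model.get_text_features(**inputs)  # (1, 768) text projection

print(text_features_A.shape)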
@@ -519,13 +517,14 @@
 "\n",
 "if(not compact_Output):\n",
 " if(print_Descriptions):\n",
-" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n '
+" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ')\n",
+" for i in range(N) : print(__prompts)\n",
 " print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
 " print('')\n",
 " else:\n",
-" print(__prompts)\n",
+" for i in range(N) : print(__prompts)\n",
 "else:\n",
-" print(__prompts)\n",
+" for i in range(N) : print(__prompts)\n",
 "#-------#\n",
 "\n",
 "\n",
@@ -704,6 +703,81 @@
 "execution_count": null,
 "outputs": []
 },
+{
+"cell_type": "code",
+"source": [
+"# @title ⚙️🖼️ Print the results (Advanced)\n",
+"list_size = 1000 # @param {type:'number'}\n",
+"start_at_index = 0 # @param {type:'number'}\n",
+"print_Similarity = True # @param {type:\"boolean\"}\n",
+"print_Prompts = True # @param {type:\"boolean\"}\n",
+"print_Prefix = True # @param {type:\"boolean\"}\n",
+"print_Descriptions = True # @param {type:\"boolean\"}\n",
+"compact_Output = True # @param {type:\"boolean\"}\n",
+"newline_Separator = True # @param {type:\"boolean\"}\n",
+"\n",
+"\n",
+"import random\n",
+"# @markdown -----------\n",
+"# @markdown Mix with...\n",
+"list_size2 = 1000 # @param {type:'number'}\n",
+"start_at_index2 = 10000 # @param {type:'number'}\n",
+"rate_percent = 50 # @param {type:\"slider\", min:0, max:100, step:1}\n",
+"\n",
+"# @markdown -----------\n",
+"# @markdown Repeat output N times\n",
+"\n",
+"N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
+"\n",
+"# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
+"RANGE = list_size\n",
+"separator = '|'\n",
+"if newline_Separator : separator = separator + '\\n'\n",
+"\n",
+"_prompts = '{'\n",
+"_sims = '{'\n",
+"for _index in range(start_at_index + RANGE):\n",
+" if _index < start_at_index : continue\n",
+" index = indices[_index]\n",
+"\n",
+" prompt = prompts[f'{index}']\n",
+" if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
+"\n",
+" #Remove duplicates\n",
+" if _prompts.find(prompt + separator)<=-1:\n",
+" _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
+" #-------#\n",
+" _prompts = _prompts.replace(prompt + separator,'')\n",
+" _prompts = _prompts + prompt + separator\n",
+" #------#\n",
+"#------#\n",
+"__prompts = (_prompts + '}').replace(separator + '}', '}')\n",
+"__sims = (_sims + '}').replace(separator + '}', '}')\n",
+"#------#\n",
+"\n",
+"if(not print_Prompts): __prompts = ''\n",
+"if(not print_Similarity): __sims = ''\n",
+"\n",
+"if(not compact_Output):\n",
+" if(print_Descriptions):\n",
+" print(f'The {start_at_index}-{start_at_index + RANGE} most similiar items to prompt : \\n\\n ')\n",
+" for i in range(N) : print(__prompts)\n",
+" print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
+" print('')\n",
+" else:\n",
+" for i in range(N) : print(__prompts)\n",
+"else:\n",
+" for i in range(N) : print(__prompts)\n",
+"#-------#\n",
+"\n",
+"\n"
+],
+"metadata": {
+"id": "6FEmV02tArrh"
+},
+"execution_count": null,
+"outputs": []
+},
 {
 "cell_type": "code",
 "source": [
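The added ⚙️🖼️ cell walks the ranked indices, swaps in prompts from a second index range with probability rate_percent, drops duplicates, and prints the braced result N times. A stand-alone sketch of that mixing loop; the prompts dict, sims and indices here are toy placeholders for the data built earlier in the notebook:

import random
import torch

# Toy stand-ins for the notebook's data (assumptions, not the real values):
prompts = {f'{i}': f'prompt_{i}' for i in range(20000)}   # index -> text
sims = torch.rand(20000)                                   # similarity per item
indices = torch.argsort(sims, descending=True)             # ranked indices

list_size, start_at_index = 1000, 0          # primary range
list_size2, start_at_index2 = 1000, 10000    # "Mix with..." range
rate_percent = 50                            # chance to swap in a mixed item
N = 6                                        # repeat output N times
separator = '|\n'                            # newline_Separator = True

_prompts, _sims = '{', '{'
for _index in range(start_at_index, start_at_index + list_size):
    index = indices[_index]
    prompt = prompts[f'{index}']
    # With rate_percent probability, replace by an item from the second range
    if rate_percent >= random.randint(0, 100):
        prompt = prompts[f'{random.randint(start_at_index2, start_at_index2 + list_size2)}']
    # Record the similarity only the first time a prompt appears
    if _prompts.find(prompt + separator) <= -1:
        _sims += f'{round(100 * sims[index].item(), 2)} %' + separator
    # Keep only the last occurrence of a duplicate prompt
    _prompts = _prompts.replace(prompt + separator, '')
    _prompts += prompt + separator

__prompts = (_prompts + '}').replace(separator + '}', '}')
__sims = (_sims + '}').replace(separator + '}', '}')

for _ in range(N):
    print(__prompts)
print(__sims)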