Add SetFit model
- 1_Pooling/config.json +7 -0
- README.md +49 -0
- config.json +24 -0
- config_sentence_transformers.json +7 -0
- model_head.pkl +3 -0
- modules.json +14 -0
- pytorch_model.bin +3 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +15 -0
- tokenizer.json +0 -0
- tokenizer_config.json +66 -0
- vocab.txt +0 -0
    	
1_Pooling/config.json
ADDED

@@ -0,0 +1,7 @@
+{
+  "word_embedding_dimension": 768,
+  "pooling_mode_cls_token": false,
+  "pooling_mode_mean_tokens": true,
+  "pooling_mode_max_tokens": false,
+  "pooling_mode_mean_sqrt_len_tokens": false
+}
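This config enables mean pooling only: the sentence embedding is the attention-mask-weighted average of the 768-dimensional token embeddings. A minimal PyTorch sketch of that operation (the function name is ours, for illustration; sentence-transformers implements this inside its `Pooling` module):

```python
import torch

# Mean pooling as configured above: average the token embeddings,
# counting only non-padding positions from the attention mask.
def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    mask = attention_mask.unsqueeze(-1).float()      # (batch, seq, 1)
    summed = (token_embeddings * mask).sum(dim=1)    # (batch, 768)
    counts = mask.sum(dim=1).clamp(min=1e-9)         # guard against empty masks
    return summed / counts
```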
    	
README.md
ADDED

@@ -0,0 +1,49 @@
+---
+license: apache-2.0
+tags:
+- setfit
+- sentence-transformers
+- text-classification
+pipeline_tag: text-classification
+---
+
+# mserras/setfit-alpaca-es-unprocessable-sample-detection
+
+This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:
+
+1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
+2. Training a classification head with features from the fine-tuned Sentence Transformer.
+
+## Usage
+
+To use this model for inference, first install the SetFit library:
+
+```bash
+python -m pip install setfit
+```
+
+You can then run inference as follows:
+
+```python
+from setfit import SetFitModel
+
+# Download from Hub and run inference
+model = SetFitModel.from_pretrained("mserras/setfit-alpaca-es-unprocessable-sample-detection")
+# Run inference
+preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"])
+```
+
+## BibTeX entry and citation info
+
+```bibtex
+@article{https://doi.org/10.48550/arxiv.2209.11055,
+  doi = {10.48550/ARXIV.2209.11055},
+  url = {https://arxiv.org/abs/2209.11055},
+  author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
+  keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
+  title = {Efficient Few-Shot Learning Without Prompts},
+  publisher = {arXiv},
+  year = {2022},
+  copyright = {Creative Commons Attribution 4.0 International}
+}
+```
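Beyond the SetFit usage shown in the README, the commit ships the full Sentence Transformer layout (modules.json, 1_Pooling/, pytorch_model.bin), so the fine-tuned body can presumably also be loaded as a plain sentence encoder; a hedged sketch:

```python
from sentence_transformers import SentenceTransformer

# Load only the embedding body (transformer + mean pooling), without the SetFit head.
encoder = SentenceTransformer("mserras/setfit-alpaca-es-unprocessable-sample-detection")
embeddings = encoder.encode(["i loved the spiderman movie!"])
print(embeddings.shape)  # expected (1, 768), given hidden_size=768 in config.json
```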
    	
config.json
ADDED

@@ -0,0 +1,24 @@
+{
+  "_name_or_path": "/home/mserras/.cache/torch/sentence_transformers/sentence-transformers_paraphrase-mpnet-base-v2/",
+  "architectures": [
+    "MPNetModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "mpnet",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "relative_attention_num_buckets": 32,
+  "torch_dtype": "float32",
+  "transformers_version": "4.27.4",
+  "vocab_size": 30527
+}
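Per `_name_or_path`, the body was initialized from `sentence-transformers/paraphrase-mpnet-base-v2`. A minimal sketch of inspecting the uploaded backbone with the standard `transformers` API (the printed values are read off this config, not verified at runtime):

```python
from transformers import AutoConfig, AutoModel

repo = "mserras/setfit-alpaca-es-unprocessable-sample-detection"
config = AutoConfig.from_pretrained(repo)
print(config.model_type, config.hidden_size)  # expected: mpnet 768
model = AutoModel.from_pretrained(repo)       # loads the MPNetModel weights from pytorch_model.bin
```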
    	
config_sentence_transformers.json
ADDED

@@ -0,0 +1,7 @@
+{
+  "__version__": {
+    "sentence_transformers": "2.0.0",
+    "transformers": "4.7.0",
+    "pytorch": "1.9.0+cu102"
+  }
+}
    	
model_head.pkl
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd2a56e6188f185801d1f2c5f46f7b5025faea5d86ecf5dabac25d6fa21dad3d
+size 6991
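At roughly 7 kB, this pickled head is consistent with SetFit's default scikit-learn `LogisticRegression` classifier. A hedged sketch of inspecting it (assumes this repo kept the default head; SetFit exposes the classifier as the `model_head` attribute):

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained("mserras/setfit-alpaca-es-unprocessable-sample-detection")
# If the default head was used, this prints a scikit-learn LogisticRegression.
print(type(model.model_head))
```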
    	
modules.json
ADDED

@@ -0,0 +1,14 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "",
+    "type": "sentence_transformers.models.Transformer"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  }
+]
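modules.json declares a two-stage pipeline: a Transformer module at the repo root (idx 0) followed by the pooling module in `1_Pooling` (idx 1). A minimal sketch of assembling the equivalent pipeline by hand, with the base checkpoint taken from `_name_or_path` in config.json and the sequence length from sentence_bert_config.json:

```python
from sentence_transformers import SentenceTransformer, models

# Stage 0: transformer body; stage 1: mean pooling, mirroring modules.json.
word_model = models.Transformer("sentence-transformers/paraphrase-mpnet-base-v2", max_seq_length=512)
pooling = models.Pooling(word_model.get_word_embedding_dimension(), pooling_mode_mean_tokens=True)
encoder = SentenceTransformer(modules=[word_model, pooling])
```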
    	
pytorch_model.bin
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61c00ff0acfe54636a7982eba2f2c8e5abe795cca674ecde6565ca4767ff60ca
+size 438013677
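For scale: 438,013,677 bytes is roughly 438 MB, which matches about 109M float32 parameters at 4 bytes each, in line with an MPNet-base backbone.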
    	
sentence_bert_config.json
ADDED

@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 512,
+  "do_lower_case": false
+}
    	
special_tokens_map.json
ADDED

@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "[UNK]"
+}
    	
tokenizer.json
ADDED

The diff for this file is too large to render. See raw diff.
    	
tokenizer_config.json
ADDED

@@ -0,0 +1,66 @@
+{
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": {
+    "__type": "AddedToken",
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "special_tokens_map_file": null,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "MPNetTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
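A minimal sketch of loading this tokenizer and checking the configured special tokens and length limit, using the standard `AutoTokenizer` API (the concrete class may resolve to the fast variant):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mserras/setfit-alpaca-es-unprocessable-sample-detection")
print(tok.model_max_length)                          # 512
print(tok.cls_token, tok.sep_token, tok.mask_token)  # <s> </s> <mask>
enc = tok("pineapple on pizza is the worst", truncation=True)
print(enc["input_ids"])
```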
    	
vocab.txt
ADDED

The diff for this file is too large to render. See raw diff.

