davda54 committed
Commit 55c5488 · 1 Parent(s): cf1dcef

upload

Browse files:
- config.json +27 -0
- configuration_ltgbert.py +107 -0
- modeling_ltgbert.py +827 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +9 -0
- tokenizer.json +0 -0
- tokenizer_config.json +4 -0
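This commit adds everything a custom-code checkpoint needs: the weights (pytorch_model.bin), the tokenizer files, and the two Python modules that implement the architecture. A minimal sketch of fetching the whole snapshot with huggingface_hub; the repo id is an assumption (taken from the _CHECKPOINT_FOR_DOC constant in modeling_ltgbert.py below) and should be replaced with the repository this commit actually lives in:

from huggingface_hub import snapshot_download

# Assumed repo id; substitute the actual repository of this commit.
local_dir = snapshot_download(repo_id="ltg/bnc-bert-span")
print(local_dir)  # local folder containing config.json, modeling_ltgbert.py, pytorch_model.bin, ...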
 
    	
config.json ADDED
@@ -0,0 +1,27 @@
{
  "architectures": [
    "LtgBertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "auto_map": {
    "AutoConfig": "configuration_ltgbert.LtgBertConfig",
    "AutoModel": "modeling_ltgbert.LtgBertModel",
    "AutoModelForMaskedLM": "modeling_ltgbert.LtgBertForMaskedLM",
    "AutoModelForSequenceClassification": "modeling_ltgbert.LtgBertForSequenceClassification"
  },
  "classifier_dropout": 0.2,
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "intermediate_size": 2048,
  "layer_norm_eps": 1e-07,
  "max_position_embeddings": 512,
  "model_type": "ltgbert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_all_encoded_layers": true,
  "pad_token_id": 4,
  "position_bucket_size": 32,
  "torch_dtype": "float32",
  "transformers_version": "4.26.0",
  "vocab_size": 16384
}
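Because config.json wires the Auto classes to the bundled modules through "auto_map", loading this checkpoint runs the custom code path. A minimal usage sketch, assuming the files are published under a Hub repo id such as "ltg/bnc-bert-span" (an assumption, not stated in this commit); trust_remote_code=True lets transformers import configuration_ltgbert.py and modeling_ltgbert.py:

from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer

repo_id = "ltg/bnc-bert-span"  # assumed repo id, replace with the actual one
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)            # resolves to LtgBertConfig via auto_map
model = AutoModelForMaskedLM.from_pretrained(repo_id, trust_remote_code=True)   # resolves to LtgBertForMaskedLM via auto_map
tokenizer = AutoTokenizer.from_pretrained(repo_id)

batch = tokenizer("Hello world", return_tensors="pt")
logits = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"]).logits
print(logits.shape)  # last dimension is vocab_size = 16384 from the config above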
    	
configuration_ltgbert.py ADDED
@@ -0,0 +1,107 @@
# coding=utf-8
# Copyright 2023 Language Technology Group from University of Oslo and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" LTG-BERT configuration """


from transformers.configuration_utils import PretrainedConfig


LTG_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bnc-bert-span": "https://huggingface.co/ltg/bnc-bert-span",
    "bnc-bert-span-2x": "https://huggingface.co/ltg/bnc-bert-span-2x",
    "bnc-bert-span-0.5x": "https://huggingface.co/ltg/bnc-bert-span-0.5x",
    "bnc-bert-span-0.25x": "https://huggingface.co/ltg/bnc-bert-span-0.25x",
    "bnc-bert-span-order": "https://huggingface.co/ltg/bnc-bert-span-order",
    "bnc-bert-span-document": "https://huggingface.co/ltg/bnc-bert-span-document",
    "bnc-bert-span-word": "https://huggingface.co/ltg/bnc-bert-span-word",
    "bnc-bert-span-subword": "https://huggingface.co/ltg/bnc-bert-span-subword",

    "norbert3-xs": "https://huggingface.co/ltg/norbert3-xs/config.json",
    "norbert3-small": "https://huggingface.co/ltg/norbert3-small/config.json",
    "norbert3-base": "https://huggingface.co/ltg/norbert3-base/config.json",
    "norbert3-large": "https://huggingface.co/ltg/norbert3-large/config.json",

    "norbert3-oversampled-base": "https://huggingface.co/ltg/norbert3-oversampled-base/config.json",
    "norbert3-ncc-base": "https://huggingface.co/ltg/norbert3-ncc-base/config.json",
    "norbert3-nak-base": "https://huggingface.co/ltg/norbert3-nak-base/config.json",
    "norbert3-nb-base": "https://huggingface.co/ltg/norbert3-nb-base/config.json",
    "norbert3-wiki-base": "https://huggingface.co/ltg/norbert3-wiki-base/config.json",
    "norbert3-c4-base": "https://huggingface.co/ltg/norbert3-c4-base/config.json"
}


class LtgBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LtgBertModel`]. It is used to
    instantiate an LTG-BERT model according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 16384):
            Vocabulary size of the LTG-BERT model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`LtgBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        layer_norm_eps (`float`, *optional*, defaults to 1e-7):
            The epsilon used by the layer normalization layers.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
    """
    model_type = "ltgbert"

    def __init__(
        self,
        vocab_size=16384,
        attention_probs_dropout_prob=0.1,
        hidden_dropout_prob=0.1,
        hidden_size=768,
        intermediate_size=2048,
        max_position_embeddings=512,
        position_bucket_size=32,
        num_attention_heads=12,
        num_hidden_layers=12,
        layer_norm_eps=1.0e-7,
        pad_token_id=4,
        output_all_encoded_layers=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.output_all_encoded_layers = output_all_encoded_layers
        self.position_bucket_size = position_bucket_size
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
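As a quick sanity check (not part of the commit), instantiating LtgBertConfig with no arguments reproduces the values stored in config.json above, and individual fields can be overridden like any other PretrainedConfig:

from configuration_ltgbert import LtgBertConfig  # assumes the module above is importable

config = LtgBertConfig()  # defaults: vocab 16384, hidden 768, 12 layers, 12 heads, eps 1e-7
print(config.vocab_size, config.hidden_size, config.num_hidden_layers, config.layer_norm_eps)

small = LtgBertConfig(hidden_size=384, num_hidden_layers=6, intermediate_size=1024)
print(small.to_json_string())  # JSON serialization inherited from PretrainedConfig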
    	
modeling_ltgbert.py ADDED
@@ -0,0 +1,827 @@
# coding=utf-8
# Copyright 2023 Language Technology Group from University of Oslo and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" PyTorch LTG-BERT model."""


import math
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils import checkpoint

from .configuration_ltgbert import LtgBertConfig
from transformers.modeling_utils import PreTrainedModel
from transformers.activations import gelu_new
from transformers.modeling_outputs import (
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
    BaseModelOutput
)
from transformers.pytorch_utils import softmax_backward_data
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward


_CHECKPOINT_FOR_DOC = "ltg/bnc-bert-span"
_CONFIG_FOR_DOC = "LtgBertConfig"


LTG_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "bnc-bert-span",
    "bnc-bert-span-2x",
    "bnc-bert-span-0.5x",
    "bnc-bert-span-0.25x",
    "bnc-bert-span-order",
    "bnc-bert-span-document",
    "bnc-bert-span-word",
    "bnc-bert-span-subword",

    "norbert3-xs",
    "norbert3-small",
    "norbert3-base",
    "norbert3-large",

    "norbert3-oversampled-base",
    "norbert3-ncc-base",
    "norbert3-nak-base",
    "norbert3-nb-base",
    "norbert3-wiki-base",
    "norbert3-c4-base"
]


class Encoder(nn.Module):
    def __init__(self, config, activation_checkpointing=False):
        super().__init__()
        self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.num_hidden_layers)])

        for i, layer in enumerate(self.layers):
            layer.mlp.mlp[1].weight.data *= math.sqrt(1.0 / (2.0 * (1 + i)))
            layer.mlp.mlp[-2].weight.data *= math.sqrt(1.0 / (2.0 * (1 + i)))

        self.activation_checkpointing = activation_checkpointing

    def forward(self, hidden_states, attention_mask, relative_embedding):
        hidden_states, attention_probs = [hidden_states], []

        for layer in self.layers:
            if self.activation_checkpointing:
                hidden_state, attention_p = checkpoint.checkpoint(layer, hidden_states[-1], attention_mask, relative_embedding)
            else:
                hidden_state, attention_p = layer(hidden_states[-1], attention_mask, relative_embedding)

            hidden_states.append(hidden_state)
            attention_probs.append(attention_p)

        return hidden_states, attention_probs


class MaskClassifier(nn.Module):
    def __init__(self, config, subword_embedding):
        super().__init__()
        self.nonlinearity = nn.Sequential(
            nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False),
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.GELU(),
            nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False),
            nn.Dropout(config.hidden_dropout_prob),
            nn.Linear(subword_embedding.size(1), subword_embedding.size(0))
        )
        self.initialize(config.hidden_size, subword_embedding)

    def initialize(self, hidden_size, embedding):
        std = math.sqrt(2.0 / (5.0 * hidden_size))
        nn.init.trunc_normal_(self.nonlinearity[1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
        self.nonlinearity[-1].weight = embedding
        self.nonlinearity[1].bias.data.zero_()
        self.nonlinearity[-1].bias.data.zero_()

    def forward(self, x, masked_lm_labels=None):
        if masked_lm_labels is not None:
            x = torch.index_select(x.flatten(0, 1), 0, torch.nonzero(masked_lm_labels.flatten() != -100).squeeze())
        x = self.nonlinearity(x)
        return x


class EncoderLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attention = Attention(config)
        self.cross_attention = DummyCrossAttention(config)
        self.mlp = FeedForward(config)

    def forward(self, x, padding_mask, relative_embedding):
        attention_output, attention_probs = self.attention(x, padding_mask, relative_embedding)
        x = x + attention_output
        x = x + self.cross_attention(x)
        x = x + self.mlp(x)
        return x, attention_probs


class GeGLU(nn.Module):
    def forward(self, x):
        x, gate = x.chunk(2, dim=-1)
        x = x * gelu_new(gate)
        return x


class FeedForward(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=False),
            nn.Linear(config.hidden_size, 2*config.intermediate_size, bias=False),
            GeGLU(),
            nn.LayerNorm(config.intermediate_size, eps=config.layer_norm_eps, elementwise_affine=False),
            nn.Linear(config.intermediate_size, config.hidden_size, bias=False),
            nn.Dropout(config.hidden_dropout_prob)
        )
        self.initialize(config.hidden_size)

    def initialize(self, hidden_size):
        std = math.sqrt(2.0 / (5.0 * hidden_size))
        nn.init.trunc_normal_(self.mlp[1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
        nn.init.trunc_normal_(self.mlp[-2].weight, mean=0.0, std=std, a=-2*std, b=2*std)

    def forward(self, x):
        return self.mlp(x)


class MaskedSoftmax(torch.autograd.Function):
    @staticmethod
    def forward(self, x, mask, dim):
        self.dim = dim
        x.masked_fill_(mask, float('-inf'))
        x = torch.softmax(x, self.dim)
        x.masked_fill_(mask, 0.0)
        self.save_for_backward(x)
        return x

    @staticmethod
    def backward(self, grad_output):
        output, = self.saved_tensors
        input_grad = softmax_backward_data(self, grad_output, output, self.dim, output)
        return input_grad, None, None


class Attention(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.config = config

        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}")

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_size = config.hidden_size // config.num_attention_heads

        self.in_proj_qk = nn.Linear(config.hidden_size, 2*config.hidden_size, bias=True)
        self.in_proj_v = nn.Linear(config.hidden_size, config.hidden_size, bias=True)
        self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True)

        self.pre_layer_norm = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False)
        self.post_layer_norm = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)

        position_indices = torch.arange(config.max_position_embeddings, dtype=torch.long).unsqueeze(1) \
            - torch.arange(config.max_position_embeddings, dtype=torch.long).unsqueeze(0)
        position_indices = self.make_log_bucket_position(position_indices, config.position_bucket_size, config.max_position_embeddings)
        position_indices = config.position_bucket_size - 1 + position_indices
        self.register_buffer("position_indices", position_indices, persistent=True)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.scale = 1.0 / math.sqrt(3 * self.head_size)
        self.initialize()

    def make_log_bucket_position(self, relative_pos, bucket_size, max_position):
        sign = torch.sign(relative_pos)
        mid = bucket_size // 2
        abs_pos = torch.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, torch.abs(relative_pos).clamp(max=max_position - 1))
        log_pos = torch.ceil(torch.log(abs_pos / mid) / math.log((max_position-1) / mid) * (mid - 1)).int() + mid
        bucket_pos = torch.where(abs_pos <= mid, relative_pos, log_pos * sign).long()
        return bucket_pos

    def initialize(self):
        std = math.sqrt(2.0 / (5.0 * self.hidden_size))
        nn.init.trunc_normal_(self.in_proj_qk.weight, mean=0.0, std=std, a=-2*std, b=2*std)
        nn.init.trunc_normal_(self.in_proj_v.weight, mean=0.0, std=std, a=-2*std, b=2*std)
        nn.init.trunc_normal_(self.out_proj.weight, mean=0.0, std=std, a=-2*std, b=2*std)
        self.in_proj_qk.bias.data.zero_()
        self.in_proj_v.bias.data.zero_()
        self.out_proj.bias.data.zero_()

    def compute_attention_scores(self, hidden_states, relative_embedding):
        key_len, batch_size, _ = hidden_states.size()
        query_len = key_len

        if self.position_indices.size(0) < query_len:
            position_indices = torch.arange(query_len, dtype=torch.long).unsqueeze(1) \
                - torch.arange(query_len, dtype=torch.long).unsqueeze(0)
            position_indices = self.make_log_bucket_position(position_indices, self.position_bucket_size, 512)
            position_indices = self.position_bucket_size - 1 + position_indices
            self.position_indices = position_indices.to(hidden_states.device)

        hidden_states = self.pre_layer_norm(hidden_states)

        query, key = self.in_proj_qk(hidden_states).chunk(2, dim=2)  # shape: [T, B, D]
        value = self.in_proj_v(hidden_states)  # shape: [T, B, D]

        query = query.reshape(query_len, batch_size * self.num_heads, self.head_size).transpose(0, 1)
        key = key.reshape(key_len, batch_size * self.num_heads, self.head_size).transpose(0, 1)
        value = value.view(key_len, batch_size * self.num_heads, self.head_size).transpose(0, 1)

        attention_scores = torch.bmm(query, key.transpose(1, 2) * self.scale)

        query_pos, key_pos = self.in_proj_qk(self.dropout(relative_embedding)).chunk(2, dim=-1)  # shape: [2T-1, D]
        query_pos = query_pos.view(-1, self.num_heads, self.head_size)  # shape: [2T-1, H, D]
        key_pos = key_pos.view(-1, self.num_heads, self.head_size)  # shape: [2T-1, H, D]

        query = query.view(batch_size, self.num_heads, query_len, self.head_size)
        key = key.view(batch_size, self.num_heads, query_len, self.head_size)

        attention_c_p = torch.einsum("bhqd,khd->bhqk", query, key_pos.squeeze(1) * self.scale)
        attention_p_c = torch.einsum("bhkd,qhd->bhqk", key * self.scale, query_pos.squeeze(1))

        position_indices = self.position_indices[:query_len, :key_len].expand(batch_size, self.num_heads, -1, -1)
        attention_c_p = attention_c_p.gather(3, position_indices)
        attention_p_c = attention_p_c.gather(2, position_indices)

        attention_scores = attention_scores.view(batch_size, self.num_heads, query_len, key_len)
        attention_scores.add_(attention_c_p)
        attention_scores.add_(attention_p_c)

        return attention_scores, value

    def compute_output(self, attention_probs, value):
        attention_probs = self.dropout(attention_probs)
        context = torch.bmm(attention_probs.flatten(0, 1), value)  # shape: [B*H, Q, D]
        context = context.transpose(0, 1).reshape(context.size(1), -1, self.hidden_size)  # shape: [Q, B, H*D]
        context = self.out_proj(context)
        context = self.post_layer_norm(context)
        context = self.dropout(context)
        return context
         
            +
             
     | 
| 282 | 
         
            +
                def forward(self, hidden_states, attention_mask, relative_embedding):
         
     | 
| 283 | 
         
            +
                    attention_scores, value = self.compute_attention_scores(hidden_states, relative_embedding)
         
     | 
| 284 | 
         
            +
                    attention_probs = MaskedSoftmax.apply(attention_scores, attention_mask, -1)
         
     | 
| 285 | 
         
            +
                    return self.compute_output(attention_probs, value), attention_probs.detach()
         
     | 
| 286 | 
         
            +
             
     | 
| 287 | 
         
            +
             
     | 
| 288 | 
         
            +
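
# Illustrative sketch (not used by the classes in this file): how the relative-position
# index matrix used by `compute_attention_scores` above is built before it gathers the
# content-to-position (attention_c_p) and position-to-content (attention_p_c) scores.
# In the attention module, the raw offsets are first compressed by
# `make_log_bucket_position` and then shifted by `position_bucket_size - 1`, so every
# entry becomes a valid row index into the table of 2 * position_bucket_size - 1
# relative-position embeddings. The helper below is hypothetical and simplified: it
# replaces the log-bucket compression with a plain clamp to keep the sketch short.
def _relative_position_index_sketch(seq_len: int, position_bucket_size: int) -> torch.Tensor:
    # offsets[i, j] = i - j, i.e. how far query position i is from key position j
    offsets = torch.arange(seq_len, dtype=torch.long).unsqueeze(1) \
        - torch.arange(seq_len, dtype=torch.long).unsqueeze(0)
    # the real module applies make_log_bucket_position(offsets, position_bucket_size, 512) here
    offsets = offsets.clamp(-(position_bucket_size - 1), position_bucket_size - 1)
    # shift into [0, 2 * position_bucket_size - 2] so the matrix can index the embedding table
    return position_bucket_size - 1 + offsets
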
class DummyCrossAttention(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.config = config
        self.hidden_size = config.hidden_size

        self.amputed_linear = nn.Linear(config.hidden_size, config.hidden_size, bias=True)

        self.pre_layer_norm = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False)
        self.post_layer_norm = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.initialize()

    def initialize(self):
        std = math.sqrt(2.0 / (5.0 * self.hidden_size))
        nn.init.trunc_normal_(self.amputed_linear.weight, mean=0.0, std=std, a=-2*std, b=2*std)
        nn.init.zeros_(self.amputed_linear.bias)

    def forward(self, q, *args, **kwargs):
        q = self.pre_layer_norm(q)
        q = self.amputed_linear(q)
        q = self.post_layer_norm(q)
        q = self.dropout(q)
        return q


class Embedding(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.word_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.word_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        self.relative_embedding = nn.Parameter(torch.empty(2 * config.position_bucket_size - 1, config.hidden_size))
        self.relative_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.initialize()

    def initialize(self):
        std = math.sqrt(2.0 / (5.0 * self.hidden_size))
        nn.init.trunc_normal_(self.relative_embedding, mean=0.0, std=std, a=-2*std, b=2*std)
        nn.init.trunc_normal_(self.word_embedding.weight, mean=0.0, std=std, a=-2*std, b=2*std)

    def forward(self, input_ids):
        word_embedding = self.dropout(self.word_layer_norm(self.word_embedding(input_ids)))
        relative_embeddings = self.relative_layer_norm(self.relative_embedding)
        return word_embedding, relative_embeddings

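
# Illustrative sketch of what `Embedding.forward` returns. The model transposes its
# inputs to time-first shape [sequence_length, batch_size] before calling it (see
# `LtgBertModel.get_contextualized_embeddings` below), so the word embeddings come
# back time-first as well, alongside the single shared table of
# 2 * position_bucket_size - 1 relative-position embeddings that every attention
# layer reuses. The helper name is hypothetical.
def _embedding_shapes_sketch(embedding: Embedding, input_ids: torch.Tensor):
    word_embedding, relative_embeddings = embedding(input_ids)
    # word_embedding:      [sequence_length, batch_size, hidden_size]
    # relative_embeddings: [2 * position_bucket_size - 1, hidden_size]
    return word_embedding.shape, relative_embeddings.shape
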
#
# HuggingFace wrappers
#

class LtgBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LtgBertConfig
    base_model_prefix = "bnc-bert"
    supports_gradient_checkpointing = True

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, Encoder):
            module.activation_checkpointing = value

    def _init_weights(self, _):
        pass  # everything is already initialized

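
# Usage sketch (illustrative): `supports_gradient_checkpointing = True` together with
# `_set_gradient_checkpointing` above lets the standard Hugging Face toggles flip
# `Encoder.activation_checkpointing`, trading extra forward computation for lower
# activation memory during fine-tuning. The helper name is hypothetical.
def _gradient_checkpointing_sketch(model: PreTrainedModel) -> None:
    model.gradient_checkpointing_enable()   # expected to set activation_checkpointing = True on the Encoder
    # ... run memory-heavy training steps here ...
    model.gradient_checkpointing_disable()  # restore the default behaviour
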
LTG_BERT_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)
    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.
    Parameters:
        config ([`LtgBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

LTG_BERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare LTG-BERT transformer outputting raw hidden-states without any specific head on top.",
    LTG_BERT_START_DOCSTRING,
)
class LtgBertModel(LtgBertPreTrainedModel):
    def __init__(self, config, add_mlm_layer=False):
        super().__init__(config)
        self.config = config

        self.embedding = Embedding(config)
        self.transformer = Encoder(config, activation_checkpointing=False)
        self.classifier = MaskClassifier(config, self.embedding.word_embedding.weight) if add_mlm_layer else None

    def get_input_embeddings(self):
        return self.embedding.word_embedding

    def set_input_embeddings(self, value):
        self.embedding.word_embedding = value

    def get_contextualized_embeddings(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None
    ) -> List[torch.Tensor]:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            raise ValueError("You have to specify input_ids")

        batch_size, seq_length = input_shape
        device = input_ids.device

        if attention_mask is None:
            attention_mask = torch.zeros(batch_size, seq_length, dtype=torch.bool, device=device)
        else:
            attention_mask = ~attention_mask.bool()
        attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        static_embeddings, relative_embedding = self.embedding(input_ids.t())
        contextualized_embeddings, attention_probs = self.transformer(static_embeddings, attention_mask, relative_embedding)
        contextualized_embeddings = [e.transpose(0, 1) for e in contextualized_embeddings]
        last_layer = contextualized_embeddings[-1]
        contextualized_embeddings = [contextualized_embeddings[0]] + [
            contextualized_embeddings[i] - contextualized_embeddings[i - 1]
            for i in range(1, len(contextualized_embeddings))
        ]
        return last_layer, contextualized_embeddings, attention_probs

    @add_start_docstrings_to_model_forward(LTG_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        token_type_ids = None
    ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask)

        if not return_dict:
            return (
                sequence_output,
                *([contextualized_embeddings] if output_hidden_states else []),
                *([attention_probs] if output_attentions else [])
            )

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=contextualized_embeddings if output_hidden_states else None,
            attentions=attention_probs if output_attentions else None
        )

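
# Usage sketch (illustrative, with a placeholder checkpoint path): running the bare
# encoder through the Auto classes with `trust_remote_code=True`, which lets
# transformers load this custom implementation. Note that the returned
# `hidden_states` list keeps the first layer as-is and stores every later entry as the
# difference between consecutive layers, as computed by
# `get_contextualized_embeddings` above.
def _bare_encoder_usage_sketch():
    from transformers import AutoModel, AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")                  # placeholder path
    model = AutoModel.from_pretrained("path/to/checkpoint", trust_remote_code=True)  # placeholder path
    encoding = tokenizer("Hello world!", return_tensors="pt")
    outputs = model(input_ids=encoding["input_ids"], attention_mask=encoding["attention_mask"])
    return outputs.last_hidden_state  # shape: [batch_size, sequence_length, hidden_size]
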
@add_start_docstrings("""LTG-BERT model with a `language modeling` head on top.""", LTG_BERT_START_DOCSTRING)
class LtgBertForMaskedLM(LtgBertModel):
    _keys_to_ignore_on_load_unexpected = ["head"]

    def __init__(self, config):
        super().__init__(config, add_mlm_layer=True)

    def get_output_embeddings(self):
        return self.classifier.nonlinearity[-1].weight

    def set_output_embeddings(self, new_embeddings):
        self.classifier.nonlinearity[-1].weight = new_embeddings

    @add_start_docstrings_to_model_forward(LTG_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
        token_type_ids = None
    ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask)
        subword_prediction = self.classifier(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            masked_lm_loss = F.cross_entropy(subword_prediction.flatten(0, 1), labels.flatten())

        if not return_dict:
            output = (
                subword_prediction,
                *([contextualized_embeddings] if output_hidden_states else []),
                *([attention_probs] if output_attentions else [])
            )
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=subword_prediction,
            hidden_states=contextualized_embeddings if output_hidden_states else None,
            attentions=attention_probs if output_attentions else None
        )

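
# Usage sketch (illustrative): computing the masked-LM loss. Following the `labels`
# docstring above, positions that should not contribute to the loss are set to -100;
# the forward pass computes a plain cross-entropy over all positions, so this
# convention relies on the default `ignore_index=-100` of `F.cross_entropy`.
# The helper name and the 15% masking rate are illustrative choices.
def _masked_lm_usage_sketch(model: LtgBertForMaskedLM, input_ids: torch.Tensor,
                            attention_mask: torch.Tensor, mask_token_id: int):
    labels = input_ids.clone()
    masked_positions = torch.rand(input_ids.shape) < 0.15     # pick ~15% of tokens at random
    labels[~masked_positions] = -100                          # ignore unmasked positions in the loss
    corrupted = input_ids.masked_fill(masked_positions, mask_token_id)
    output = model(input_ids=corrupted, attention_mask=attention_mask, labels=labels)
    return output.loss, output.logits                         # logits: [batch_size, sequence_length, vocab_size]
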
class Classifier(nn.Module):
    def __init__(self, config, num_labels: int):
        super().__init__()

        drop_out = getattr(config, "classifier_dropout", config.hidden_dropout_prob)

        self.nonlinearity = nn.Sequential(
            nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False),
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.GELU(),
            nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False),
            nn.Dropout(drop_out),
            nn.Linear(config.hidden_size, num_labels)
        )
        self.initialize(config.hidden_size)

    def initialize(self, hidden_size):
        std = math.sqrt(2.0 / (5.0 * hidden_size))
        nn.init.trunc_normal_(self.nonlinearity[1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
        nn.init.trunc_normal_(self.nonlinearity[-1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
        self.nonlinearity[1].bias.data.zero_()
        self.nonlinearity[-1].bias.data.zero_()

    def forward(self, x):
        x = self.nonlinearity(x)
        return x


@add_start_docstrings(
    """
    LTG-BERT model with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    LTG_BERT_START_DOCSTRING,
)
class LtgBertForSequenceClassification(LtgBertModel):
    _keys_to_ignore_on_load_unexpected = ["classifier"]
    _keys_to_ignore_on_load_missing = ["head"]

    def __init__(self, config):
        super().__init__(config, add_mlm_layer=False)

        self.num_labels = config.num_labels
        self.head = Classifier(config, self.num_labels)

    @add_start_docstrings_to_model_forward(LTG_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask)
        logits = self.head(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = nn.MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = nn.CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = nn.BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (
                logits,
                *([contextualized_embeddings] if output_hidden_states else []),
                *([attention_probs] if output_attentions else [])
            )
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=contextualized_embeddings if output_hidden_states else None,
            attentions=attention_probs if output_attentions else None
        )

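
# Usage sketch (illustrative, placeholder path): single-label sequence classification.
# The head above is applied only to the first token's representation
# (`sequence_output[:, 0, :]`), and the loss function is picked from
# `config.problem_type` exactly as in the forward pass above.
def _sequence_classification_usage_sketch():
    from transformers import AutoModelForSequenceClassification, AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")  # placeholder path
    model = AutoModelForSequenceClassification.from_pretrained(
        "path/to/checkpoint", trust_remote_code=True, num_labels=2   # placeholder path
    )
    batch = tokenizer(["great movie", "terrible movie"], return_tensors="pt", padding=True)
    labels = torch.tensor([1, 0])
    output = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], labels=labels)
    return output.loss, output.logits  # logits: [batch_size, num_labels]
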
@add_start_docstrings(
    """
    LTG-BERT model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    LTG_BERT_START_DOCSTRING,
)
class LtgBertForTokenClassification(LtgBertModel):
    _keys_to_ignore_on_load_unexpected = ["classifier"]
    _keys_to_ignore_on_load_missing = ["head"]

    def __init__(self, config):
        super().__init__(config, add_mlm_layer=False)

        self.num_labels = config.num_labels
        self.head = Classifier(config, self.num_labels)

    @add_start_docstrings_to_model_forward(LTG_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask)
        logits = self.head(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (
                logits,
                *([contextualized_embeddings] if output_hidden_states else []),
                *([attention_probs] if output_attentions else [])
            )
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=contextualized_embeddings if output_hidden_states else None,
            attentions=attention_probs if output_attentions else None
        )

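
# Usage sketch (illustrative): token-level labelling. Unlike the sequence classifier,
# the head is applied to every position of `sequence_output`, so labels are expected
# per token with shape [batch_size, sequence_length]. Positions that should not
# contribute (e.g. padding) can be set to -100, which nn.CrossEntropyLoss ignores by
# default. The helper name is hypothetical.
def _token_classification_usage_sketch(model: LtgBertForTokenClassification,
                                       input_ids: torch.Tensor,
                                       attention_mask: torch.Tensor,
                                       labels: torch.Tensor):
    output = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
    return output.loss, output.logits  # logits: [batch_size, sequence_length, num_labels]
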
@add_start_docstrings(
    """
    LTG-BERT model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    LTG_BERT_START_DOCSTRING,
)
class LtgBertForQuestionAnswering(LtgBertModel):
    _keys_to_ignore_on_load_unexpected = ["classifier"]
    _keys_to_ignore_on_load_missing = ["head"]

    def __init__(self, config):
        super().__init__(config, add_mlm_layer=False)

        self.num_labels = config.num_labels
        self.head = Classifier(config, self.num_labels)

    @add_start_docstrings_to_model_forward(LTG_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None
    ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(input_ids, attention_mask)
        logits = self.head(sequence_output)

        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)

            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (
                start_logits,
                end_logits,
                *([contextualized_embeddings] if output_hidden_states else []),
                *([attention_probs] if output_attentions else [])
            )
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=contextualized_embeddings if output_hidden_states else None,
            attentions=attention_probs if output_attentions else None
        )

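
# Usage sketch (illustrative): extractive question answering. This assumes the model
# was constructed with config.num_labels == 2, so that `logits.split(1, dim=-1)` in the
# forward above yields exactly one start and one end score per token.
# `start_positions`/`end_positions` are token indices of the gold answer span and are
# clamped to the sequence length inside `forward`, so out-of-range answers are ignored.
def _question_answering_usage_sketch(model: LtgBertForQuestionAnswering,
                                     input_ids: torch.Tensor,
                                     attention_mask: torch.Tensor,
                                     start_positions: torch.Tensor,
                                     end_positions: torch.Tensor):
    output = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        start_positions=start_positions,
        end_positions=end_positions,
    )
    # Greedy span prediction: position-wise argmax of the start and end logits.
    start = output.start_logits.argmax(dim=-1)
    end = output.end_logits.argmax(dim=-1)
    return output.loss, start, end
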
@add_start_docstrings(
    """
    LTG-BERT model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    LTG_BERT_START_DOCSTRING,
)
class LtgBertForMultipleChoice(LtgBertModel):
    _keys_to_ignore_on_load_unexpected = ["classifier"]
    _keys_to_ignore_on_load_missing = ["head"]

    def __init__(self, config):
        super().__init__(config, add_mlm_layer=False)

        self.num_labels = getattr(config, "num_labels", 2)
        self.head = Classifier(config, self.num_labels)

    @add_start_docstrings_to_model_forward(LTG_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None
    ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1]

        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None

        sequence_output, contextualized_embeddings, attention_probs = self.get_contextualized_embeddings(flat_input_ids, flat_attention_mask)
        logits = self.head(sequence_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (
                reshaped_logits,
                *([contextualized_embeddings] if output_hidden_states else []),
                *([attention_probs] if output_attentions else [])
            )
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
         
     | 
| 823 | 
         
            +
                        loss=loss,
         
     | 
| 824 | 
         
            +
                        logits=reshaped_logits,
         
     | 
| 825 | 
         
            +
                        hidden_states=contextualized_embeddings if output_hidden_states else None,
         
     | 
| 826 | 
         
            +
                        attentions=attention_probs if output_attentions else None
         
     | 
| 827 | 
         
            +
                    )
         
     | 
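
For orientation, a minimal usage sketch of the multiple-choice head added above. It is illustrative only and not part of the commit: it assumes configuration_ltgbert.py and modeling_ltgbert.py are importable from a local checkout of this repository, uses a randomly initialized model, sets num_labels to 1 so the head emits one score per choice (which view(-1, num_choices) then groups into a (batch_size, num_choices) matrix), and leaves attention_mask unset.

import torch
from configuration_ltgbert import LtgBertConfig
from modeling_ltgbert import LtgBertForMultipleChoice

# Randomly initialized model; hyper-parameters come from the config class defaults (assumed).
config = LtgBertConfig()
config.num_labels = 1  # one score per choice, so the reshape in forward() yields (batch_size, num_choices)
model = LtgBertForMultipleChoice(config)
model.eval()

# Shapes follow the forward docstring: (batch_size, num_choices, sequence_length).
batch_size, num_choices, seq_len = 2, 4, 16
input_ids = torch.randint(0, config.vocab_size, (batch_size, num_choices, seq_len))
labels = torch.tensor([0, 3])  # index of the correct choice for each example

with torch.no_grad():
    outputs = model(input_ids=input_ids, labels=labels)

print(outputs.logits.shape)  # torch.Size([2, 4]), i.e. logits.view(-1, num_choices)
print(outputs.loss)          # cross-entropy over the choices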
    	
pytorch_model.bin
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be463284eea3e162b0d75881835a3829b241573c0f8a30d8e24315b8ccb9043e
+size 811748865
    	
special_tokens_map.json
ADDED

@@ -0,0 +1,9 @@
+{
+  "bos_token": "[BOS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[EOS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
    	
tokenizer.json
ADDED

The diff for this file is too large to render. See raw diff.
    	
tokenizer_config.json
ADDED

@@ -0,0 +1,4 @@
+{
+  "model_max_length": 1000000000000000019884624838656,
+  "tokenizer_class": "PreTrainedTokenizerFast"
+}
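
As a closing note, a minimal sketch of how the tokenizer files added in this commit are typically consumed. The checkpoint path below is a placeholder for wherever this repository is cloned or hosted, not something defined by the commit.

from transformers import AutoTokenizer

# Placeholder path; point this at a local clone of the repository or its hub id.
tokenizer = AutoTokenizer.from_pretrained("path/to/this-checkpoint")

print(type(tokenizer).__name__)  # PreTrainedTokenizerFast, per tokenizer_config.json
print(tokenizer.mask_token)      # "[MASK]", per special_tokens_map.json
print(tokenizer.pad_token)       # "[PAD]"

encoding = tokenizer("A short example sentence.")
print(encoding["input_ids"])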