"""PyTorch BERT model."""

import math
import os
import warnings
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import (
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from transformers.utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)

from .configuration_bert import JinaBertConfig

try:
    from torch.nn.functional import scaled_dot_product_attention
except ImportError:
    scaled_dot_product_attention = None

try:
    from tqdm.autonotebook import trange

    has_tqdm = True
except ImportError:
    has_tqdm = False

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "JinaBertConfig"

_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = (
    "dbmdz/bert-large-cased-finetuned-conll03-english"
)
_TOKEN_CLASS_EXPECTED_OUTPUT = "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] "
_TOKEN_CLASS_EXPECTED_LOSS = 0.01

_CHECKPOINT_FOR_QA = "deepset/bert-base-cased-squad2"
_QA_EXPECTED_OUTPUT = "'a nice puppet'"
_QA_EXPECTED_LOSS = 7.41
_QA_TARGET_START_INDEX = 14
_QA_TARGET_END_INDEX = 15

_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "textattack/bert-base-uncased-yelp-polarity"
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'"
_SEQ_CLASS_EXPECTED_LOSS = 0.01

def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(
            n
            in [
                "adam_v",
                "adam_m",
                "AdamWeightDecayOptimizer",
                "AdamWeightDecayOptimizer_1",
                "global_step",
            ]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            if pointer.shape != array.shape:
                raise ValueError(
                    f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
                )
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model

class JinaBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config: JinaBertConfig):
        super().__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
        )
        if config.position_embedding_type != "alibi":
            self.position_embeddings = nn.Embedding(
                config.max_position_embeddings, config.hidden_size
            )
        self.token_type_embeddings = nn.Embedding(
            config.type_vocab_size, config.hidden_size
        )

        # self.LayerNorm is not snake-cased to stick with the TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(
            config, "position_embedding_type", "absolute"
        )
        self.register_buffer(
            "position_ids",
            torch.arange(config.max_position_embeddings).expand((1, -1)),
            persistent=False,
        )
        self.register_buffer(
            "token_type_ids",
            torch.zeros(self.position_ids.size(), dtype=torch.long),
            persistent=False,
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[
                :, past_key_values_length : seq_length + past_key_values_length
            ]

        # Setting token_type_ids to the registered buffer (all zeros) from the constructor, which usually happens
        # when it is auto-generated; the registered buffer helps users trace the model without passing token_type_ids
        # (solves issue #5664)
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
                    input_shape[0], seq_length
                )
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(
                    input_shape, dtype=torch.long, device=self.position_ids.device
                )

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
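
# When `config.position_embedding_type == "alibi"`, no absolute position embedding
# table is created above and the position term is skipped in `forward()`; positional
# information is instead injected as an additive attention bias constructed in
# `JinaBertEncoder.rebuild_alibi_tensor` and passed to every layer as `bias`.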

class JinaBertSelfAttention(nn.Module):
    def __init__(self, config: JinaBertConfig, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
            config, "embedding_size"
        ):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.attn_implementation = config.attn_implementation
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout_p = config.attention_probs_dropout_prob
        self.dropout = nn.Dropout(self.dropout_p)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if (
            self.position_embedding_type == "relative_key"
            or self.position_embedding_type == "relative_key_query"
        ):
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(
                2 * config.max_position_embeddings - 1, self.attention_head_size
            )

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        bias: Optional[torch.FloatTensor] = None,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k, v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention, save Tuple(torch.Tensor, torch.Tensor) of all cross-attention key/value_states.
            # Further calls to the cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder), save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to the current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention, `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        if self.attn_implementation == 'torch' and scaled_dot_product_attention is not None:
            b, _, s, _ = query_layer.shape
            new_bias = attention_mask + bias
            dropout_p = self.dropout_p if self.training else 0.0
            attn = scaled_dot_product_attention(
                query_layer, key_layer, value_layer, new_bias, dropout_p=dropout_p
            )
            attn = attn.permute(0, 2, 1, 3).contiguous()
            return (attn.view(b, s, self.all_head_size),)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if (
            self.position_embedding_type == "relative_key"
            or self.position_embedding_type == "relative_key_query"
        ):
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                position_ids_l = torch.tensor(
                    key_length - 1, dtype=torch.long, device=hidden_states.device
                ).view(-1, 1)
            else:
                position_ids_l = torch.arange(
                    query_length, dtype=torch.long, device=hidden_states.device
                ).view(-1, 1)
            position_ids_r = torch.arange(
                key_length, dtype=torch.long, device=hidden_states.device
            ).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(
                distance + self.max_position_embeddings - 1
            )
            positional_embedding = positional_embedding.to(
                dtype=query_layer.dtype
            )  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                relative_position_scores_key = torch.einsum(
                    "bhrd,lrd->bhlr", key_layer, positional_embedding
                )
                attention_scores = (
                    attention_scores
                    + relative_position_scores_query
                    + relative_position_scores_key
                )

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the model's forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores + bias, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (
            (context_layer, attention_probs) if output_attentions else (context_layer,)
        )

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
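
# Both attention paths above compute the same biased attention: the fused path sums
# `attention_mask + bias` and passes it as the additive `attn_mask` argument of
# `torch.nn.functional.scaled_dot_product_attention` (which applies the 1/sqrt(d)
# scaling internally), while the explicit path scales the scores, adds the mask, and
# adds `bias` just before the softmax.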

class JinaBertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self, hidden_states: torch.Tensor, input_tensor: torch.Tensor
    ) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class JinaBertAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = JinaBertSelfAttention(
            config, position_embedding_type=position_embedding_type
        )
        self.output = JinaBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads,
            self.self.num_attention_heads,
            self.self.attention_head_size,
            self.pruned_heads,
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = (
            self.self.attention_head_size * self.self.num_attention_heads
        )
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        bias: Optional[torch.FloatTensor] = None,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
            bias,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[
            1:
        ]  # add attentions if we output them
        return outputs

class JinaBertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class JinaBertOutput(nn.Module):
    def __init__(self, config: JinaBertConfig):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self, hidden_states: torch.Tensor, input_tensor: torch.Tensor
    ) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class JinaBertGLUMLP(nn.Module):
    def __init__(self, config: JinaBertConfig):
        super().__init__()
        self.config = config
        self.gated_layers = nn.Linear(
            config.hidden_size, config.intermediate_size * 2, bias=False
        )
        if config.feed_forward_type == 'reglu':
            self.act = nn.ReLU()
        elif config.feed_forward_type == 'geglu':
            self.act = nn.GELU()
        else:
            raise ValueError(
                f"feed_forward_type {config.feed_forward_type} not supported"
            )
        self.wo = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        residual_connection = hidden_states
        # compute the activation
        hidden_states = self.gated_layers(hidden_states)
        gated = hidden_states[:, :, : self.config.intermediate_size]
        non_gated = hidden_states[:, :, self.config.intermediate_size :]
        hidden_states = self.act(gated) * non_gated
        hidden_states = self.dropout(hidden_states)
        # multiply by the second matrix
        hidden_states = self.wo(hidden_states)
        # add the residual connection and post-LN
        hidden_states = self.layernorm(hidden_states + residual_connection)
        return hidden_states
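
# Illustrative sketch of the GLU feed-forward above (hypothetical dimensions, not
# read from any config): with hidden_size=768 and intermediate_size=3072,
# `gated_layers` maps (batch, seq, 768) to (batch, seq, 6144); the first 3072
# channels go through the activation and gate the remaining 3072 element-wise, and
# `wo` projects back to (batch, seq, 768), e.g.
#
#   mlp = JinaBertGLUMLP(config)                        # config.feed_forward_type == 'geglu'
#   out = mlp(torch.randn(2, 16, config.hidden_size))   # same shape as the input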

class JinaBertLayer(nn.Module):
    def __init__(self, config: JinaBertConfig):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = JinaBertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        self.feed_forward_type = config.feed_forward_type
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(
                    f"{self} should be used as a decoder model if cross attention is added"
                )
            self.crossattention = JinaBertAttention(
                config, position_embedding_type="absolute"
            )
        if self.feed_forward_type.endswith('glu'):
            self.mlp = JinaBertGLUMLP(config)
        else:
            self.intermediate = JinaBertIntermediate(config)
            self.output = JinaBertOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        bias: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = (
            past_key_value[:2] if past_key_value is not None else None
        )
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
            bias=bias,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[
                1:
            ]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = (
                past_key_value[-2:] if past_key_value is not None else None
            )
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = (
                outputs + cross_attention_outputs[1:-1]
            )  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        if self.feed_forward_type.endswith('glu'):
            layer_output = self.mlp(attention_output)
        else:
            layer_output = apply_chunking_to_forward(
                self.feed_forward_chunk,
                self.chunk_size_feed_forward,
                self.seq_len_dim,
                attention_output,
            )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

class JinaBertEncoder(nn.Module):
    def __init__(self, config: JinaBertConfig):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList(
            [JinaBertLayer(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False
        self.num_attention_heads = config.num_attention_heads
        self.register_buffer(
            "alibi",
            self.rebuild_alibi_tensor(size=config.max_position_embeddings),
            persistent=False,
        )

    def rebuild_alibi_tensor(
        self, size: int, device: Optional[Union[torch.device, str]] = None
    ):
        # Alibi
        # Following https://github.com/ofirpress/attention_with_linear_biases/issues/5 (Implementation 1)
        # In the causal case, you can exploit the fact that softmax is invariant to a uniform translation
        # of the logits, which makes the math work out *after* applying causal masking. If no causal masking
        # will be applied, it is necessary to construct the diagonal mask.
        n_heads = self.num_attention_heads

        def _get_alibi_head_slopes(n_heads: int) -> List[float]:
            def get_slopes_power_of_2(n):
                start = 2 ** (-(2 ** -(math.log2(n) - 3)))
                ratio = start
                return [start * ratio**i for i in range(n)]

            if math.log2(n_heads).is_integer():
                return get_slopes_power_of_2(
                    n_heads
                )  # In the paper, we only train models that have 2^a heads for some a. This function has
            else:  # some good properties that only occur when the input is a power of 2. To maintain that even
                closest_power_of_2 = 2 ** math.floor(
                    math.log2(n_heads)
                )  # when the number of heads is not a power of 2, we use this workaround.
                return (
                    get_slopes_power_of_2(closest_power_of_2)
                    + _get_alibi_head_slopes(2 * closest_power_of_2)[0::2][
                        : n_heads - closest_power_of_2
                    ]
                )

        context_position = torch.arange(size, device=device)[:, None]
        memory_position = torch.arange(size, device=device)[None, :]
        relative_position = torch.abs(memory_position - context_position)
        # [n_heads, max_token_length, max_token_length]
        relative_position = relative_position.unsqueeze(0).expand(n_heads, -1, -1)
        slopes = torch.Tensor(_get_alibi_head_slopes(n_heads)).to(device) * -1
        alibi = slopes.unsqueeze(1).unsqueeze(1) * relative_position
        # [1, n_heads, max_token_length, max_token_length]
        alibi = alibi.unsqueeze(0)
        assert alibi.shape == torch.Size([1, n_heads, size, size])

        self._current_alibi_size = size
        return alibi
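
    # Example of the resulting bias (standard ALiBi slopes, shown for illustration):
    # with num_attention_heads == 8, `_get_alibi_head_slopes` returns the geometric
    # sequence [1/2, 1/4, ..., 1/256], and head h receives the additive score
    # -slope_h * |i - j| for query position i and key position j, so attention decays
    # with distance at a head-specific rate.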

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = (
            () if output_attentions and self.config.add_cross_attention else None
        )

        # Add alibi matrix to extended_attention_mask
        _, seqlen, _ = hidden_states.size()
        if self._current_alibi_size < seqlen:
            # Rebuild the alibi tensor when needed
            warnings.warn(
                f'Increasing alibi size from {self._current_alibi_size} to {seqlen}.'
            )
            self.register_buffer(
                "alibi",
                self.rebuild_alibi_tensor(size=seqlen, device=hidden_states.device).to(
                    hidden_states.dtype
                ),
                persistent=False,
            )
        elif self.alibi.device != hidden_states.device:
            # Device catch-up
            self.alibi = self.alibi.to(hidden_states.device)
        alibi_bias = self.alibi[:, :, :seqlen, :seqlen]

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    alibi_bias,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    alibi_bias,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )

class JinaBertPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class JinaBertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class JinaBertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = JinaBertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


class JinaBertOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = JinaBertLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class JinaBertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class JinaBertPreTrainingHeads(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = JinaBertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score

class JinaBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = JinaBertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    supports_gradient_checkpointing = True
    _no_split_modules = ["JinaBertLayer"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, JinaBertEncoder):
            module.gradient_checkpointing = value

@dataclass
class JinaBertForPreTrainingOutput(ModelOutput):
    """
    Output type of [`JinaBertForPreTraining`].

    Args:
        loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

BERT_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`BertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

BERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class JinaBertModel(JinaBertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
    """

    def __init__(self, config: JinaBertConfig, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.emb_pooler = config.emb_pooler
        self._name_or_path = config._name_or_path
        if self.emb_pooler:
            from transformers import AutoTokenizer

            self.tokenizer = AutoTokenizer.from_pretrained(config._name_or_path)

        self.embeddings = JinaBertEmbeddings(config)
        self.encoder = JinaBertEncoder(config)

        self.pooler = JinaBertPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    @torch.inference_mode()
    def encode(
        self: 'JinaBertModel',
        sentences: Union[str, List[str]],
        batch_size: int = 32,
        show_progress_bar: Optional[bool] = None,
        output_value: str = 'sentence_embedding',
        convert_to_numpy: bool = True,
        convert_to_tensor: bool = False,
        device: Optional[torch.device] = None,
        normalize_embeddings: bool = False,
        **tokenizer_kwargs,
    ) -> Union[List[torch.Tensor], np.ndarray, torch.Tensor]:
        """
        Computes sentence embeddings

        Args:
            sentences(`str` or `List[str]`):
                Sentence or sentences to be encoded
            batch_size(`int`, *optional*, defaults to 32):
                Batch size for the computation
            show_progress_bar(`bool`, *optional*, defaults to None):
                Show a progress bar when encoding sentences.
                If set to None, the progress bar is only shown when `logger.level == logging.INFO` or `logger.level == logging.DEBUG`.
            output_value(`str`, *optional*, defaults to 'sentence_embedding'):
                Default is sentence_embedding, to get sentence embeddings.
                Can be set to token_embeddings to get wordpiece token embeddings.
                Set to None to get all output values.
            convert_to_numpy(`bool`, *optional*, defaults to True):
                If true, the output is a list of numpy vectors.
                Else, it is a list of pytorch tensors.
            convert_to_tensor(`bool`, *optional*, defaults to False):
                If true, you get one large tensor as return.
                Overwrites any setting from convert_to_numpy.
            device(`torch.device`, *optional*, defaults to None):
                Which torch.device to use for the computation.
            normalize_embeddings(`bool`, *optional*, defaults to False):
                If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used.
            tokenizer_kwargs(`Dict[str, Any]`, *optional*, defaults to {}):
                Keyword arguments for the tokenizer.

        Returns:
            By default, a list of tensors is returned.
            If convert_to_tensor, a stacked tensor is returned.
            If convert_to_numpy, a numpy matrix is returned.
        """
        if not self.emb_pooler:
            warnings.warn("No emb_pooler specified, defaulting to mean pooling.")
            self.emb_pooler = 'mean'
            from transformers import AutoTokenizer

            self.tokenizer = AutoTokenizer.from_pretrained(self._name_or_path)
        is_training = self.training
        self.eval()

        if show_progress_bar is None:
            show_progress_bar = (
                logger.getEffectiveLevel() == logging.INFO
                or logger.getEffectiveLevel() == logging.DEBUG
            )

        if convert_to_tensor:
            convert_to_numpy = False

        if output_value != 'sentence_embedding':
            convert_to_tensor = False
            convert_to_numpy = False

        input_was_string = False
        if isinstance(sentences, str) or not hasattr(sentences, '__len__'):
            sentences = [sentences]
            input_was_string = True

        if device is not None:
            self.to(device)

        # TODO: Maybe use better length heuristic?
        permutation = np.argsort([-len(i) for i in sentences])
        inverse_permutation = np.argsort(permutation)
        sentences = [sentences[idx] for idx in permutation]

        tokenizer_kwargs['padding'] = tokenizer_kwargs.get('padding', True)
        tokenizer_kwargs['max_length'] = tokenizer_kwargs.get('max_length', 8192)
        tokenizer_kwargs['truncation'] = tokenizer_kwargs.get('truncation', True)

        all_embeddings = []

        if has_tqdm:
            range_iter = trange(
                0,
                len(sentences),
                batch_size,
                desc="Encoding",
                disable=not show_progress_bar,
            )
        else:
            range_iter = range(0, len(sentences), batch_size)

        for i in range_iter:
            encoded_input = self.tokenizer(
                sentences[i : i + batch_size],
                return_tensors='pt',
                **tokenizer_kwargs,
            ).to(self.device)
            token_embs = self.forward(**encoded_input)[0]

            # Accumulate in fp32 to avoid overflow
            token_embs = token_embs.float()

            if output_value == 'token_embeddings':
                raise NotImplementedError
            elif output_value is None:
                raise NotImplementedError
            else:
                embeddings = self.mean_pooling(
                    token_embs, encoded_input['attention_mask']
                )

                if normalize_embeddings:
                    embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)

                if convert_to_numpy:
                    embeddings = embeddings.cpu()
            all_embeddings.extend(embeddings)

        all_embeddings = [all_embeddings[idx] for idx in inverse_permutation]

        if convert_to_tensor:
            all_embeddings = torch.stack(all_embeddings)
        elif convert_to_numpy:
            all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])

        if input_was_string:
            all_embeddings = all_embeddings[0]

        self.train(is_training)
        return all_embeddings

    def mean_pooling(
        self, token_embeddings: torch.Tensor, attention_mask: torch.Tensor
    ):
        input_mask_expanded = (
            attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        )
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
            input_mask_expanded.sum(1), min=1e-9
        )
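
    # Worked example (illustrative values only): for `token_embeddings` of shape
    # (1, 4, hidden_size) and `attention_mask == [[1, 1, 1, 0]]`, only the three
    # unmasked token vectors are averaged; the `clamp(min=1e-9)` guards against a
    # division by zero if a row of the mask is all zeros.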

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(
        BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time"
            )
        elif input_ids is not None:
            # self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = (
            past_key_values[0][0].shape[2] if past_key_values is not None else 0
        )

        if attention_mask is None:
            attention_mask = torch.ones(
                ((batch_size, seq_length + past_key_values_length)), device=device
            )

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
                    batch_size, seq_length
                )
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(
                    input_shape, dtype=torch.long, device=device
                )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape
        )

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            (
                encoder_batch_size,
                encoder_sequence_length,
                _,
            ) = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(
                encoder_attention_mask
            )
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = (
            self.pooler(sequence_output) if self.pooler is not None else None
        )

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
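
# Usage sketch for `JinaBertModel.encode` (the checkpoint name below is only an
# example; it assumes a repository that ships this implementation and is loaded with
# `trust_remote_code=True`):
#
#   from transformers import AutoModel
#   model = AutoModel.from_pretrained(
#       "jinaai/jina-embeddings-v2-base-en", trust_remote_code=True
#   )
#   embeddings = model.encode(
#       ["How is the weather today?", "What is the weather like today?"],
#       normalize_embeddings=True,
#   )  # with the defaults, a numpy array of shape (num_sentences, hidden_size)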
| @add_start_docstrings( | |
| """ | |
| Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next | |
| sentence prediction (classification)` head. | |
| """, | |
| BERT_START_DOCSTRING, | |
| ) | |
| class JinaBertForPreTraining(JinaBertPreTrainedModel): | |
| _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] | |
| def __init__(self, config): | |
| super().__init__(config) | |
| self.bert = JinaBertModel(config) | |
| self.cls = JinaBertPreTrainingHeads(config) | |
| # Initialize weights and apply final processing | |
| self.post_init() | |
| def get_output_embeddings(self): | |
| return self.cls.predictions.decoder | |
| def set_output_embeddings(self, new_embeddings): | |
| self.cls.predictions.decoder = new_embeddings | |
| @add_start_docstrings_to_model_forward( | |
| BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") | |
| ) | |
| @replace_return_docstrings( | |
| output_type=JinaBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC | |
| ) | |
| def forward( | |
| self, | |
| input_ids: Optional[torch.Tensor] = None, | |
| attention_mask: Optional[torch.Tensor] = None, | |
| token_type_ids: Optional[torch.Tensor] = None, | |
| position_ids: Optional[torch.Tensor] = None, | |
| head_mask: Optional[torch.Tensor] = None, | |
| inputs_embeds: Optional[torch.Tensor] = None, | |
| labels: Optional[torch.Tensor] = None, | |
| next_sentence_label: Optional[torch.Tensor] = None, | |
| output_attentions: Optional[bool] = None, | |
| output_hidden_states: Optional[bool] = None, | |
| return_dict: Optional[bool] = None, | |
| ) -> Union[Tuple[torch.Tensor], JinaBertForPreTrainingOutput]: | |
| r""" | |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): | |
| Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., | |
| config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); | |
| the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. | |
| next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): | |
| Labels for computing the next sentence prediction (classification) loss. Input should be a sequence | |
| pair (see `input_ids` docstring). Indices should be in `[0, 1]`: | |
| - 0 indicates sequence B is a continuation of sequence A, | |
| - 1 indicates sequence B is a random sequence. | |
| kwargs (`Dict[str, any]`, optional, defaults to *{}*): | |
| Used to hide legacy arguments that have been deprecated. | |
| Returns: | |
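| Example (a minimal, illustrative sketch rather than a reference recipe: it uses a randomly | |
| initialized model built from `JinaBertConfig()` defaults and toy tensors; in practice you would | |
| load a pretrained checkpoint and a matching tokenizer): | |
| ```python | |
| >>> import torch | |
| >>> # JinaBertConfig and JinaBertForPreTraining are assumed to be in scope (see this module's imports) | |
| >>> config = JinaBertConfig() | |
| >>> model = JinaBertForPreTraining(config) | |
| >>> input_ids = torch.randint(0, config.vocab_size, (1, 12)) | |
| >>> mlm_labels = input_ids.clone()  # score every position in this toy example | |
| >>> nsp_label = torch.tensor([0])  # 0 = sentence B follows sentence A | |
| >>> outputs = model(input_ids, labels=mlm_labels, next_sentence_label=nsp_label) | |
| >>> outputs.loss  # masked LM loss + next sentence prediction loss | |
| >>> outputs.prediction_logits.shape  # (1, 12, config.vocab_size) | |
| >>> outputs.seq_relationship_logits.shape  # (1, 2) | |
| ``` | |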
| """ | |
| return_dict = ( | |
| return_dict if return_dict is not None else self.config.use_return_dict | |
| ) | |
| outputs = self.bert( | |
| input_ids, | |
| attention_mask=attention_mask, | |
| token_type_ids=token_type_ids, | |
| position_ids=position_ids, | |
| head_mask=head_mask, | |
| inputs_embeds=inputs_embeds, | |
| output_attentions=output_attentions, | |
| output_hidden_states=output_hidden_states, | |
| return_dict=return_dict, | |
| ) | |
| sequence_output, pooled_output = outputs[:2] | |
| prediction_scores, seq_relationship_score = self.cls( | |
| sequence_output, pooled_output | |
| ) | |
| total_loss = None | |
| if labels is not None and next_sentence_label is not None: | |
| loss_fct = CrossEntropyLoss() | |
| masked_lm_loss = loss_fct( | |
| prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) | |
| ) | |
| next_sentence_loss = loss_fct( | |
| seq_relationship_score.view(-1, 2), next_sentence_label.view(-1) | |
| ) | |
| total_loss = masked_lm_loss + next_sentence_loss | |
| if not return_dict: | |
| output = (prediction_scores, seq_relationship_score) + outputs[2:] | |
| return ((total_loss,) + output) if total_loss is not None else output | |
| return JinaBertForPreTrainingOutput( | |
| loss=total_loss, | |
| prediction_logits=prediction_scores, | |
| seq_relationship_logits=seq_relationship_score, | |
| hidden_states=outputs.hidden_states, | |
| attentions=outputs.attentions, | |
| ) | |
| @add_start_docstrings( | |
| """JinaBert Model with a `language modeling` head on top for CLM fine-tuning.""", | |
| BERT_START_DOCSTRING, | |
| ) | |
| class JinaBertLMHeadModel(JinaBertPreTrainedModel): | |
| _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] | |
| def __init__(self, config): | |
| super().__init__(config) | |
| if not config.is_decoder: | |
| logger.warning( | |
| "If you want to use `JinaBertLMHeadModel` as a standalone, add `is_decoder=True.`" | |
| ) | |
| self.bert = JinaBertModel(config, add_pooling_layer=False) | |
| self.cls = JinaBertOnlyMLMHead(config) | |
| # Initialize weights and apply final processing | |
| self.post_init() | |
| def get_output_embeddings(self): | |
| return self.cls.predictions.decoder | |
| def set_output_embeddings(self, new_embeddings): | |
| self.cls.predictions.decoder = new_embeddings | |
| @add_start_docstrings_to_model_forward( | |
| BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") | |
| ) | |
| @add_code_sample_docstrings( | |
| checkpoint=_CHECKPOINT_FOR_DOC, | |
| output_type=CausalLMOutputWithCrossAttentions, | |
| config_class=_CONFIG_FOR_DOC, | |
| ) | |
| def forward( | |
| self, | |
| input_ids: Optional[torch.Tensor] = None, | |
| attention_mask: Optional[torch.Tensor] = None, | |
| token_type_ids: Optional[torch.Tensor] = None, | |
| position_ids: Optional[torch.Tensor] = None, | |
| head_mask: Optional[torch.Tensor] = None, | |
| inputs_embeds: Optional[torch.Tensor] = None, | |
| encoder_hidden_states: Optional[torch.Tensor] = None, | |
| encoder_attention_mask: Optional[torch.Tensor] = None, | |
| labels: Optional[torch.Tensor] = None, | |
| past_key_values: Optional[List[torch.Tensor]] = None, | |
| use_cache: Optional[bool] = None, | |
| output_attentions: Optional[bool] = None, | |
| output_hidden_states: Optional[bool] = None, | |
| return_dict: Optional[bool] = None, | |
| ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: | |
| r""" | |
| encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): | |
| Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if | |
| the model is configured as a decoder. | |
| encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): | |
| Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in | |
| the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: | |
| - 1 for tokens that are **not masked**, | |
| - 0 for tokens that are **masked**. | |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): | |
| Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in | |
| `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are | |
| ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. | |
| past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): | |
| Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. | |
| If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that | |
| don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all | |
| `decoder_input_ids` of shape `(batch_size, sequence_length)`. | |
| use_cache (`bool`, *optional*): | |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see | |
| `past_key_values`). | |
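| Example (a minimal, illustrative sketch: the model is randomly initialized from `JinaBertConfig` | |
| defaults with `is_decoder=True`, and the input ids are toy values; a real setup would load a | |
| pretrained checkpoint and tokenizer): | |
| ```python | |
| >>> import torch | |
| >>> # JinaBertConfig and JinaBertLMHeadModel are assumed to be in scope (see this module's imports) | |
| >>> config = JinaBertConfig(is_decoder=True) | |
| >>> model = JinaBertLMHeadModel(config) | |
| >>> input_ids = torch.randint(0, config.vocab_size, (1, 8)) | |
| >>> outputs = model(input_ids, labels=input_ids)  # labels are shifted internally for next-token prediction | |
| >>> outputs.loss  # causal language modeling loss | |
| >>> outputs.logits.shape  # (1, 8, config.vocab_size) | |
| ``` | |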
| """ | |
| return_dict = ( | |
| return_dict if return_dict is not None else self.config.use_return_dict | |
| ) | |
| if labels is not None: | |
| use_cache = False | |
| outputs = self.bert( | |
| input_ids, | |
| attention_mask=attention_mask, | |
| token_type_ids=token_type_ids, | |
| position_ids=position_ids, | |
| head_mask=head_mask, | |
| inputs_embeds=inputs_embeds, | |
| encoder_hidden_states=encoder_hidden_states, | |
| encoder_attention_mask=encoder_attention_mask, | |
| past_key_values=past_key_values, | |
| use_cache=use_cache, | |
| output_attentions=output_attentions, | |
| output_hidden_states=output_hidden_states, | |
| return_dict=return_dict, | |
| ) | |
| sequence_output = outputs[0] | |
| prediction_scores = self.cls(sequence_output) | |
| lm_loss = None | |
| if labels is not None: | |
| # we are doing next-token prediction; shift prediction scores and input ids by one | |
| shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() | |
| labels = labels[:, 1:].contiguous() | |
| loss_fct = CrossEntropyLoss() | |
| lm_loss = loss_fct( | |
| shifted_prediction_scores.view(-1, self.config.vocab_size), | |
| labels.view(-1), | |
| ) | |
| if not return_dict: | |
| output = (prediction_scores,) + outputs[2:] | |
| return ((lm_loss,) + output) if lm_loss is not None else output | |
| return CausalLMOutputWithCrossAttentions( | |
| loss=lm_loss, | |
| logits=prediction_scores, | |
| past_key_values=outputs.past_key_values, | |
| hidden_states=outputs.hidden_states, | |
| attentions=outputs.attentions, | |
| cross_attentions=outputs.cross_attentions, | |
| ) | |
| def prepare_inputs_for_generation( | |
| self, | |
| input_ids, | |
| past_key_values=None, | |
| attention_mask=None, | |
| use_cache=True, | |
| **model_kwargs, | |
| ): | |
| input_shape = input_ids.shape | |
| # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly | |
| if attention_mask is None: | |
| attention_mask = input_ids.new_ones(input_shape) | |
| # cut decoder_input_ids if past_key_values is used | |
| if past_key_values is not None: | |
| input_ids = input_ids[:, -1:] | |
| return { | |
| "input_ids": input_ids, | |
| "attention_mask": attention_mask, | |
| "past_key_values": past_key_values, | |
| "use_cache": use_cache, | |
| } | |
| def _reorder_cache(self, past_key_values, beam_idx): | |
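| # Re-order each layer's cached key/value states to follow `beam_idx` (used by beam search | |
| # when `past_key_values` are carried across decoding steps). | |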
| reordered_past = () | |
| for layer_past in past_key_values: | |
| reordered_past += ( | |
| tuple( | |
| past_state.index_select(0, beam_idx) for past_state in layer_past | |
| ), | |
| ) | |
| return reordered_past | |
| @add_start_docstrings( | |
| """JinaBert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING | |
| ) | |
| class JinaBertForMaskedLM(JinaBertPreTrainedModel): | |
| _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] | |
| def __init__(self, config): | |
| super().__init__(config) | |
| if config.is_decoder: | |
| logger.warning( | |
| "If you want to use `JinaBertForMaskedLM` make sure `config.is_decoder=False` for " | |
| "bi-directional self-attention." | |
| ) | |
| self.bert = JinaBertModel(config, add_pooling_layer=False) | |
| self.cls = JinaBertOnlyMLMHead(config) | |
| # Initialize weights and apply final processing | |
| self.post_init() | |
| def get_output_embeddings(self): | |
| return self.cls.predictions.decoder | |
| def set_output_embeddings(self, new_embeddings): | |
| self.cls.predictions.decoder = new_embeddings | |
| @add_start_docstrings_to_model_forward( | |
| BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") | |
| ) | |
| @add_code_sample_docstrings( | |
| checkpoint=_CHECKPOINT_FOR_DOC, | |
| output_type=MaskedLMOutput, | |
| config_class=_CONFIG_FOR_DOC, | |
| expected_output="'paris'", | |
| expected_loss=0.88, | |
| ) | |
| def forward( | |
| self, | |
| input_ids: Optional[torch.Tensor] = None, | |
| attention_mask: Optional[torch.Tensor] = None, | |
| token_type_ids: Optional[torch.Tensor] = None, | |
| position_ids: Optional[torch.Tensor] = None, | |
| head_mask: Optional[torch.Tensor] = None, | |
| inputs_embeds: Optional[torch.Tensor] = None, | |
| encoder_hidden_states: Optional[torch.Tensor] = None, | |
| encoder_attention_mask: Optional[torch.Tensor] = None, | |
| labels: Optional[torch.Tensor] = None, | |
| output_attentions: Optional[bool] = None, | |
| output_hidden_states: Optional[bool] = None, | |
| return_dict: Optional[bool] = None, | |
| ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: | |
| r""" | |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): | |
| Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., | |
| config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the | |
| loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. | |
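| Example (a minimal, illustrative sketch with a randomly initialized model and toy tensors; it only | |
| demonstrates the `-100` masking convention and is not a recipe for using a pretrained checkpoint): | |
| ```python | |
| >>> import torch | |
| >>> # JinaBertConfig and JinaBertForMaskedLM are assumed to be in scope (see this module's imports) | |
| >>> config = JinaBertConfig() | |
| >>> model = JinaBertForMaskedLM(config) | |
| >>> input_ids = torch.randint(0, config.vocab_size, (1, 10)) | |
| >>> labels = input_ids.clone() | |
| >>> labels[:, :5] = -100  # positions set to -100 are ignored by the loss | |
| >>> outputs = model(input_ids, labels=labels) | |
| >>> outputs.loss  # masked LM loss over the last five positions only | |
| >>> outputs.logits.shape  # (1, 10, config.vocab_size) | |
| ``` | |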
| """ | |
| return_dict = ( | |
| return_dict if return_dict is not None else self.config.use_return_dict | |
| ) | |
| outputs = self.bert( | |
| input_ids, | |
| attention_mask=attention_mask, | |
| token_type_ids=token_type_ids, | |
| position_ids=position_ids, | |
| head_mask=head_mask, | |
| inputs_embeds=inputs_embeds, | |
| encoder_hidden_states=encoder_hidden_states, | |
| encoder_attention_mask=encoder_attention_mask, | |
| output_attentions=output_attentions, | |
| output_hidden_states=output_hidden_states, | |
| return_dict=return_dict, | |
| ) | |
| sequence_output = outputs[0] | |
| prediction_scores = self.cls(sequence_output) | |
| masked_lm_loss = None | |
| if labels is not None: | |
| loss_fct = CrossEntropyLoss() # -100 index = padding token | |
| masked_lm_loss = loss_fct( | |
| prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) | |
| ) | |
| if not return_dict: | |
| output = (prediction_scores,) + outputs[2:] | |
| return ( | |
| ((masked_lm_loss,) + output) if masked_lm_loss is not None else output | |
| ) | |
| return MaskedLMOutput( | |
| loss=masked_lm_loss, | |
| logits=prediction_scores, | |
| hidden_states=outputs.hidden_states, | |
| attentions=outputs.attentions, | |
| ) | |
| def prepare_inputs_for_generation( | |
| self, input_ids, attention_mask=None, **model_kwargs | |
| ): | |
| input_shape = input_ids.shape | |
| effective_batch_size = input_shape[0] | |
| # add a dummy token | |
| if self.config.pad_token_id is None: | |
| raise ValueError("The PAD token should be defined for generation") | |
| attention_mask = torch.cat( | |
| [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], | |
| dim=-1, | |
| ) | |
| dummy_token = torch.full( | |
| (effective_batch_size, 1), | |
| self.config.pad_token_id, | |
| dtype=torch.long, | |
| device=input_ids.device, | |
| ) | |
| input_ids = torch.cat([input_ids, dummy_token], dim=1) | |
| return {"input_ids": input_ids, "attention_mask": attention_mask} | |
| @add_start_docstrings( | |
| """JinaBert Model with a `next sentence prediction (classification)` head on top.""", | |
| BERT_START_DOCSTRING, | |
| ) | |
| class JinaBertForNextSentencePrediction(JinaBertPreTrainedModel): | |
| def __init__(self, config): | |
| super().__init__(config) | |
| self.bert = JinaBertModel(config) | |
| self.cls = JinaBertOnlyNSPHead(config) | |
| # Initialize weights and apply final processing | |
| self.post_init() | |
| @add_start_docstrings_to_model_forward( | |
| BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") | |
| ) | |
| @replace_return_docstrings( | |
| output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC | |
| ) | |
| def forward( | |
| self, | |
| input_ids: Optional[torch.Tensor] = None, | |
| attention_mask: Optional[torch.Tensor] = None, | |
| token_type_ids: Optional[torch.Tensor] = None, | |
| position_ids: Optional[torch.Tensor] = None, | |
| head_mask: Optional[torch.Tensor] = None, | |
| inputs_embeds: Optional[torch.Tensor] = None, | |
| labels: Optional[torch.Tensor] = None, | |
| output_attentions: Optional[bool] = None, | |
| output_hidden_states: Optional[bool] = None, | |
| return_dict: Optional[bool] = None, | |
| **kwargs, | |
| ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]: | |
| r""" | |
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): | |
| Labels for computing the next sentence prediction (classification) loss. Input should be a sequence pair | |
| (see `input_ids` docstring). Indices should be in `[0, 1]`: | |
| - 0 indicates sequence B is a continuation of sequence A, | |
| - 1 indicates sequence B is a random sequence. | |
| Returns: | |
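| Example (a minimal, illustrative sketch with a randomly initialized model and toy tensors; a real | |
| sentence pair would be built with a tokenizer, which is outside the scope of this file): | |
| ```python | |
| >>> import torch | |
| >>> # JinaBertConfig and JinaBertForNextSentencePrediction are assumed to be in scope | |
| >>> config = JinaBertConfig() | |
| >>> model = JinaBertForNextSentencePrediction(config) | |
| >>> input_ids = torch.randint(0, config.vocab_size, (1, 16)) | |
| >>> token_type_ids = torch.cat(  # first 8 tokens = sentence A, last 8 = sentence B | |
| ...     [torch.zeros(1, 8, dtype=torch.long), torch.ones(1, 8, dtype=torch.long)], dim=1 | |
| ... ) | |
| >>> outputs = model(input_ids, token_type_ids=token_type_ids, labels=torch.tensor([1])) | |
| >>> outputs.loss  # next sentence prediction loss | |
| >>> outputs.logits.shape  # (1, 2) | |
| ``` | |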
| """ | |
| if "next_sentence_label" in kwargs: | |
| warnings.warn( | |
| "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" | |
| " `labels` instead.", | |
| FutureWarning, | |
| ) | |
| labels = kwargs.pop("next_sentence_label") | |
| return_dict = ( | |
| return_dict if return_dict is not None else self.config.use_return_dict | |
| ) | |
| outputs = self.bert( | |
| input_ids, | |
| attention_mask=attention_mask, | |
| token_type_ids=token_type_ids, | |
| position_ids=position_ids, | |
| head_mask=head_mask, | |
| inputs_embeds=inputs_embeds, | |
| output_attentions=output_attentions, | |
| output_hidden_states=output_hidden_states, | |
| return_dict=return_dict, | |
| ) | |
| pooled_output = outputs[1] | |
| seq_relationship_scores = self.cls(pooled_output) | |
| next_sentence_loss = None | |
| if labels is not None: | |
| loss_fct = CrossEntropyLoss() | |
| next_sentence_loss = loss_fct( | |
| seq_relationship_scores.view(-1, 2), labels.view(-1) | |
| ) | |
| if not return_dict: | |
| output = (seq_relationship_scores,) + outputs[2:] | |
| return ( | |
| ((next_sentence_loss,) + output) | |
| if next_sentence_loss is not None | |
| else output | |
| ) | |
| return NextSentencePredictorOutput( | |
| loss=next_sentence_loss, | |
| logits=seq_relationship_scores, | |
| hidden_states=outputs.hidden_states, | |
| attentions=outputs.attentions, | |
| ) | |
| @add_start_docstrings( | |
| """ | |
| JinaBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled | |
| output) e.g. for GLUE tasks. | |
| """, | |
| BERT_START_DOCSTRING, | |
| ) | |
| class JinaBertForSequenceClassification(JinaBertPreTrainedModel): | |
| def __init__(self, config): | |
| super().__init__(config) | |
| self.num_labels = config.num_labels | |
| self.config = config | |
| self.bert = JinaBertModel(config) | |
| classifier_dropout = ( | |
| config.classifier_dropout | |
| if config.classifier_dropout is not None | |
| else config.hidden_dropout_prob | |
| ) | |
| self.dropout = nn.Dropout(classifier_dropout) | |
| self.classifier = nn.Linear(config.hidden_size, config.num_labels) | |
| # Initialize weights and apply final processing | |
| self.post_init() | |
| @add_start_docstrings_to_model_forward( | |
| BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") | |
| ) | |
| @add_code_sample_docstrings( | |
| checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, | |
| output_type=SequenceClassifierOutput, | |
| config_class=_CONFIG_FOR_DOC, | |
| expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, | |
| expected_loss=_SEQ_CLASS_EXPECTED_LOSS, | |
| ) | |
| def forward( | |
| self, | |
| input_ids: Optional[torch.Tensor] = None, | |
| attention_mask: Optional[torch.Tensor] = None, | |
| token_type_ids: Optional[torch.Tensor] = None, | |
| position_ids: Optional[torch.Tensor] = None, | |
| head_mask: Optional[torch.Tensor] = None, | |
| inputs_embeds: Optional[torch.Tensor] = None, | |
| labels: Optional[torch.Tensor] = None, | |
| output_attentions: Optional[bool] = None, | |
| output_hidden_states: Optional[bool] = None, | |
| return_dict: Optional[bool] = None, | |
| ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: | |
| r""" | |
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): | |
| Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., | |
| config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If | |
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy). | |
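| Example (a minimal, illustrative sketch; `num_labels` is passed through the standard `PretrainedConfig` | |
| kwargs, which is assumed to work for `JinaBertConfig`, and the model is randomly initialized): | |
| ```python | |
| >>> import torch | |
| >>> # JinaBertConfig and JinaBertForSequenceClassification are assumed to be in scope | |
| >>> config = JinaBertConfig(num_labels=3) | |
| >>> model = JinaBertForSequenceClassification(config) | |
| >>> input_ids = torch.randint(0, config.vocab_size, (2, 10)) | |
| >>> outputs = model(input_ids, labels=torch.tensor([0, 2]))  # integer labels -> cross-entropy | |
| >>> outputs.loss | |
| >>> outputs.logits.shape  # (2, 3) | |
| >>> # with num_labels=1 the problem type is inferred as regression (MSE loss) | |
| ``` | |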
| """ | |
| return_dict = ( | |
| return_dict if return_dict is not None else self.config.use_return_dict | |
| ) | |
| outputs = self.bert( | |
| input_ids, | |
| attention_mask=attention_mask, | |
| token_type_ids=token_type_ids, | |
| position_ids=position_ids, | |
| head_mask=head_mask, | |
| inputs_embeds=inputs_embeds, | |
| output_attentions=output_attentions, | |
| output_hidden_states=output_hidden_states, | |
| return_dict=return_dict, | |
| ) | |
| pooled_output = outputs[1] | |
| pooled_output = self.dropout(pooled_output) | |
| logits = self.classifier(pooled_output) | |
| loss = None | |
| if labels is not None: | |
| if self.config.problem_type is None: | |
| if self.num_labels == 1: | |
| self.config.problem_type = "regression" | |
| elif self.num_labels > 1 and ( | |
| labels.dtype == torch.long or labels.dtype == torch.int | |
| ): | |
| self.config.problem_type = "single_label_classification" | |
| else: | |
| self.config.problem_type = "multi_label_classification" | |
| if self.config.problem_type == "regression": | |
| loss_fct = MSELoss() | |
| if self.num_labels == 1: | |
| loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
| else: | |
| loss = loss_fct(logits, labels) | |
| elif self.config.problem_type == "single_label_classification": | |
| loss_fct = CrossEntropyLoss() | |
| loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
| elif self.config.problem_type == "multi_label_classification": | |
| loss_fct = BCEWithLogitsLoss() | |
| loss = loss_fct(logits, labels) | |
| if not return_dict: | |
| output = (logits,) + outputs[2:] | |
| return ((loss,) + output) if loss is not None else output | |
| return SequenceClassifierOutput( | |
| loss=loss, | |
| logits=logits, | |
| hidden_states=outputs.hidden_states, | |
| attentions=outputs.attentions, | |
| ) | |
| @add_start_docstrings( | |
| """ | |
| JinaBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a | |
| softmax) e.g. for RocStories/SWAG tasks. | |
| """, | |
| BERT_START_DOCSTRING, | |
| ) | |
| class JinaBertForMultipleChoice(JinaBertPreTrainedModel): | |
| def __init__(self, config): | |
| super().__init__(config) | |
| self.bert = JinaBertModel(config) | |
| classifier_dropout = ( | |
| config.classifier_dropout | |
| if config.classifier_dropout is not None | |
| else config.hidden_dropout_prob | |
| ) | |
| self.dropout = nn.Dropout(classifier_dropout) | |
| self.classifier = nn.Linear(config.hidden_size, 1) | |
| # Initialize weights and apply final processing | |
| self.post_init() | |
| @add_start_docstrings_to_model_forward( | |
| BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") | |
| ) | |
| @add_code_sample_docstrings( | |
| checkpoint=_CHECKPOINT_FOR_DOC, | |
| output_type=MultipleChoiceModelOutput, | |
| config_class=_CONFIG_FOR_DOC, | |
| ) | |
| def forward( | |
| self, | |
| input_ids: Optional[torch.Tensor] = None, | |
| attention_mask: Optional[torch.Tensor] = None, | |
| token_type_ids: Optional[torch.Tensor] = None, | |
| position_ids: Optional[torch.Tensor] = None, | |
| head_mask: Optional[torch.Tensor] = None, | |
| inputs_embeds: Optional[torch.Tensor] = None, | |
| labels: Optional[torch.Tensor] = None, | |
| output_attentions: Optional[bool] = None, | |
| output_hidden_states: Optional[bool] = None, | |
| return_dict: Optional[bool] = None, | |
| ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: | |
| r""" | |
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): | |
| Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., | |
| num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See | |
| `input_ids` above) | |
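| Example (a minimal, illustrative sketch with toy tensors and a randomly initialized model; the choices | |
| would normally come from a tokenizer applied to each (context, choice) pair): | |
| ```python | |
| >>> import torch | |
| >>> # JinaBertConfig and JinaBertForMultipleChoice are assumed to be in scope | |
| >>> config = JinaBertConfig() | |
| >>> model = JinaBertForMultipleChoice(config) | |
| >>> input_ids = torch.randint(0, config.vocab_size, (2, 4, 12))  # batch of 2, 4 choices, 12 tokens | |
| >>> outputs = model(input_ids, labels=torch.tensor([1, 3]))  # index of the correct choice | |
| >>> outputs.loss | |
| >>> outputs.logits.shape  # (2, 4): one score per choice | |
| ``` | |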
| """ | |
| return_dict = ( | |
| return_dict if return_dict is not None else self.config.use_return_dict | |
| ) | |
| num_choices = ( | |
| input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] | |
| ) | |
| input_ids = ( | |
| input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None | |
| ) | |
| attention_mask = ( | |
| attention_mask.view(-1, attention_mask.size(-1)) | |
| if attention_mask is not None | |
| else None | |
| ) | |
| token_type_ids = ( | |
| token_type_ids.view(-1, token_type_ids.size(-1)) | |
| if token_type_ids is not None | |
| else None | |
| ) | |
| position_ids = ( | |
| position_ids.view(-1, position_ids.size(-1)) | |
| if position_ids is not None | |
| else None | |
| ) | |
| inputs_embeds = ( | |
| inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) | |
| if inputs_embeds is not None | |
| else None | |
| ) | |
| outputs = self.bert( | |
| input_ids, | |
| attention_mask=attention_mask, | |
| token_type_ids=token_type_ids, | |
| position_ids=position_ids, | |
| head_mask=head_mask, | |
| inputs_embeds=inputs_embeds, | |
| output_attentions=output_attentions, | |
| output_hidden_states=output_hidden_states, | |
| return_dict=return_dict, | |
| ) | |
| pooled_output = outputs[1] | |
| pooled_output = self.dropout(pooled_output) | |
| logits = self.classifier(pooled_output) | |
| reshaped_logits = logits.view(-1, num_choices) | |
| loss = None | |
| if labels is not None: | |
| loss_fct = CrossEntropyLoss() | |
| loss = loss_fct(reshaped_logits, labels) | |
| if not return_dict: | |
| output = (reshaped_logits,) + outputs[2:] | |
| return ((loss,) + output) if loss is not None else output | |
| return MultipleChoiceModelOutput( | |
| loss=loss, | |
| logits=reshaped_logits, | |
| hidden_states=outputs.hidden_states, | |
| attentions=outputs.attentions, | |
| ) | |
| @add_start_docstrings( | |
| """ | |
| JinaBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for | |
| Named-Entity-Recognition (NER) tasks. | |
| """, | |
| BERT_START_DOCSTRING, | |
| ) | |
| class JinaBertForTokenClassification(JinaBertPreTrainedModel): | |
| def __init__(self, config): | |
| super().__init__(config) | |
| self.num_labels = config.num_labels | |
| self.bert = JinaBertModel(config, add_pooling_layer=False) | |
| classifier_dropout = ( | |
| config.classifier_dropout | |
| if config.classifier_dropout is not None | |
| else config.hidden_dropout_prob | |
| ) | |
| self.dropout = nn.Dropout(classifier_dropout) | |
| self.classifier = nn.Linear(config.hidden_size, config.num_labels) | |
| # Initialize weights and apply final processing | |
| self.post_init() | |
| @add_start_docstrings_to_model_forward( | |
| BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") | |
| ) | |
| @add_code_sample_docstrings( | |
| checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, | |
| output_type=TokenClassifierOutput, | |
| config_class=_CONFIG_FOR_DOC, | |
| expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, | |
| expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, | |
| ) | |
| def forward( | |
| self, | |
| input_ids: Optional[torch.Tensor] = None, | |
| attention_mask: Optional[torch.Tensor] = None, | |
| token_type_ids: Optional[torch.Tensor] = None, | |
| position_ids: Optional[torch.Tensor] = None, | |
| head_mask: Optional[torch.Tensor] = None, | |
| inputs_embeds: Optional[torch.Tensor] = None, | |
| labels: Optional[torch.Tensor] = None, | |
| output_attentions: Optional[bool] = None, | |
| output_hidden_states: Optional[bool] = None, | |
| return_dict: Optional[bool] = None, | |
| ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: | |
| r""" | |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): | |
| Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. | |
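| Example (a minimal, illustrative sketch with random labels and a randomly initialized model; passing | |
| `num_labels` through the `JinaBertConfig` constructor is an assumption based on standard `PretrainedConfig` behavior): | |
| ```python | |
| >>> import torch | |
| >>> # JinaBertConfig and JinaBertForTokenClassification are assumed to be in scope | |
| >>> config = JinaBertConfig(num_labels=5) | |
| >>> model = JinaBertForTokenClassification(config) | |
| >>> input_ids = torch.randint(0, config.vocab_size, (1, 10)) | |
| >>> labels = torch.randint(0, config.num_labels, (1, 10))  # one label per token | |
| >>> outputs = model(input_ids, labels=labels) | |
| >>> outputs.loss | |
| >>> outputs.logits.shape  # (1, 10, 5) | |
| ``` | |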
| """ | |
| return_dict = ( | |
| return_dict if return_dict is not None else self.config.use_return_dict | |
| ) | |
| outputs = self.bert( | |
| input_ids, | |
| attention_mask=attention_mask, | |
| token_type_ids=token_type_ids, | |
| position_ids=position_ids, | |
| head_mask=head_mask, | |
| inputs_embeds=inputs_embeds, | |
| output_attentions=output_attentions, | |
| output_hidden_states=output_hidden_states, | |
| return_dict=return_dict, | |
| ) | |
| sequence_output = outputs[0] | |
| sequence_output = self.dropout(sequence_output) | |
| logits = self.classifier(sequence_output) | |
| loss = None | |
| if labels is not None: | |
| loss_fct = CrossEntropyLoss() | |
| loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
| if not return_dict: | |
| output = (logits,) + outputs[2:] | |
| return ((loss,) + output) if loss is not None else output | |
| return TokenClassifierOutput( | |
| loss=loss, | |
| logits=logits, | |
| hidden_states=outputs.hidden_states, | |
| attentions=outputs.attentions, | |
| ) | |
| @add_start_docstrings( | |
| """ | |
| JinaBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear | |
| layer on top of the hidden-states output to compute `span start logits` and `span end logits`). | |
| """, | |
| BERT_START_DOCSTRING, | |
| ) | |
| class JinaBertForQuestionAnswering(JinaBertPreTrainedModel): | |
| def __init__(self, config): | |
| super().__init__(config) | |
| self.num_labels = config.num_labels | |
| self.bert = JinaBertModel(config, add_pooling_layer=False) | |
| self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) | |
| # Initialize weights and apply final processing | |
| self.post_init() | |
| @add_start_docstrings_to_model_forward( | |
| BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") | |
| ) | |
| @add_code_sample_docstrings( | |
| checkpoint=_CHECKPOINT_FOR_QA, | |
| output_type=QuestionAnsweringModelOutput, | |
| config_class=_CONFIG_FOR_DOC, | |
| qa_target_start_index=_QA_TARGET_START_INDEX, | |
| qa_target_end_index=_QA_TARGET_END_INDEX, | |
| expected_output=_QA_EXPECTED_OUTPUT, | |
| expected_loss=_QA_EXPECTED_LOSS, | |
| ) | |
| def forward( | |
| self, | |
| input_ids: Optional[torch.Tensor] = None, | |
| attention_mask: Optional[torch.Tensor] = None, | |
| token_type_ids: Optional[torch.Tensor] = None, | |
| position_ids: Optional[torch.Tensor] = None, | |
| head_mask: Optional[torch.Tensor] = None, | |
| inputs_embeds: Optional[torch.Tensor] = None, | |
| start_positions: Optional[torch.Tensor] = None, | |
| end_positions: Optional[torch.Tensor] = None, | |
| output_attentions: Optional[bool] = None, | |
| output_hidden_states: Optional[bool] = None, | |
| return_dict: Optional[bool] = None, | |
| ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: | |
| r""" | |
| start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): | |
| Labels for position (index) of the start of the labelled span for computing the token classification loss. | |
| Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence | |
| are not taken into account for computing the loss. | |
| end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): | |
| Labels for position (index) of the end of the labelled span for computing the token classification loss. | |
| Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence | |
| are not taken into account for computing the loss. | |
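| Example (a minimal, illustrative sketch with toy span targets and a randomly initialized model, | |
| assuming `config.num_labels` keeps the standard default of 2, i.e. one start and one end logit per token): | |
| ```python | |
| >>> import torch | |
| >>> # JinaBertConfig and JinaBertForQuestionAnswering are assumed to be in scope | |
| >>> config = JinaBertConfig() | |
| >>> model = JinaBertForQuestionAnswering(config) | |
| >>> input_ids = torch.randint(0, config.vocab_size, (1, 20)) | |
| >>> outputs = model(input_ids, start_positions=torch.tensor([3]), end_positions=torch.tensor([7])) | |
| >>> outputs.loss  # mean of the start- and end-position cross-entropy losses | |
| >>> outputs.start_logits.shape, outputs.end_logits.shape  # each (1, 20) | |
| ``` | |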
| """ | |
| return_dict = ( | |
| return_dict if return_dict is not None else self.config.use_return_dict | |
| ) | |
| outputs = self.bert( | |
| input_ids, | |
| attention_mask=attention_mask, | |
| token_type_ids=token_type_ids, | |
| position_ids=position_ids, | |
| head_mask=head_mask, | |
| inputs_embeds=inputs_embeds, | |
| output_attentions=output_attentions, | |
| output_hidden_states=output_hidden_states, | |
| return_dict=return_dict, | |
| ) | |
| sequence_output = outputs[0] | |
| logits = self.qa_outputs(sequence_output) | |
| start_logits, end_logits = logits.split(1, dim=-1) | |
| start_logits = start_logits.squeeze(-1).contiguous() | |
| end_logits = end_logits.squeeze(-1).contiguous() | |
| total_loss = None | |
| if start_positions is not None and end_positions is not None: | |
| # If we are on multi-GPU, the split adds an extra dimension; squeeze it away | |
| if len(start_positions.size()) > 1: | |
| start_positions = start_positions.squeeze(-1) | |
| if len(end_positions.size()) > 1: | |
| end_positions = end_positions.squeeze(-1) | |
| # sometimes the start/end positions are outside our model inputs; we ignore these terms | |
| ignored_index = start_logits.size(1) | |
| start_positions = start_positions.clamp(0, ignored_index) | |
| end_positions = end_positions.clamp(0, ignored_index) | |
| loss_fct = CrossEntropyLoss(ignore_index=ignored_index) | |
| start_loss = loss_fct(start_logits, start_positions) | |
| end_loss = loss_fct(end_logits, end_positions) | |
| total_loss = (start_loss + end_loss) / 2 | |
| if not return_dict: | |
| output = (start_logits, end_logits) + outputs[2:] | |
| return ((total_loss,) + output) if total_loss is not None else output | |
| return QuestionAnsweringModelOutput( | |
| loss=total_loss, | |
| start_logits=start_logits, | |
| end_logits=end_logits, | |
| hidden_states=outputs.hidden_states, | |
| attentions=outputs.attentions, | |
| ) | |