Commit
·
46677b4
1
Parent(s):
c16a6f3
hotfix
Browse files
app.py
CHANGED
|
@@ -1,7 +1,8 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from NeuralTextGenerator import BertTextGenerator
|
| 3 |
|
| 4 |
-
|
|
|
|
| 5 |
en_model = BertTextGenerator(model_name, tokenizer=model_name)
|
| 6 |
|
| 7 |
finetunned_BERT_model_name = "JuanJoseMV/BERT_text_gen"
|
|
@@ -19,14 +20,14 @@ special_tokens = [
|
|
| 19 |
'[NEGATIVE-2]'
|
| 20 |
]
|
| 21 |
|
| 22 |
-
en_model.tokenizer.add_special_tokens({'additional_special_tokens': special_tokens})
|
| 23 |
-
en_model.model.resize_token_embeddings(len(en_model.tokenizer))
|
| 24 |
|
| 25 |
finetunned_BERT_en_model.tokenizer.add_special_tokens({'additional_special_tokens': special_tokens})
|
| 26 |
finetunned_BERT_en_model.model.resize_token_embeddings(len(en_model.tokenizer))
|
| 27 |
|
| 28 |
-
finetunned_RoBERTa_en_model.tokenizer.add_special_tokens({'additional_special_tokens': special_tokens})
|
| 29 |
-
finetunned_RoBERTa_en_model.model.resize_token_embeddings(len(en_model.tokenizer))
|
| 30 |
|
| 31 |
def sentence_builder(selected_model, n_sentences, max_iter, sentiment, seed_text):
|
| 32 |
|
|
|
|
| 1 |
import gradio as gr
from NeuralTextGenerator import BertTextGenerator

# Load models
# Base multilingual checkpoint; the same Hub id supplies both the model
# weights and the tokenizer for the generic English generator.
model_name = "cardiffnlp/twitter-xlm-roberta-base"
en_model = BertTextGenerator(model_name, tokenizer=model_name)

# Hub repo id of the fine-tuned BERT text generator
# (presumably instantiated further down, outside this hunk -- TODO confirm).
finetunned_BERT_model_name = "JuanJoseMV/BERT_text_gen"
|
|
|
|
| 20 |
'[NEGATIVE-2]'
|
| 21 |
]
|
| 22 |
|
| 23 |
+
# Register the sentiment-control special tokens with the served model's
# tokenizer, then grow its embedding matrix to match the enlarged vocabulary.
#
# NOTE(hotfix): registration for the base `en_model` and for the fine-tuned
# RoBERTa model is deliberately disabled below; only the fine-tuned BERT
# model is currently active.
# en_model.tokenizer.add_special_tokens({'additional_special_tokens': special_tokens})
# en_model.model.resize_token_embeddings(len(en_model.tokenizer))

finetunned_BERT_en_model.tokenizer.add_special_tokens({'additional_special_tokens': special_tokens})
# FIX: resize to THIS model's tokenizer length, not en_model's.
# en_model wraps an XLM-RoBERTa tokenizer with a different vocabulary size,
# so sizing the BERT embedding matrix from it mis-sizes the embedding table.
finetunned_BERT_en_model.model.resize_token_embeddings(len(finetunned_BERT_en_model.tokenizer))

# finetunned_RoBERTa_en_model.tokenizer.add_special_tokens({'additional_special_tokens': special_tokens})
# finetunned_RoBERTa_en_model.model.resize_token_embeddings(len(finetunned_RoBERTa_en_model.tokenizer))
|
| 31 |
|
| 32 |
def sentence_builder(selected_model, n_sentences, max_iter, sentiment, seed_text):
|
| 33 |
|