Update app.py
app.py
CHANGED
@@ -33,14 +33,15 @@ model_topic.resize_token_embeddings(len(tokenizer_topic))
 
 
 def sentiment(sent: str):
-
+    print("\n\nNew inference at: ", datetime.utcnow(), "\n\n")
+    sent_ = normalize(text=sent)
     input_sent = torch.tensor([tokenizer_sent.encode(sent_)]).to(device)
     with torch.no_grad():
         out_sent = model_sent(input_sent)
     logits_sent = out_sent.logits.softmax(dim=-1).tolist()[0]
     pred_sent = dict_[np.argmax(logits_sent)]
 
-    sent = replace_all(text=sent)
+    sent = replace_all(text=sent)
     sent_segment = sent.split(".")
     for i, s in enumerate(sent_segment):
         s = s.strip()
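The first hunk fixes a latent NameError: the old body encoded sent_ before anything assigned it. The commit defines it via normalize(text=sent) and also logs a UTC timestamp per request. Below is a minimal, self-contained sketch of the repaired flow; the SST-2 checkpoint, the two-label dict_, and the normalize stub are assumptions standing in for the app's own model, label map, and helper.

import torch
import numpy as np
from datetime import datetime
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Assumed stand-ins for the app's tokenizer_sent / model_sent / dict_.
device = "cuda" if torch.cuda.is_available() else "cpu"
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer_sent = AutoTokenizer.from_pretrained(checkpoint)
model_sent = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)
dict_ = {0: "negative", 1: "positive"}  # index -> label, matching how dict_ is indexed in app.py

def normalize(text: str) -> str:
    # Stub for the app's normalize(); the real helper is defined elsewhere in app.py.
    return " ".join(text.split()).lower()

def sentiment(sent: str) -> str:
    print("\n\nNew inference at: ", datetime.utcnow(), "\n\n")
    sent_ = normalize(text=sent)  # the added line: sent_ now exists before encode()
    input_sent = torch.tensor([tokenizer_sent.encode(sent_)]).to(device)
    with torch.no_grad():
        out_sent = model_sent(input_sent)
    logits_sent = out_sent.logits.softmax(dim=-1).tolist()[0]
    return dict_[np.argmax(logits_sent)]

print(sentiment("The staff were friendly and the food arrived quickly."))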
@@ -70,6 +71,7 @@ key_list = ["person_name","dob_value","gender_value","phonenumber_value","email_
     "designation_value","degree_value","skill_value"]
 label2id = {v: k for k, v in id2label.items()}
 def pred_resume(pdf_path) -> dict:
+    print("\n\nNew inference at: ", datetime.utcnow(), "\n\n")
     global key_list, device
     result = {}
     for i in key_list:
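The second hunk adds the same timestamp log at the top of pred_resume. For context, here is a sketch of the surrounding pattern, grounded in the lines visible in this diff (the key_list fields, the label2id inversion, and the per-key result loop); the id2label subset and the pre-labeled token pairs are illustrative stand-ins for the model config and the PDF-plus-NER pipeline that this commit does not touch.

from datetime import datetime

# Illustrative subset of the app's id2label; label2id is the dict-comprehension
# inversion shown in the diff.
id2label = {0: "O", 1: "person_name", 2: "dob_value", 3: "skill_value"}
label2id = {v: k for k, v in id2label.items()}
key_list = ["person_name", "dob_value", "skill_value"]  # trimmed from the app's full list

def pred_resume(token_label_pairs) -> dict:
    # Same skeleton as app.py's pred_resume: log the request time,
    # then accumulate one result bucket per key in key_list.
    print("\n\nNew inference at: ", datetime.utcnow(), "\n\n")
    result = {}
    for i in key_list:
        result[i] = [tok for tok, lab in token_label_pairs if lab == i]
    return result

pairs = [("Jane", "person_name"), ("Doe", "person_name"), ("1990-04-01", "dob_value")]
print(pred_resume(pairs))  # {'person_name': ['Jane', 'Doe'], 'dob_value': ['1990-04-01'], 'skill_value': []}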