```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch


class EmotionModel:
    """Multi-label emotion classifier built on the GoEmotions student model."""

    def __init__(self):
        self.model_name = "joeddav/distilbert-base-uncased-go-emotions-student"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name)
        self.labels = self.model.config.id2label

    def predict(self, text):
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True, padding=True)
        with torch.no_grad():
            logits = self.model(**inputs).logits
        # GoEmotions is multi-label, so apply a per-class sigmoid and keep
        # only the emotions whose probability clears the 0.4 threshold.
        probs = torch.sigmoid(logits)[0]
        return {
            self.labels[i]: float(probs[i])
            for i in range(len(probs)) if probs[i] > 0.4
        }


class SuicidalIntentModel:
    """Binary classifier that estimates the probability of suicidal intent."""

    def __init__(self):
        self.model_name = "sentinet/suicidality"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name)

    def predict(self, text):
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True, padding=True)
        with torch.no_grad():
            logits = self.model(**inputs).logits
        # Binary classification: softmax over the two classes, then return
        # the probability of the positive (suicidal-intent) class.
        probs = torch.nn.functional.softmax(logits, dim=1)
        return float(probs[0][1])  # Probability of suicidal intent
```
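To show how the two classifiers fit together, here is a minimal usage sketch. It assumes the classes above are importable, that the Hugging Face Hub is reachable to download the model weights on first use, and the example message and the values in the comments are illustrative assumptions rather than real model outputs.

```python
# Minimal usage sketch (assumes the EmotionModel and SuicidalIntentModel
# classes defined above and network access to the Hugging Face Hub).
emotion_model = EmotionModel()
intent_model = SuicidalIntentModel()

# Hypothetical example message, for illustration only.
message = "I feel so alone lately and I can't see a way forward."

# Emotions whose sigmoid probability exceeds 0.4, e.g. {"sadness": 0.87, ...}
print(emotion_model.predict(message))

# A single float in [0, 1]; higher values indicate a stronger suicidal-intent signal.
print(intent_model.predict(message))
```

Note that the two heads are interpreted differently on purpose: the emotion model is multi-label (several emotions can be present at once, hence sigmoid plus a threshold), while the suicidality model is treated as a two-class problem (hence softmax and a single probability).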