Update model.py
model.py CHANGED
@@ -24,9 +24,23 @@ class SuicidalIntentModel:
         self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
         self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name)
 
-    def predict(self, text):
+    def _score_text(self, text):
         inputs = self.tokenizer(text, return_tensors="pt", truncation=True, padding=True)
         with torch.no_grad():
             logits = self.model(**inputs).logits
         probs = torch.nn.functional.softmax(logits, dim=1)
         return float(probs[0][1])  # Probability of suicidal intent
+
+    def predict(self, text, window_size=20, stride=10):
+        tokens = self.tokenizer.tokenize(text)
+        if len(tokens) <= window_size:
+            return self._score_text(text)
+
+        scores = []
+        for i in range(0, len(tokens) - window_size + 1, stride):
+            window_tokens = tokens[i:i + window_size]
+            window_text = self.tokenizer.convert_tokens_to_string(window_tokens)
+            score = self._score_text(window_text)
+            scores.append(score)
+
+        return max(scores, default=0.0)
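
In plain terms, the commit moves the original one-shot scoring into a private _score_text helper and adds a predict wrapper that slides a 20-token window (stride 10) over longer inputs and returns the highest window score. A minimal usage sketch follows, assuming a no-argument constructor (the __init__ that sets self.model_name is not part of this hunk):

# Usage sketch: the no-argument constructor is an assumption, not shown in this diff.
from model import SuicidalIntentModel

clf = SuicidalIntentModel()

short_text = "Rough week, but I'm hanging in there."
long_text = " ".join(["This entry keeps going for quite a while."] * 20)

print(clf.predict(short_text))  # <= 20 tokens: scored directly by _score_text
print(clf.predict(long_text))   # > 20 tokens: max probability over sliding 20-token windows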
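
The window enumeration in the new loop can also be illustrated on its own. This standalone sketch (a hypothetical helper, not part of model.py) reproduces the same index arithmetic and shows that, with window_size=20 and stride=10, trailing tokens past the last full window do not get a window of their own:

# Standalone illustration of the window arithmetic used inside predict().
def window_spans(num_tokens, window_size=20, stride=10):
    # (start, end) token indices of each window that would be scored
    return [(i, i + window_size) for i in range(0, num_tokens - window_size + 1, stride)]

print(window_spans(45))  # [(0, 20), (10, 30), (20, 40)]; tokens 40-44 fall outside every window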