Spaces:
Running
Running
Commit
·
f97e9a9
1
Parent(s):
9054596
UI - theme
Browse files
app.py
CHANGED
|
@@ -8,8 +8,7 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
|
| 8 |
tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
|
| 9 |
model = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
|
| 10 |
model.load_state_dict(torch.load(model_path, map_location=device))
|
| 11 |
-
model.to(device)
|
| 12 |
-
model.eval()
|
| 13 |
|
| 14 |
label_mapping = {
|
| 15 |
0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
|
|
@@ -27,28 +26,34 @@ label_mapping = {
|
|
| 27 |
def classify_text(text):
|
| 28 |
if not text.strip():
|
| 29 |
return "----"
|
| 30 |
-
|
| 31 |
-
inputs =
|
| 32 |
-
|
| 33 |
with torch.no_grad():
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
return result_message
|
| 51 |
|
|
|
|
|
|
|
| 52 |
title = "AI Text Detector"
|
| 53 |
|
| 54 |
description = """
|
|
|
|
| 8 |
# Build the ModernBERT tokenizer and a 41-way sequence classifier,
# then restore the fine-tuned detector weights.
tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
model = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
# NOTE(review): torch.load is pickle-based — only load checkpoints from a
# trusted source (model_path is presumably a bundled local file; verify).
model.load_state_dict(torch.load(model_path, map_location=device))
# Move to the selected device and switch to inference mode (disables dropout).
model.to(device).eval()
|
|
|
|
| 12 |
|
| 13 |
label_mapping = {
|
| 14 |
0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
|
|
|
|
| 26 |
def classify_text(text):
    """Classify *text* as human-written or AI-generated.

    Tokenizes the input, runs the fine-tuned ModernBERT classifier, and
    splits the softmax mass over 41 labels: index 24 is treated as the
    "human" class, all remaining mass as "AI generated". Returns an
    HTML/Markdown message for display in the UI.

    Fix: the two emoji in the result strings were mojibake in the source
    ("β…" / "π€" — UTF-8 ✅ / 🤖 read through the wrong codec); restored.
    """
    if not text.strip():
        # Empty/whitespace input — keep the placeholder output stable.
        return "----"

    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(device)

    with torch.no_grad():
        # Softmax over the 41 labels; [0] drops the batch dimension.
        probabilities = torch.softmax(model(**inputs).logits, dim=1)[0]

    # Index 24 is the human-written class — assumed from usage here;
    # TODO confirm against the full label_mapping.
    human_prob = probabilities[24].item() * 100

    # Zero out the human class and sum the rest as the total AI probability.
    ai_probs = probabilities.clone()
    ai_probs[24] = 0
    ai_total_prob = ai_probs.sum().item() * 100

    # Most likely specific AI model among the non-human classes.
    ai_argmax_index = torch.argmax(ai_probs).item()
    ai_argmax_model = label_mapping[ai_argmax_index]

    if human_prob > ai_total_prob:
        result_message = (
            f"✅ - The text is <span class='highlight-human'>**{human_prob:.2f}%** likely <b>Human written</b>.</span>"
        )
    else:
        result_message = (
            f"🤖 - The text is <span class='highlight-ai'>**{ai_total_prob:.2f}%** likely <b>AI generated</b>.</span>\n\n"
            f"**Identified AI Model:** {ai_argmax_model}"
        )

    return result_message
|
| 54 |
|
| 55 |
+
|
| 56 |
+
|
| 57 |
title = "AI Text Detector"
|
| 58 |
|
| 59 |
description = """
|