bditto committed on
Commit
dafbfa4
·
verified ·
1 Parent(s): 4633d62

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -27
app.py CHANGED
@@ -1,33 +1,41 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
- from transformers import pipeline
4
- import random
5
 
6
- # Safety tools 🛡️
7
- BLOCKED_WORDS = ["violence", "hate", "gun", "personal"]
8
- SAFE_IDEAS = [
9
- "Design a robot to clean parks 🌳",
10
- "Code a game about recycling ♻️",
11
- "Plan an AI tool for school safety 🚸"
12
- ]
13
- safety_checker = pipeline("text-classification", model="facebook/roberta-hate-speech-dynabic-multilingual")
14
 
15
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
16
 
17
- def is_safe(text):
18
- text = text.lower()
19
- if any(bad_word in text for bad_word in BLOCKED_WORDS):
20
- return False
21
- result = safety_checker(text)[0]
22
- return not (result["label"] == "HATE" and result["score"] > 0.7)
23
 
24
- def respond(message, history, system_message, max_tokens, temperature, top_p):
25
- if not is_safe(message):
26
- return f"🚫 Let's focus on positive projects! Try: {random.choice(SAFE_IDEAS)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- messages = [{
29
- "role": "system",
30
- "content": f"{system_message}\nYou are a friendly STEM mentor for kids. Never discuss unsafe topics!"
31
- }]
32
 
33
- # ... (rest of original code) ...
 
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient, login # Added login
3
+ from transformers import pipeline
4
+ import random
5
 
6
import os

# Authenticate with Hugging Face (get token: https://huggingface.co/settings/tokens)
# Read the token from the environment instead of hard-coding it: a literal token
# committed to source control is a credential leak, and the "YOUR_HF_TOKEN"
# placeholder would fail at runtime anyway.
login(token=os.environ["HF_TOKEN"])
 
 
 
 
 
 
8
 
9
+ # Safety tools 🛡️
10
+ BLOCKED_WORDS = ["violence", "hate", "gun", "personal"]
11
+ SAFE_IDEAS = [
12
+ "Design a robot to clean parks 🌳",
13
+ "Code a game about recycling ♻️",
14
+ "Plan an AI tool for school safety 🚸"
15
+ ]
16
 
17
+ # Changed to PUBLIC safety model ✅
18
+ safety_checker = pipeline("text-classification", model="unitary/toxic-bert")
 
 
 
 
19
 
20
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
21
+
22
def is_safe(text):
    """Return True when *text* passes both the keyword blocklist and the ML toxicity check.

    First screens against BLOCKED_WORDS using whole-word matching, then runs
    the toxic-bert classifier and rejects text labelled "toxic" with
    confidence above 0.7.
    """
    import re

    text = text.lower()
    # Whole-word matching: the original substring test flagged innocent words
    # such as "personality" (contains "personal") or "burgundy" (contains "gun").
    if any(re.search(rf"\b{re.escape(word)}\b", text) for word in BLOCKED_WORDS):
        return False
    # truncation=True guards against inputs longer than the model's maximum
    # sequence length, which would otherwise raise at inference time.
    result = safety_checker(text, truncation=True)[0]
    return not (result["label"] == "toxic" and result["score"] > 0.7)
28
+
29
+ def respond(message, history, system_message, max_tokens, temperature, top_p):
30
+ if not is_safe(message):
31
+ return f"🚫 Let's focus on positive projects! Try: {random.choice(SAFE_IDEAS)}"
32
+
33
+ messages = [{
34
+ "role": "system",
35
+ "content": f"{system_message}\nYou are a friendly STEM mentor for kids. Never discuss unsafe topics!"
36
+ }]
37
 
38
+ # Rest of your chat code...
39
+ # (Keep your existing chat implementation here)
 
 
40
 
41
+ # Rest of your Gradio setup...