import gradio as gr
from huggingface_hub import InferenceClient, login # Added login
from transformers import pipeline
import random
# Authenticate with Hugging Face (get token: https://huggingface.co/settings/tokens)
login(token="YOUR_HF_TOKEN")  # 🔑 Replace with your token!
# Safety tools 🛡️
BLOCKED_WORDS = ["violence", "hate", "gun", "personal"]
SAFE_IDEAS = [
"Design a robot to clean parks π³",
"Code a game about recycling β»οΈ",
"Plan an AI tool for school safety πΈ"
]
# Changed to PUBLIC safety model ✅
safety_checker = pipeline("text-classification", model="unitary/toxic-bert")
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def is_safe(text):
    text = text.lower()
    if any(bad_word in text for bad_word in BLOCKED_WORDS):
        return False
    result = safety_checker(text)[0]
    return not (result["label"] == "toxic" and result["score"] > 0.7)  # Changed label check
def respond(message, history, system_message, max_tokens, temperature, top_p):
    if not is_safe(message):
        return f"🚫 Let's focus on positive projects! Try: {random.choice(SAFE_IDEAS)}"
    messages = [{
        "role": "system",
        "content": f"{system_message}\nYou are a friendly STEM mentor for kids. Never discuss unsafe topics!"
    }]
    # Rest of your chat code...
    # (Keep your existing chat implementation here)
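    # The original chat code is elided above; what follows is a minimal,
    # non-streaming sketch of the usual pattern, assuming Gradio's default
    # (user, assistant) history format -- an assumption, not the author's
    # original implementation.
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(
        messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].message.content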
# Rest of your Gradio setup...
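# A minimal sketch of the Gradio wiring, following the standard
# gr.ChatInterface template; the labels and default values below are
# assumptions, not the author's original setup.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly STEM mentor for kids.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()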