bditto committed on
Commit
ff54bae
·
verified ·
1 Parent(s): dafbfa4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -8
app.py CHANGED
@@ -1,10 +1,11 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient, login # Added login
 
3
  from transformers import pipeline
4
  import random
5
 
6
- # Authenticate with Hugging Face (get token: https://huggingface.co/settings/tokens)
7
- login(token="YOUR_HF_TOKEN") # πŸ‘ˆ Replace with your token!
8
 
9
  # Safety tools πŸ›‘οΈ
10
  BLOCKED_WORDS = ["violence", "hate", "gun", "personal"]
@@ -14,7 +15,7 @@ SAFE_IDEAS = [
14
  "Plan an AI tool for school safety 🚸"
15
  ]
16
 
17
- # Changed to PUBLIC safety model βœ…
18
  safety_checker = pipeline("text-classification", model="unitary/toxic-bert")
19
 
20
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
@@ -24,7 +25,7 @@ def is_safe(text):
24
  if any(bad_word in text for bad_word in BLOCKED_WORDS):
25
  return False
26
  result = safety_checker(text)[0]
27
- return not (result["label"] == "toxic" and result["score"] > 0.7) # Changed label check
28
 
29
  def respond(message, history, system_message, max_tokens, temperature, top_p):
30
  if not is_safe(message):
@@ -35,7 +36,42 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
35
  "content": f"{system_message}\nYou are a friendly STEM mentor for kids. Never discuss unsafe topics!"
36
  }]
37
 
38
- # Rest of your chat code...
39
- # (Keep your existing chat implementation here)
 
 
 
 
40
 
41
- # Rest of your Gradio setup...
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import os
3
+ from huggingface_hub import InferenceClient, login
4
  from transformers import pipeline
5
  import random
6
 
7
# Authenticate using secret environment variable 🔒
# Guard against a missing HF_TOKEN: calling login(token=None) falls back to
# interactive / cached-credential login, which hangs or errors on a headless
# Space instead of failing in an obvious way. Skipping login when no token is
# configured keeps public models usable and defers auth errors to the one
# place a token is actually required.
_hf_token = os.environ.get("HF_TOKEN")
if _hf_token:
    login(token=_hf_token)
9
 
10
# Safety tools 🛡️
# Case-sensitive substring blocklist; is_safe() rejects a message if any of
# these appear anywhere in it, before the ML toxicity check runs.
BLOCKED_WORDS = ["violence", "hate", "gun", "personal"]
 
15
  "Plan an AI tool for school safety 🚸"
16
  ]
17
 
18
# Safety model
# Toxicity classifier applied to every incoming message (see is_safe);
# unitary/toxic-bert is a public model, so it loads without gated access.
safety_checker = pipeline("text-classification", model="unitary/toxic-bert")

# Hosted inference client for the chat model used by respond().
# NOTE(review): presumably relies on the HF_TOKEN login above only if the
# endpoint requires authentication — confirm against deployment settings.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
25
  if any(bad_word in text for bad_word in BLOCKED_WORDS):
26
  return False
27
  result = safety_checker(text)[0]
28
+ return not (result["label"] == "toxic" and result["score"] > 0.7)
29
 
30
  def respond(message, history, system_message, max_tokens, temperature, top_p):
31
  if not is_safe(message):
 
36
  "content": f"{system_message}\nYou are a friendly STEM mentor for kids. Never discuss unsafe topics!"
37
  }]
38
 
39
+ # Rest of chat implementation
40
+ for user_msg, bot_msg in history:
41
+ if user_msg:
42
+ messages.append({"role": "user", "content": user_msg})
43
+ if bot_msg:
44
+ messages.append({"role": "assistant", "content": bot_msg})
45
 
46
+ messages.append({"role": "user", "content": message})
47
+
48
+ response = ""
49
+ for chunk in client.chat_completion(
50
+ messages,
51
+ max_tokens=max_tokens,
52
+ stream=True,
53
+ temperature=temperature,
54
+ top_p=top_p
55
+ ):
56
+ token = chunk.choices[0].delta.content
57
+ response += token
58
+ yield response
59
+
60
# UI: a Blocks container so the Markdown heading renders above the chat
# widget. Components are created inside the context in display order.
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 REACT Ethical AI Lab")
    gr.ChatInterface(
        respond,
        # These controls are passed positionally to respond() after
        # (message, history): system_message, max_tokens, temperature, top_p.
        additional_inputs=[
            gr.Textbox("You help students create ethical AI projects.", label="Guidelines"),
            gr.Slider(128, 1024, value=512, label="Max Response Length"),
            gr.Slider(0.1, 1.0, value=0.3, label="Creativity Level"),
            gr.Slider(0.7, 1.0, value=0.85, label="Focus Level")
        ],
        # Clickable starter prompts shown under the chat box.
        examples=[
            ["How to build a robot that plants trees?"],
            ["Python code for a pollution sensor"]
        ]
    )

# Launch only when run as a script (Spaces executes app.py directly).
if __name__ == "__main__":
    demo.launch()