File size: 3,975 Bytes
18cc9d9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 |
import os
import tempfile
import time
import uuid

import gradio as gr
from dotenv import load_dotenv
from groq import Groq
from gtts import gTTS
import pyglet
# ---------------------------
# 🔑 Load API key from .env
# ---------------------------
load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    # Fail fast at import time: every request needs the Groq client below.
    raise ValueError("❌ GROQ_API_KEY not found in .env. Add it like: GROQ_API_KEY=your_key_here")
client = Groq(api_key=GROQ_API_KEY)
# ---------------------------
# π Voice output (optional)
# ---------------------------
def speak_text(text, lang="en"):
    """Speak *text* aloud via gTTS + pyglet; errors are logged, never raised.

    Args:
        text: The text to synthesize.
        lang: gTTS language code (default "en").
    """
    filename = os.path.join(tempfile.gettempdir(), f"{uuid.uuid4()}.mp3")
    try:
        tts = gTTS(text=text, lang=lang)
        tts.save(filename)
        source = pyglet.media.load(filename, streaming=False)
        player = source.play()
        # Wait only for the clip's duration. The original called
        # pyglet.app.run(), which enters an event loop that never returns
        # and would freeze the whole Gradio server after the first clip.
        if source.duration:
            time.sleep(source.duration)
        player.delete()
    except Exception as e:
        # Voice output is optional; never let TTS failures break the debate.
        print(f"[TTS Error] {e}")
    finally:
        # Always remove the temp mp3, even when synthesis/playback failed.
        try:
            os.remove(filename)
        except OSError:
            pass
# ---------------------------
# π§ LLaMA-3 Role Response
# ---------------------------
def query_llama(role: str, topic: str, pro: str = "", con: str = "") -> str:
    """Ask the Groq LLaMA-3 model to play one debate role.

    Args:
        role: One of "pro", "con", or "moderator".
        topic: The debate topic.
        pro: The pro-side argument (used only by the "moderator" role).
        con: The con-side argument (used only by the "moderator" role).

    Returns:
        The model's response text, or an error string if the API call fails.

    Raises:
        ValueError: If *role* is not one of the three known roles.
    """
    print(f"[Groq] Calling LLaMA for role: {role}")
    if role == "pro":
        prompt = (
            f"You are a skilled debater arguing in favor of:\n'{topic}'.\n"
            "Present 3 strong logical points."
        )
    elif role == "con":
        prompt = (
            f"You are a skilled debater arguing against:\n'{topic}'.\n"
            "Present 3 strong logical points."
        )
    elif role == "moderator":
        prompt = (
            f"Topic: {topic}\n\n"
            f"✅ Pro Argument:\n{pro}\n\n"
            f"❌ Con Argument:\n{con}\n\n"
            "As a neutral judge, rate both arguments (1–10) based on logic, "
            "clarity, and evidence. Justify your score clearly."
        )
    else:
        raise ValueError("Invalid role!")
    try:
        response = client.chat.completions.create(
            model="llama3-70b-8192",
            messages=[{"role": "user", "content": prompt}],
        )
        print(f"[Groq] ✅ Response OK for {role}")
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Return the error as text so the UI shows something instead of crashing.
        print(f"❌ [Groq Error for {role}]: {e}")
        return f"❌ Error: {e}"
# ---------------------------
# ποΈ Debate Logic
# ---------------------------
def host_debate(topic: str) -> str:
    """Run a full Pro/Con debate on *topic* and return a markdown transcript.

    Generates a pro argument, a con argument, and a moderator verdict via
    :func:`query_llama`, optionally speaks each part aloud, and formats the
    result as markdown for the Gradio UI.

    Args:
        topic: The debate topic entered by the user.

    Returns:
        A markdown transcript, or an error message string on bad input/failure.
    """
    print(f"\n🧠 Topic Received: {topic}")
    if not topic.strip():
        return "❌ Please enter a valid debate topic."
    try:
        print("⏳ Generating Pro argument...")
        pro = query_llama("pro", topic)
        print("✅ Pro:\n", pro)

        print("⏳ Generating Con argument...")
        con = query_llama("con", topic)
        print("✅ Con:\n", con)

        print("⏳ Moderating...")
        verdict = query_llama("moderator", topic, pro=pro, con=con)
        print("✅ Verdict:\n", verdict)

        # 🗣️ Speak results (optional — speak_text swallows its own errors)
        speak_text(f"Debate Topic: {topic}")
        speak_text(f"Pro says: {pro}")
        speak_text(f"Con says: {con}")
        speak_text(f"Moderator verdict: {verdict}")

        return f"""
## 🎙️ Topic: **{topic}**
---
### ✅ Pro Side
{pro}
---
### ❌ Con Side
{con}
---
### ⚖️ Moderator's Verdict
{verdict}
""".strip()
    except Exception as e:
        # Top-level boundary: report the failure to the UI rather than 500-ing.
        print(f"❌ Debate error: {e}")
        return f"❌ Unexpected error: {e}"
# ---------------------------
# π Gradio Web Interface
# ---------------------------
# Gradio front-end: single textbox in, markdown transcript out.
demo = gr.Interface(
    fn=host_debate,
    inputs=gr.Textbox(label="Enter a Debate Topic", placeholder="e.g. Should AI be regulated?", lines=2),
    outputs="markdown",
    title="🤖 AI Debate Moderator",
    description="Enter any controversial topic. Two LLaMA 3 agents will debate (Pro vs Con), and a moderator will score them based on logic and clarity.",
    allow_flagging="never",
)

if __name__ == "__main__":
    demo.launch()
|