Spaces:
Sleeping
Sleeping
File size: 4,195 Bytes
2b6e44c eef1a09 2e73fb0 ecf00c0 2e73fb0 ecf00c0 568f517 2e73fb0 ecf00c0 2e73fb0 568f517 2e73fb0 ecf00c0 2e73fb0 2b6e44c 2e73fb0 ecf00c0 2e73fb0 ecf00c0 ead98ab 2e73fb0 2b6e44c 2e73fb0 2b6e44c 2e73fb0 2b6e44c 2e73fb0 eef1a09 2e73fb0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 |
import os
import gradio as gr
from transformers import pipeline
# ============================================================
# 1. LOAD ARABERT CLASSIFIER
# ============================================================
# Fine-tuned AraBERT v02 checkpoint used as a router: it decides whether
# an incoming message is a first-aid question (LABEL_1) or off-topic.
print("Loading AraBERT classifier...")
CLF_MODEL = "imaneumabderahmane/Arabertv02-classifier-FA"
classifier = pipeline("text-classification", model=CLF_MODEL)
print("Classifier loaded successfully.")

# ============================================================
# 2. LOAD APOLLO GENERATOR
# ============================================================
# Apollo2-2B produces the Arabic answers. Dtype selection and device
# placement are delegated to transformers ("auto") so a GPU is used
# when one is available.
print("Loading Apollo model...")
GEN_MODEL = "FreedomIntelligence/Apollo2-2B"
generator = pipeline(
    "text-generation",
    model=GEN_MODEL,
    torch_dtype="auto",
    device_map="auto",
)
print("Apollo loaded successfully.")
# ============================================================
# 3. GENERATION FUNCTION
# ============================================================
def generate_with_acegpt(prompt: str) -> str:
    """Generate an Arabic first-aid answer with the local Apollo model.

    NOTE(review): the function keeps its historical name (the app appears
    to have previously used AceGPT), but the pipeline it calls is
    Apollo2-2B — see the generator loaded above.

    Args:
        prompt: The user's question in Arabic.

    Returns:
        The assistant's reply text, or an Arabic error message on failure.
    """
    try:
        # System instruction: act as a first-aid medical assistant and
        # answer concisely in Modern Standard Arabic.
        system_prompt = (
            "أنت مساعد طبي مختص في الإسعافات الأولية. "
            "قدّم إجابات دقيقة قصيرة و واضحة باللغة العربية الفصحى.\n\n"
        )
        input_text = system_prompt + f"المستخدم: {prompt}\nالمساعد:"
        result = generator(
            input_text,
            max_new_tokens=512,
            temperature=0.3,  # low temperature keeps medical answers focused
            do_sample=True,
            top_p=0.9,
        )
        # The pipeline echoes the full prompt; keep only the text after the
        # last "المساعد:" marker (the assistant's turn).
        return result[0]["generated_text"].split("المساعد:")[-1].strip()
    except Exception as e:
        # Boundary handler: log and return a user-facing Arabic error
        # instead of crashing the UI.
        # FIX: the message previously blamed "AceGPT" although the model
        # actually invoked here is Apollo.
        print("Apollo generation error:", e)
        return "حدث خطأ أثناء توليد الإجابة من نموذج Apollo."
# ============================================================
# 4. CHATBOT LOGIC
# ============================================================
def chatbot_fn(message: str, history: list):
    """Route one user message: classify it, then answer or politely refuse.

    Returns the updated message history (Gradio "messages" format) and an
    empty string used to clear the input textbox.
    """
    try:
        prediction = classifier(message)[0]
        is_first_aid = prediction["label"] == "LABEL_1"
        if is_first_aid:
            response = generate_with_acegpt(message)
        else:
            # Off-topic question: refuse in Arabic.
            response = "عذرًا، يمكنني الإجابة فقط على الأسئلة المتعلقة بالإسعافات الأولية."
    except Exception as e:
        print("Error in chatbot_fn:", e)
        response = "حدث خطأ أثناء معالجة الطلب."

    if history is None:
        history = []
    # One dict per turn, mutating the shared state list in place.
    history.extend(
        (
            {"role": "user", "content": message},
            {"role": "assistant", "content": response},
        )
    )
    return history, ""
# ============================================================
# 5. GRADIO INTERFACE
# ============================================================
with gr.Blocks(title="المساعد الذكي في الإسعافات الأولية") as demo:
    gr.Markdown(
        """
# المساعد في الإسعافات الأولية
اكتب سؤالك بالعربية، وسيرد المساعد.
"""
    )

    chatbot_ui = gr.Chatbot(
        label="المحادثة",
        type="messages",  # expects [{"role": ..., "content": ...}] dicts
        height=500,
        show_copy_button=True,
    )

    with gr.Row():
        user_input = gr.Textbox(
            placeholder="اكتب سؤالك هنا...",
            label="سؤالك",
            lines=2,
            scale=8,
        )
        send_btn = gr.Button("إرسال", scale=1)
        clear_btn = gr.Button("مسح", scale=1)

    # Conversation state shared across callbacks; chatbot_fn mutates the
    # same list object, so the displayed history and the state stay in sync.
    chat_state = gr.State([])

    # The send button and the Enter key trigger the same handler; the
    # second output ("") clears the textbox after each send.
    send_btn.click(
        chatbot_fn,
        inputs=[user_input, chat_state],
        outputs=[chatbot_ui, user_input],
    )
    user_input.submit(
        chatbot_fn,
        inputs=[user_input, chat_state],
        outputs=[chatbot_ui, user_input],
    )

    # FIX: also reset the textbox on clear — previously any half-typed
    # message survived a chat reset.
    clear_btn.click(
        lambda: ([], [], ""),
        outputs=[chatbot_ui, chat_state, user_input],
    )
# ============================================================
# 6. LAUNCH
# ============================================================
if __name__ == "__main__":
    # Start the Gradio server only when run as a script (blocks until exit).
    demo.launch()