from typing import Dict, Generator, List, Optional

from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    AutoModelForCausalLM,
    pipeline,
)


class AdviceGenerator:
    """Streams personalized, non-clinical advice from a chat LLM."""

    def __init__(self, llm):
        self.llm = llm
        # System prompt: forces concrete, situation-grounded output.
        self.role = {
            "role": "system",
            "content": (
                "You are a supportive assistant (not a mental health professional). "
                "Be concrete and tailor every response to the user's situation. "
                "Requirements:\n"
                "1) Begin with ONE empathetic sentence that mentions a key detail from the user's text (name, event, constraint).\n"
                "2) Then give 3–5 numbered, practical tips. Each tip must reference the user's situation (use names/keywords when present).\n"
                "3) If the user's text involves talking to someone (crush, friend, teacher, parent, boss), include a short **Script** block "
                "   with two options (in-person and text), customized with any names from the user's text.\n"
                "4) Add a **Try now (2 min)** micro-step.\n"
                "5) End with ONE targeted follow-up question that references the user's situation.\n"
                "Avoid platitudes and generic advice; avoid clinical instructions."
            ),
        }

    def generate_advice(
        self,
        disorder: str,
        user_text: str,
        history: Optional[List[Dict[str, str]]] = None,
        max_history_msgs: int = 50,
        max_tokens: int = 600,  # give enough headroom for the full response template
        temperature: float = 0.6,
        top_p: float = 0.9,
    ) -> Generator[str, None, None]:
        msgs = [self.role]
        # Preserve a rolling window of chat history if available.
        if history:
            msgs.extend(history[-max_history_msgs:])
        # Always append the new user input, grounded in the detected context.
        msgs.append({
            "role": "user",
            "content": (
                "Use the exact situation below to personalize your advice. "
                "Extract the main goal or barrier from the text and ground each tip in it.\n\n"
                f"Detected context: {disorder}\n"
                f"User text: {user_text}\n\n"
                "Follow the system instructions strictly. Do NOT ask vague questions first."
            ),
        })
        stream = self.llm.create_chat_completion(
            messages=msgs,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=True,
        )
        # Yield text deltas as they arrive; guard against chunks with no delta.
        for chunk in stream:
            if "choices" in chunk:
                delta = chunk["choices"][0].get("delta", {}).get("content", "")
                if delta:
                    yield delta