Upload folder using huggingface_hub
- modules/quiz/quiz_helper.py +130 -0
- sanatan_assistant.py +3 -1
modules/quiz/quiz_helper.py
ADDED
@@ -0,0 +1,130 @@
+import random
+from typing import List, Optional, Literal
+from pydantic import BaseModel, Field
+from config import SanatanConfig
+from sanatan_assistant import allowedScriptureTitles, query, allowedCollections
+from openai import OpenAI
+
+client = OpenAI()
+
+
+class Question(BaseModel):
+    preferred_language: str = Field(description="User's preferred language. Respond in this language")
+    question: str = Field(
+        description="The generated question to be asked to the user"
+    )
+    scripture: allowedScriptureTitles = Field(
+        description="The scripture title this question is sourced from (e.g., Bhagavad Gita, Divya Prabandham)"
+    )
+    native_lyrics: str = Field(
+        description="The lyrics in their native/original script (e.g., Sanskrit/Tamil). Sanitize this if there are garbled characters."
+    )
+    transliteration: str = Field(
+        description="The transliterated text of the verse in English/the user's preferred language"
+    )
+    translation: str = Field(
+        description="The translated meaning of the verse in English/the user's preferred language"
+    )
+    detailed_explanation: Optional[str] = Field(
+        default=None,
+        description="A detailed explanation of the verse or context, if available in the context"
+    )
+    word_by_word_meaning: Optional[str] = Field(
+        default=None,
+        description="Word-by-word meaning (in both native and English) or breakdown of the verse, if available in the context"
+    )
+    verse_number: Optional[int] = Field(
+        default=None,
+        description="The absolute verse number from the context, if available"
+    )
+    page_number: Optional[int] = Field(
+        default=None,
+        description="The page number from the context, if available"
+    )
+    verse_reference: Optional[str] = Field(
+        default=None,
+        description="The reference identifier of the verse or the title (e.g., Bhagavad Gita 2.47)"
+    )
+    reference_link: Optional[str] = Field(
+        default=None,
+        description="The html_url, if available"
+    )
+    complexity: Literal["beginner", "intermediate", "advanced"] = Field(
+        description="Difficulty level of the question"
+    )
+    choices: List[str] = Field(
+        description="List of multiple-choice options. Leave empty for open-text questions"
+    )
+    expected_answer: str = Field(
+        description="The correct or expected answer to the question"
+    )
+    answer_explanation: str = Field(
+        description="Explanation of why the expected answer is correct"
+    )
+    expected_choice_index: Optional[int] = Field(
+        default=None,
+        description="Index of the correct choice in 'choices'. Null for open-text questions"
+    )
+
+
+def generate_question(
+    collection: allowedCollections,
+    complexity: Literal["beginner", "intermediate", "advanced"] = "beginner",
+    mode: Literal["mcq", "open"] = "mcq",
+    preferred_language: str = "English",
+) -> Question:
+    """
+    Fetch a random scripture record and have the LLM generate a structured Question.
+    """
+
+    # 1. Fetch a random scripture record
+    context = query(
+        collection_name=collection,
+        query=None,
+        metadata_where_clause=None,
+        n_results=1,
+        search_type="random",
+    )
+    if not context:
+        raise ValueError(f"No records found in collection {collection}")
+
+    # 2. Prompt (grounded in the record only)
+    prompt = f"""
+You are a quiz generator. Use ONLY the following scripture record to create a question.
+
+Context from {collection}:
+{context}
+
+Rules:
+- Do not invent facts beyond the context.
+- Difficulty level: {complexity}
+- Mode: {mode}
+- If mode is 'mcq', generate 3–4 plausible choices (with one correct).
+- If mode is 'open', leave 'choices' empty and provide a reference answer.
+- The user's preferred language is {preferred_language}. Translate everything except the native verses into this language.
+"""
+
+    # 3. Structured response parsed into the Pydantic class
+    response = client.chat.completions.parse(
+        model="gpt-5-nano",
+        messages=[{"role": "user", "content": prompt}],
+        response_format=Question,
+    )
+
+    return response.choices[0].message.parsed
+
+
+# Example usage
+if __name__ == "__main__":
+    for _ in range(3):
+        scripture = random.choice([s for s in SanatanConfig.scriptures if s["collection_name"] != "yt_metadata"])
+        mode = random.choice(["mcq", "open"])
+        complexity = random.choice(["beginner", "intermediate", "advanced"])
+        q = generate_question(
+            scripture["collection_name"], complexity=complexity, mode=mode
+        )
+        print(q.model_dump_json(indent=1))
+        print("_______________________")
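Since generate_question returns a parsed Question, a caller can grade answers straight from the structured fields. Below is a minimal sketch of an MCQ grading loop; it is not part of this commit, and "bhagavad_gita" is a hypothetical stand-in for one of the configured collection_name values:

from modules.quiz.quiz_helper import generate_question

# Hypothetical caller (not in this commit); replace "bhagavad_gita"
# with a real collection_name from SanatanConfig.scriptures.
q = generate_question("bhagavad_gita", complexity="beginner", mode="mcq")

print(q.question)
for i, choice in enumerate(q.choices):
    print(f"  {i}. {choice}")

picked = int(input("Your choice: "))
if q.expected_choice_index is not None and picked == q.expected_choice_index:
    print("Correct!")
else:
    print(f"Expected: {q.expected_answer}")
print(q.answer_explanation)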
sanatan_assistant.py
CHANGED
@@ -13,7 +13,9 @@ sanatanConfig = SanatanConfig()
 allowedCollections = Literal[
     *[scripture["collection_name"] for scripture in sanatanConfig.scriptures]
 ]
-
+allowedScriptureTitles = Literal[
+    *[scripture["title"] for scripture in sanatanConfig.scriptures]
+]
 
 def format_scripture_answer(
     collection_name: allowedCollections, question: str, query_tool_output: str