Upload folder using huggingface_hub
Files changed:
- modules/quiz/answer_validator.py  +44 -0
- modules/quiz/models.py  +65 -0
- modules/quiz/quiz_helper.py  +19 -73
- tests/test_quiz.py  +36 -0
modules/quiz/answer_validator.py
ADDED
@@ -0,0 +1,44 @@
+from openai import OpenAI
+from modules.quiz.models import AnswerValidation
+from modules.quiz.quiz_helper import Question
+
+client = OpenAI()
+
+
+def validate_answer(
+    question: Question, user_answer: str, preferred_language: str = "English"
+) -> AnswerValidation:
+    """
+    Validate a user's answer against a Question object.
+    Uses LLM reasoning to account for synonyms, variations, and explanation.
+    """
+    print("validating answer ...")
+    prompt = f"""
+    You are an answer validator for a scripture-based quiz.
+
+    Question:
+    {question.question}
+
+    Choices (if any):
+    {question.choices}
+
+    Expected Answer:
+    {question.expected_answer}
+
+    User's Answer:
+    {user_answer}
+
+    Rules:
+    - Compare strictly against expected_answer and choices.
+    - Accept semantically equivalent answers (e.g., synonyms, transliterations).
+    - Respond in {question.preferred_language}.
+    - Do not invent a new correct answer. Only check against expected_answer.
+    """
+
+    response = client.chat.completions.parse(
+        model="gpt-5-nano",
+        messages=[{"role": "user", "content": prompt}],
+        response_format=AnswerValidation,
+    )
+
+    return response.choices[0].message.parsed
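Note: validate_answer relies on the SDK's structured-output parse helper, with the AnswerValidation Pydantic model supplying the response schema. A minimal standalone sketch of that call pattern follows; the prompt placeholder and the beta-namespace fallback are assumptions, not part of the commit.

from openai import OpenAI
from modules.quiz.models import AnswerValidation

client = OpenAI()
prompt = "..."  # built exactly as in validate_answer above

# Newer openai-python releases expose the helper as client.chat.completions.parse
# (as used in this commit); older releases only have client.beta.chat.completions.parse.
parse_fn = getattr(client.chat.completions, "parse", None) or client.beta.chat.completions.parse

response = parse_fn(
    model="gpt-5-nano",
    messages=[{"role": "user", "content": prompt}],
    response_format=AnswerValidation,  # the Pydantic model drives the JSON schema
)
validation = response.choices[0].message.parsed  # an AnswerValidation instance
print(validation.is_correct, validation.reasoning)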
modules/quiz/models.py
ADDED
@@ -0,0 +1,65 @@
+from typing import List, Optional, Literal
+from pydantic import BaseModel, Field
+from sanatan_assistant import allowedScriptureTitles
+
+
+class Question(BaseModel):
+    preferred_language: str = Field(
+        description="User's preferred language. Respond in this language"
+    )
+    question: str = Field(description="The generated question to be asked to the user")
+    scripture: allowedScriptureTitles = Field(
+        description="The scripture title this question is sourced from (e.g., Bhagavad Gita, Divya Prabandham)"
+    )
+    native_lyrics: str = Field(
+        description="The lyrics in its native/original script (e.g., Sanskrit/Tamil). Sanitize this if there are garbled characters."
+    )
+    transliteration: str = Field(
+        description="The transliterated text of the verse in English/the user's preferred language"
+    )
+    translation: str = Field(
+        description="The translated meaning of the verse in English/the user's preferred language"
+    )
+    detailed_explanation: Optional[str] = Field(
+        default=None,
+        description="A detailed explanation of the verse or context, if available in the context",
+    )
+    word_by_word_meaning: Optional[str] = Field(
+        default=None,
+        description="Word-by-word meaning (in both native and english) or breakdown of the verse, if available in the context",
+    )
+    verse_number: Optional[int] = Field(
+        default=None,
+        description="The absolute verse number from the context if available.",
+    )
+    page_number: Optional[int] = Field(
+        default=None, description="The page number from the context if available."
+    )
+    verse_reference: Optional[str] = Field(
+        default=None,
+        description="The reference identifier of the verse or the title (e.g., Bhagavad Gita 2.47)",
+    )
+    reference_link: Optional[str] = Field(
+        default=None, description="The html_url if available"
+    )
+    complexity: Literal["beginner", "intermediate", "advanced"] = Field(
+        description="Difficulty level of the question"
+    )
+    choices: List[str] = Field(
+        description="List of multiple-choice options. Leave empty for open-text questions"
+    )
+    expected_answer: str = Field(
+        description="The correct or expected answer to the question"
+    )
+    answer_explanation: str = Field(
+        description="Explanation why the expected answer is correct"
+    )
+    expected_choice_index: Optional[int] = Field(
+        default=None,
+        description="Index of the correct choice in 'choices'. Null if open-text question",
+    )
+
+
+class AnswerValidation(BaseModel):
+    is_correct: bool = Field(description="Whether the provided answer is correct")
+    reasoning: str = Field(description="Explanation of why the answer is correct or not")
+    correct_answer: str = Field(description="The correct/expected answer from the question object")
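Note: the new models are ordinary Pydantic v2 models, so they can be validated and serialized independently of the OpenAI call. A minimal sketch follows; the payload values are illustrative, not taken from the commit.

from modules.quiz.models import AnswerValidation

payload = '{"is_correct": true, "reasoning": "Synonym of the expected answer.", "correct_answer": "Arjuna"}'
validation = AnswerValidation.model_validate_json(payload)  # parse and validate the JSON
print(validation.is_correct)                 # True
print(validation.model_dump_json(indent=2))  # serialize back, as the quiz code does elsewhere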
modules/quiz/quiz_helper.py
CHANGED
@@ -1,83 +1,23 @@
 import random
-from typing import …
-from pydantic import BaseModel, Field
+from typing import Literal
 from config import SanatanConfig
-from …
+from modules.quiz.models import Question
+from sanatan_assistant import query, allowedCollections
 from openai import OpenAI
 
 client = OpenAI()
 
 
-class Question(BaseModel):
-    preferred_language : str = Field(description="User's preferred language. Respond in this language")
-    question: str = Field(
-        description="The generated question to be asked to the user"
-    )
-    scripture: allowedScriptureTitles = Field(
-        description="The scripture title this question is sourced from (e.g., Bhagavad Gita, Divya Prabandham)"
-    )
-    native_lyrics: str = Field(
-        description="The lyrics in its native/original script (e.g., Sanskrit/Tamil). Sanitize this if there are garbled characters."
-    )
-    transliteration: str = Field(
-        description="The transliterated text of the verse in English/the user's preferred language"
-    )
-    translation: str = Field(
-        description="The translated meaning of the verse in English/the user's preferred language"
-    )
-    detailed_explanation: Optional[str] = Field(
-        default=None,
-        description="A detailed explanation of the verse or context, if available in the context"
-    )
-    word_by_word_meaning: Optional[str] = Field(
-        default=None,
-        description="Word-by-word meaning (in both native and english) or breakdown of the verse, if available in the context"
-    )
-    verse_number: Optional[int] = Field(
-        default=None,
-        description="The absolute verse number from the context if available."
-    )
-    page_number: Optional[int] = Field(
-        default=None,
-        description="The page number from the context if available."
-    )
-    verse_reference: Optional[str] = Field(
-        default=None,
-        description="The reference identifier of the verse or the title (e.g., Bhagavad Gita 2.47)"
-    )
-    reference_link: Optional[str] = Field(
-        default=None,
-        description="The html_url if available"
-    )
-    complexity: Literal["beginner", "intermediate", "advanced"] = Field(
-        description="Difficulty level of the question"
-    )
-    choices: List[str] = Field(
-        description="List of multiple-choice options. Leave empty for open-text questions"
-    )
-    expected_answer: str = Field(
-        description="The correct or expected answer to the question"
-    )
-    answer_explanation: str = Field(
-        description="Explanation why the expected answer is correct"
-    )
-    expected_choice_index: Optional[int] = Field(
-        default=None,
-        description="Index of the correct choice in 'choices'. Null if open-text question"
-    )
-
-
-
 def generate_question(
     collection: allowedCollections,
-    complexity: Literal["beginner", "intermediate", "advanced"]
-    mode: Literal["mcq", "open"]
-    preferred_lamguage
+    complexity: Literal["beginner", "intermediate", "advanced"],
+    mode: Literal["mcq", "open"],
+    preferred_lamguage: str = "English",
 ) -> Question:
     """
     Fetch a random scripture record and have the LLM generate a structured Question.
     """
-
+    print("Generating question ...", collection, complexity,mode, preferred_lamguage)
     # 1. Fetch random scripture record
     context = query(
         collection_name=collection,
@@ -107,9 +47,9 @@ def generate_question(
 
     # 3. Structured response with Pydantic class reference
     response = client.chat.completions.parse(
-        model="gpt-5-nano",
+        model="gpt-5-nano",
         messages=[{"role": "user", "content": prompt}],
-        response_format=Question,
+        response_format=Question,
     )
 
     # print(response)
@@ -120,11 +60,17 @@
 # Example usage
 if __name__ == "__main__":
     for i in range(3):
-        scripture = random.choice([s for s in SanatanConfig.scriptures if s["collection_name"] != "yt_metadata"])
-        mode = random.choice(["mcq", "open"])
-        complexity = random.choice(["beginner", "intermediate", "advanced"])
         q = generate_question(
-            …
+            collection=random.choice(
+                [
+                    s["collection_name"]
+                    for s in SanatanConfig.scriptures
+                    if s["collection_name"] != "yt_metadata"
+                ]
+            ),
+            complexity=random.choice(["beginner", "intermediate", "advanced"]),
+            mode=random.choice(["mcq", "open"]),
+            preferred_lamguage="Tamil",
         )
         print(q.model_dump_json(indent=1))
         print("_______________________")
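Note: the mode argument is presumably what steers the prompt toward multiple-choice or open-text questions, surfacing in the Question fields choices and expected_choice_index. A small, hypothetical rendering helper (not part of the commit) that only uses fields defined in modules/quiz/models.py:

from modules.quiz.models import Question


def render_question(q: Question) -> str:
    """Hypothetical helper: format a generated Question for a text UI."""
    lines = [q.question]
    if q.choices:  # "mcq" mode: list the options
        lines += [f"{i + 1}. {choice}" for i, choice in enumerate(q.choices)]
    else:  # "open" mode: free-text answer expected
        lines.append("(type your answer)")
    if q.verse_reference:
        lines.append(f"Source: {q.verse_reference}")
    return "\n".join(lines)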
tests/test_quiz.py
ADDED
@@ -0,0 +1,36 @@
+import random
+from config import SanatanConfig
+from modules.quiz.answer_validator import validate_answer
+from modules.quiz.quiz_helper import generate_question
+
+
+if __name__ == "__main__":
+    while True:
+        q = generate_question(
+            collection=random.choice(
+                [
+                    s["collection_name"]
+                    for s in SanatanConfig.scriptures
+                    if s["collection_name"] != "yt_metadata"
+                ]
+            ),
+            complexity=random.choice(["beginner", "intermediate", "advanced"]),
+            mode=random.choice(["mcq", "open"]),
+            preferred_lamguage="English",
+        )
+        print(q.model_dump_json(indent=1))
+
+        print("Q:", q.question)
+        print("Choices:", q.choices)
+        print("Expected:", q.expected_answer)
+
+        # Simulate user input
+        user_ans = input("Your Answer: ")
+
+        result = validate_answer(q, user_ans, preferred_language="English")
+        print(result.model_dump_json(indent=2))
+
+        want_more = input("Want to keep playing? Y/N: ")
+        if want_more == "N":
+            break
+    print("Game over!")
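One small, hedged side note on the interactive loop above (not a change made in the commit): the exit check only matches an uppercase "N", so a lowercase reply keeps the quiz going. A normalized variant could look like this sketch.

while True:
    ...  # generate, ask, validate as above
    want_more = input("Want to keep playing? Y/N: ").strip().lower()
    if want_more.startswith("n"):  # accepts "n", "N", "no", ...
        break
print("Game over!")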