Update all_tools.py
all_tools.py  CHANGED  (+81 -53)
@@ -1,40 +1,63 @@
-from level_classifier_tool_2 import (
-    classify_levels_phrases,
-    HFEmbeddingBackend,
-    build_phrase_index
-)
+from __future__ import annotations
+from typing import Dict, List, Tuple
 from smolagents import tool
+
+# Import only the classifier API; DO NOT construct models here.
+from level_classifier_tool_2 import classify_levels_phrases
 from phrases import BLOOMS_PHRASES, DOK_PHRASES
 
+# ------------------------ Injected state (set from app.py) ------------------------
+_INDEX = None
+_BACKEND = None
+_BLOOM_INDEX = None
+_DOK_INDEX = None
+
+def set_retrieval_index(index) -> None:
+    """Call this from app.py after loading your LlamaIndex index."""
+    global _INDEX
+    _INDEX = index
+
+def set_classifier_state(backend, bloom_index, dok_index) -> None:
+    """Call this from app.py after building the backend and prebuilt indices."""
+    global _BACKEND, _BLOOM_INDEX, _DOK_INDEX
+    _BACKEND = backend
+    _BLOOM_INDEX = bloom_index
+    _DOK_INDEX = dok_index
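The tools read their state only through the two setters above. Below is a minimal sketch of how app.py might do the wiring, assuming level_classifier_tool_2 still exposes HFEmbeddingBackend and build_phrase_index (both were imported directly before this change; their exact signatures are not shown here) and that the retrieval index is a LlamaIndex VectorStoreIndex. Paths and constructor arguments are illustrative.

```python
# app.py (sketch): build the heavyweight objects once at startup and inject them,
# so that importing all_tools never constructs models.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

import all_tools
from level_classifier_tool_2 import HFEmbeddingBackend, build_phrase_index  # assumed exports
from phrases import BLOOMS_PHRASES, DOK_PHRASES

# Retrieval index for QuestionRetrieverTool (anything exposing .as_retriever() works).
docs = SimpleDirectoryReader("data/questions").load_data()        # illustrative path
all_tools.set_retrieval_index(VectorStoreIndex.from_documents(docs))

# Embedding backend and prebuilt phrase indices for classify_and_score.
backend = HFEmbeddingBackend()                                     # assumed default constructor
bloom_index = build_phrase_index(backend, BLOOMS_PHRASES)          # assumed signature
dok_index = build_phrase_index(backend, DOK_PHRASES)
all_tools.set_classifier_state(backend, bloom_index, dok_index)
```

If either setter is skipped, the corresponding tool returns its explicit "not initialized" error instead of trying to rebuild anything.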
+
+# ----------------------------- Tools -------------------------------------
+
 @tool
 def QuestionRetrieverTool(subject: str, topic: str, grade: str) -> dict:
+    """
+    Retrieve up to 5 closely-related example Q&A pairs from the source datasets.
+
     Args:
+        subject: The subject area (e.g., "Math", "Science").
+        topic: The specific topic within the subject (e.g., "Algebra", "Biology").
+        grade: The grade level (e.g., "Grade 5", "Grade 8").
+
     Returns:
+        {
+          "closest questions found for": {"subject": ..., "topic": ..., "grade": ...},
+          "questions": [{"text": "..."} * up to 5]
+        }
+    """
+    if _INDEX is None:
+        return {"error": "Retriever not initialized. Call set_retrieval_index(index) before using this tool."}
+
     query = f"{topic} question for {grade} of the {subject}"
+    try:
+        results = _INDEX.as_retriever(similarity_top_k=5).retrieve(query)
+        question_texts = [r.node.text for r in results]
+    except Exception as e:
+        return {"error": f"Retriever error: {e}"}
+
+    return {
+        "closest questions found for": {"subject": subject, "topic": topic, "grade": grade},
+        "questions": [{"text": q} for q in question_texts]
     }
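For a quick smoke test, the retriever tool can be exercised directly once the index is injected; the inputs below are illustrative, and this assumes the smolagents @tool wrapper remains callable like the underlying function.

```python
# Sketch: direct call to the retriever tool (illustrative inputs).
out = QuestionRetrieverTool(subject="Math", topic="Algebra", grade="Grade 5")
if "error" in out:
    print(out["error"])                # e.g. the "Retriever not initialized" message
else:
    for item in out["questions"]:      # up to 5 retrieved question texts
        print(item["text"])
```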
+
+
 @tool
 def classify_and_score(
     question: str,
@@ -42,33 +65,38 @@ def classify_and_score(
     target_dok: str,
     agg: str = "max"
 ) -> dict:
+    """
+    Classify a question against Bloom’s and DOK targets and return guidance.
+
     Args:
+        question: Question text to evaluate.
+        target_bloom: Target Bloom’s level (e.g., "Analyze" or "Apply+").
+        target_dok: Target DOK level (e.g., "DOK3" or "DOK2-DOK3").
+        agg: Aggregation over phrase sims ("mean", "max", "topk_mean").
+
     Returns:
+        {
+          "ok": bool,
+          "measured": {"bloom_best": str, "bloom_scores": dict, "dok_best": str, "dok_scores": dict},
+          "feedback": str
+        }
     """
+    if _BACKEND is None or _BLOOM_INDEX is None or _DOK_INDEX is None:
+        return {"error": "Classifier not initialized. Call set_classifier_state(backend, bloom_index, dok_index) first."}
+
+    try:
+        res = classify_levels_phrases(
+            question,
+            BLOOMS_PHRASES,
+            DOK_PHRASES,
+            backend=_BACKEND,
+            prebuilt_bloom_index=_BLOOM_INDEX,
+            prebuilt_dok_index=_DOK_INDEX,
+            agg=agg,
+            return_phrase_matches=True
+        )
+    except Exception as e:
+        return {"error": f"classify_levels_phrases failed: {e}"}
 
     def _parse_target_bloom(t: str):
         order = ["Remember","Understand","Apply","Analyze","Evaluate","Create"]
@@ -114,4 +142,4 @@
             "dok_scores": res["dok"]["scores"],
         },
         "feedback": " ".join(feedback_parts) if feedback_parts else "On target.",
+    }
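Since both functions are decorated with @tool, they plug straight into a smolagents agent. A minimal sketch, assuming a recent smolagents release (the model class and the prompt are illustrative):

```python
# Sketch: handing both tools to a smolagents agent (model choice is illustrative).
from smolagents import CodeAgent, InferenceClientModel

from all_tools import QuestionRetrieverTool, classify_and_score

# State must already be injected via set_retrieval_index(...) / set_classifier_state(...),
# as in the app.py sketch above.
agent = CodeAgent(
    tools=[QuestionRetrieverTool, classify_and_score],
    model=InferenceClientModel(),
)
agent.run(
    "Draft one Grade 8 algebra question targeting Bloom's 'Analyze' and DOK3, "
    "then verify it with classify_and_score."
)
```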