from __future__ import annotations
import os
from dataclasses import dataclass, field
from typing import Dict, List, Tuple, Iterable, Optional, Literal, Callable, Any
import math
import torch
from transformers import AutoTokenizer, AutoModel
Agg = Literal["mean", "max", "topk_mean"]

@dataclass
class HFEmbeddingBackend:
    """
    Minimal huggingface transformers encoder for sentence-level embeddings.
    Uses mean pooling over last_hidden_state and L2 normalizes the result.
    """
    model_name: str = "google/embeddinggemma-300m"
    device: str = field(default_factory=lambda: "cuda" if torch.cuda.is_available() else "cpu")
    # Lazy-initialized in __post_init__
    TOK: Any = field(init=False, repr=False)
    MODEL: Any = field(init=False, repr=False)

    def __post_init__(self):
        # Disable Hugging Face Spaces ZeroGPU patching if present; its torch
        # patches can interfere with plain CPU/GPU inference.
        os.environ.setdefault("SPACES_ZERO_DISABLED", "1")
        try:
            import sys, importlib
            for modname in (
                "spaces.zero", "spaces.zero.torch.patching", "spaces.zero.torch",
                "spaces.zero.patch", "spaces.zero.patching"
            ):
                try:
                    m = sys.modules.get(modname) or importlib.import_module(modname)
                except Exception:
                    continue
                for attr in ("disable", "unpatch", "deactivate"):
                    fn = getattr(m, attr, None)
                    if callable(fn):
                        try: fn()
                        except Exception: pass
        except Exception:
            pass
    
        # Prefer the plain math SDPA kernel; flash / mem-efficient kernels can
        # be unavailable or monkey-patched on some platforms. The enable_*_sdp
        # setters apply globally (unlike the sdp_kernel() context manager,
        # which has no effect when called bare).
        try:
            torch.backends.cuda.enable_math_sdp(True)
            torch.backends.cuda.enable_flash_sdp(False)
            torch.backends.cuda.enable_mem_efficient_sdp(False)
        except Exception:
            pass
    
        # Best-effort env hint toward eager attention; the explicit
        # attn_implementation="eager" passed to from_pretrained below is what
        # actually enforces it.
        os.environ.setdefault("TRANSFORMERS_ATTENTION_IMPLEMENTATION", "eager")
    
        # Load tokenizer/model with eager attention
        self.TOK = AutoTokenizer.from_pretrained(self.model_name)
        self.MODEL = AutoModel.from_pretrained(self.model_name, attn_implementation="eager")

        self.MODEL.to(self.device).eval()

    def encode(self, texts: Iterable[str], batch_size: int = 32) -> Tuple[torch.Tensor, List[str]]:
        """
        Returns (embeddings, texts_list). Embeddings have shape [N, D] and are unit-normalized.
        """
        texts_list = list(texts)
        if not texts_list:
            return torch.empty((0, self.MODEL.config.hidden_size)), []

        all_out = []
        with torch.inference_mode():
            for i in range(0, len(texts_list), batch_size):
                batch = texts_list[i:i + batch_size]
                enc = self.TOK(batch, padding=True, truncation=True, return_tensors="pt").to(self.device)  # type: ignore
                out = self.MODEL(**enc)
                last = out.last_hidden_state  # [B, T, H]
                mask = enc["attention_mask"].unsqueeze(-1)  # [B, T, 1]

                # Mean pool
                summed = (last * mask).sum(dim=1)
                counts = mask.sum(dim=1).clamp(min=1)
                pooled = summed / counts

                # L2 normalize
                pooled = pooled / pooled.norm(dim=1, keepdim=True).clamp(min=1e-12)
                all_out.append(pooled.cpu())

        embs = torch.cat(all_out, dim=0) if all_out else torch.empty((0, self.MODEL.config.hidden_size))  # type: ignore
        return embs, texts_list

def _normalize_whitespace(s: str) -> str:
    return " ".join(s.strip().split())


def _default_preprocess(s: str) -> str:
    # Keep simple, deterministic preprocessing. Users can override with a custom callable.
    return _normalize_whitespace(s)


@dataclass
class PhraseIndex:
    phrases_by_level: Dict[str, List[str]]
    embeddings_by_level: Dict[str, torch.Tensor]  # [N, D] unit-normalized rows per level
    model_name: str


def build_phrase_index(
    backend: HFEmbeddingBackend,
    phrases_by_level: Dict[str, Iterable[str]],
) -> PhraseIndex:
    """
    Pre-encode all anchor phrases per level into a searchable index.
    """
    # Flatten texts while preserving level boundaries
    cleaned: Dict[str, List[str]] = {lvl: [_default_preprocess(p) for p in phrases] for lvl, phrases in phrases_by_level.items()}
    all_texts: List[str] = []
    spans: List[Tuple[str, int, int]] = []  # (level, start, end) in the flat list
    cur = 0
    for lvl, plist in cleaned.items():
        start = cur
        all_texts.extend(plist)
        cur += len(plist)
        spans.append((lvl, start, cur))

    embs, _ = backend.encode(all_texts)
    # Slice embeddings back into level buckets
    embeddings_by_level: Dict[str, torch.Tensor] = {}
    for lvl, start, end in spans:
        embeddings_by_level[lvl] = embs[start:end] if end > start else torch.empty((0, embs.shape[1]))  # type: ignore

    return PhraseIndex(phrases_by_level={lvl: list(pl) for lvl, pl in cleaned.items()},
                       embeddings_by_level=embeddings_by_level,
                       model_name=backend.model_name)
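
# Reuse sketch (hypothetical phrase dicts and question list): when classifying
# many questions, build each index once and pass it back in via the
# prebuilt_*_index parameters so the anchor phrases are not re-encoded per call:
#
#   backend = HFEmbeddingBackend()
#   bloom_index = build_phrase_index(backend, blooms_phrases)
#   dok_index = build_phrase_index(backend, dok_phrases)
#   for q in questions:
#       classify_levels_phrases(q, blooms_phrases, dok_phrases,
#                               backend=backend,
#                               prebuilt_bloom_index=bloom_index,
#                               prebuilt_dok_index=dok_index)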


def _aggregate_sims(
    sims: torch.Tensor, agg: Agg, topk: int
) -> float:
    """
    Aggregate a 1D tensor of similarities into a single score.
    """
    if sims.numel() == 0:
        return float("nan")
    if agg == "mean":
        return float(sims.mean().item())
    if agg == "max":
        return float(sims.max().item())
    if agg == "topk_mean":
        k = min(topk, sims.numel())
        topk_vals, _ = torch.topk(sims, k)
        return float(topk_vals.mean().item())
    raise ValueError(f"Unknown agg: {agg}")
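
# Illustration with hypothetical similarities sims = [0.2, 0.8, 0.5]:
#   agg="max"                -> 0.8
#   agg="mean"               -> 0.5
#   agg="topk_mean", topk=2  -> mean of [0.8, 0.5] = 0.65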


# --------------------------- Public API ---------------------------

def classify_levels_phrases(
    question: str,
    blooms_phrases: Dict[str, Iterable[str]],
    dok_phrases: Dict[str, Iterable[str]],
    *,
    model_name: str = "google/embeddinggemma-300m",
    agg: Agg = "max",
    topk: int = 5,
    preprocess: Optional[Callable[[str], str]] = None,
    backend: Optional[HFEmbeddingBackend] = None,
    prebuilt_bloom_index: Optional[PhraseIndex] = None,
    prebuilt_dok_index: Optional[PhraseIndex] = None,
    return_phrase_matches: bool = True,
) -> Dict[str, Any]:
    """
    Score a question against Bloom's taxonomy and DOK (Depth of Knowledge)
    using cosine similarity to level-specific anchor phrases.

    Parameters
    ----------
    question : str
        The input question or prompt.
    blooms_phrases : dict[str, Iterable[str]]
        Mapping level -> list of anchor phrases for Bloom's.
    dok_phrases : dict[str, Iterable[str]]
        Mapping level -> list of anchor phrases for DOK.
    model_name : str
        Hugging Face model name for text embeddings. Ignored when `backend` is provided.
    agg : {"mean","max","topk_mean"}
        Aggregation over phrase similarities within a level.
    topk : int
        Used only when `agg="topk_mean"`.
    preprocess : Optional[Callable[[str], str]]
        Preprocessing function for the question string. Defaults to whitespace normalization.
    backend : Optional[HFEmbeddingBackend]
        Injected embedding backend. If not given, one is constructed.
    prebuilt_bloom_index, prebuilt_dok_index : Optional[PhraseIndex]
        If provided, reuse precomputed phrase embeddings to avoid re-encoding.
    return_phrase_matches : bool
        If True, returns per-level top contributing phrases.

    Returns
    -------
    dict
        {
          "question": ...,
          "model_name": ...,
          "blooms": {
              "scores": {level: float, ...},
              "best_level": str,
              "best_score": float,
              "top_phrases": {level: [(phrase, sim_float), ...], ...}  # only if return_phrase_matches
          },
          "dok": {
              "scores": {level: float, ...},
              "best_level": str,
              "best_score": float,
              "top_phrases": {level: [(phrase, sim_float), ...], ...}  # only if return_phrase_matches
          },
          "config": {"agg": agg, "topk": topk if agg=='topk_mean' else None}
        }
    """
    preprocess = preprocess or _default_preprocess
    question_clean = preprocess(question)

    # Prepare backend
    be = backend or HFEmbeddingBackend(model_name=model_name)

    # Build / reuse indices
    bloom_index = prebuilt_bloom_index or build_phrase_index(be, blooms_phrases)
    dok_index = prebuilt_dok_index or build_phrase_index(be, dok_phrases)

    # Encode question
    q_emb, _ = be.encode([question_clean])
    q_emb = q_emb[0:1]  # [1, D]

    def _score_block(index: PhraseIndex) -> Tuple[Dict[str, float], Dict[str, List[Tuple[str, float]]]]:
        scores: Dict[str, float] = {}
        top_contribs: Dict[str, List[Tuple[str, float]]] = {}

        for lvl, phrases in index.phrases_by_level.items():
            embs = index.embeddings_by_level[lvl]  # [N, D]
            if embs.numel() == 0:
                scores[lvl] = float("nan")
                top_contribs[lvl] = []
                continue
            sims = (q_emb @ embs.T).squeeze(0)  # cosine sim due to L2 norm
            scores[lvl] = _aggregate_sims(sims, agg, topk)
            if return_phrase_matches:
                k = min(5, sims.numel())
                vals, idxs = torch.topk(sims, k)
                top_contribs[lvl] = [(phrases[int(i)], float(v.item())) for v, i in zip(vals, idxs)]
        return scores, top_contribs

    bloom_scores, bloom_top = _score_block(bloom_index)
    dok_scores, dok_top = _score_block(dok_index)

    def _best(scores: Dict[str, float]) -> Tuple[str, float]:
        # max with NaN-safe handling
        best_lvl, best_val = None, -float("inf")
        for lvl, val in scores.items():
            if isinstance(val, float) and (not math.isnan(val)) and val > best_val:
                best_lvl, best_val = lvl, val
        return best_lvl or "", best_val

    best_bloom, best_bloom_val = _best(bloom_scores)
    best_dok, best_dok_val = _best(dok_scores)

    return {
        "question": question_clean,
        "model_name": be.model_name,
        "blooms": {
            "scores": bloom_scores,
            "best_level": best_bloom,
            "best_score": best_bloom_val,
            "top_phrases": bloom_top if return_phrase_matches else None,
        },
        "dok": {
            "scores": dok_scores,
            "best_level": best_dok,
            "best_score": best_dok_val,
            "top_phrases": dok_top if return_phrase_matches else None,
        },
        "config": {
            "agg": agg,
            "topk": topk if agg == "topk_mean" else None,
        },
    }
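

if __name__ == "__main__":
    # Minimal usage sketch. The anchor phrases below are illustrative
    # placeholders, not a validated Bloom's/DOK phrase bank; substitute
    # curated phrases per level for real use.
    blooms = {
        "remember": ["define the term", "list the steps", "recall the fact"],
        "analyze": ["compare and contrast", "break down the argument"],
    }
    dok = {
        "1": ["recall a definition", "identify the correct label"],
        "3": ["justify your reasoning", "draw a conclusion from evidence"],
    }
    result = classify_levels_phrases(
        "Compare the causes of the two revolutions and justify your conclusion.",
        blooms,
        dok,
        agg="topk_mean",
        topk=3,
    )
    print("Bloom's:", result["blooms"]["best_level"], f"({result['blooms']['best_score']:.3f})")
    print("DOK:", result["dok"]["best_level"], f"({result['dok']['best_score']:.3f})")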