Uploading tokenizer_robustness_completion_stem_latex subset
README.md
CHANGED
@@ -1124,6 +1124,130 @@ dataset_info:
     num_examples: 17
   download_size: 31032
   dataset_size: 10163
+- config_name: tokenizer_robustness_completion_stem_latex
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: question_general_category
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 35181
+    num_examples: 68
+  download_size: 44974
+  dataset_size: 35181
 configs:
 - config_name: tokenizer_robustness_completion_stem_canonical
   data_files:
@@ -1161,6 +1285,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_stem_fullwidth_characters/test-*
+- config_name: tokenizer_robustness_completion_stem_latex
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_stem_latex/test-*
 ---

 # Dataset Card for Tokenization Robustness
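With the config registered under `configs`, the subset can be loaded by name. A minimal sketch with the `datasets` library follows; the repository id ("org/tokenization-robustness") is a placeholder, since the commit page does not show it, and the per-tokenizer metric columns declared as `struct` above read back as plain Python dicts keyed by tokenizer name.

    from datasets import load_dataset

    # Placeholder repo id: substitute the actual dataset repository.
    ds = load_dataset(
        "org/tokenization-robustness",
        "tokenizer_robustness_completion_stem_latex",
        split="test",
    )
    print(len(ds))  # 68, matching num_examples in the split metadata above

    row = ds[0]
    # Struct columns decode to dicts, one entry per tokenizer:
    print(row["token_counts"]["gpt2"])                          # int64 token count
    print(row["vanilla_cos_sim_to_canonical"]["Qwen/Qwen3-8B"]) # float64 similarity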
tokenizer_robustness_completion_stem_latex/test-00000-of-00001.parquet
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5c5ffebfeaec4c598bbb56df699e0e74ca0fae4f07afda1f80b3262af2c8ca6
+size 44974
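Note that the added blob is a Git LFS pointer (spec version, SHA-256 object id, and byte size), not the Parquet data itself; the 44974-byte size matches the download_size recorded in the README metadata. A minimal sketch of inspecting the file once the real bytes are fetched (e.g. via `git lfs pull` in a local clone), using `pyarrow` as an assumed reader:

    import pyarrow.parquet as pq

    # Assumes the LFS object has been materialized locally; reading the raw
    # pointer file instead would fail, as it is three lines of text.
    table = pq.read_table(
        "tokenizer_robustness_completion_stem_latex/test-00000-of-00001.parquet"
    )
    print(table.num_rows)      # expected: 68, per the split metadata
    print(table.column_names)  # question, choices, answer, answer_label, ...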