Uploading tokenizer_robustness_completion_stem_unicode_formatting subset
README.md CHANGED
@@ -2116,6 +2116,130 @@ dataset_info:
     num_examples: 38
   download_size: 39429
   dataset_size: 21308
+- config_name: tokenizer_robustness_completion_stem_unicode_formatting
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: question_general_category
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 15211
+    num_examples: 28
+  download_size: 35076
+  dataset_size: 15211
 configs:
 - config_name: tokenizer_robustness_completion_stem_canonical
   data_files:
@@ -2185,6 +2309,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_stem_typographical_errors/test-*
+- config_name: tokenizer_robustness_completion_stem_unicode_formatting
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_stem_unicode_formatting/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
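Once merged, the new subset is addressable by its config name. A minimal loading sketch with the `datasets` library; the repo id below is a placeholder, not something recorded in this commit:

from datasets import load_dataset

# "<user>/<dataset-repo>" is a placeholder repo id; substitute the actual one.
ds = load_dataset(
    "<user>/<dataset-repo>",
    "tokenizer_robustness_completion_stem_unicode_formatting",
    split="test",
)
print(len(ds))                        # 28 examples, per the split metadata above
print(ds[0]["question"], ds[0]["answer_label"])
print(ds[0]["token_counts"]["gpt2"])  # per-tokenizer struct fields defined in the features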
tokenizer_robustness_completion_stem_unicode_formatting/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9430b116c8d192ec0fd89acc7570b5ca3aaf1aeab35fb92c854ffbb78e08f545
+size 35076
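The parquet file is stored through Git LFS, so the committed blob is just this three-line pointer: the spec version, the SHA-256 of the payload, and its size in bytes. A minimal sketch for checking a downloaded copy against the pointer (the local path is an assumption):

import hashlib

# Hypothetical local path; point it at the downloaded parquet file.
path = "tokenizer_robustness_completion_stem_unicode_formatting/test-00000-of-00001.parquet"
with open(path, "rb") as f:
    data = f.read()
# Both expected values come from the LFS pointer above.
assert len(data) == 35076, "size mismatch with LFS pointer"
expected = "9430b116c8d192ec0fd89acc7570b5ca3aaf1aeab35fb92c854ffbb78e08f545"
assert hashlib.sha256(data).hexdigest() == expected, "oid mismatch with LFS pointer"
print("parquet matches LFS pointer")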