Uploading tokenizer_robustness_completion_stem_strikethrough subset
README.md CHANGED
@@ -1744,6 +1744,130 @@ dataset_info:
     num_examples: 25
   download_size: 35840
   dataset_size: 13749
+- config_name: tokenizer_robustness_completion_stem_strikethrough
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: question_general_category
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 31149
+    num_examples: 52
+  download_size: 33762
+  dataset_size: 31149
 configs:
 - config_name: tokenizer_robustness_completion_stem_canonical
   data_files:
@@ -1801,6 +1925,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_stem_spelled_out/test-*
+- config_name: tokenizer_robustness_completion_stem_strikethrough
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_stem_strikethrough/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
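The new config registers a `test` split whose per-example struct columns hold one value per tokenizer. A minimal sketch of loading it with the `datasets` library; the repository id is not shown in this diff, so the `"<org>/<dataset>"` placeholder below is an assumption you must replace:

```python
from datasets import load_dataset

# "<org>/<dataset>" is a hypothetical placeholder: the actual repository
# id does not appear in this diff.
ds = load_dataset(
    "<org>/<dataset>",
    "tokenizer_robustness_completion_stem_strikethrough",
    split="test",
)

row = ds[0]
print(row["question"], row["choices"], row["answer_label"])

# Struct columns are keyed by tokenizer name, mirroring the YAML above.
print(row["token_counts"]["gpt2"])
print(row["vanilla_cos_sim_to_canonical"]["google/gemma-2-2b"])
```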
tokenizer_robustness_completion_stem_strikethrough/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bbf9d75505aa29504e4818d3e68385c3c635d6df50a1c4ce6558b0cbd241f97
+size 33762
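The parquet file is stored via Git LFS, so the repository itself only holds this pointer: the SHA-256 of the content and its size in bytes. A minimal sketch for checking that a locally downloaded copy matches the pointer; the local path is an assumption based on the repository layout:

```python
import hashlib
from pathlib import Path

# Assumed local path, matching the path added in this commit.
path = Path(
    "tokenizer_robustness_completion_stem_strikethrough/test-00000-of-00001.parquet"
)
data = path.read_bytes()

# "size" and "oid sha256:" fields from the LFS pointer above.
assert len(data) == 33762
assert hashlib.sha256(data).hexdigest() == (
    "0bbf9d75505aa29504e4818d3e68385c3c635d6df50a1c4ce6558b0cbd241f97"
)
print("parquet matches the LFS pointer")
```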