Uploading tokenizer_robustness_completion_italian_web_search_query subset
README.md CHANGED

@@ -2150,6 +2150,132 @@ dataset_info:
     num_examples: 281
   download_size: 89075
   dataset_size: 158477
+- config_name: tokenizer_robustness_completion_italian_web_search_query
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: perturbed_word
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 20426
+    num_examples: 40
+  download_size: 38920
+  dataset_size: 20426
 configs:
 - config_name: tokenizer_robustness_completion_italian_abbreviations
   data_files:
@@ -2219,6 +2345,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_italian_typographical_errors/test-*
+- config_name: tokenizer_robustness_completion_italian_web_search_query
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_italian_web_search_query/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
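For reference, a minimal sketch of how the new subset can be loaded once this commit lands, assuming the standard `datasets` library API. The repository id is a hypothetical placeholder; the config name, split, and field names all come from the YAML added above.

```python
from datasets import load_dataset

# Hypothetical repo id; substitute this dataset's actual repository.
REPO_ID = "<org>/<dataset>"

# Config name and "test" split match the `configs` entry added above.
ds = load_dataset(
    REPO_ID,
    "tokenizer_robustness_completion_italian_web_search_query",
    split="test",
)
print(len(ds))  # 40 examples, per the `splits` metadata

row = ds[0]
print(row["question"], row["choices"], row["answer_label"])

# Per-tokenizer metrics are structs keyed by tokenizer name:
print(row["token_counts"]["gpt2"])                           # int64
print(row["vanilla_cos_sim_to_canonical"]["Qwen/Qwen3-8B"])  # float64
```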
tokenizer_robustness_completion_italian_web_search_query/test-00000-of-00001.parquet CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:bc11e79ee96a827ffad906a3d33582f7230065462a1b767717489083bce2e1f8
+size 38920
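The pointer records the shard's sha256 and byte size, so a downloaded copy can be checked against it. A minimal sketch, assuming the parquet file sits at the repository-relative path shown above:

```python
import hashlib
from pathlib import Path

# Repository-relative path of the shard, as named in this commit.
path = Path(
    "tokenizer_robustness_completion_italian_web_search_query/"
    "test-00000-of-00001.parquet"
)

data = path.read_bytes()
# Both expected values come from the updated LFS pointer above.
assert len(data) == 38920, "size does not match the LFS pointer"
expected = "bc11e79ee96a827ffad906a3d33582f7230065462a1b767717489083bce2e1f8"
assert hashlib.sha256(data).hexdigest() == expected, "sha256 mismatch"
print("shard matches its Git LFS pointer")
```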