Uploading tokenizer_robustness_completion_italian_word_reordering subset
README.md CHANGED
@@ -84,6 +84,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_italian_web_search_query/test-*
+- config_name: tokenizer_robustness_completion_italian_word_reordering
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_italian_word_reordering/test-*
 dataset_info:
 - config_name: tokenizer_robustness_completion_italian_abbreviations
   features:
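The four added `configs:` lines are what make the new subset addressable by name through the `datasets` library. A minimal loading sketch, assuming a placeholder repo id ("org/tokenizer-robustness" is not in the diff; substitute this repository's actual Hub id):

```python
from datasets import load_dataset

# "org/tokenizer-robustness" is a placeholder; replace it with this
# repository's actual Hub id. The config name matches the new
# `config_name` entry added above.
ds = load_dataset(
    "org/tokenizer-robustness",
    "tokenizer_robustness_completion_italian_word_reordering",
    split="test",
)
print(ds)  # a single test example, per the splits metadata added below
```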
@@ -2479,6 +2483,132 @@ dataset_info:
     num_examples: 40
   download_size: 38907
   dataset_size: 20426
+- config_name: tokenizer_robustness_completion_italian_word_reordering
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: perturbed_word
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 527
+    num_examples: 1
+  download_size: 27939
+  dataset_size: 527
 ---
 
 # Dataset Card for Tokenization Robustness
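Each of the three `struct` features added above holds one value per tokenizer, keyed by the tokenizer's Hub id, so a decoded row exposes them as plain dicts. A minimal sketch of reading the per-tokenizer numbers, under the same placeholder repo id as in the loading sketch:

```python
from datasets import load_dataset

# Placeholder repo id, as before.
ds = load_dataset(
    "org/tokenizer-robustness",
    "tokenizer_robustness_completion_italian_word_reordering",
    split="test",
)

row = ds[0]  # this subset's single test example
# Each struct feature decodes to a dict keyed by tokenizer name.
for tok_name, count in row["token_counts"].items():
    print(
        f"{tok_name}: {count} tokens, "
        f"vanilla={row['vanilla_cos_sim_to_canonical'][tok_name]}, "
        f"trimmed={row['trimmed_cos_sim_to_canonical'][tok_name]}"
    )
```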
tokenizer_robustness_completion_italian_word_reordering/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ec4463d4591bd940aa42b1b520713f224f9b0c3b221197a33c6e593eb4508d7
+size 27939
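The shard is committed through Git LFS, so only this pointer enters git history; its `oid` and `size` fields are enough to check a downloaded copy. A minimal verification sketch, assuming the file has already been fetched to the hypothetical local path below:

```python
import hashlib
from pathlib import Path

# Hypothetical local path to the fetched shard.
shard = Path(
    "tokenizer_robustness_completion_italian_word_reordering/"
    "test-00000-of-00001.parquet"
)

# Both expected values come from the LFS pointer above.
assert shard.stat().st_size == 27939, "size differs from the LFS pointer"
digest = hashlib.sha256(shard.read_bytes()).hexdigest()
assert digest == (
    "0ec4463d4591bd940aa42b1b520713f224f9b0c3b221197a33c6e593eb4508d7"
), "sha256 differs from the LFS pointer oid"
print("shard matches its git-lfs pointer")
```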