Datasets: Uploading tokenizer_robustness_completion_italian_english_keyboard subset
README.md CHANGED

@@ -36,6 +36,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_italian_dialects/test-*
+- config_name: tokenizer_robustness_completion_italian_english_keyboard
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_italian_english_keyboard/test-*
 dataset_info:
 - config_name: tokenizer_robustness_completion_italian_abbreviations
   features:
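The four added lines register the new subset under `configs`, which is what makes it selectable by name. A minimal loading sketch with the `datasets` library; the repository id below is a placeholder, since the commit page does not name it:

from datasets import load_dataset

ds = load_dataset(
    "your-org/tokenizer-robustness",  # placeholder: repo id not shown on this page
    "tokenizer_robustness_completion_italian_english_keyboard",
    split="test",
)
print(len(ds))  # 68 examples, per the split metadata added below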
@@ -919,6 +923,132 @@ dataset_info:
     num_examples: 31
   download_size: 35296
   dataset_size: 16845
+- config_name: tokenizer_robustness_completion_italian_english_keyboard
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: perturbed_word
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 36901
+    num_examples: 68
+  download_size: 41603
+  dataset_size: 36901
 ---
 
 # Dataset Card for Tokenization Robustness
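The per-tokenizer similarity and token-count columns added above are stored as struct features keyed by tokenizer name, so each row carries one value per tokenizer. A minimal access sketch, assuming `ds` from the loading example earlier (struct columns come back as plain Python dicts):

row = ds[0]
print(row["perturbed_word"])
print(row["vanilla_cos_sim_to_canonical"]["gpt2"])  # float64 cosine similarity
print(row["token_counts"]["google/byt5-small"])     # int64 token count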
tokenizer_robustness_completion_italian_english_keyboard/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cafc061990a6c53fe1630aac33f24bdf1cb1e26a9a544509027804d3f63efeae
+size 41603
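The parquet data itself lives in Git LFS storage; only this three-line pointer is committed to the repository. A minimal sketch of verifying that a downloaded file matches the pointer's `oid` (SHA-256) and `size` fields; the local path is hypothetical:

import hashlib
import os

path = "test-00000-of-00001.parquet"  # hypothetical local path after `git lfs pull`
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert digest == "cafc061990a6c53fe1630aac33f24bdf1cb1e26a9a544509027804d3f63efeae"
assert os.path.getsize(path) == 41603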