agkphysics committed on
Commit
0c609e8
·
verified ·
1 Parent(s): 5a2fa42

Convert to Parquet format (#5)

Browse files

- Convert balanced data to Parquet format (6762f044d1c88619c7f2006486036192128fb07e)
- Remove balanced data tar files (dbc89ba34a8f107ff1bef215a13eab648b0c7da8)
- Convert unbalanced data to Parquet format (0049167e89f259a010c3f070fe3666d9e5242836)
- Remove unbalanced labels CSV and update README (cb06767e072e3bd3e4145834cbcb0837b9e0065a)
- Move ontology.json (ceb9eaaa7844c9ad7351e659c84a572e376ad06d)

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. AudioSet.py +0 -183
  2. README.md +47 -53
  3. data/{bal_train09.tar → bal_train/00.parquet} +2 -2
  4. data/{balanced_train_segments.csv → bal_train/01.parquet} +2 -2
  5. data/{eval_segments.csv → bal_train/02.parquet} +2 -2
  6. data/{unbalanced_train_segments.csv → bal_train/03.parquet} +2 -2
  7. data/bal_train/04.parquet +3 -0
  8. data/bal_train/05.parquet +3 -0
  9. data/bal_train/06.parquet +3 -0
  10. data/bal_train/07.parquet +3 -0
  11. data/bal_train/08.parquet +3 -0
  12. data/bal_train/09.parquet +3 -0
  13. data/bal_train/10.parquet +3 -0
  14. data/bal_train/11.parquet +3 -0
  15. data/bal_train/12.parquet +3 -0
  16. data/bal_train/13.parquet +3 -0
  17. data/bal_train/14.parquet +3 -0
  18. data/bal_train/15.parquet +3 -0
  19. data/bal_train/16.parquet +3 -0
  20. data/bal_train/17.parquet +3 -0
  21. data/bal_train/18.parquet +3 -0
  22. data/bal_train/19.parquet +3 -0
  23. data/bal_train/20.parquet +3 -0
  24. data/bal_train/21.parquet +3 -0
  25. data/bal_train/22.parquet +3 -0
  26. data/bal_train/23.parquet +3 -0
  27. data/bal_train/24.parquet +3 -0
  28. data/bal_train/25.parquet +3 -0
  29. data/bal_train/26.parquet +3 -0
  30. data/bal_train/27.parquet +3 -0
  31. data/bal_train/28.parquet +3 -0
  32. data/bal_train/29.parquet +3 -0
  33. data/bal_train/30.parquet +3 -0
  34. data/bal_train/31.parquet +3 -0
  35. data/bal_train/32.parquet +3 -0
  36. data/bal_train/33.parquet +3 -0
  37. data/bal_train/34.parquet +3 -0
  38. data/bal_train/35.parquet +3 -0
  39. data/bal_train/36.parquet +3 -0
  40. data/bal_train/37.parquet +3 -0
  41. data/bal_train04.tar +0 -3
  42. data/bal_train05.tar +0 -3
  43. data/bal_train06.tar +0 -3
  44. data/bal_train07.tar +0 -3
  45. data/bal_train08.tar +0 -3
  46. data/{unbal_train257.tar → eval/00.parquet} +2 -2
  47. data/eval/01.parquet +3 -0
  48. data/eval/02.parquet +3 -0
  49. data/eval/03.parquet +3 -0
  50. data/eval/04.parquet +3 -0
AudioSet.py DELETED
@@ -1,183 +0,0 @@
1
- # Copyright (C) 2024 Aaron Keesing
2
- #
3
- # Permission is hereby granted, free of charge, to any person obtaining
4
- # a copy of this software and associated documentation files (the
5
- # “Software”), to deal in the Software without restriction, including
6
- # without limitation the rights to use, copy, modify, merge, publish,
7
- # distribute, sublicense, and/or sell copies of the Software, and to
8
- # permit persons to whom the Software is furnished to do so, subject to
9
- # the following conditions:
10
- #
11
- # The above copyright notice and this permission notice shall be
12
- # included in all copies or substantial portions of the Software.
13
- #
14
- # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
15
- # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
- # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
17
- # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
18
- # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
19
- # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
20
- # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
-
22
- from itertools import chain
23
- import json
24
- import os
25
- import tarfile
26
-
27
- import pandas as pd
28
- import datasets
29
-
30
-
31
- _CITATION = """\
32
- @inproceedings{45857,
33
- title = {Audio Set: An ontology and human-labeled dataset for audio events},
34
- author = {Jort F. Gemmeke and Daniel P. W. Ellis and Dylan Freedman and Aren Jansen and Wade Lawrence and R. Channing Moore and Manoj Plakal and Marvin Ritter},
35
- year = {2017},
36
- booktitle = {Proc. IEEE ICASSP 2017},
37
- address = {New Orleans, LA}
38
- }
39
- """
40
-
41
- _DESCRIPTION = """\
42
- This repository contains the balanced training set and evaluation set of the AudioSet
43
- data, described here: https://research.google.com/audioset/dataset/index.html. The
44
- YouTube videos were downloaded in March 2023, and so not all of the original audios are
45
- available.
46
- """
47
-
48
- _HOMEPAGE = "https://research.google.com/audioset/dataset/index.html"
49
-
50
- _LICENSE = "cc-by-4.0"
51
-
52
- _URL_PREFIX = "https://huggingface.co/datasets/agkphysics/AudioSet/resolve/main"
53
-
54
- _N_BAL_TRAIN_TARS = 10
55
- _N_UNBAL_TRAIN_TARS = 870
56
- _N_EVAL_TARS = 9
57
-
58
-
59
- def _iter_tar(path):
60
- """Iterate through the tar archive, but without skipping some files, which the HF
61
- DL does.
62
- """
63
- with open(path, "rb") as fid:
64
- stream = tarfile.open(fileobj=fid, mode="r|*")
65
- for tarinfo in stream:
66
- file_obj = stream.extractfile(tarinfo)
67
- yield tarinfo.name, file_obj
68
- stream.members = []
69
- del stream
70
-
71
-
72
- class AudioSetDataset(datasets.GeneratorBasedBuilder):
73
- VERSION = datasets.Version("1.0.0")
74
-
75
- BUILDER_CONFIGS = [
76
- datasets.BuilderConfig(
77
- name="balanced",
78
- version=VERSION,
79
- description="Balanced training and balanced evaluation set.",
80
- ),
81
- datasets.BuilderConfig(
82
- name="unbalanced",
83
- version=VERSION,
84
- description="Full unbalanced training set and balanced evaluation set.",
85
- ),
86
- ]
87
- DEFAULT_CONFIG_NAME = "balanced"
88
-
89
- def _info(self) -> datasets.DatasetInfo:
90
- return datasets.DatasetInfo(
91
- description=_DESCRIPTION,
92
- citation=_CITATION,
93
- homepage=_HOMEPAGE,
94
- license=_LICENSE,
95
- features=datasets.Features(
96
- {
97
- "video_id": datasets.Value("string"),
98
- "audio": datasets.Audio(sampling_rate=None, mono=True, decode=True),
99
- "labels": datasets.Sequence(datasets.Value("string")),
100
- "human_labels": datasets.Sequence(datasets.Value("string")),
101
- }
102
- ),
103
- )
104
-
105
- def _split_generators(self, dl_manager: datasets.DownloadManager):
106
- if self.config.data_dir:
107
- prefix = self.config.data_dir
108
- else:
109
- prefix = _URL_PREFIX
110
- prefix = prefix + "/data"
111
-
112
- _LABEL_URLS = {
113
- "bal_train": (
114
- f"{prefix}/balanced_train_segments.csv"
115
- if self.config.name == "balanced"
116
- else f"{prefix}/unbalanced_train_segments.csv"
117
- ),
118
- "eval": f"{prefix}/eval_segments.csv",
119
- "ontology": f"{prefix}/ontology.json",
120
- }
121
- _DATA_URLS = {
122
- "bal_train": (
123
- [f"{prefix}/bal_train0{i}.tar" for i in range(_N_BAL_TRAIN_TARS)]
124
- if self.config.name == "balanced"
125
- else [
126
- f"{prefix}/unbal_train{i:03d}.tar"
127
- for i in range(_N_UNBAL_TRAIN_TARS)
128
- ]
129
- ),
130
- "eval": [f"{prefix}/eval0{i}.tar" for i in range(_N_EVAL_TARS)],
131
- }
132
-
133
- tar_files = dl_manager.download(_DATA_URLS)
134
- label_files = dl_manager.download(_LABEL_URLS)
135
-
136
- return [
137
- datasets.SplitGenerator(
138
- name=datasets.Split.TRAIN,
139
- gen_kwargs={
140
- "labels": label_files["bal_train"],
141
- "ontology": label_files["ontology"],
142
- "audio_files": chain.from_iterable(
143
- _iter_tar(x) for x in tar_files["bal_train"]
144
- ),
145
- },
146
- ),
147
- datasets.SplitGenerator(
148
- name=datasets.Split.TEST,
149
- gen_kwargs={
150
- "labels": label_files["eval"],
151
- "ontology": label_files["ontology"],
152
- "audio_files": chain.from_iterable(
153
- _iter_tar(x) for x in tar_files["eval"]
154
- ),
155
- },
156
- ),
157
- ]
158
-
159
- def _generate_examples(self, labels, ontology, audio_files):
160
- with open(ontology) as fid:
161
- ontology_data = json.load(fid)
162
- id_to_name = {x["id"]: x["name"] for x in ontology_data}
163
-
164
- labels_df = pd.read_csv(
165
- labels,
166
- skiprows=3,
167
- header=None,
168
- skipinitialspace=True,
169
- names=["vid_id", "start", "end", "labels"],
170
- index_col="vid_id",
171
- )
172
-
173
- for path, fid in audio_files:
174
- vid_id = os.path.splitext(os.path.basename(path))[0]
175
- label_ids = labels_df.loc[vid_id, "labels"].split(",")
176
- human_labels = [id_to_name[x] for x in label_ids]
177
- example = {
178
- "video_id": vid_id,
179
- "labels": label_ids,
180
- "human_labels": human_labels,
181
- "audio": {"path": path, "bytes": fid.read()},
182
- }
183
- yield vid_id, example
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md CHANGED
@@ -16,71 +16,54 @@ config_names:
16
  - unbalanced
17
  tags:
18
  - audio
19
- dataset_info:
20
  - config_name: balanced
21
- features:
22
- - name: video_id
23
- dtype: string
24
- - name: audio
25
- dtype: audio
26
- - name: labels
27
- sequence: string
28
- - name: human_labels
29
- sequence: string
30
- splits:
31
- - name: train
32
- num_bytes: 26016210987
33
- num_examples: 18685
34
- - name: test
35
- num_bytes: 23763682278
36
- num_examples: 17142
37
- download_size: 49805654900
38
- dataset_size: 49779893265
39
  - config_name: unbalanced
40
- features:
41
- - name: video_id
42
- dtype: string
43
- - name: audio
44
- dtype: audio
45
- - name: labels
46
- sequence: string
47
- - name: human_labels
48
- sequence: string
49
- splits:
50
- - name: train
51
- num_bytes: 2408656417541
52
- num_examples: 1738788
53
- - name: test
54
- num_bytes: 23763682278
55
- num_examples: 17142
56
- download_size: 2433673104977
57
- dataset_size: 2432420099819
58
  ---
59
 
60
  # Dataset Card for AudioSet
61
 
62
  ## Dataset Description
 
63
  - **Homepage**: https://research.google.com/audioset/index.html
64
  - **Paper**: https://storage.googleapis.com/gweb-research2023-media/pubtools/pdf/45857.pdf
65
  - **Leaderboard**: https://paperswithcode.com/sota/audio-classification-on-audioset
66
 
67
  ### Dataset Summary
68
- [AudioSet](https://research.google.com/audioset/dataset/index.html) is a
69
- dataset of 10-second clips from YouTube, annotated into one or more
70
- sound categories, following the AudioSet ontology.
71
 
72
  ### Supported Tasks and Leaderboards
73
- - `audio-classification`: Classify audio clips into categories. The
74
- leaderboard is available
75
- [here](https://paperswithcode.com/sota/audio-classification-on-audioset)
76
 
77
  ### Languages
78
- The class labels in the dataset are in English.
79
 
 
80
 
81
  ## Dataset Structure
82
 
83
  ### Data Instances
 
84
  Example instance from the dataset:
85
  ```python
86
  {
@@ -97,6 +80,7 @@ Example instance from the dataset:
97
  ```
98
 
99
  ### Data Fields
 
100
  Instances have the following fields:
101
  - `video_id`: a `string` feature containing the original YouTube ID.
102
  - `audio`: an `Audio` feature containing the audio data and sample rate.
@@ -106,70 +90,80 @@ Instances have the following fields:
106
  human-readable forms of the same labels as in `labels`.
107
 
108
  ### Data Splits
 
109
  The distribution of audio clips is as follows:
110
 
111
  #### `balanced` configuration
112
  | |train|test |
113
  |-----------|----:|----:|
114
- |# instances|18685|17142|
115
 
116
  #### `unbalanced` configuration
117
  | |train |test |
118
  |-----------|------:|----:|
119
- |# instances|1738788|17142|
120
 
121
 
122
  ## Dataset Creation
123
 
124
  ### Curation Rationale
 
125
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
126
 
127
  ### Source Data
128
 
129
  #### Initial Data Collection and Normalization
 
130
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
131
 
132
  #### Who are the source language producers?
 
133
  The labels are from the AudioSet ontology. Audio clips are from YouTube.
134
 
135
  ### Annotations
136
 
137
  #### Annotation process
 
138
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
139
 
140
  #### Who are the annotators?
 
141
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
142
 
143
  ### Personal and Sensitive Information
 
144
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
145
 
146
  ## Considerations for Using the Data
147
 
148
  ### Social Impact of Dataset
 
149
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
150
 
151
  ### Discussion of Biases
 
152
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
153
 
154
  ### Other Known Limitations
155
- 1. The YouTube videos in this copy of AudioSet were downloaded in March
156
- 2023, so not all of the original audios are available. The number of
157
- clips able to be downloaded is as follows:
158
- - Balanced train: 18685 audio clips out of 22160 originally.
159
  - Unbalanced train: 1738788 clips out of 2041789 originally.
160
- - Evaluation: 17142 audio clips out of 20371 originally.
161
- 2. Most audio is sampled at 48 kHz 24 bit, but about 10% is sampled at
162
- 44.1 kHz 24 bit. Audio files are stored in the FLAC format.
163
 
164
  ## Additional Information
165
 
166
  ### Dataset Curators
 
167
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
168
 
169
  ### Licensing Information
 
170
  The AudioSet data is licensed under CC-BY-4.0
171
 
172
  ## Citation
 
173
  ```bibtex
174
  @inproceedings{jort_audioset_2017,
175
  title = {Audio Set: An ontology and human-labeled dataset for audio events},
 
16
  - unbalanced
17
  tags:
18
  - audio
19
+ configs:
20
  - config_name: balanced
21
+ default: true
22
+ data_files:
23
+ - split: train
24
+ path: data/bal_train/*.parquet
25
+ - split: test
26
+ path: data/eval/*.parquet
 
 
 
 
 
 
 
 
 
 
 
 
27
  - config_name: unbalanced
28
+ data_files:
29
+ - split: train
30
+ path: data/unbal_train/*.parquet
31
+ - split: test
32
+ path: data/eval/*.parquet
33
+ - config_name: full
34
+ data_files:
35
+ - split: bal_train
36
+ path: data/bal_train/*.parquet
37
+ - split: unbal_train
38
+ path: data/unbal_train/*.parquet
39
+ - split: eval
40
+ path: data/eval/*.parquet
 
 
 
 
 
41
  ---
42
 
43
  # Dataset Card for AudioSet
44
 
45
  ## Dataset Description
46
+
47
  - **Homepage**: https://research.google.com/audioset/index.html
48
  - **Paper**: https://storage.googleapis.com/gweb-research2023-media/pubtools/pdf/45857.pdf
49
  - **Leaderboard**: https://paperswithcode.com/sota/audio-classification-on-audioset
50
 
51
  ### Dataset Summary
52
+
53
+ [AudioSet](https://research.google.com/audioset/dataset/index.html) is a dataset of 10-second clips from YouTube, annotated into one or more sound categories, following the AudioSet ontology.
 
54
 
55
  ### Supported Tasks and Leaderboards
56
+
57
+ - `audio-classification`: Classify audio clips into categories. The leaderboard is available [here](https://paperswithcode.com/sota/audio-classification-on-audioset)
 
58
 
59
  ### Languages
 
60
 
61
+ The class labels in the dataset are in English.
62
 
63
  ## Dataset Structure
64
 
65
  ### Data Instances
66
+
67
  Example instance from the dataset:
68
  ```python
69
  {
 
80
  ```
81
 
82
  ### Data Fields
83
+
84
  Instances have the following fields:
85
  - `video_id`: a `string` feature containing the original YouTube ID.
86
  - `audio`: an `Audio` feature containing the audio data and sample rate.
 
90
  human-readable forms of the same labels as in `labels`.
91
 
92
  ### Data Splits
93
+
94
  The distribution of audio clips is as follows:
95
 
96
  #### `balanced` configuration
97
  | |train|test |
98
  |-----------|----:|----:|
99
+ |# instances|18683|17141|
100
 
101
  #### `unbalanced` configuration
102
  | |train |test |
103
  |-----------|------:|----:|
104
+ |# instances|1738657|17141|
105
 
106
 
107
  ## Dataset Creation
108
 
109
  ### Curation Rationale
110
+
111
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
112
 
113
  ### Source Data
114
 
115
  #### Initial Data Collection and Normalization
116
+
117
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
118
 
119
  #### Who are the source language producers?
120
+
121
  The labels are from the AudioSet ontology. Audio clips are from YouTube.
122
 
123
  ### Annotations
124
 
125
  #### Annotation process
126
+
127
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
128
 
129
  #### Who are the annotators?
130
+
131
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
132
 
133
  ### Personal and Sensitive Information
134
+
135
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
136
 
137
  ## Considerations for Using the Data
138
 
139
  ### Social Impact of Dataset
140
+
141
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
142
 
143
  ### Discussion of Biases
144
+
145
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
146
 
147
  ### Other Known Limitations
148
+
149
+ 1. The YouTube videos in this copy of AudioSet were downloaded in March 2023, so not all of the original audios are available. The number of clips able to be downloaded is as follows:
150
+ - Balanced train: 18683 audio clips out of 22160 originally.
 
151
  - Unbalanced train: 1738657 clips out of 2041789 originally.
152
+ - Evaluation: 17141 audio clips out of 20371 originally.
153
+ 2. Most audio is sampled at 48 kHz 24 bit, but about 10% is sampled at 44.1 kHz 24 bit. Audio files are stored in the FLAC format.
 
154
 
155
  ## Additional Information
156
 
157
  ### Dataset Curators
158
+
159
  [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
160
 
161
  ### Licensing Information
162
+
163
  The AudioSet data is licensed under CC-BY-4.0
164
 
165
  ## Citation
166
+
167
  ```bibtex
168
  @inproceedings{jort_audioset_2017,
169
  title = {Audio Set: An ontology and human-labeled dataset for audio events},
data/{bal_train09.tar → bal_train/00.parquet} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9ab88550e77c8289acf71d19ae3f254fadad16f3cea305ef08a16949a928acf9
3
- size 968540160
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b433e7bcf3bbdfb0488791fceae1eb7100711d13093d22e2253f15d2dcabc084
3
+ size 687636067
data/{balanced_train_segments.csv → bal_train/01.parquet} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3b83e5ff9ed72cfda37d4e36ce08250e107c1ea70f109f6ab6668ac200db5e94
3
- size 1211931
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c38eba06b54801c655d61346193a5974722e7b92af3a9e668effc3a41e47a2a1
3
+ size 682091793
data/{eval_segments.csv → bal_train/02.parquet} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f7055f53935f70e74e39494a5c8c4c78f1700fce275028bd36e31186f33d45db
3
- size 1143389
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dad9f493edde823055ba7c1ed417946949fe62f702317f94477ad1ca5d9749c2
3
+ size 704032600
data/{unbalanced_train_segments.csv → bal_train/03.parquet} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bb2dfd5e4f1ced39db9480633c510f813d7899f4b7472aa83a411b4a43a12698
3
- size 101468408
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b7c49aa9096ab69c7dc547029b1e2461478363dac59d47867106c69ca4f7e2a
3
+ size 713737819
data/bal_train/04.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:103ddeccc27027eea74e57308819039f6fd0870f9be068a522ef86d98e2168a2
3
+ size 698053030
data/bal_train/05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:709f1a4dada024d9fc1aee461ae35214a1d9ba3edbcc107f297a2798a5a75845
3
+ size 680826612
data/bal_train/06.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:236b258b185fad26cf754637a5ed496e21eef7eaa834a931bf7aa5bf8b4903dd
3
+ size 696804833
data/bal_train/07.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bf30b2118c20d7ddb7ac27f1287bf950033b9379ace4fab2c5d80d04b5d4d74
3
+ size 697684352
data/bal_train/08.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db17feaf9f5bd4bcabde4e7fe05161dcc0e8d1457f5b21bd2fed2675d99deb49
3
+ size 703082680
data/bal_train/09.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1121a990b52fdc9805adf9e6d704cc053c7fed02b5ce6f5b83cf73d3e9eccad7
3
+ size 692591080
data/bal_train/10.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bb3986da782b019dbae5b80974d7fd1a98b6251307ac22755bb71f8f3fc173c
3
+ size 683898581
data/bal_train/11.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39c74b7ac59c0513ae4e422cb8ed0c83ef2b9f34f6c5823576ee8c2437b1021d
3
+ size 684079014
data/bal_train/12.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7c65af0fcaee74d0ea7226f4728c6b1f179ae11da863bf77c750f4243c6e6f6
3
+ size 686237063
data/bal_train/13.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97439794cf59f1644228ae7512af96668fdc6d2eeff4b38929b9cedc9cb2bf46
3
+ size 694211942
data/bal_train/14.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d54735a7e69cb4e8485412e66cfad8b0f4bdb62992d17e89fdf86b44b61af9e
3
+ size 687515110
data/bal_train/15.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d38ba3d20b2a18a92a49c4ca0ca0156e85e9e961701a5cc5a8c0f5b6ef8cc9fa
3
+ size 698624074
data/bal_train/16.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bd84fd55bae889f4be99966fdc93a0dacf9ba166130570a252478fe6fc783be
3
+ size 696877214
data/bal_train/17.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15d6f12119b6062ea48125b3cbeb65dac7d35ba9fdbb5aa19cff36ada14e6d6a
3
+ size 695048059
data/bal_train/18.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e743fa4484afe9ac99ae602c67fa5760bedbae1375d4553339e125293c5d29c4
3
+ size 674365856
data/bal_train/19.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9018da4254042fe685e7865e5099ef6ed5abf6f2ce690e2337e3e34ae1e11d33
3
+ size 691482982
data/bal_train/20.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d654cdd737482581f5b3ddeecdc481f9f52b8faf7bd1434bb09e92551477901
3
+ size 689837549
data/bal_train/21.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba7ff2e3d594e26b41076500efbbfed41620980b5ea3745cd29c79531d6edad1
3
+ size 696972412
data/bal_train/22.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5eb988951801156ba4d94a88fd70bb246e9930ea09706a9eff310b246a0ace6
3
+ size 701192003
data/bal_train/23.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:579043dd1ec0849e0fd9ffe0ff9c8f8027839b444cc146a0064c153564e5ad2d
3
+ size 701560427
data/bal_train/24.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e46c83fef963310415da6d885f7e2fe478b3c5e0964c1135795194ec31483d18
3
+ size 684620710
data/bal_train/25.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f0346c44e5037d9bc657b28e87cb982a143f4c69e170b7d439de28245d87baf
3
+ size 698965374
data/bal_train/26.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2916eb80e10b3618eab389780e10c1403227a2ec1e43c97f7a9339910d8b8b54
3
+ size 708032569
data/bal_train/27.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7f9903e520c1529ddd5da15ccd487621784f4b09cfe67ea67ba68bd04155b42
3
+ size 697119213
data/bal_train/28.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53ee260a090ee2bff00514cca7ee1b6f45457af9d6d360db28f9ff2f6708df06
3
+ size 683021048
data/bal_train/29.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:42c9a27e93b355e470a16c7526ed482426f34d04330db835d06726dc12b0fb8c
3
+ size 715123794
data/bal_train/30.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14580c345b381048bae3f602d9676f5a9fa712156e8988fbb3369f8e33d1ce41
3
+ size 689893393
data/bal_train/31.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bd1ca9083608c22a6ad1785f21c91d16506bdadad2e764d8e18c72f2286e82f
3
+ size 686508896
data/bal_train/32.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07f29190ed86d7c8e4248028c4e22814c533b8e2e670072d586d68ccd84edfba
3
+ size 683259323
data/bal_train/33.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:814aaa6d2b3a49490380d6f992a8c7327388518aea050b1f5866c776a0a8d281
3
+ size 671026160
data/bal_train/34.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99c5b719010677d072adc6bb5c6307352b1c5d43c1501dbdd60e5498037a6480
3
+ size 709550432
data/bal_train/35.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd23aed3bf7054a04c205cad12b461a9a119f5f771ad499a9097e791459b875c
3
+ size 658688728
data/bal_train/36.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4127bcb8de82328db554b0fca9d29cf2d6da6263f2bd4f1c43e4bf261a92d525
3
+ size 707348595
data/bal_train/37.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4b6f777199b6ca055130628d1abd22ea0a9f93df8530833ff4614a715ef81c6
3
+ size 256051577
data/bal_train04.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9f99305df0fb64c1ff6a6f2d74f50faef163fdfdc000299ad074de86527186f9
3
- size 2772848640
 
 
 
 
data/bal_train05.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:fe45e29db7c146e55e37b8b63be641bacb403eebcde0a57866a49a9f925d7f7b
3
- size 2804664320
 
 
 
 
data/bal_train06.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d4065ac7d5d53425c64ef590fbba2863d3bcf0d532730a8c2584ddc4f8a74a2f
3
- size 2803824640
 
 
 
 
data/bal_train07.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d5422f2d8925359ea629671738862e92b54b58d781ee883aa07f242bae3ebea7
3
- size 2789519360
 
 
 
 
data/bal_train08.tar DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:93849b0b552f4ece53ffea24c6dcecec7b84a56c8e3368bb1befc33f6eb69325
3
- size 2737479680
 
 
 
 
data/{unbal_train257.tar → eval/00.parquet} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:02f028d4aebfeb119870a7dd79e565dd3762a5b4e84003fc750e1b084a088f7a
3
- size 2772910080
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38baa63ea6ac649bb14d473bed61b891503d9a7b2a8c270ad42ced619d2ede24
3
+ size 685168859
data/eval/01.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:323783ee27baacabcd4e5273d0dc939f26420f2777f16e81f84e143b5ad67a31
3
+ size 697647736
data/eval/02.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:412e3b9ea41ecc2f06b3c9aa8f75831e2389728c945f2d174e8aa2b766e44990
3
+ size 705676865
data/eval/03.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5fcf32f43a86787041164ed2ad2a09e8ca1905e2fbf9bc8933517e7b19e3124d
3
+ size 679036596
data/eval/04.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c63c1d68a9f5ecbe5d1d2d813432696be494d975b08bcd9437822d903f63b53
3
+ size 671462808