lhoestq (HF Staff) committed
Commit 6335836 · verified · 1 parent: b343c26

Delete asr_dummy.py

Files changed (1)
  1. asr_dummy.py +0 -173
asr_dummy.py DELETED
@@ -1,173 +0,0 @@
- # coding=utf-8
- # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """SUPERB: Speech processing Universal PERformance Benchmark."""
-
-
- import glob
- import os
- import textwrap
-
- import datasets
-
-
- _CITATION = """\
- @article{DBLP:journals/corr/abs-2105-01051,
-   author = {Shu{-}Wen Yang and
-             Po{-}Han Chi and
-             Yung{-}Sung Chuang and
-             Cheng{-}I Jeff Lai and
-             Kushal Lakhotia and
-             Yist Y. Lin and
-             Andy T. Liu and
-             Jiatong Shi and
-             Xuankai Chang and
-             Guan{-}Ting Lin and
-             Tzu{-}Hsien Huang and
-             Wei{-}Cheng Tseng and
-             Ko{-}tik Lee and
-             Da{-}Rong Liu and
-             Zili Huang and
-             Shuyan Dong and
-             Shang{-}Wen Li and
-             Shinji Watanabe and
-             Abdelrahman Mohamed and
-             Hung{-}yi Lee},
-   title = {{SUPERB:} Speech processing Universal PERformance Benchmark},
-   journal = {CoRR},
-   volume = {abs/2105.01051},
-   year = {2021},
-   url = {https://arxiv.org/abs/2105.01051},
-   archivePrefix = {arXiv},
-   eprint = {2105.01051},
-   timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
-   biburl = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
-   bibsource = {dblp computer science bibliography, https://dblp.org}
- }
- """
-
- _DESCRIPTION = """\
- Self-supervised learning (SSL) has proven vital for advancing research in
- natural language processing (NLP) and computer vision (CV). The paradigm
- pretrains a shared model on large volumes of unlabeled data and achieves
- state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
- speech processing community lacks a similar setup to systematically explore the
- paradigm. To bridge this gap, we introduce Speech processing Universal
- PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
- performance of a shared model across a wide range of speech processing tasks
- with minimal architecture changes and labeled data. Among multiple usages of the
- shared model, we especially focus on extracting the representation learned from
- SSL due to its preferable re-usability. We present a simple framework to solve
- SUPERB tasks by learning task-specialized lightweight prediction heads on top of
- the frozen shared model. Our results demonstrate that the framework is promising
- as SSL representations show competitive generalizability and accessibility
- across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
- benchmark toolkit to fuel the research in representation learning and general
- speech processing.
-
- Note that in order to limit the required storage for preparing this dataset, the
- audio is stored in the .flac format and is not converted to a float32 array. To
- convert, the audio file to a float32 array, please make use of the `.map()`
- function as follows:
-
-
- ```python
- import soundfile as sf
-
- def map_to_array(batch):
-     speech_array, _ = sf.read(batch["file"])
-     batch["speech"] = speech_array
-     return batch
-
- dataset = dataset.map(map_to_array, remove_columns=["file"])
- ```
- """
-
-
- class AsrDummybConfig(datasets.BuilderConfig):
-     """BuilderConfig for Superb."""
-
-     def __init__(
-         self,
-         data_url,
-         url,
-         **kwargs,
-     ):
-         super(AsrDummybConfig, self).__init__(
-             version=datasets.Version("1.9.0", ""), **kwargs
-         )
-         self.data_url = data_url
-         self.url = url
-
-
- class AsrDummy(datasets.GeneratorBasedBuilder):
-     """Superb dataset."""
-
-     BUILDER_CONFIGS = [
-         AsrDummybConfig(
-             name="asr",
-             description=textwrap.dedent(
-                 """\
-                 ASR transcribes utterances into words. While PR analyzes the
-                 improvement in modeling phonetics, ASR reflects the significance of
-                 the improvement in a real-world scenario. LibriSpeech
-                 train-clean-100/dev-clean/test-clean subsets are used for
-                 training/validation/testing. The evaluation metric is word error
-                 rate (WER)."""
-             ),
-             url="http://www.openslr.org/12",
-             data_url="http://www.openslr.org/resources/12/",
-         )
-     ]
-
-     DEFAULT_CONFIG_NAME = "asr"
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "file": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=("file",),
-             homepage=self.config.url,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         DL_URLS = [
-             f"https://huggingface.co/datasets/Narsil/asr_dummy/raw/main/{i}.flac"
-             for i in range(1, 5)
-         ]
-         archive_path = dl_manager.download_and_extract(DL_URLS)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"archive_path": archive_path},
-             ),
-         ]
-
-     def _generate_examples(self, archive_path):
-         """Generate examples."""
-         for i, filename in enumerate(archive_path):
-             key = str(i)
-             example = {
-                 "id": key,
-                 "file": filename,
-             }
-             yield key, example
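
The deleted file is a standard `datasets` loading script: it defines a single "asr" config whose test split points at four FLAC files hosted under `Narsil/asr_dummy`, and yields only an `id` and a local `file` path per example. The sketch below shows how such a script is typically consumed; it assumes a local copy of the file and a `datasets` release that still supports script-based loading, and the decoding step simply mirrors the `map_to_array` helper from the script's own docstring.

```python
import soundfile as sf
from datasets import load_dataset

# Build the "asr" config from a local copy of the deleted script
# (older `datasets` releases accept a .py path directly).
dataset = load_dataset("./asr_dummy.py", "asr", split="test")


def map_to_array(batch):
    # Decode the stored .flac path into a float32 waveform,
    # as suggested in the script's docstring.
    speech_array, _ = sf.read(batch["file"])
    batch["speech"] = speech_array
    return batch


dataset = dataset.map(map_to_array, remove_columns=["file"])
print(dataset[0]["id"], len(dataset[0]["speech"]))
```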