codewithaman committed on
Commit 4b4ea7f · verified · 1 parent: 8e6bddb

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +61 -59
  2. README.md +242 -0
  3. dataset/pdfs/01030000000001.pdf +0 -0
  4. dataset/pdfs/01030000000002.pdf +0 -0
  5. dataset/pdfs/01030000000003.pdf +0 -0
  6. dataset/pdfs/01030000000004.pdf +0 -0
  7. dataset/pdfs/01030000000005.pdf +3 -0
  8. dataset/pdfs/01030000000006.pdf +3 -0
  9. dataset/pdfs/01030000000007.pdf +0 -0
  10. dataset/pdfs/01030000000008.pdf +3 -0
  11. dataset/pdfs/01030000000009.pdf +3 -0
  12. dataset/pdfs/01030000000010.pdf +3 -0
  13. dataset/pdfs/01030000000011.pdf +3 -0
  14. dataset/pdfs/01030000000012.pdf +3 -0
  15. dataset/pdfs/01030000000013.pdf +3 -0
  16. dataset/pdfs/01030000000014.pdf +3 -0
  17. dataset/pdfs/01030000000015.pdf +3 -0
  18. dataset/pdfs/01030000000016.pdf +0 -0
  19. dataset/pdfs/01030000000017.pdf +3 -0
  20. dataset/pdfs/01030000000018.pdf +0 -0
  21. dataset/pdfs/01030000000019.pdf +0 -0
  22. dataset/pdfs/01030000000020.pdf +0 -0
  23. dataset/pdfs/01030000000021.pdf +0 -0
  24. dataset/pdfs/01030000000022.pdf +0 -0
  25. dataset/pdfs/01030000000023.pdf +0 -0
  26. dataset/pdfs/01030000000024.pdf +0 -0
  27. dataset/pdfs/01030000000025.pdf +0 -0
  28. dataset/pdfs/01030000000026.pdf +0 -0
  29. dataset/pdfs/01030000000027.pdf +0 -0
  30. dataset/pdfs/01030000000028.pdf +0 -0
  31. dataset/pdfs/01030000000029.pdf +0 -0
  32. dataset/pdfs/01030000000030.pdf +0 -0
  33. dataset/pdfs/01030000000031.pdf +0 -0
  34. dataset/pdfs/01030000000032.pdf +0 -0
  35. dataset/pdfs/01030000000033.pdf +0 -0
  36. dataset/pdfs/01030000000034.pdf +0 -0
  37. dataset/pdfs/01030000000035.pdf +0 -0
  38. dataset/pdfs/01030000000036.pdf +0 -0
  39. dataset/pdfs/01030000000037.pdf +0 -0
  40. dataset/pdfs/01030000000038.pdf +3 -0
  41. dataset/pdfs/01030000000039.pdf +0 -0
  42. dataset/pdfs/01030000000040.pdf +0 -0
  43. dataset/pdfs/01030000000041.pdf +0 -0
  44. dataset/pdfs/01030000000042.pdf +0 -0
  45. dataset/pdfs/01030000000043.pdf +0 -0
  46. dataset/pdfs/01030000000044.pdf +0 -0
  47. dataset/pdfs/01030000000045.pdf +3 -0
  48. dataset/pdfs/01030000000046.pdf +3 -0
  49. dataset/pdfs/01030000000047.pdf +3 -0
  50. dataset/pdfs/01030000000048.pdf +0 -0
.gitattributes CHANGED
@@ -1,59 +1,61 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mds filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
# Audio files - uncompressed
*.pcm filter=lfs diff=lfs merge=lfs -text
*.sam filter=lfs diff=lfs merge=lfs -text
*.raw filter=lfs diff=lfs merge=lfs -text
# Audio files - compressed
*.aac filter=lfs diff=lfs merge=lfs -text
*.flac filter=lfs diff=lfs merge=lfs -text
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.ogg filter=lfs diff=lfs merge=lfs -text
*.wav filter=lfs diff=lfs merge=lfs -text
# Image files - uncompressed
*.bmp filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.tiff filter=lfs diff=lfs merge=lfs -text
# Image files - compressed
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
# Video files - compressed
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.webm filter=lfs diff=lfs merge=lfs -text
+ dataset/pdfs/01030000000113.pdf filter=lfs diff=lfs merge=lfs -text
+ dataset/pdfs/01030000000163.pdf filter=lfs diff=lfs merge=lfs -text
+ dataset/pdfs/* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,245 @@
---
license: apache-2.0
tags:
- nlp
- layout parser
- evaluation
- novalad
---

# **DP-Bench: Document Parsing Benchmark**

<div align="center">
<img src="https://cdn-uploads.huggingface.co/production/uploads/6524ab1e27d1f3d84ad07705/Q7CC2z4CAJzZ4-CGaSnBO.png" width="800px">
</div>

Document parsing refers to the process of converting complex documents, such as PDFs and scanned images, into structured text formats like HTML and Markdown.
It is especially useful as a preprocessor for [RAG](https://en.wikipedia.org/wiki/Retrieval-augmented_generation) systems, as it preserves key structural information from visually rich documents.

While various parsers are available on the market, there is currently no standard evaluation metric to assess their performance.
To address this gap, we propose a set of new evaluation metrics along with a benchmark dataset designed to measure parser performance.

## Metrics
We propose assessing the performance of parsers using three key metrics:
NID for element detection and serialization, and TEDS and TEDS-S for table structure recognition.

### Element detection and serialization

**NID (Normalized Indel Distance).**
NID evaluates how well a parser detects and serializes document elements according to human reading order.
NID is similar to the [normalized edit distance](https://en.wikipedia.org/wiki/Levenshtein_distance) metric but excludes substitutions during evaluation, making it more sensitive to length differences between strings.

The NID metric is computed as follows:

$$
NID = 1 - \frac{\text{distance}}{\text{len(reference)} + \text{len(prediction)}}
$$

The normalized distance in the equation measures the dissimilarity between the reference and predicted text, with values ranging from 0 to 1, where 0 represents perfect alignment and 1 denotes complete dissimilarity.
Here, the predicted text is compared against the reference text to determine how many character-level insertions and deletions are needed to match it.
A higher NID score reflects better performance in both recognizing and ordering the text within the document's detected layout regions.

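
To make the definition concrete, here is a minimal NID sketch in Python (illustrative only, not the repository's `evaluate.py`). It computes the indel distance, i.e. the edit distance restricted to insertions and deletions, via the longest common subsequence, and then normalizes it by the combined length of the two strings:

```python
def indel_distance(ref: str, pred: str) -> int:
    """Edit distance allowing only insertions and deletions (no substitutions)."""
    # indel distance = len(ref) + len(pred) - 2 * LCS(ref, pred)
    m, n = len(ref), len(pred)
    lcs = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if ref[i - 1] == pred[j - 1]:
                lcs[i][j] = lcs[i - 1][j - 1] + 1
            else:
                lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])
    return m + n - 2 * lcs[m][n]


def nid(ref: str, pred: str) -> float:
    """NID = 1 - indel_distance / (len(ref) + len(pred)); 1.0 is a perfect match."""
    if not ref and not pred:
        return 1.0
    return 1 - indel_distance(ref, pred) / (len(ref) + len(pred))


print(nid("document parsing", "document parsing"))  # 1.0
print(nid("document parsing", "documnt parsng"))    # below 1.0: missing characters cost deletions
```

Identical strings score 1.0, while a prediction that drops or hallucinates text is penalized in proportion to the number of characters that must be inserted or deleted.
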
### Table structure recognition

Tables are among the most complex elements in documents, often presenting both structural and content-related challenges.
Yet, during NID evaluation, table elements (as well as figures and charts) are excluded, allowing the metric to focus on text elements such as paragraphs, headings, indexes, and footnotes. To specifically evaluate table structure and content extraction, we use the [TEDS](https://arxiv.org/abs/1911.10683) and [TEDS-S](https://arxiv.org/abs/1911.10683) metrics.

The [traditional metric](https://ieeexplore.ieee.org/document/1227792) fails to account for the hierarchical nature of tables (rows, columns, cells), whereas TEDS and TEDS-S measure the similarity between the predicted and ground-truth tables by comparing both structural layout and content, offering a more comprehensive evaluation.

**TEDS (Tree Edit Distance-based Similarity).**
The TEDS metric is computed as follows:

$$
TEDS(T_a, T_b) = 1 - \frac{EditDist(T_a, T_b)}{\max(|T_a|, |T_b|)}
$$

The equation evaluates the similarity between two tables by modeling them as tree structures \\(T_a\\) and \\(T_b\\).
It measures how accurately the table structure is predicted, including the content of each cell.
A higher TEDS score indicates better overall performance in capturing both the table structure and the content of each cell.

**TEDS-S (Tree Edit Distance-based Similarity-Struct).**
TEDS-S measures the structural similarity between the predicted and reference tables.
While its formulation is identical to TEDS, it uses modified tree representations, denoted as \\(T_a'\\) and \\(T_b'\\), whose nodes correspond solely to the table structure, omitting the content of each cell.
This allows TEDS-S to concentrate on assessing the structural similarity of the tables, such as row and column alignment, without being influenced by the contents of the cells.

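
To make the formula concrete, the sketch below computes a simplified TEDS with the third-party `zss` package (Zhang-Shasha tree edit distance, `pip install zss`) and unit edit costs; the official TEDS implementation uses more refined cost functions for cell content, so treat this as an illustration rather than the benchmark's evaluation code. Dropping the cell text from the node labels gives the structure-only comparison that TEDS-S performs:

```python
# Simplified TEDS illustration using the Zhang-Shasha tree edit distance from the
# third-party `zss` package (pip install zss). Unit costs; not dp-bench's evaluator.
from zss import Node, simple_distance


def tree_size(node) -> int:
    """Number of nodes in the tree rooted at `node`."""
    return 1 + sum(tree_size(child) for child in node.children)


def teds(tree_a, tree_b) -> float:
    """TEDS = 1 - EditDist(Ta, Tb) / max(|Ta|, |Tb|)."""
    distance = simple_distance(tree_a, tree_b)
    return 1 - distance / max(tree_size(tree_a), tree_size(tree_b))


# Reference: a one-row table whose cells contain "a" and "b".
ref = Node("table").addkid(Node("tr").addkid(Node("td:a")).addkid(Node("td:b")))
# Prediction: same structure, but the second cell's text was misread as "x".
pred = Node("table").addkid(Node("tr").addkid(Node("td:a")).addkid(Node("td:x")))
print(teds(ref, pred))  # 0.75: structure matches, one cell's content differs

# TEDS-S compares structure only, so the cell text is dropped from the labels.
ref_s = Node("table").addkid(Node("tr").addkid(Node("td")).addkid(Node("td")))
pred_s = Node("table").addkid(Node("tr").addkid(Node("td")).addkid(Node("td")))
print(teds(ref_s, pred_s))  # 1.0: identical structure
```

In this toy example the structure matches exactly, so the structure-only score is 1.0, while the misread cell lowers the content-aware score to 0.75.
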
## Benchmark dataset

### Document sources
The benchmark dataset is gathered from three sources: 90 samples from the Library of Congress, 90 samples from Open Educational Resources, and 20 samples from Upstage's internal documents.
Together, these sources provide a broad and specialized range of information.

<div style="max-width: 500px; width: 100%; overflow-x: auto; margin: 0 auto;">

| Sources                     | Count |
|:----------------------------|:-----:|
| Library of Congress         |  90   |
| Open educational resources  |  90   |
| Upstage                     |  20   |

</div>

### Layout elements

While works like [ReadingBank](https://github.com/doc-analysis/ReadingBank) often focus solely on text conversion in document parsing, we have taken a more detailed approach by dividing the document into specific elements, with a particular emphasis on table performance.

This benchmark dataset was created by extracting pages with various layout elements from multiple types of documents.
The layout elements consist of 12 element types: **Table, Paragraph, Figure, Chart, Header, Footer, Caption, Equation, Heading1, List, Index, Footnote**.
This diverse set of layout elements ensures that our evaluation covers a wide range of document structures and complexities, providing a comprehensive assessment of document parsing capabilities.

Note that only Heading1 is included among the various heading sizes because it represents the main structural divisions in most documents, serving as the primary section title.
This high-level segmentation is sufficient for assessing the core structure without adding unnecessary complexity.
Detailed heading levels like Heading2 and Heading3 are omitted to keep the evaluation focused and manageable.

<div style="max-width: 500px; width: 100%; overflow-x: auto; margin: 0 auto;">

| Category   | Count |
|:-----------|------:|
| Paragraph  |   804 |
| Heading1   |   194 |
| Footer     |   168 |
| Caption    |   154 |
| Header     |   101 |
| List       |    91 |
| Chart      |    67 |
| Footnote   |    63 |
| Equation   |    58 |
| Figure     |    57 |
| Table      |    55 |
| Index      |    10 |

</div>

### Dataset format

The dataset is provided in JSON format, representing the elements extracted from each PDF file, with every element defined by its position, layout class, and content.
The **category** field holds the layout class, including but not limited to text regions, headings, footers, captions, and tables.
The **content** field offers three representations: **text** contains the plain-text content; **html** renders the region as HTML, with equations in LaTeX and tables in HTML; and **markdown** renders the region as Markdown, distinguishing headings such as Heading1 from other text-based regions such as paragraphs, captions, and footers.
Each element also includes its coordinates (x, y), a unique ID, and the page number it appears on.
This structure supports a flexible representation of layout classes and content formats for document parsing.

```
{
  "01030000000001.pdf": {
    "elements": [
      {
        "coordinates": [
          {
            "x": 170.9176246670229,
            "y": 102.3493458064781
          },
          {
            "x": 208.5023846755278,
            "y": 102.3493458064781
          },
          {
            "x": 208.5023846755278,
            "y": 120.6598699131856
          },
          {
            "x": 170.9176246670229,
            "y": 120.6598699131856
          }
        ],
        "category": "Header",
        "id": 0,
        "page": 1,
        "content": {
          "text": "314",
          "html": "",
          "markdown": ""
        }
      },
      ...
      ...
```
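
As a quick illustration, the annotations can be loaded and inspected with a few lines of Python (a minimal sketch that assumes `dataset/reference.json` follows the element schema shown above):

```python
import json

# Minimal sketch: load the reference annotations and summarize one document's elements.
with open("dataset/reference.json", encoding="utf-8") as f:
    reference = json.load(f)

for element in reference["01030000000001.pdf"]["elements"]:
    category = element["category"]        # layout class, e.g. "Header", "Table"
    page = element["page"]                # page number the element appears on
    text = element["content"]["text"]     # plain-text rendering of the element
    print(f"page {page:>2} | {category:<10} | {text[:40]}")
```
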
<div style="max-width: 800px; width: 100%; overflow-x: auto; margin: 0 auto;">

### Document domains

| Domain                               | Subdomain             | Count |
|:-------------------------------------|:----------------------|------:|
| Social Sciences                      | Economics             |    26 |
| Social Sciences                      | Political Science     |    18 |
| Social Sciences                      | Sociology             |    16 |
| Social Sciences                      | Law                   |    12 |
| Social Sciences                      | Cultural Anthropology |    11 |
| Social Sciences                      | Education             |     8 |
| Social Sciences                      | Psychology            |     4 |
| Natural Sciences                     | Environmental Science |    26 |
| Natural Sciences                     | Biology               |    10 |
| Natural Sciences                     | Astronomy             |     4 |
| Technology                           | Technology            |    33 |
| Mathematics and Information Sciences | Mathematics           |    13 |
| Mathematics and Information Sciences | Informatics           |     9 |
| Mathematics and Information Sciences | Computer Science      |     8 |
| Mathematics and Information Sciences | Statistics            |     2 |

</div>

## Usage

### Setup

Before setting up the environment, **make sure to [install Git LFS](https://git-lfs.com/)**, which is required for handling the large files in this repository.
Once installed, you can clone the repository and install the necessary dependencies by running the following commands:

```
$ git clone https://huggingface.co/datasets/upstage/dp-bench.git
$ cd dp-bench
$ pip install -r requirements.txt
```

The repository includes the scripts needed for inference and evaluation, as described in the following sections.

### Inference
We offer inference scripts that let you request results from various document parsing services.
For more details, refer to this [README](https://huggingface.co/datasets/upstage/dp-bench/blob/main/scripts/README.md).

### Evaluation
The benchmark dataset can be found in the `dataset` folder.
It contains a wide range of document layouts, from text-heavy pages to complex tables, enabling a thorough evaluation of the parser’s performance.
The dataset comes with annotations for layout elements such as paragraphs, headings, and tables.

The following options are required for evaluation:
- **`--ref_path`**: Specifies the path to the reference JSON file, predefined as `dataset/reference.json` for evaluation purposes.
- **`--pred_path`**: Indicates the path to the predicted JSON file. You can either use a sample result located in the `dataset/sample_results` folder, or generate your own using the inference script provided in the `scripts` folder.

#### Element detection and serialization evaluation
This evaluation computes the NID metric to assess how accurately the text in the document is recognized, taking into account the structure and order of the document layout.
To evaluate the document layout results, run the following command:

```
$ python evaluate.py \
  --ref_path <path to the reference json file> \
  --pred_path <path to the predicted json file> \
  --mode layout
```

#### Table structure recognition evaluation
This evaluation computes TEDS-S (structural accuracy) and TEDS (structural and textual accuracy).
To evaluate table recognition performance, use the following command:

```
$ python evaluate.py \
  --ref_path <path to the reference json file> \
  --pred_path <path to the predicted json file> \
  --mode table
```
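
If you prefer to drive both evaluations from Python rather than the shell, a thin wrapper around the documented command line is enough (a sketch; the prediction path is a placeholder for your parser's output):

```python
import subprocess

# Run both documented evaluation modes of evaluate.py from Python.
for mode in ("layout", "table"):
    subprocess.run(
        [
            "python", "evaluate.py",
            "--ref_path", "dataset/reference.json",
            "--pred_path", "predictions.json",  # placeholder for your parser's output
            "--mode", mode,
        ],
        check=True,
    )
```
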
## Leaderboard

<div style="max-width: 800px; width: 100%; overflow-x: auto; margin: 0 auto;">

| Source       | Request date | TEDS ↑    | TEDS-S ↑  | NID ↑     | Avg. Time (secs) ↓ |
|:-------------|:------------:|----------:|----------:|----------:|-------------------:|
| novalad      | 2025-03-24   | **96.49** | **96.49** | **98.51** | **8.50**           |
| upstage      | 2024-10-24   | 93.48     | 94.16     | 97.02     | 3.79               |
| aws          | 2024-10-24   | 88.05     | 90.79     | 96.71     | 14.47              |
| llamaparse   | 2024-10-24   | 74.57     | 76.34     | 92.82     | 4.14               |
| unstructured | 2024-10-24   | 65.56     | 70.00     | 91.18     | 13.14              |
| google       | 2024-10-24   | 66.13     | 71.58     | 90.86     | 5.85               |
| microsoft    | 2024-10-24   | 87.19     | 89.75     | 87.69     | 4.44               |

</div>
dataset/pdfs/01030000000001.pdf ADDED
Binary file (88.5 kB).

dataset/pdfs/01030000000002.pdf ADDED
Binary file (88.4 kB).

dataset/pdfs/01030000000003.pdf ADDED
Binary file (90.6 kB).

dataset/pdfs/01030000000004.pdf ADDED
Binary file (56.4 kB).
 
dataset/pdfs/01030000000005.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daa5167329cda28a8a3841b6d5b8a37ee30273285f174af98ce5c0e5c76c0976
+ size 213760
dataset/pdfs/01030000000006.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9713894c940f2ad16f015d857757b003e4ecaaa1602afa72799c08526806f533
+ size 296819
dataset/pdfs/01030000000007.pdf ADDED
Binary file (35 kB).
 
dataset/pdfs/01030000000008.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:906d54a21ef3c7bfac03f4bb613b0c79ef32fdf81b362450c79e98a96f88708a
+ size 149389
dataset/pdfs/01030000000009.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e22636736c43765307cbc5e876b0a18e340b52c909fc36043cefb38a7a11638
+ size 220047
dataset/pdfs/01030000000010.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d4bf58bf1563cdfb71b2e1272e95c33d01625a9627694fafdf0c69b50816ea5
+ size 475068
dataset/pdfs/01030000000011.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3e3d62292122d9be48e262e7d8e1f9363bab000e26437ffb6f2f94afc3c2294
+ size 183687
dataset/pdfs/01030000000012.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2773d8fcd2f2b18b6bd0b2952ba8dd56702e1f738a4f8a7b1e0e03c9cad9dba
+ size 343505
dataset/pdfs/01030000000013.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a266dd4ddaff135dc5b1ce3d960d4b29cfc96bee5f30f3fb879a0b730c57e6f7
+ size 305072
dataset/pdfs/01030000000014.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10817297774a93c1838650c06b79f518db43af558496041e256bd8c259057f79
+ size 311724
dataset/pdfs/01030000000015.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93bc667fb64454b9df1688b9873ed8fbf3f548d58c81ec0ca3d9cc50e79fb667
+ size 307142
dataset/pdfs/01030000000016.pdf ADDED
Binary file (40 kB).
 
dataset/pdfs/01030000000017.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bd9bf7f193c5670d79bea2d81c123c7dc3094467ec9616245f81a148b063c77
+ size 132199
dataset/pdfs/01030000000018.pdf ADDED
Binary file (51.1 kB).

dataset/pdfs/01030000000019.pdf ADDED
Binary file (44.5 kB).

dataset/pdfs/01030000000020.pdf ADDED
Binary file (22.4 kB).

dataset/pdfs/01030000000021.pdf ADDED
Binary file (44.3 kB).

dataset/pdfs/01030000000022.pdf ADDED
Binary file (23.6 kB).

dataset/pdfs/01030000000023.pdf ADDED
Binary file (23.5 kB).

dataset/pdfs/01030000000024.pdf ADDED
Binary file (36.2 kB).

dataset/pdfs/01030000000025.pdf ADDED
Binary file (35.8 kB).

dataset/pdfs/01030000000026.pdf ADDED
Binary file (23.1 kB).

dataset/pdfs/01030000000027.pdf ADDED
Binary file (69.4 kB).

dataset/pdfs/01030000000028.pdf ADDED
Binary file (76.1 kB).

dataset/pdfs/01030000000029.pdf ADDED
Binary file (76.1 kB).

dataset/pdfs/01030000000030.pdf ADDED
Binary file (85.4 kB).

dataset/pdfs/01030000000031.pdf ADDED
Binary file (92.7 kB).

dataset/pdfs/01030000000032.pdf ADDED
Binary file (58.3 kB).

dataset/pdfs/01030000000033.pdf ADDED
Binary file (87.8 kB).

dataset/pdfs/01030000000034.pdf ADDED
Binary file (73.4 kB).

dataset/pdfs/01030000000035.pdf ADDED
Binary file (74.7 kB).

dataset/pdfs/01030000000036.pdf ADDED
Binary file (69.3 kB).

dataset/pdfs/01030000000037.pdf ADDED
Binary file (86.5 kB).
 
dataset/pdfs/01030000000038.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22f4e5d348c45a3399f3c8a0e48756dd82a96ed9f5723a2a39183b554667d19a
+ size 111984
dataset/pdfs/01030000000039.pdf ADDED
Binary file (72.3 kB).

dataset/pdfs/01030000000040.pdf ADDED
Binary file (89.6 kB).

dataset/pdfs/01030000000041.pdf ADDED
Binary file (73.6 kB).

dataset/pdfs/01030000000042.pdf ADDED
Binary file (92.4 kB).

dataset/pdfs/01030000000043.pdf ADDED
Binary file (67.6 kB).

dataset/pdfs/01030000000044.pdf ADDED
Binary file (82 kB).
 
dataset/pdfs/01030000000045.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00f0adaaa8358a28b4b4e83bc97dcd83a01f7283605b140c2be8e8d47bba8b6b
+ size 113003
dataset/pdfs/01030000000046.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d276ca9a5ecb8d6d11359f515e50c8f78395548de4e3e2c49e38f5500ee40ebe
+ size 137011
dataset/pdfs/01030000000047.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:934fbf534914863f6431eef38f5bf66fa91afd439ddf20fb1af0cf3225159ac1
+ size 115606
dataset/pdfs/01030000000048.pdf ADDED
Binary file (27.7 kB).