#!/usr/bin/env python
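"""Download a PDF, split it into page batches, run each batch through Azure
Form Recognizer, and save the extracted tables as CSV files.

A typical invocation (the URL and path here are illustrative):

    python extract_tables_from_doc.py https://example.com/report.pdf ./data/report

FORM_RECOGNIZER_ENDPOINT and FORM_RECOGNIZER_KEY must be set in the
environment or in a .env file loaded by python-dotenv.
"""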
import argparse
import atexit
import csv
import math
import os
import pathlib
import re
from urllib.parse import urlparse, unquote

import requests
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential
from dotenv import load_dotenv
from PyPDF2 import PdfFileReader, PdfFileWriter
from tqdm import tqdm

# matches ":selected:" / ":unselected:" marks (with their leading whitespace)
# that Form Recognizer embeds in cell text
selection_marks_regex = re.compile(r"\s+:(un)?selected:")


def ensure_dir(build_dir, name):
    d = build_dir / name
    if not d.exists():
        os.makedirs(d)
    return d


def filename_from_url(url):
    o = urlparse(url)
    filename = unquote(o.path).split("/")[-1].lower()
    return re.sub(r"\s+", "_", filename)
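

# PDFBatchReader downloads the source PDF once, splits it into fixed-size page
# batches on disk, and tracks how many batches have been analyzed so that an
# interrupted run can resume where it left off.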
class PDFBatchReader(object):
    def __init__(self, build_dir, doc_url, batch_size=10, end_page=math.inf):
        self.build_dir = build_dir
        self.doc_url = doc_url
        self.batch_size = batch_size
        self.end_page = end_page
        self._current_batch_idx = None
        self.filename = filename_from_url(self.doc_url)
        if not self.batch_dir.exists():
            os.makedirs(self.batch_dir)
        self.download_doc(self.doc_url)
        self.split_pdf_into_batches()
        with open(self.batch_dir / "pageCount", "r") as cf:
            page_count = int(cf.read().strip())
        self.pbar = tqdm(
            total=page_count,
            desc="analyzing PDF pages {batch_size=%d}" % (self.batch_size),
        )
        atexit.register(self.cleanup)

    @property
    def progress_path(self):
        return self.build_dir / "pdf_batch_reader/progress.txt"

    @property
    def current_batch_idx(self):
        if self._current_batch_idx is None:
            prog_path = self.progress_path
            if prog_path.exists():
                with open(prog_path, "r") as f:
                    self._current_batch_idx = int(f.read().strip())
                print("resume at batch %d" % self._current_batch_idx)
            else:
                self._current_batch_idx = 0
        return self._current_batch_idx

    @current_batch_idx.setter
    def current_batch_idx(self, value):
        self._current_batch_idx = value

    def download_doc(self, doc_url):
        docs_dir = ensure_dir(self.build_dir, "pdf_batch_reader/docs")
        filename = filename_from_url(doc_url)
        fp = docs_dir / filename
        if not fp.exists():
            # stream the response so the progress bar tracks the actual
            # transfer instead of a body already buffered in memory
            r = requests.get(doc_url, stream=True)
            progress_bar = tqdm(
                desc="downloading %s" % filename,
                total=int(r.headers.get("content-length", 0)),
                unit="iB",
                unit_scale=True,
            )
            with open(fp, "wb") as f:
                for data in r.iter_content(1024):
                    progress_bar.update(len(data))
                    f.write(data)
            progress_bar.close()

    @property
    def batch_dir(self):
        return self.build_dir / ("pdf_batch_reader/batches/%s" % self.filename)

    def split_pdf_into_batches(self):
        with open(self.build_dir / "pdf_batch_reader/docs" / self.filename, "rb") as f:
            reader = PdfFileReader(f)
            total = reader.getNumPages()
            with open(self.batch_dir / "pageCount", "w") as cf:
                cf.write("%d" % total)
            print(
                "splitting pdf (%d pages) into %d batches"
                % (total, int(math.ceil(float(total) / self.batch_size)))
            )
            for batch_idx, start_page in enumerate(range(0, total, self.batch_size)):
                writer = PdfFileWriter()
                for page in range(start_page, start_page + self.batch_size):
                    if page >= total or page >= self.end_page:
                        break
                    writer.addPage(reader.getPage(page))
                batch_name = "%03d.pdf" % batch_idx
                with open(self.batch_dir / batch_name, "wb") as wf:
                    writer.write(wf)

    def pdf_batches(self):
        self.pbar.update(self.current_batch_idx * self.batch_size)
        # "pageCount" sorts after the zero-padded batch names, so slicing by
        # index skips exactly the batches that were already analyzed
        for batch_file in sorted(self.batch_dir.iterdir())[self.current_batch_idx :]:
            if batch_file.name == "pageCount":
                continue
            with open(batch_file, "rb") as f:
                yield (self.current_batch_idx + 1, f)
            self.current_batch_idx += 1
            self.pbar.update(self.batch_size)
        self.pbar.close()
        self.current_batch_idx = None

    def cleanup(self):
        # registered with atexit: persist progress if the run was interrupted
        # partway through, clear it once all batches have been processed
        if self._current_batch_idx is not None and self._current_batch_idx > 0:
            with open(self.progress_path, "w") as f:
                f.write("%d" % (self._current_batch_idx))
        elif self.progress_path.exists():
            self.progress_path.unlink()
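

# TableExtractor routes the rows of every extracted table field into a CSV
# named after that field, tagging each row with the batch and table it came from.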
class TableExtractor(object):
    def __init__(self, build_dir, csv_path, trim_selection_marks=False):
        self.build_dir = build_dir
        self.csv_path = csv_path
        self.trim_selection_marks = trim_selection_marks
        self._table_idx = None
        self._file_handles = dict()
        self._writers = dict()

    def get_writer(self, name, fieldnames):
        if name not in self._writers:
            fp = pathlib.Path("%s_%s.csv" % (self.csv_path.strip(), name))
            if not fp.parent.exists():
                os.makedirs(fp.parent)
            exists = fp.exists()
            # append so a resumed run extends the CSV instead of truncating
            # it; the header is only written when the file is brand new
            self._file_handles[name] = open(fp, "a")
            self._writers[name] = csv.DictWriter(
                self._file_handles[name], fieldnames=fieldnames
            )
            if not exists:
                self._writers[name].writeheader()
        return self._writers[name]

    def save_csvs(self):
        for f in self._file_handles.values():
            f.close()

    def extract_tables_from_extracted_documents(self, batch_idx, result):
        for idx, doc in enumerate(result.documents):
            for field_name, field in doc.fields.items():
                # only non-empty list-of-dictionary fields represent tables
                if (
                    field.value_type != "list"
                    or not field.value
                    or field.value[0].value_type != "dictionary"
                ):
                    print(
                        "field %s in document %d is not a table: %s"
                        % (field_name, idx, field)
                    )
                    continue
                fieldnames = list(field.value[0].value.keys()) + [
                    "batch_index",
                    "table_index",
                ]
                writer = self.get_writer(field_name, fieldnames)
                writer.writerows(
                    [
                        dict(
                            [(k, v.content) for k, v in obj.value.items()]
                            + [("batch_index", batch_idx), ("table_index", idx)]
                        )
                        for obj in field.value
                    ]
                )

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.save_csvs()
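

# Standalone helper (not used by the CLI below): writes every raw layout table
# in an analysis result to its own zero-padded, numbered CSV file.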
def extract_tables(csv_path, start_idx, result, trim_selection_marks=False):
    for idx, tbl in enumerate(result.tables):
        tbl_idx = start_idx + idx
        fp = pathlib.Path("%s_%03d.csv" % (csv_path.strip(), tbl_idx))
        if not fp.parent.exists():
            os.makedirs(fp.parent)
        rows = [[""] * tbl.column_count for _ in range(tbl.row_count)]
        for cell in tbl.cells:
            content = cell.content.strip()
            if trim_selection_marks:
                content = selection_marks_regex.sub("", content)
            rows[cell.row_index][cell.column_index] = content
        with open(fp, "w") as f:
            w = csv.writer(f)
            w.writerows(rows)
    return start_idx + len(result.tables)


if __name__ == "__main__":
    load_dotenv()
    fr_endpoint = os.getenv("FORM_RECOGNIZER_ENDPOINT")
    fr_key = os.getenv("FORM_RECOGNIZER_KEY")
    parser = argparse.ArgumentParser(
        description="Extract tables from a document using Azure Form Recognizer and save them as CSV files."
    )
    parser.add_argument(
        "doc_url",
        type=str,
        metavar="DOC_URL",
        help="URL of the PDF document to extract tables from",
    )
    parser.add_argument(
        "csv_path",
        type=str,
        metavar="CSV_PATH",
        help=(
            'Path pattern that will be appended with the table index and ".csv" to generate the CSV save paths. '
            'E.g., if DOC_URL contains two tables and CSV_PATH is "/data/my_doc", the results '
            'will be saved to "/data/my_doc_00.csv" and "/data/my_doc_01.csv"'
        ),
    )
    parser.add_argument(
        "--trim-selection-marks",
        action="store_true",
        help='remove ":selected:" and ":unselected:" sequences from cell values',
    )
    parser.add_argument(
        "--model-id", type=str, default="prebuilt-layout", help="ID of the model to use"
    )
    parser.add_argument(
        "--build-dir",
        type=pathlib.Path,
        metavar="BUILD_DIR",
        default=pathlib.Path.cwd() / "build",
        help=(
            "Build directory that holds the temporary files needed for document extraction. "
            "This script will create and store files under BUILD_DIR/pdf_batch_reader. Defaults to {cwd}/build."
        ),
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        metavar="BATCH_SIZE",
        default=10,
        help="PDF batch size (number of pages) sent to Form Recognizer in a single request.",
    )
    parser.add_argument(
        "-e",
        "--end-page",
        type=int,
        metavar="END_PAGE",
        default=math.inf,
        help="only analyze up to this page (non-inclusive)",
    )
    args = parser.parse_args()
    document_analysis_client = DocumentAnalysisClient(
        endpoint=fr_endpoint, credential=AzureKeyCredential(fr_key)
    )
    with TableExtractor(args.build_dir, args.csv_path, args.trim_selection_marks) as ex:
        reader = PDFBatchReader(
            args.build_dir, args.doc_url, args.batch_size, args.end_page
        )
        for batch_idx, batch_file in reader.pdf_batches():
            poller = document_analysis_client.begin_analyze_document(
                model=args.model_id, document=batch_file
            )
            result = poller.result()
            ex.extract_tables_from_extracted_documents(batch_idx, result)