kalle07 committed
Commit b958bfb · verified · 1 Parent(s): 70bc59c

Upload docling_by_sevenof9_v1.py

Files changed (1)
  1. docling_by_sevenof9_v1.py +147 -0
docling_by_sevenof9_v1.py ADDED
@@ -0,0 +1,147 @@
+ import json
+ import logging
+ import os
+ import sys
+ from pathlib import Path
+ from collections import defaultdict
+ from multiprocessing import get_context
+ from docling.datamodel.pipeline_options import (
+     AcceleratorDevice,
+     AcceleratorOptions,
+     PdfPipelineOptions,
+ )
+ from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
+ from docling.datamodel.base_models import InputFormat
+ from docling.document_converter import DocumentConverter, PdfFormatOption
+ from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline
+
+ _log = logging.getLogger(__name__)
+ logging.basicConfig(level=logging.INFO)
+
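+ # Flatten a Docling table item into a JSON-friendly dict: the first row is
+ # taken as the column headers, each following row becomes a {header: value} record.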
+ def extract_clean_table_data(table):
+     cells = table.get("data", {}).get("table_cells", [])
+     if not cells:
+         return None
+
+     max_row = max(cell["end_row_offset_idx"] for cell in cells)
+     max_col = max(cell["end_col_offset_idx"] for cell in cells)
+     table_matrix = [["" for _ in range(max_col)] for _ in range(max_row)]
+
+     for cell in cells:
+         row = cell["start_row_offset_idx"]
+         col = cell["start_col_offset_idx"]
+         table_matrix[row][col] = cell.get("text", "").strip()
+
+     column_headers = table_matrix[0]
+     data_rows = table_matrix[1:]
+
+     structured_rows = []
+     for row in data_rows:
+         row_data = {
+             column_headers[i]: row[i] for i in range(len(column_headers)) if column_headers[i]
+         }
+         structured_rows.append(row_data)
+
+     return {
+         "num_rows": len(data_rows),
+         "num_columns": len(column_headers),
+         "columns": column_headers,
+         "data": structured_rows,
+     }
+
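+ # Convert a single PDF and write "<stem>_extracted.txt" next to it, grouping
+ # extracted text and tables page by page. Runs in its own subprocess.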
+ def process_single_pdf(pdf_path: Path, accelerator_options: AcceleratorOptions):
+     logging.info(f"Processing: {pdf_path.name}")
+     output_dir = pdf_path.parent
+
+     pipeline_options = PdfPipelineOptions()
+     pipeline_options.accelerator_options = accelerator_options
+     pipeline_options.do_ocr = False
+     pipeline_options.do_table_structure = True
+     pipeline_options.table_structure_options.do_cell_matching = True
+
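+     # Standard PDF pipeline on the pypdfium2 backend: reads embedded text
+     # directly (OCR is disabled above) and runs table-structure recognition.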
+     converter = DocumentConverter(
+         format_options={
+             InputFormat.PDF: PdfFormatOption(
+                 pipeline_cls=StandardPdfPipeline,
+                 backend=PyPdfiumDocumentBackend,
+                 pipeline_options=pipeline_options,
+             )
+         }
+     )
+
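+     # Run the conversion and export the document to a plain dict so text and
+     # table items can be regrouped by page.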
+     doc = converter.convert(pdf_path).document
+     doc_dict = doc.export_to_dict()
+
+     page_texts = defaultdict(list)
+     page_tables = defaultdict(list)
+
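+     # Text items carry provenance entries that record their page number.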
+     for text_item in doc_dict.get("texts", []):
+         if "text" in text_item and "prov" in text_item:
+             for prov in text_item["prov"]:
+                 page = prov.get("page_no")
+                 if page is not None:
+                     page_texts[page].append(text_item["text"])
+
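+     # A table is assigned to the page of its first provenance entry.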
+     for table_item in doc_dict.get("tables", []):
+         prov = table_item.get("prov", [])
+         if not prov:
+             continue
+         page = prov[0].get("page_no")
+         clean_table = extract_clean_table_data(table_item)
+         if clean_table:
+             page_tables[page].append(clean_table)
+
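+     # Write one combined dump per PDF: for each page, plain text first,
+     # then every table as pretty-printed JSON.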
+     output_txt_path = output_dir / f"{pdf_path.stem}_extracted.txt"
+     with open(output_txt_path, "w", encoding="utf-8") as f:
+         for page_no in sorted(set(page_texts.keys()).union(page_tables.keys())):
+             f.write(f"=== Page {page_no} ===\n\n")
+
+             texts = page_texts.get(page_no, [])
+             if texts:
+                 f.write("\n")
+                 f.write("\n".join(texts))
+                 f.write("\n\n")
+
+             tables = page_tables.get(page_no, [])
+             if tables:
+                 f.write("tables:\n")
+                 for i, table in enumerate(tables, 1):
+                     table_entry = {
+                         "table_index": i,
+                         **table,
+                     }
+                     f.write(json.dumps(table_entry, ensure_ascii=False, indent=1))
+                     f.write("\n\n")
+
+     logging.info(f"Done: {pdf_path.name} → {output_txt_path.name}")
+
+
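+ # Entry point: find all PDFs next to this script and convert them in
+ # parallel, one whole PDF per worker process.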
+ def main():
+     base_dir = Path(__file__).resolve().parent
+     pdf_files = list(base_dir.glob("*.pdf"))
+
+     if not pdf_files:
+         print("No PDF files found in the current folder.")
+         return
+
+     print(f"{len(pdf_files)} PDF files found. Starting processing.")
+
+     # Manually set VRAM in GB
+     vram_gb = 16  # YOUR GPU VRAM (dedicated memory)
+
+     # Number of parallel processes based on VRAM (~1.3 GB per worker)
+     max_subprocesses = int(vram_gb / 1.3)
+     print(f"Maximum number of parallel subprocesses: {max_subprocesses}")
+
+     accelerator_options = AcceleratorOptions(num_threads=1, device=AcceleratorDevice.AUTO)
+
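+     # "spawn" starts each worker with a fresh interpreter, so no CUDA or
+     # model state is inherited from the parent process.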
+     ctx = get_context("spawn")
+
+     # Distribute PDFs across processes: one whole PDF per subprocess
+     with ctx.Pool(processes=min(max_subprocesses, len(pdf_files))) as pool:
+         pool.starmap(process_single_pdf, [(pdf_path, accelerator_options) for pdf_path in pdf_files])
+
+     sys.exit(">>> STOP <<<")
+
+ if __name__ == "__main__":
+     main()