import os

# workaround: install old version of pytorch since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158)
# os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html')
os.system('pip install -q torch==1.10.0+cu111 torchvision==0.11+cu111 -f https://download.pytorch.org/whl/torch_stable.html')

# install detectron2 from source (no prebuilt wheel matches the PyTorch version installed above)
# See https://detectron2.readthedocs.io/tutorials/install.html for instructions
#os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html')
os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
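# (detectron2 provides the visual backbone required by LayoutLMv2/LayoutXLM)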

import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

import gradio as gr
import re
import string

from functools import partial
from operator import itemgetter
import collections

import pypdf
from pypdf import PdfReader
from pypdf.errors import PdfReadError

import pypdfium2 as pdfium
import langdetect
from langdetect import detect_langs

import pandas as pd
import numpy as np
import random
import tempfile
import itertools

from matplotlib import font_manager
from PIL import Image, ImageDraw, ImageFont
import cv2

## files

import sys  
sys.path.insert(0, 'files/')

import functions
from functions import *
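# files/functions.py supplies (via the wildcard import above) the helpers and
# constants used below without a local definition: pdf_to_images,
# extraction_data_from_image, prepare_inference_features_paragraph, CustomDataset,
# predictions_token_level, predictions_paragraph_level, get_labeled_images,
# max_length, max_imgboxes, image_blank, cls_box1/sep_box1 and cls_box2/sep_box2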

# update pip
os.system('python -m pip install --upgrade pip')

## model / feature extractor / tokenizer

model_id_lilt = "pierreguillou/lilt-xlm-roberta-base-finetuned-with-DocLayNet-base-at-paragraphlevel-ml512"
model_id1 = model_id_lilt
model_id_layoutxlm = "pierreguillou/layout-xlm-base-finetuned-with-DocLayNet-base-at-paragraphlevel-ml512"
model_id2 = model_id_layoutxlm

# tokenizer for LayoutXLM
tokenizer_id_layoutxlm = "xlm-roberta-base"

# get device
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

## model LiLT
import transformers
from transformers import AutoTokenizer, AutoModelForTokenClassification
tokenizer_lilt = AutoTokenizer.from_pretrained(model_id_lilt)
model_lilt = AutoModelForTokenClassification.from_pretrained(model_id_lilt)
model_lilt.to(device)

tokenizer1 = tokenizer_lilt
model1 = model_lilt

## model LayoutXLM
from transformers import LayoutLMv2ForTokenClassification
model_layoutxlm = LayoutLMv2ForTokenClassification.from_pretrained(model_id_layoutxlm)
model_layoutxlm.to(device)

# feature extractor
from transformers import LayoutLMv2FeatureExtractor
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
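# apply_ocr=False: the words and bounding boxes are supplied by the external OCR
# step (PyTesseract, run inside files/functions.py) instead of the feature
# extractor's built-in OCR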

# tokenizer
from transformers import AutoTokenizer
tokenizer_layoutxlm = AutoTokenizer.from_pretrained(tokenizer_id_layoutxlm)

tokenizer2 = tokenizer_layoutxlm
model2 = model_layoutxlm

# get labels
id2label = model_lilt.config.id2label
label2id = model_lilt.config.label2id
num_labels = len(id2label)
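# the 11 DocLayNet labels: Caption, Footnote, Formula, List-item, Page-footer,
# Page-header, Picture, Section-header, Table, Text, Title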

# APP outputs by model
def app_outputs(uploaded_pdf):
    filename, msg, images = pdf_to_images(uploaded_pdf)
    num_images = len(images)

    if not msg.startswith("Error with the PDF"):
    
        # Extraction of image data (text and bounding boxes)
        dataset, texts_lines, texts_pars, texts_lines_par, row_indexes, par_boxes, line_boxes, lines_par_boxes = extraction_data_from_image(images)
        
        # prepare our data in the format of the model
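        # each page is tokenized and split into chunks of up to max_length (512)
        # tokens; cls_box/sep_box are the dummy bounding boxes attached to each
        # model's special tokens (all these constants come from files/functions.py)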
        # model1
        prepare_inference_features_partial1 = partial(prepare_inference_features_paragraph, tokenizer=tokenizer1, max_length=max_length, cls_box=cls_box1, sep_box=sep_box1)
        encoded_dataset1 = dataset.map(prepare_inference_features_partial1, batched=True, batch_size=64, remove_columns=dataset.column_names)
        custom_encoded_dataset1 = CustomDataset(encoded_dataset1, tokenizer1)
        # model2
        prepare_inference_features_partial2 = partial(prepare_inference_features_paragraph, tokenizer=tokenizer2, max_length=max_length, cls_box=cls_box2, sep_box=sep_box2)
        encoded_dataset2 = dataset.map(prepare_inference_features_partial2, batched=True, batch_size=64, remove_columns=dataset.column_names)
        custom_encoded_dataset2 = CustomDataset(encoded_dataset2, tokenizer2)
        
        # Get predictions (token level)
        # model1
        outputs1, images_ids_list1, chunk_ids1, input_ids1, bboxes1 = predictions_token_level(images, custom_encoded_dataset1, model_id1, model1)
        # model2
        outputs2, images_ids_list2, chunk_ids2, input_ids2, bboxes2 = predictions_token_level(images, custom_encoded_dataset2, model_id2, model2)
        
        # Get predictions (paragraph level)
        bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df = predictions_paragraph_level(max_length, tokenizer1, id2label, dataset, outputs1, images_ids_list1, chunk_ids1, input_ids1, bboxes1, cls_box1, sep_box1, tokenizer2, outputs2, images_ids_list2, chunk_ids2, input_ids2, bboxes2, cls_box2, sep_box2)
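
        # The ensemble step above (implemented in files/functions.py) combines the
        # two models as described in the APP header. A minimal sketch of the idea,
        # assuming `probs1` and `probs2` hold one paragraph's per-label probability
        # vectors from each model (illustrative names, not the helper's actual code):
        #
        #   probs1 = probs1 / probs1.sum()   # normalize model 1 probabilities
        #   probs2 = probs2 / probs2.sum()   # normalize model 2 probabilities
        #   label = id2label[int(np.argmax(probs1 + probs2))]  # highest summed probability wins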
        
        # Get labeled images with lines bounding boxes
        images = get_labeled_images(id2label, dataset, images_ids_list1, bboxes_list_dict, probs_dict_dict)

        img_files = list()
        # save the page images (now labeled with bounding boxes) as files
        for i in range(num_images):
            if filename != "files/blank.png":
                img_file = f"img_{i}_" + filename.replace(".pdf", ".png")
            else:
                img_file = filename.replace(".pdf", ".png")
            img_file = img_file.replace("/", "_")
            images[i].save(img_file)
            img_files.append(img_file)

        if num_images < max_imgboxes:
            img_files += [image_blank]*(max_imgboxes - num_images)
            images += [Image.open(image_blank)]*(max_imgboxes - num_images)
            for count in range(max_imgboxes - num_images):
                df[num_images + count] = pd.DataFrame()
        else:
            img_files = img_files[:max_imgboxes]
            images = images[:max_imgboxes]
            df = dict(itertools.islice(df.items(), max_imgboxes))

        # save the dataframes as CSV files
        csv_files = list()
        for i in range(max_imgboxes):
            csv_file = f"csv_{i}_" + filename.replace(".pdf", ".csv")
            csv_file = csv_file.replace("/", "_")
            df[i].to_csv(csv_file, encoding="utf-8", index=False)  # write the CSV before exposing it
            csv_files.append(gr.File.update(value=csv_file, visible=True))

    else:
        img_files, images, csv_files = [""]*max_imgboxes, [""]*max_imgboxes, [""]*max_imgboxes
        img_files[0], img_files[1] = image_blank, image_blank
        images[0], images[1] = Image.open(image_blank), Image.open(image_blank)
        csv_file = "csv_wo_content.csv"
        csv_files[0], csv_files[1] = gr.File.update(value=csv_file, visible=True), gr.File.update(value=csv_file, visible=True)
        df, df_empty = dict(), pd.DataFrame()
        df_empty.to_csv(csv_file, encoding="utf-8", index=False)  # write an empty CSV for the file components
        df[0], df[1] = df_empty, df_empty  # return the empty dataframes themselves (to_csv returns None)
    
    return msg, img_files[0], img_files[1], images[0], images[1], csv_files[0], csv_files[1], df[0], df[1]
   
# Gradio APP
with gr.Blocks(title='Inference APP for Document Understanding at paragraph level (v3 - Ensemble "LiLT + LayoutXLM" base)', css=".gradio-container") as demo:
    gr.HTML("""
    <div style="font-family:'Times New Roman', 'Serif'; font-size:26pt; font-weight:bold; text-align:center;"><h1>Inference APP for Document Understanding at paragraph level (v3 - Ensemble "LiLT + LayoutXLM" base)</h1></div>
    <div style="margin-top: 40px"><p>(04/04/2023) This Inference APP uses an ensemble of 2 Document Understanding models finetuned on the dataset DocLayNet base at paragraph level (chunk size of 512 tokens) and combined with XLM-RoBERTa base: <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/pierreguillou/lilt-base-finetuned-with-DocLayNet-base-at-paragraphlevel-ml512" target="_blank">LiLT base</a> and <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/pierreguillou/layout-xlm-base-finetuned-with-DocLayNet-base-at-paragraphlevel-ml512" target="_blank">LayoutXLM base</a>.</p><p>This ensemble calculates the probabilities of each block from the outputs of the models for each label before selecting the label with the highest sum of the normalized probabilities.</p>
    <p>Note: <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://arxiv.org/abs/2202.13669" target="_blank">LiLT (Language-Independent Layout Transformer)</a> and <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://arxiv.org/abs/2104.08836" target="_blank">LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding</a> are Document Understanding models that use both layout and text in order to detect labels of bounding boxes. Combined with the model <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/xlm-roberta-base" target="_blank">XLM-RoBERTa base</a>, these finetuned models have the capacity to <b>understand any language</b>. Finetuned on the dataset <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/datasets/pierreguillou/DocLayNet-base" target="_blank">DocLayNet base</a>, they can <b>classify any bounding box (and its OCR text) into 11 labels</b> (Caption, Footnote, Formula, List-item, Page-footer, Page-header, Picture, Section-header, Table, Text, Title).</p>
    <p>They rely on an external OCR engine to get words and bounding boxes from the document image. Thus, this APP first runs an OCR engine (<a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://github.com/madmaze/pytesseract#python-tesseract" target="_blank">PyTesseract</a>) to get the words and bounding boxes, then runs the 2 models (already fine-tuned on the dataset DocLayNet base at paragraph level) on the individual tokens, then sums the normalized block probabilities as explained above, and finally visualizes the result at paragraph level!</p>
    <p><b>It allows you to get all pages of any PDF (in any language) with bounding boxes labeled at paragraph level, together with the associated dataframes of labeled data (bounding boxes, texts, labels) :-)</b></p></div>
    <div><p>However, the inference time per page can be high when running the model on CPU due to the number of paragraph predictions to be made. Therefore, to avoid running this APP for too long, <b>only the first 2 pages are processed by this APP</b>. If you want to increase this limit, you can either clone this APP in Hugging Face Space (or run its <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://github.com/piegu/language-models/blob/master/Gradio_inference_on_Ensemble_LiLT_&_LayoutXLM_base_model_finetuned_on_DocLayNet_base_in_any_language_at_levelparagraphs_ml512.ipynb" target="_blank">notebook</a> on your own platform) and change the value of the parameter <code>max_imgboxes</code>, or run the inference notebook "<a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://github.com/piegu/language-models/blob/master/inference_on_Ensemble_LiLT_&_LayoutXLM_base_model_finetuned_on_DocLayNet_base_in_any_language_at_levelparagraphs_ml512.ipynb" target="_blank">Document AI | Inference at paragraph level by using the association of 2 Document Understanding models (LiLT and LayoutXLM base fine-tuned on DocLayNet base dataset)</a>" on your own platform as it does not have this limit.</p></div>
    <div style="margin-top: 20px"><p>Links to Document Understanding APPs:</p><ul><li>Line level: <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v1" target="_blank">v1 (LiLT base)</a> | <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v2" target="_blank">v2 (LayoutXLM base)</a> | <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v3" target="_blank">v3 (LilT base vs LayoutXLM base)</a></li><li>Paragraph level: <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v1" target="_blank">v1 (LiLT base)</a> | <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v2" target="_blank">v2 (LayoutXLM base)</a> | <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v3" target="_blank">v3 (LilT base vs LayoutXLM base)</a></li></ul></div>
    <div style="margin-top: 20px"><p>More information about the DocLayNet datasets, the finetuning of the model and this APP in the following blog posts:</p>
    <ul><li>(03/31/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-inference-app-and-fine-tuning-notebook-for-document-understanding-at-paragraph-level-3507af80573d" target="_blank">Document AI | Inference APP and fine-tuning notebook for Document Understanding at paragraph level with LayoutXLM base</a></li><li>(03/25/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-app-to-compare-the-document-understanding-lilt-and-layoutxlm-base-models-at-line-1c53eb481a15" target="_blank">Document AI | APP to compare the Document Understanding LiLT and LayoutXLM (base) models at line level</a></li><li>(03/05/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-inference-app-and-fine-tuning-notebook-for-document-understanding-at-line-level-with-b08fdca5f4dc" target="_blank">Document AI | Inference APP and fine-tuning notebook for Document Understanding at line level with LayoutXLM base</a></li><li>(02/14/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-inference-app-for-document-understanding-at-line-level-a35bbfa98893" target="_blank">Document AI | Inference APP for Document Understanding at line level</a></li><li>(02/10/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-document-understanding-model-at-line-level-with-lilt-tesseract-and-doclaynet-dataset-347107a643b8" target="_blank">Document AI | Document Understanding model at line level with LiLT, Tesseract and DocLayNet dataset</a></li><li>(01/31/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-doclaynet-image-viewer-app-3ac54c19956" target="_blank">Document AI | DocLayNet image viewer APP</a></li><li>(01/27/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-processing-of-doclaynet-dataset-to-be-used-by-layout-models-of-the-hugging-face-hub-308d8bd81cdb" target="_blank">Document AI | Processing of DocLayNet dataset to be used by layout models of the Hugging Face hub (finetuning, inference)</a></li></ul></div> 
    """)
    with gr.Row():
        pdf_file = gr.File(label="PDF")
    with gr.Row():
        submit_btn = gr.Button(f"Display first {max_imgboxes} labeled PDF pages")
        reset_btn = gr.Button(value="Clear")
    with gr.Row():
        output_msg = gr.Textbox(label="Output message")
    with gr.Row():
        fileboxes = []
        for num_page in range(max_imgboxes):
            file_path = gr.File(visible=True, label=f"Image file of the PDF page n°{num_page}")
            fileboxes.append(file_path)
    with gr.Row():
        imgboxes = []
        for num_page in range(max_imgboxes):
            img = gr.Image(type="pil", label=f"Image of the PDF page n°{num_page}")
            imgboxes.append(img)
    with gr.Row():
        csvboxes = []
        for num_page in range(max_imgboxes):
            csv = gr.File(visible=True, label=f"CSV file at paragraph level (page {num_page})")
            csvboxes.append(csv)
    with gr.Row():
        dfboxes = []
        for num_page in range(max_imgboxes):
            df = gr.Dataframe(
                      headers=["bounding boxes", "texts", "labels"],
                      datatype=["str", "str", "str"],
                      col_count=(3, "fixed"), 
                      visible=True,
                      label=f"Data of page {num_page}",
                      type="pandas",
                      wrap=True
                    )
            dfboxes.append(df)

    outputboxes = [output_msg] + fileboxes + imgboxes + csvboxes + dfboxes
    submit_btn.click(app_outputs, inputs=[pdf_file], outputs=outputboxes)
    # https://github.com/gradio-app/gradio/pull/2044/files#diff-a91dd2749f68bb7d0099a0f4079a4fd2d10281e299e7b451cb1bb876a7c21975R91
    reset_btn.click(
        lambda: [pdf_file.update(value=None), output_msg.update(value=None)] + [filebox.update(value=None) for filebox in fileboxes] + [imgbox.update(value=None) for imgbox in imgboxes] + [csvbox.update(value=None) for csvbox in csvboxes] + [dfbox.update(value=None) for dfbox in dfboxes],
        inputs=[],
        outputs=[pdf_file, output_msg] + fileboxes + imgboxes + csvboxes + dfboxes
        )
    
    gr.Examples(
        [["files/example.pdf"]],
        [pdf_file],
        outputboxes,
        fn=app_outputs,
        cache_examples=True,
        )
    
demo.launch()