Update data_preprocess.py
data_preprocess.py CHANGED (+746 -745)
@@ -1,746 +1,747 @@
import re
import os
#import streamlit as st
import subprocess
import re
from Bio import Entrez
from docx import Document
import fitz
import spacy
from spacy.cli import download
from NER.PDF import pdf
from NER.WordDoc import wordDoc
from NER.html import extractHTML
from NER.word2Vec import word2vec
#from transformers import pipeline
import urllib.parse, requests
from pathlib import Path
import pandas as pd
import model
import pipeline
import tempfile
import nltk
nltk.download('punkt_tab')
def download_excel_file(url, save_path="temp.xlsx"):
    if "view.officeapps.live.com" in url:
        parsed_url = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
        real_url = urllib.parse.unquote(parsed_url["src"][0])
        response = requests.get(real_url)
        with open(save_path, "wb") as f:
            f.write(response.content)
        return save_path
    elif url.startswith("http") and (url.endswith(".xls") or url.endswith(".xlsx")):
        response = requests.get(url)
        response.raise_for_status()  # Raises an error if the download fails
        with open(save_path, "wb") as f:
            f.write(response.content)
        print(len(response.content))
        return save_path
    else:
        print("URL must point directly to an .xls or .xlsx file\n or it is already downloaded.")
        return url
def extract_text(link,saveFolder):
    try:
        text = ""
        name = link.split("/")[-1]
        print("name: ", name)
        #file_path = Path(saveFolder) / name
        local_temp_path = os.path.join(tempfile.gettempdir(), name)
        print("this is local temp path: ", local_temp_path)
        if os.path.exists(local_temp_path):
            input_to_class = local_temp_path
            print("exist")
        else:
            #input_to_class = link  # Let the class handle downloading
            # 1. Check if the file exists in the shared Google Drive folder
            file_id = pipeline.find_drive_file(name, saveFolder)
            if file_id:
                print("📥 Downloading from Google Drive...")
                pipeline.download_file_from_drive(name, saveFolder, local_temp_path)
            else:
                print("🌐 Downloading from web link...")
                response = requests.get(link)
                with open(local_temp_path, 'wb') as f:
                    f.write(response.content)
                print("✅ Saved locally.")

                # 2. Upload to Drive so it's available for later
                pipeline.upload_file_to_drive(local_temp_path, name, saveFolder)

            input_to_class = local_temp_path
        print(input_to_class)
        # pipeline.download_file_from_drive(name, saveFolder, local_temp_path)
        # pdf
        if link.endswith(".pdf"):
            # if file_path.is_file():
            #     link = saveFolder + "/" + name
            #     print("File exists.")
            #p = pdf.PDF(local_temp_path, saveFolder)
            print("inside pdf and input to class: ", input_to_class)
            print("save folder in extract text: ", saveFolder)
            p = pdf.PDF(input_to_class, saveFolder)
            #p = pdf.PDF(link,saveFolder)
            #text = p.extractTextWithPDFReader()
            text = p.extractText()
            print("text from pdf:")
            print(text)
            #text_exclude_table = p.extract_text_excluding_tables()
        # worddoc
        elif link.endswith(".doc") or link.endswith(".docx"):
            #d = wordDoc.wordDoc(local_temp_path,saveFolder)
            d = wordDoc.wordDoc(input_to_class,saveFolder)
            text = d.extractTextByPage()
        # html
        else:
            if link.split(".")[-1].lower() not in "xlsx":
                if "http" in link or "html" in link:
                    print("html link: ", link)
                    html = extractHTML.HTML("",link)
                    text = html.getListSection()  # the text is already clean
                    print("text html: ")
                    print(text)
        # Cleanup: delete the local temp file
        if name:
            if os.path.exists(local_temp_path):
                os.remove(local_temp_path)
                print(f"🧹 Deleted local temp file: {local_temp_path}")
        print("done extract text")
    except:
        text = ""
    return text

def extract_table(link,saveFolder):
    try:
        table = []
        name = link.split("/")[-1]
        #file_path = Path(saveFolder) / name
        local_temp_path = os.path.join(tempfile.gettempdir(), name)
        if os.path.exists(local_temp_path):
            input_to_class = local_temp_path
            print("exist")
        else:
            #input_to_class = link  # Let the class handle downloading
            # 1. Check if the file exists in the shared Google Drive folder
            file_id = pipeline.find_drive_file(name, saveFolder)
            if file_id:
                print("📥 Downloading from Google Drive...")
                pipeline.download_file_from_drive(name, saveFolder, local_temp_path)
            else:
                print("🌐 Downloading from web link...")
                response = requests.get(link)
                with open(local_temp_path, 'wb') as f:
                    f.write(response.content)
                print("✅ Saved locally.")

                # 2. Upload to Drive so it's available for later
                pipeline.upload_file_to_drive(local_temp_path, name, saveFolder)

            input_to_class = local_temp_path
        print(input_to_class)
        #pipeline.download_file_from_drive(name, saveFolder, local_temp_path)
        # pdf
        if link.endswith(".pdf"):
            # if file_path.is_file():
            #     link = saveFolder + "/" + name
            #     print("File exists.")
            #p = pdf.PDF(local_temp_path,saveFolder)
            p = pdf.PDF(input_to_class,saveFolder)
            table = p.extractTable()
        # worddoc
        elif link.endswith(".doc") or link.endswith(".docx"):
            #d = wordDoc.wordDoc(local_temp_path,saveFolder)
            d = wordDoc.wordDoc(input_to_class,saveFolder)
            table = d.extractTableAsList()
        # excel
        elif link.split(".")[-1].lower() in "xlsx":
            # download the excel file if it is not downloaded yet
            savePath = saveFolder +"/"+ link.split("/")[-1]
            excelPath = download_excel_file(link, savePath)
            try:
                #xls = pd.ExcelFile(excelPath)
                xls = pd.ExcelFile(local_temp_path)
                table_list = []
                for sheet_name in xls.sheet_names:
                    df = pd.read_excel(xls, sheet_name=sheet_name)
                    cleaned_table = df.fillna("").astype(str).values.tolist()
                    table_list.append(cleaned_table)
                table = table_list
            except Exception as e:
                print("❌ Failed to extract tables from Excel:", e)
        # html
        elif "http" in link or "html" in link:
            html = extractHTML.HTML("",link)
            table = html.extractTable()  # table is a list
        table = clean_tables_format(table)
        # Cleanup: delete the local temp file
        if os.path.exists(local_temp_path):
            os.remove(local_temp_path)
            print(f"🧹 Deleted local temp file: {local_temp_path}")
    except:
        table = []
    return table

def clean_tables_format(tables):
    """
    Ensures all tables are in a consistent format: List[List[List[str]]]
    Cleans by:
    - Removing empty strings and rows
    - Converting all cells to strings
    - Handling DataFrames and list-of-lists
    """
    cleaned = []
    if tables:
        for table in tables:
            standardized = []

            # Case 1: Pandas DataFrame
            if isinstance(table, pd.DataFrame):
                table = table.fillna("").astype(str).values.tolist()

            # Case 2: List of Lists
            if isinstance(table, list) and all(isinstance(row, list) for row in table):
                for row in table:
                    filtered_row = [str(cell).strip() for cell in row if str(cell).strip()]
                    if filtered_row:
                        standardized.append(filtered_row)

            if standardized:
                cleaned.append(standardized)

    return cleaned
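
# --- Illustrative usage sketch (added for clarity; not part of the original pipeline). ---
# Shows how clean_tables_format() normalizes a mix of a DataFrame and a list-of-lists
# into List[List[List[str]]]; the sample values below are hypothetical.
if __name__ == "__main__":
    _demo_tables = [
        pd.DataFrame({"sample": ["KM1", "KM2"], "country": ["Laos", None]}),
        [["A1YU101", "", "Thailand"], ["", ""]],
    ]
    # Expected: [[['KM1', 'Laos'], ['KM2']], [['A1YU101', 'Thailand']]]
    print(clean_tables_format(_demo_tables))
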
import json
def normalize_text_for_comparison(s: str) -> str:
    """
    Normalizes text for robust comparison by:
    1. Converting to lowercase.
    2. Replacing all types of newlines with a single consistent newline (\n).
    3. Removing extra spaces (e.g., multiple spaces, leading/trailing spaces on lines).
    4. Stripping leading/trailing whitespace from the entire string.
    """
    s = s.lower()
    s = s.replace('\r\n', '\n')  # Handle Windows newlines
    s = s.replace('\r', '\n')    # Handle Mac classic newlines

    # Replace sequences of whitespace (including multiple newlines) with a single space
    # This might be too aggressive if you need to preserve paragraph breaks,
    # but good for exact word-sequence matching.
    s = re.sub(r'\s+', ' ', s)

    return s.strip()
def merge_text_and_tables(text, tables, max_tokens=12000, keep_tables=True, tokenizer="cl100k_base", accession_id=None, isolate=None):
    """
    Merge cleaned text and tables into one string for LLM input.
    - Avoids duplicating tables already in the text
    - Extracts only relevant rows from large tables
    - Skips or saves oversized tables
    """
    import importlib
    json = importlib.import_module("json")

    def estimate_tokens(text_str):
        try:
            import tiktoken  # imported locally so the fallback below still applies if tiktoken is unavailable
            enc = tiktoken.get_encoding(tokenizer)
            return len(enc.encode(text_str))
        except:
            return len(text_str) // 4  # Fallback estimate

    def is_table_relevant(table, keywords, accession_id=None):
        flat = " ".join(" ".join(row).lower() for row in table)
        if accession_id and accession_id.lower() in flat:
            return True
        return any(kw.lower() in flat for kw in keywords)
    preview, preview1 = "",""
    llm_input = "## Document Text\n" + text.strip() + "\n"
    clean_text = normalize_text_for_comparison(text)

    if tables:
        for idx, table in enumerate(tables):
            keywords = ["province","district","region","village","location", "country", "region", "origin", "ancient", "modern"]
            if accession_id: keywords += [accession_id.lower()]
            if isolate: keywords += [isolate.lower()]
            if is_table_relevant(table, keywords, accession_id):
                if len(table) > 0:
                    for tab in table:
                        preview = " ".join(tab) if tab else ""
                        preview1 = "\n".join(tab) if tab else ""
                        clean_preview = normalize_text_for_comparison(preview)
                        clean_preview1 = normalize_text_for_comparison(preview1)
                        if clean_preview not in clean_text:
                            if clean_preview1 not in clean_text:
                                table_str = json.dumps([tab], indent=2)
                                llm_input += f"## Table {idx+1}\n{table_str}\n"
    return llm_input.strip()
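
# --- Illustrative usage sketch (added for clarity; not part of the original pipeline). ---
# Demonstrates how merge_text_and_tables() appends only tables that are not already
# present in the document text; the isolate ID and row values below are hypothetical.
if __name__ == "__main__":
    _demo_text = "Isolate KM1 was sequenced from a sample collected in northern Laos."
    _demo_tables = [[["KM1", "Luang Prabang", "Laos"]]]
    # The table row mentions the isolate, so it is judged relevant and appended
    # as a "## Table 1" JSON block after the "## Document Text" section.
    print(merge_text_and_tables(_demo_text, _demo_tables, isolate="KM1"))
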
def preprocess_document(link, saveFolder, accession=None, isolate=None):
    try:
        text = extract_text(link, saveFolder)
        print("text and link")
        print(link)
        print(text)
    except: text = ""
    try:
        tables = extract_table(link, saveFolder)
    except: tables = []
    if accession: accession = accession
    if isolate: isolate = isolate
    try:
        final_input = merge_text_and_tables(text, tables, max_tokens=12000, accession_id=accession, isolate=isolate)
    except: final_input = ""
    return text, tables, final_input

def extract_sentences(text):
    sentences = re.split(r'(?<=[.!?])\s+', text)
    return [s.strip() for s in sentences if s.strip()]

def is_irrelevant_number_sequence(text):
    if re.search(r'\b[A-Z]{2,}\d+\b|\b[A-Za-z]+\s+\d+\b', text, re.IGNORECASE):
        return False
    word_count = len(re.findall(r'\b[A-Za-z]{2,}\b', text))
    number_count = len(re.findall(r'\b\d[\d\.]*\b', text))
    total_tokens = len(re.findall(r'\S+', text))
    if total_tokens > 0 and (word_count / total_tokens < 0.2) and (number_count / total_tokens > 0.5):
        return True
    elif re.fullmatch(r'(\d+(\.\d+)?\s*)+', text.strip()):
        return True
    return False

def remove_isolated_single_digits(sentence):
    tokens = sentence.split()
    filtered_tokens = []
    for token in tokens:
        if token == '0' or token == '1':
            pass
        else:
            filtered_tokens.append(token)
    return ' '.join(filtered_tokens).strip()
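
# --- Illustrative usage sketch (added for clarity; not part of the original pipeline). ---
# Shows the two sentence filters: runs of bare numbers are flagged as irrelevant,
# sentences containing sample codes are kept, and stray 0/1 tokens are dropped.
if __name__ == "__main__":
    print(is_irrelevant_number_sequence("0.12 3.4 5 6 7"))                  # True (mostly numbers)
    print(is_irrelevant_number_sequence("Isolates KM1-KM9 are from Laos"))  # False (contains codes)
    print(remove_isolated_single_digits("site 1 of region 0 in Laos"))      # "site of region in Laos"
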
def get_contextual_sentences_BFS(text_content, keyword, depth=2):
    def extract_codes(sentence):
        # Match codes like 'A1YU101', 'KM1', 'MO6' - at least 2 letters + numbers
        return [code for code in re.findall(r'\b[A-Z]{2,}[0-9]+\b', sentence, re.IGNORECASE)]
    sentences = extract_sentences(text_content)
    relevant_sentences = set()
    initial_keywords = set()

    # Define a regex to capture codes like A1YU101 or KM1
    # This pattern looks for an alphanumeric sequence followed by digits at the end of the string
    code_pattern = re.compile(r'([A-Z0-9]+?)(\d+)$', re.IGNORECASE)

    # Attempt to parse the keyword into its prefix and numerical part using re.search
    keyword_match = code_pattern.search(keyword)

    keyword_prefix = None
    keyword_num = None

    if keyword_match:
        keyword_prefix = keyword_match.group(1).lower()
        keyword_num = int(keyword_match.group(2))

    for sentence in sentences:
        sentence_added = False

        # 1. Check for exact match of the keyword
        if re.search(r'\b' + re.escape(keyword) + r'\b', sentence, re.IGNORECASE):
            relevant_sentences.add(sentence.strip())
            initial_keywords.add(keyword.lower())
            sentence_added = True

        # 2. Check for range patterns (e.g., A1YU101-A1YU137)
        # The range pattern should be broad enough to capture the full code string within the range.
        range_matches = re.finditer(r'([A-Z0-9]+-\d+)', sentence, re.IGNORECASE)  # More specific range pattern if needed, or rely on full code pattern below
        range_matches = re.finditer(r'([A-Z0-9]+\d+)-([A-Z0-9]+\d+)', sentence, re.IGNORECASE)  # This is the more robust range pattern

        for r_match in range_matches:
            start_code_str = r_match.group(1)
            end_code_str = r_match.group(2)

            # CRITICAL FIX: Use code_pattern.search for start_match and end_match
            start_match = code_pattern.search(start_code_str)
            end_match = code_pattern.search(end_code_str)

            if keyword_prefix and keyword_num is not None and start_match and end_match:
                start_prefix = start_match.group(1).lower()
                end_prefix = end_match.group(1).lower()
                start_num = int(start_match.group(2))
                end_num = int(end_match.group(2))

                # Check if the keyword's prefix matches and its number is within the range
                if keyword_prefix == start_prefix and \
                   keyword_prefix == end_prefix and \
                   start_num <= keyword_num <= end_num:
                    relevant_sentences.add(sentence.strip())
                    initial_keywords.add(start_code_str.lower())
                    initial_keywords.add(end_code_str.lower())
                    sentence_added = True
                    break  # Only need to find one matching range per sentence

        # 3. If the sentence was added due to exact match or range, add all its alphanumeric codes
        #    to initial_keywords to ensure graph traversal from related terms.
        if sentence_added:
            for word in extract_codes(sentence):
                initial_keywords.add(word.lower())

    # Build word_to_sentences mapping for all sentences
    word_to_sentences = {}
    for sent in sentences:
        codes_in_sent = set(extract_codes(sent))
        for code in codes_in_sent:
            word_to_sentences.setdefault(code.lower(), set()).add(sent.strip())

    # Build the graph
    graph = {}
    for sent in sentences:
        codes = set(extract_codes(sent))
        for word1 in codes:
            word1_lower = word1.lower()
            graph.setdefault(word1_lower, set())
            for word2 in codes:
                word2_lower = word2.lower()
                if word1_lower != word2_lower:
                    graph[word1_lower].add(word2_lower)

    # Perform BFS/graph traversal
    queue = [(k, 0) for k in initial_keywords if k in word_to_sentences]
    visited_words = set(initial_keywords)

    while queue:
        current_word, level = queue.pop(0)
        if level >= depth:
            continue

        relevant_sentences.update(word_to_sentences.get(current_word, []))

        for neighbor in graph.get(current_word, []):
            if neighbor not in visited_words:
                visited_words.add(neighbor)
                queue.append((neighbor, level + 1))

    final_sentences = set()
    for sentence in relevant_sentences:
        if not is_irrelevant_number_sequence(sentence):
            processed_sentence = remove_isolated_single_digits(sentence)
            if processed_sentence:
                final_sentences.add(processed_sentence)

    return "\n".join(sorted(list(final_sentences)))
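
# --- Illustrative usage sketch (added for clarity; not part of the original pipeline). ---
# get_contextual_sentences_BFS() pulls sentences that mention the keyword (including
# range expressions such as A1YU101-A1YU105) plus sentences reachable through shared
# sample codes; the toy text and IDs below are hypothetical. The third sentence is
# pulled in via the shared code KM2.
if __name__ == "__main__":
    _demo_text = ("Samples A1YU101-A1YU105 were collected in 2019. "
                  "Sample A1YU103 clusters with KM2. "
                  "KM2 originates from Luang Prabang, Laos.")
    print(get_contextual_sentences_BFS(_demo_text, "A1YU103", depth=2))
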
def get_contextual_sentences_DFS(text_content, keyword, depth=2):
    sentences = extract_sentences(text_content)

    # Build word-to-sentences mapping
    word_to_sentences = {}
    for sent in sentences:
        words_in_sent = set(re.findall(r'\b[A-Za-z0-9\-_\/]+\b', sent))
        for word in words_in_sent:
            word_to_sentences.setdefault(word.lower(), set()).add(sent.strip())

    # Function to extract codes in a sentence
    def extract_codes(sentence):
        # Only codes like 'KSK1', 'MG272794', not pure numbers
        return [code for code in re.findall(r'\b[A-Z]{2,}[0-9]+\b', sentence, re.IGNORECASE)]

    # DFS with priority based on distance to keyword and early stop if country found
    def dfs_traverse(current_word, current_depth, max_depth, visited_words, collected_sentences, parent_sentence=None):
        country = "unknown"
        if current_depth > max_depth:
            return country, False

        if current_word not in word_to_sentences:
            return country, False

        for sentence in word_to_sentences[current_word]:
            if sentence == parent_sentence:
                continue  # avoid reusing the same sentence

            collected_sentences.add(sentence)

            #print("current_word:", current_word)
            small_sen = extract_context(sentence, current_word, int(len(sentence) / 4))
            #print(small_sen)
            country = model.get_country_from_text(small_sen)
            #print("small context country:", country)
            if country.lower() != "unknown":
                return country, True
            else:
                country = model.get_country_from_text(sentence)
                #print("full sentence country:", country)
                if country.lower() != "unknown":
                    return country, True

            codes_in_sentence = extract_codes(sentence)
            idx = next((i for i, code in enumerate(codes_in_sentence) if code.lower() == current_word.lower()), None)
            if idx is None:
                continue

            sorted_children = sorted(
                [code for code in codes_in_sentence if code.lower() not in visited_words],
                key=lambda x: (abs(codes_in_sentence.index(x) - idx),
                               0 if codes_in_sentence.index(x) > idx else 1)
            )

            #print("sorted_children:", sorted_children)
            for child in sorted_children:
                child_lower = child.lower()
                if child_lower not in visited_words:
                    visited_words.add(child_lower)
                    country, should_stop = dfs_traverse(
                        child_lower, current_depth + 1, max_depth,
                        visited_words, collected_sentences, parent_sentence=sentence
                    )
                    if should_stop:
                        return country, True

        return country, False

    # Begin DFS
    collected_sentences = set()
    visited_words = set([keyword.lower()])
    country, status = dfs_traverse(keyword.lower(), 0, depth, visited_words, collected_sentences)

    # Filter irrelevant sentences
    final_sentences = set()
    for sentence in collected_sentences:
        if not is_irrelevant_number_sequence(sentence):
            processed = remove_isolated_single_digits(sentence)
            if processed:
                final_sentences.add(processed)
    if not final_sentences:
        return country, text_content
    return country, "\n".join(sorted(list(final_sentences)))

# Helper function for normalizing text for overlap comparison
def normalize_for_overlap(s: str) -> str:
    s = re.sub(r'[^a-zA-Z0-9\s]', ' ', s).lower()
    s = re.sub(r'\s+', ' ', s).strip()
    return s

def merge_texts_skipping_overlap(text1: str, text2: str) -> str:
    if not text1: return text2
    if not text2: return text1

    # Case 1: text2 is fully contained in text1 or vice-versa
    if text2 in text1:
        return text1
    if text1 in text2:
        return text2

    # --- Option 1: Original behavior (suffix of text1, prefix of text2) ---
    # This is what your function was primarily designed for.
    # It looks for the overlap at the "junction" of text1 and text2.

    max_junction_overlap = 0
    for i in range(min(len(text1), len(text2)), 0, -1):
        suffix1 = text1[-i:]
        prefix2 = text2[:i]
        # Prioritize exact match, then normalized match
        if suffix1 == prefix2:
            max_junction_overlap = i
            break
        elif normalize_for_overlap(suffix1) == normalize_for_overlap(prefix2):
            max_junction_overlap = i
            break  # Take the first (longest) normalized match

    if max_junction_overlap > 0:
        merged_text = text1 + text2[max_junction_overlap:]
        return re.sub(r'\s+', ' ', merged_text).strip()

    # --- Option 2: Longest Common Prefix (for cases like "Hi, I am Vy.") ---
    # This addresses your specific test case where the overlap is at the very beginning of both strings.
    # This is often used when trying to deduplicate content that shares a common start.

    longest_common_prefix_len = 0
    min_len = min(len(text1), len(text2))
    for i in range(min_len):
        if text1[i] == text2[i]:
            longest_common_prefix_len = i + 1
        else:
            break

    # If a common prefix is found AND it's a significant portion (e.g., more than a few chars)
    # AND the remaining parts are distinct, then apply this merge.
    # This is a heuristic and might need fine-tuning.
    if longest_common_prefix_len > 0 and \
       text1[longest_common_prefix_len:].strip() and \
       text2[longest_common_prefix_len:].strip():

        # Only merge this way if the remaining parts are not empty (i.e., not exact duplicates)
        # For "Hi, I am Vy. Nice to meet you." and "Hi, I am Vy. Goodbye Vy."
        # common prefix is "Hi, I am Vy."
        # Remaining text1: " Nice to meet you."
        # Remaining text2: " Goodbye Vy."
        # So we merge common_prefix + remaining_text1 + remaining_text2

        common_prefix_str = text1[:longest_common_prefix_len]
        remainder_text1 = text1[longest_common_prefix_len:]
        remainder_text2 = text2[longest_common_prefix_len:]

        merged_text = common_prefix_str + remainder_text1 + remainder_text2
        return re.sub(r'\s+', ' ', merged_text).strip()

    # If neither specific overlap type is found, just concatenate
    merged_text = text1 + text2
    return re.sub(r'\s+', ' ', merged_text).strip()
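
# --- Illustrative usage sketch (added for clarity; not part of the original pipeline). ---
# Two behaviors of merge_texts_skipping_overlap(); the strings below are hypothetical.
if __name__ == "__main__":
    # Containment: the shorter chunk is already inside the longer one, which is returned unchanged.
    print(merge_texts_skipping_overlap("collected in Laos",
                                       "Samples were collected in Laos in 2019."))
    # Junction overlap: the overlapping prefix of the second chunk ("collected in")
    # is detected (via the normalized comparison) and dropped before joining.
    print(merge_texts_skipping_overlap("The sample was collected in",
                                       "collected in Luang Prabang, Laos."))
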
from docx import Document
from pipeline import upload_file_to_drive
# def save_text_to_docx(text_content: str, file_path: str):
#     """
#     Saves a given text string into a .docx file.

#     Args:
#         text_content (str): The text string to save.
#         file_path (str): The full path including the filename where the .docx file will be saved.
#                          Example: '/content/drive/MyDrive/CollectData/Examples/test/SEA_1234/merged_document.docx'
#     """
#     try:
#         document = Document()

#         # Add the entire text as a single paragraph, or split by newlines for multiple paragraphs
#         for paragraph_text in text_content.split('\n'):
#             document.add_paragraph(paragraph_text)

#         document.save(file_path)
#         print(f"Text successfully saved to '{file_path}'")
#     except Exception as e:
#         print(f"Error saving text to docx file: {e}")
# def save_text_to_docx(text_content: str, filename: str, drive_folder_id: str):
#     """
#     Saves a given text string into a .docx file locally, then uploads it to Google Drive.

#     Args:
#         text_content (str): The text string to save.
#         filename (str): The target .docx file name, e.g. 'BRU18_merged_document.docx'.
#         drive_folder_id (str): Google Drive folder ID where to upload the file.
#     """
#     try:
#         # ✅ Save to a temporary local path first
#         print("file name: ", filename)
#         print("length text content: ", len(text_content))
#         local_path = os.path.join(tempfile.gettempdir(), filename)
#         document = Document()
#         for paragraph_text in text_content.split('\n'):
#             document.add_paragraph(paragraph_text)
#         document.save(local_path)
#         print(f"✅ Text saved locally to: {local_path}")

#         # ✅ Upload to Drive
#         pipeline.upload_file_to_drive(local_path, filename, drive_folder_id)
#         print(f"✅ Uploaded '{filename}' to Google Drive folder ID: {drive_folder_id}")

#     except Exception as e:
#         print(f"❌ Error saving or uploading DOCX: {e}")
def save_text_to_docx(text_content: str, full_local_path: str):
    document = Document()
    for paragraph_text in text_content.split('\n'):
        document.add_paragraph(paragraph_text)
    document.save(full_local_path)
    print(f"✅ Saved DOCX locally: {full_local_path}")


'''2 scenarios:
- quick look, then found, then deep dive and directly get the location, then stop
- quick look, then found, then deep dive but no location found, then hold the related words and
  look through the other files iteratively for each related word, find the location, and stop'''
def extract_context(text, keyword, window=500):
    # first, try the accession number
    code_pattern = re.compile(r'([A-Z0-9]+?)(\d+)$', re.IGNORECASE)

    # Attempt to parse the keyword into its prefix and numerical part using re.search
    keyword_match = code_pattern.search(keyword)

    keyword_prefix = None
    keyword_num = None

    if keyword_match:
        keyword_prefix = keyword_match.group(1).lower()
        keyword_num = int(keyword_match.group(2))
    text = text.lower()
    idx = text.find(keyword.lower())
    if idx == -1:
        if keyword_prefix:
            idx = text.find(keyword_prefix)
            if idx == -1:
                return "Sample ID not found."
            return text[max(0, idx-window): idx+window]
    return text[max(0, idx-window): idx+window]
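
# --- Illustrative usage sketch (added for clarity; not part of the original pipeline). ---
# extract_context() lowercases the text and returns a character window around the first
# occurrence of the sample ID (falling back to the parsed ID prefix); the ID and text
# below are hypothetical.
if __name__ == "__main__":
    _demo_text = "Metadata table: isolate A1YU101 was sampled in Luang Prabang, Laos, in 2019."
    print(extract_context(_demo_text, "A1YU101", window=40))
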
def process_inputToken(filePaths, saveLinkFolder,accession=None, isolate=None):
    cache = {}
    country = "unknown"
    output = ""
    tem_output, small_output = "",""
    keyword_appear = (False,"")
    keywords = []
    if isolate: keywords.append(isolate)
    if accession: keywords.append(accession)
    for f in filePaths:
        # scenario 1: direct location: truncate the context and then use the qa model?
        if keywords:
            for keyword in keywords:
                text, tables, final_input = preprocess_document(f,saveLinkFolder, isolate=keyword)
                if keyword in final_input:
                    context = extract_context(final_input, keyword)
                    # quick look: if the country is already in the context, return it
                    country = model.get_country_from_text(context)
                    if country != "unknown":
                        return country, context, final_input
                    else:
                        country = model.get_country_from_text(final_input)
                        if country != "unknown":
                            return country, context, final_input
                        else:  # might be cross-ref
                            keyword_appear = (True, f)
                            cache[f] = context
                            small_output = merge_texts_skipping_overlap(output, context) + "\n"
                            chunkBFS = get_contextual_sentences_BFS(small_output, keyword)
                            countryBFS = model.get_country_from_text(chunkBFS)
                            countryDFS, chunkDFS = get_contextual_sentences_DFS(output, keyword)
                            output = merge_texts_skipping_overlap(output, final_input)
                            if countryDFS != "unknown" and countryBFS != "unknown":
                                if len(chunkDFS) <= len(chunkBFS):
                                    return countryDFS, chunkDFS, output
                                else:
                                    return countryBFS, chunkBFS, output
                            else:
                                if countryDFS != "unknown":
                                    return countryDFS, chunkDFS, output
                                if countryBFS != "unknown":
                                    return countryBFS, chunkBFS, output
                else:
                    # scenario 2:
                    '''cross-ref: ex: A1YU101 keyword in file 2 which includes KM1 but KM1 in file 1
                    but if we look at file 1 first then maybe we can have lookup dict which country
                    such as Thailand as the key and its re'''
                    cache[f] = final_input
                    if keyword_appear[0] == True:
                        for c in cache:
                            if c!=keyword_appear[1]:
                                if cache[c].lower() not in output.lower():
                                    output = merge_texts_skipping_overlap(output, cache[c]) + "\n"
                        chunkBFS = get_contextual_sentences_BFS(output, keyword)
                        countryBFS = model.get_country_from_text(chunkBFS)
                        countryDFS, chunkDFS = get_contextual_sentences_DFS(output, keyword)
                        if countryDFS != "unknown" and countryBFS != "unknown":
                            if len(chunkDFS) <= len(chunkBFS):
                                return countryDFS, chunkDFS, output
                            else:
                                return countryBFS, chunkBFS, output
                        else:
                            if countryDFS != "unknown":
                                return countryDFS, chunkDFS, output
                            if countryBFS != "unknown":
                                return countryBFS, chunkBFS, output
                    else:
                        if cache[f].lower() not in output.lower():
                            output = merge_texts_skipping_overlap(output, cache[f]) + "\n"
    if len(output) == 0 or keyword_appear[0]==False:
        for c in cache:
            if cache[c].lower() not in output.lower():
                output = merge_texts_skipping_overlap(output, cache[c]) + "\n"
    return country, "", output