Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- evalkit_cambrian/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so +3 -0
- evalkit_tf437/lib/libtcl8.6.so +3 -0
- evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/chardata.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/cli.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/fixes.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/formatting.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/ftfy/chardata.py +691 -0
- evalkit_tf437/lib/python3.10/site-packages/ftfy/fixes.py +510 -0
- evalkit_tf437/lib/python3.10/site-packages/ftfy/py.typed +0 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/__init__.py +19 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/_constants.py +13 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/__init__.py +3 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/__pycache__/custom.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/__pycache__/properties.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/__pycache__/relation.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/custom.py +61 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/protection.py +41 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/relation.py +97 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/_read_only.py +190 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/_reader.py +472 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/_write_only.py +160 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/cell_range.py +512 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/header_footer.py +270 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/ole.py +133 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/picture.py +8 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/related.py +17 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/scenario.py +105 -0
- evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/views.py +155 -0
- evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/AUTHORS +98 -0
- evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/INSTALLER +1 -0
- evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/LICENSE +20 -0
- evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/RECORD +30 -0
- evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/REQUESTED +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE.APACHE2 +202 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/RECORD +20 -0
- evalkit_tf437/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/REQUESTED +0 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/__init__.py +59 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/_utils.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/exceptions.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/items.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/source.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/toml_file.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/_compat.py +22 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/_utils.py +158 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/api.py +308 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/container.py +866 -0
- evalkit_tf437/lib/python3.10/site-packages/tomlkit/exceptions.py +227 -0
.gitattributes
CHANGED
|
@@ -537,3 +537,6 @@ janus/lib/python3.10/site-packages/numpy/random/_common.cpython-310-x86_64-linux
|
|
| 537 |
evalkit_cambrian/lib/python3.10/site-packages/google/cloud/storage/__pycache__/bucket.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 538 |
infer_4_47_1/lib/python3.10/lib2to3/tests/__pycache__/test_fixers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 539 |
evalkit_cambrian/lib/python3.10/site-packages/opencv_python.libs/libavformat-d296e685.so.59.27.100 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 537 |
evalkit_cambrian/lib/python3.10/site-packages/google/cloud/storage/__pycache__/bucket.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 538 |
infer_4_47_1/lib/python3.10/lib2to3/tests/__pycache__/test_fixers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 539 |
evalkit_cambrian/lib/python3.10/site-packages/opencv_python.libs/libavformat-d296e685.so.59.27.100 filter=lfs diff=lfs merge=lfs -text
|
| 540 |
+
falcon/lib/python3.10/site-packages/pillow.libs/libtiff-a92b430c.so.6.0.2 filter=lfs diff=lfs merge=lfs -text
|
| 541 |
+
evalkit_cambrian/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 542 |
+
evalkit_tf437/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text
|
evalkit_cambrian/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:94b3381d0a7eb849854e36036eaa73225ac3ba9c23f9b776ecc84560f4df8fd0
|
| 3 |
+
size 1854728
|
evalkit_tf437/lib/libtcl8.6.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:93e108c5abb9c4b9bd0a256743fe8a22ecdd9f4a4740bf6182a6afe1ecaaf112
|
| 3 |
+
size 1983416
|
evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (21.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/chardata.cpython-310.pyc
ADDED
|
Binary file (5.54 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/cli.cpython-310.pyc
ADDED
|
Binary file (3.46 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/fixes.cpython-310.pyc
ADDED
|
Binary file (17.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/ftfy/__pycache__/formatting.cpython-310.pyc
ADDED
|
Binary file (5.61 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/ftfy/chardata.py
ADDED
|
@@ -0,0 +1,691 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This gives other modules access to the gritty details about characters and the
|
| 3 |
+
encodings that use them.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from __future__ import annotations
|
| 7 |
+
|
| 8 |
+
import html
|
| 9 |
+
import itertools
|
| 10 |
+
import re
|
| 11 |
+
import unicodedata
|
| 12 |
+
|
| 13 |
+
# These are the encodings we will try to fix in ftfy, in the
|
| 14 |
+
# order that they should be tried.
|
| 15 |
+
CHARMAP_ENCODINGS = [
|
| 16 |
+
"latin-1",
|
| 17 |
+
"sloppy-windows-1252",
|
| 18 |
+
"sloppy-windows-1251",
|
| 19 |
+
"sloppy-windows-1250",
|
| 20 |
+
"sloppy-windows-1253",
|
| 21 |
+
"sloppy-windows-1254",
|
| 22 |
+
"sloppy-windows-1257",
|
| 23 |
+
"iso-8859-2",
|
| 24 |
+
"macroman",
|
| 25 |
+
"cp437",
|
| 26 |
+
]
|
| 27 |
+
|
| 28 |
+
SINGLE_QUOTE_RE = re.compile("[\u02bc\u2018-\u201b]")
|
| 29 |
+
DOUBLE_QUOTE_RE = re.compile("[\u201c-\u201f]")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _build_regexes() -> dict[str, re.Pattern[str]]:
    """
    Build ENCODING_REGEXES: for each supported single-byte encoding, a fast
    regex that tests whether a string could have been decoded from that
    encoding. The simplest entry is 'ascii', which just checks that every
    character is between U+0000 and U+007F.
    """
    # ASCII gets its own hand-written pattern.
    regexes = {"ascii": re.compile("^[\x00-\x7f]*$")}

    # Bytes 0x80-0xFF, plus 0x1A (which the sloppy-* encodings use to stand
    # in for the replacement character \ufffd), decoded per encoding.
    high_bytes = bytes(range(0x80, 0x100)) + b"\x1a"

    for enc in CHARMAP_ENCODINGS:
        decoded = high_bytes.decode(enc)
        # Bytes 0x00-0x19 and 0x1B-0x7F decode to themselves in every
        # encoding we support, so they appear as literal ranges. All regex
        # metacharacters fall in the 0x1B-0x7F range, so `decoded` needs no
        # escaping when interpolated into the character class.
        regexes[enc] = re.compile(f"^[\x00-\x19\x1b-\x7f{decoded}]*$")
    return regexes
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
ENCODING_REGEXES = _build_regexes()
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _build_html_entities() -> dict[str, str]:
|
| 63 |
+
entities = {}
|
| 64 |
+
# Create a dictionary based on the built-in HTML5 entity dictionary.
|
| 65 |
+
# Add a limited set of HTML entities that we'll also decode if they've
|
| 66 |
+
# been case-folded to uppercase, such as decoding Ñ as "Ñ".
|
| 67 |
+
for name, char in html.entities.html5.items(): # type: ignore
|
| 68 |
+
if name.endswith(";"):
|
| 69 |
+
entities["&" + name] = char
|
| 70 |
+
|
| 71 |
+
# Restrict the set of characters we can attempt to decode if their
|
| 72 |
+
# name has been uppercased. If we tried to handle all entity names,
|
| 73 |
+
# the results would be ambiguous.
|
| 74 |
+
if name == name.lower():
|
| 75 |
+
name_upper = name.upper()
|
| 76 |
+
entity_upper = "&" + name_upper
|
| 77 |
+
if html.unescape(entity_upper) == entity_upper:
|
| 78 |
+
entities[entity_upper] = char.upper()
|
| 79 |
+
return entities
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
HTML_ENTITY_RE = re.compile(r"&#?[0-9A-Za-z]{1,24};")
|
| 83 |
+
HTML_ENTITIES = _build_html_entities()
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def possible_encoding(text: str, encoding: str) -> bool:
    """
    Given text and a single-byte encoding, check whether that text could have
    been decoded from that single-byte encoding.

    In other words, check whether it can be encoded in that encoding, possibly
    sloppily.
    """
    pattern = ENCODING_REGEXES[encoding]
    return pattern.match(text) is not None
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _build_control_char_mapping() -> dict[int, None]:
|
| 98 |
+
"""
|
| 99 |
+
Build a translate mapping that strips likely-unintended control characters.
|
| 100 |
+
See :func:`ftfy.fixes.remove_control_chars` for a description of these
|
| 101 |
+
codepoint ranges and why they should be removed.
|
| 102 |
+
"""
|
| 103 |
+
control_chars: dict[int, None] = {}
|
| 104 |
+
|
| 105 |
+
for i in itertools.chain(
|
| 106 |
+
range(0x00, 0x09),
|
| 107 |
+
[0x0B],
|
| 108 |
+
range(0x0E, 0x20),
|
| 109 |
+
[0x7F],
|
| 110 |
+
range(0x206A, 0x2070),
|
| 111 |
+
[0xFEFF],
|
| 112 |
+
range(0xFFF9, 0xFFFD),
|
| 113 |
+
):
|
| 114 |
+
control_chars[i] = None
|
| 115 |
+
|
| 116 |
+
return control_chars
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
CONTROL_CHARS = _build_control_char_mapping()
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# Recognize UTF-8 sequences that would be valid if it weren't for a b'\xa0'
|
| 123 |
+
# that some Windows-1252 program converted to a plain space.
|
| 124 |
+
#
|
| 125 |
+
# The smaller values are included on a case-by-case basis, because we don't want
|
| 126 |
+
# to decode likely input sequences to unlikely characters. These are the ones
|
| 127 |
+
# that *do* form likely characters before 0xa0:
|
| 128 |
+
#
|
| 129 |
+
# 0xc2 -> U+A0 NO-BREAK SPACE
|
| 130 |
+
# 0xc3 -> U+E0 LATIN SMALL LETTER A WITH GRAVE
|
| 131 |
+
# 0xc5 -> U+160 LATIN CAPITAL LETTER S WITH CARON
|
| 132 |
+
# 0xce -> U+3A0 GREEK CAPITAL LETTER PI
|
| 133 |
+
# 0xd0 -> U+420 CYRILLIC CAPITAL LETTER ER
|
| 134 |
+
# 0xd9 -> U+660 ARABIC-INDIC DIGIT ZERO
|
| 135 |
+
#
|
| 136 |
+
# In three-character sequences, we exclude some lead bytes in some cases.
|
| 137 |
+
#
|
| 138 |
+
# When the lead byte is immediately followed by 0xA0, we shouldn't accept
|
| 139 |
+
# a space there, because it leads to some less-likely character ranges:
|
| 140 |
+
#
|
| 141 |
+
# 0xe0 -> Samaritan script
|
| 142 |
+
# 0xe1 -> Mongolian script (corresponds to Latin-1 'á' which is too common)
|
| 143 |
+
#
|
| 144 |
+
# We accept 0xe2 and 0xe3, which cover many scripts. Bytes 0xe4 and
|
| 145 |
+
# higher point mostly to CJK characters, which we generally don't want to
|
| 146 |
+
# decode near Latin lowercase letters.
|
| 147 |
+
#
|
| 148 |
+
# In four-character sequences, the lead byte must be F0, because that accounts
|
| 149 |
+
# for almost all of the usage of high-numbered codepoints (tag characters whose
|
| 150 |
+
# UTF-8 starts with the byte F3 are only used in some rare new emoji sequences).
|
| 151 |
+
#
|
| 152 |
+
# This is meant to be applied to encodings of text that tests true for `is_bad`.
|
| 153 |
+
# Any of these could represent characters that legitimately appear surrounded by
|
| 154 |
+
# spaces, particularly U+C5 (Å), which is a word in multiple languages!
|
| 155 |
+
#
|
| 156 |
+
# We should consider checking for b'\x85' being converted to ... in the future.
|
| 157 |
+
# I've seen it once, but the text still wasn't recoverable.
|
| 158 |
+
|
| 159 |
+
ALTERED_UTF8_RE = re.compile(
|
| 160 |
+
b"[\xc2\xc3\xc5\xce\xd0\xd9][ ]"
|
| 161 |
+
b"|[\xe2\xe3][ ][\x80-\x84\x86-\x9f\xa1-\xbf]"
|
| 162 |
+
b"|[\xe0-\xe3][\x80-\x84\x86-\x9f\xa1-\xbf][ ]"
|
| 163 |
+
b"|[\xf0][ ][\x80-\xbf][\x80-\xbf]"
|
| 164 |
+
b"|[\xf0][\x80-\xbf][ ][\x80-\xbf]"
|
| 165 |
+
b"|[\xf0][\x80-\xbf][\x80-\xbf][ ]"
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
# This expression matches UTF-8 and CESU-8 sequences where some of the
|
| 170 |
+
# continuation bytes have been lost. The byte 0x1a (sometimes written as ^Z) is
|
| 171 |
+
# used within ftfy to represent a byte that produced the replacement character
|
| 172 |
+
# \ufffd. We don't know which byte it was, but we can at least decode the UTF-8
|
| 173 |
+
# sequence as \ufffd instead of failing to re-decode it at all.
|
| 174 |
+
#
|
| 175 |
+
# In some cases, we allow the ASCII '?' in place of \ufffd, but at most once per
|
| 176 |
+
# sequence.
|
| 177 |
+
LOSSY_UTF8_RE = re.compile(
|
| 178 |
+
b"[\xc2-\xdf][\x1a]"
|
| 179 |
+
b"|[\xc2-\xc3][?]"
|
| 180 |
+
b"|\xed[\xa0-\xaf][\x1a?]\xed[\xb0-\xbf][\x1a?\x80-\xbf]"
|
| 181 |
+
b"|\xed[\xa0-\xaf][\x1a?\x80-\xbf]\xed[\xb0-\xbf][\x1a?]"
|
| 182 |
+
b"|[\xe0-\xef][\x1a?][\x1a\x80-\xbf]"
|
| 183 |
+
b"|[\xe0-\xef][\x1a\x80-\xbf][\x1a?]"
|
| 184 |
+
b"|[\xf0-\xf4][\x1a?][\x1a\x80-\xbf][\x1a\x80-\xbf]"
|
| 185 |
+
b"|[\xf0-\xf4][\x1a\x80-\xbf][\x1a?][\x1a\x80-\xbf]"
|
| 186 |
+
b"|[\xf0-\xf4][\x1a\x80-\xbf][\x1a\x80-\xbf][\x1a?]"
|
| 187 |
+
b"|\x1a"
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
# This regex matches C1 control characters, which occupy some of the positions
|
| 192 |
+
# in the Latin-1 character map that Windows assigns to other characters instead.
|
| 193 |
+
C1_CONTROL_RE = re.compile(r"[\x80-\x9f]")
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
# A translate mapping that breaks ligatures made of Latin letters. While
|
| 197 |
+
# ligatures may be important to the representation of other languages, in Latin
|
| 198 |
+
# letters they tend to represent a copy/paste error. It omits ligatures such
|
| 199 |
+
# as æ that are frequently used intentionally.
|
| 200 |
+
#
|
| 201 |
+
# This list additionally includes some Latin digraphs that represent two
|
| 202 |
+
# characters for legacy encoding reasons, not for typographical reasons.
|
| 203 |
+
#
|
| 204 |
+
# Ligatures and digraphs may also be separated by NFKC normalization, but that
|
| 205 |
+
# is sometimes more normalization than you want.
|
| 206 |
+
|
| 207 |
+
LIGATURES = {
|
| 208 |
+
ord("IJ"): "IJ", # Dutch ligatures
|
| 209 |
+
ord("ij"): "ij",
|
| 210 |
+
ord("ʼn"): "ʼn", # Afrikaans digraph meant to avoid auto-curled quote
|
| 211 |
+
ord("DZ"): "DZ", # Serbian/Croatian digraphs for Cyrillic conversion
|
| 212 |
+
ord("Dz"): "Dz",
|
| 213 |
+
ord("dz"): "dz",
|
| 214 |
+
ord("DŽ"): "DŽ",
|
| 215 |
+
ord("Dž"): "Dž",
|
| 216 |
+
ord("dž"): "dž",
|
| 217 |
+
ord("LJ"): "LJ",
|
| 218 |
+
ord("Lj"): "Lj",
|
| 219 |
+
ord("lj"): "lj",
|
| 220 |
+
ord("NJ"): "NJ",
|
| 221 |
+
ord("Nj"): "Nj",
|
| 222 |
+
ord("nj"): "nj",
|
| 223 |
+
ord("ff"): "ff", # Latin typographical ligatures
|
| 224 |
+
ord("fi"): "fi",
|
| 225 |
+
ord("fl"): "fl",
|
| 226 |
+
ord("ffi"): "ffi",
|
| 227 |
+
ord("ffl"): "ffl",
|
| 228 |
+
ord("ſt"): "ſt",
|
| 229 |
+
ord("st"): "st",
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def _build_width_map() -> dict[int, str]:
|
| 234 |
+
"""
|
| 235 |
+
Build a translate mapping that replaces halfwidth and fullwidth forms
|
| 236 |
+
with their standard-width forms.
|
| 237 |
+
"""
|
| 238 |
+
# Though it's not listed as a fullwidth character, we'll want to convert
|
| 239 |
+
# U+3000 IDEOGRAPHIC SPACE to U+20 SPACE on the same principle, so start
|
| 240 |
+
# with that in the dictionary.
|
| 241 |
+
width_map = {0x3000: " "}
|
| 242 |
+
for i in range(0xFF01, 0xFFF0):
|
| 243 |
+
char = chr(i)
|
| 244 |
+
alternate = unicodedata.normalize("NFKC", char)
|
| 245 |
+
if alternate != char:
|
| 246 |
+
width_map[i] = alternate
|
| 247 |
+
return width_map
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
# Precomputed str.translate() table: fullwidth/halfwidth codepoints (plus
# U+3000 IDEOGRAPHIC SPACE) mapped to their standard-width equivalents.
WIDTH_MAP: dict[int, str] = _build_width_map()
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
# Character classes that help us pinpoint embedded mojibake. These can
|
| 254 |
+
# include common characters, because we'll also check them for 'badness'.
|
| 255 |
+
#
|
| 256 |
+
# Though they go on for many lines, the members of this dictionary are
|
| 257 |
+
# single concatenated strings.
|
| 258 |
+
#
|
| 259 |
+
# This code is generated using scripts/char_data_table.py.
|
| 260 |
+
# Each value is a single concatenated string of characters, used inside a
# regex character class. A duplicated LATIN SMALL LETTER E WITH OGONEK entry
# in "utf8_first_of_3" has been removed; duplicates in a character class are
# redundant.
UTF8_CLUES: dict[str, str] = {
    # Letters that decode to 0xC2 - 0xDF in a Latin-1-like encoding
    "utf8_first_of_2": (
        "\N{LATIN CAPITAL LETTER A WITH BREVE}"  # windows-1250:C3
        "\N{LATIN CAPITAL LETTER A WITH CIRCUMFLEX}"  # latin-1:C2
        "\N{LATIN CAPITAL LETTER A WITH DIAERESIS}"  # latin-1:C4
        "\N{LATIN CAPITAL LETTER A WITH MACRON}"  # windows-1257:C2
        "\N{LATIN CAPITAL LETTER A WITH RING ABOVE}"  # latin-1:C5
        "\N{LATIN CAPITAL LETTER A WITH TILDE}"  # latin-1:C3
        "\N{LATIN CAPITAL LETTER AE}"  # latin-1:C6
        "\N{LATIN CAPITAL LETTER C WITH ACUTE}"  # windows-1250:C6
        "\N{LATIN CAPITAL LETTER C WITH CARON}"  # windows-1250:C8
        "\N{LATIN CAPITAL LETTER C WITH CEDILLA}"  # latin-1:C7
        "\N{LATIN CAPITAL LETTER D WITH CARON}"  # windows-1250:CF
        "\N{LATIN CAPITAL LETTER D WITH STROKE}"  # windows-1250:D0
        "\N{LATIN CAPITAL LETTER E WITH ACUTE}"  # latin-1:C9
        "\N{LATIN CAPITAL LETTER E WITH CARON}"  # windows-1250:CC
        "\N{LATIN CAPITAL LETTER E WITH CIRCUMFLEX}"  # latin-1:CA
        "\N{LATIN CAPITAL LETTER E WITH DIAERESIS}"  # latin-1:CB
        "\N{LATIN CAPITAL LETTER E WITH DOT ABOVE}"  # windows-1257:CB
        "\N{LATIN CAPITAL LETTER E WITH GRAVE}"  # latin-1:C8
        "\N{LATIN CAPITAL LETTER E WITH MACRON}"  # windows-1257:C7
        "\N{LATIN CAPITAL LETTER E WITH OGONEK}"  # windows-1250:CA
        "\N{LATIN CAPITAL LETTER ETH}"  # latin-1:D0
        "\N{LATIN CAPITAL LETTER G WITH BREVE}"  # windows-1254:D0
        "\N{LATIN CAPITAL LETTER G WITH CEDILLA}"  # windows-1257:CC
        "\N{LATIN CAPITAL LETTER I WITH ACUTE}"  # latin-1:CD
        "\N{LATIN CAPITAL LETTER I WITH CIRCUMFLEX}"  # latin-1:CE
        "\N{LATIN CAPITAL LETTER I WITH DIAERESIS}"  # latin-1:CF
        "\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}"  # windows-1254:DD
        "\N{LATIN CAPITAL LETTER I WITH GRAVE}"  # latin-1:CC
        "\N{LATIN CAPITAL LETTER I WITH MACRON}"  # windows-1257:CE
        "\N{LATIN CAPITAL LETTER K WITH CEDILLA}"  # windows-1257:CD
        "\N{LATIN CAPITAL LETTER L WITH ACUTE}"  # windows-1250:C5
        "\N{LATIN CAPITAL LETTER L WITH CEDILLA}"  # windows-1257:CF
        "\N{LATIN CAPITAL LETTER L WITH STROKE}"  # windows-1257:D9
        "\N{LATIN CAPITAL LETTER N WITH ACUTE}"  # windows-1250:D1
        "\N{LATIN CAPITAL LETTER N WITH CARON}"  # windows-1250:D2
        "\N{LATIN CAPITAL LETTER N WITH CEDILLA}"  # windows-1257:D2
        "\N{LATIN CAPITAL LETTER N WITH TILDE}"  # latin-1:D1
        "\N{LATIN CAPITAL LETTER O WITH ACUTE}"  # latin-1:D3
        "\N{LATIN CAPITAL LETTER O WITH CIRCUMFLEX}"  # latin-1:D4
        "\N{LATIN CAPITAL LETTER O WITH DIAERESIS}"  # latin-1:D6
        "\N{LATIN CAPITAL LETTER O WITH DOUBLE ACUTE}"  # windows-1250:D5
        "\N{LATIN CAPITAL LETTER O WITH GRAVE}"  # latin-1:D2
        "\N{LATIN CAPITAL LETTER O WITH MACRON}"  # windows-1257:D4
        "\N{LATIN CAPITAL LETTER O WITH STROKE}"  # latin-1:D8
        "\N{LATIN CAPITAL LETTER O WITH TILDE}"  # latin-1:D5
        "\N{LATIN CAPITAL LETTER R WITH CARON}"  # windows-1250:D8
        "\N{LATIN CAPITAL LETTER S WITH ACUTE}"  # windows-1257:DA
        "\N{LATIN CAPITAL LETTER S WITH CARON}"  # windows-1257:D0
        "\N{LATIN CAPITAL LETTER S WITH CEDILLA}"  # windows-1254:DE
        "\N{LATIN CAPITAL LETTER T WITH CEDILLA}"  # windows-1250:DE
        "\N{LATIN CAPITAL LETTER THORN}"  # latin-1:DE
        "\N{LATIN CAPITAL LETTER U WITH ACUTE}"  # latin-1:DA
        "\N{LATIN CAPITAL LETTER U WITH CIRCUMFLEX}"  # latin-1:DB
        "\N{LATIN CAPITAL LETTER U WITH DIAERESIS}"  # latin-1:DC
        "\N{LATIN CAPITAL LETTER U WITH DOUBLE ACUTE}"  # windows-1250:DB
        "\N{LATIN CAPITAL LETTER U WITH GRAVE}"  # latin-1:D9
        "\N{LATIN CAPITAL LETTER U WITH MACRON}"  # windows-1257:DB
        "\N{LATIN CAPITAL LETTER U WITH OGONEK}"  # windows-1257:D8
        "\N{LATIN CAPITAL LETTER U WITH RING ABOVE}"  # windows-1250:D9
        "\N{LATIN CAPITAL LETTER Y WITH ACUTE}"  # latin-1:DD
        "\N{LATIN CAPITAL LETTER Z WITH ACUTE}"  # windows-1257:CA
        "\N{LATIN CAPITAL LETTER Z WITH CARON}"  # windows-1257:DE
        "\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}"  # windows-1257:DD
        "\N{LATIN SMALL LETTER SHARP S}"  # latin-1:DF
        "\N{MULTIPLICATION SIGN}"  # latin-1:D7
        "\N{GREEK CAPITAL LETTER BETA}"  # windows-1253:C2
        "\N{GREEK CAPITAL LETTER GAMMA}"  # windows-1253:C3
        "\N{GREEK CAPITAL LETTER DELTA}"  # windows-1253:C4
        "\N{GREEK CAPITAL LETTER EPSILON}"  # windows-1253:C5
        "\N{GREEK CAPITAL LETTER ZETA}"  # windows-1253:C6
        "\N{GREEK CAPITAL LETTER ETA}"  # windows-1253:C7
        "\N{GREEK CAPITAL LETTER THETA}"  # windows-1253:C8
        "\N{GREEK CAPITAL LETTER IOTA}"  # windows-1253:C9
        "\N{GREEK CAPITAL LETTER KAPPA}"  # windows-1253:CA
        "\N{GREEK CAPITAL LETTER LAMDA}"  # windows-1253:CB
        "\N{GREEK CAPITAL LETTER MU}"  # windows-1253:CC
        "\N{GREEK CAPITAL LETTER NU}"  # windows-1253:CD
        "\N{GREEK CAPITAL LETTER XI}"  # windows-1253:CE
        "\N{GREEK CAPITAL LETTER OMICRON}"  # windows-1253:CF
        "\N{GREEK CAPITAL LETTER PI}"  # windows-1253:D0
        "\N{GREEK CAPITAL LETTER RHO}"  # windows-1253:D1
        "\N{GREEK CAPITAL LETTER SIGMA}"  # windows-1253:D3
        "\N{GREEK CAPITAL LETTER TAU}"  # windows-1253:D4
        "\N{GREEK CAPITAL LETTER UPSILON}"  # windows-1253:D5
        "\N{GREEK CAPITAL LETTER PHI}"  # windows-1253:D6
        "\N{GREEK CAPITAL LETTER CHI}"  # windows-1253:D7
        "\N{GREEK CAPITAL LETTER PSI}"  # windows-1253:D8
        "\N{GREEK CAPITAL LETTER OMEGA}"  # windows-1253:D9
        "\N{GREEK CAPITAL LETTER IOTA WITH DIALYTIKA}"  # windows-1253:DA
        "\N{GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA}"  # windows-1253:DB
        "\N{GREEK SMALL LETTER ALPHA WITH TONOS}"  # windows-1253:DC
        "\N{GREEK SMALL LETTER EPSILON WITH TONOS}"  # windows-1253:DD
        "\N{GREEK SMALL LETTER ETA WITH TONOS}"  # windows-1253:DE
        "\N{GREEK SMALL LETTER IOTA WITH TONOS}"  # windows-1253:DF
        "\N{CYRILLIC CAPITAL LETTER VE}"  # windows-1251:C2
        "\N{CYRILLIC CAPITAL LETTER GHE}"  # windows-1251:C3
        "\N{CYRILLIC CAPITAL LETTER DE}"  # windows-1251:C4
        "\N{CYRILLIC CAPITAL LETTER IE}"  # windows-1251:C5
        "\N{CYRILLIC CAPITAL LETTER ZHE}"  # windows-1251:C6
        "\N{CYRILLIC CAPITAL LETTER ZE}"  # windows-1251:C7
        "\N{CYRILLIC CAPITAL LETTER I}"  # windows-1251:C8
        "\N{CYRILLIC CAPITAL LETTER SHORT I}"  # windows-1251:C9
        "\N{CYRILLIC CAPITAL LETTER KA}"  # windows-1251:CA
        "\N{CYRILLIC CAPITAL LETTER EL}"  # windows-1251:CB
        "\N{CYRILLIC CAPITAL LETTER EM}"  # windows-1251:CC
        "\N{CYRILLIC CAPITAL LETTER EN}"  # windows-1251:CD
        "\N{CYRILLIC CAPITAL LETTER O}"  # windows-1251:CE
        "\N{CYRILLIC CAPITAL LETTER PE}"  # windows-1251:CF
        "\N{CYRILLIC CAPITAL LETTER ER}"  # windows-1251:D0
        "\N{CYRILLIC CAPITAL LETTER ES}"  # windows-1251:D1
        "\N{CYRILLIC CAPITAL LETTER TE}"  # windows-1251:D2
        "\N{CYRILLIC CAPITAL LETTER U}"  # windows-1251:D3
        "\N{CYRILLIC CAPITAL LETTER EF}"  # windows-1251:D4
        "\N{CYRILLIC CAPITAL LETTER HA}"  # windows-1251:D5
        "\N{CYRILLIC CAPITAL LETTER TSE}"  # windows-1251:D6
        "\N{CYRILLIC CAPITAL LETTER CHE}"  # windows-1251:D7
        "\N{CYRILLIC CAPITAL LETTER SHA}"  # windows-1251:D8
        "\N{CYRILLIC CAPITAL LETTER SHCHA}"  # windows-1251:D9
        "\N{CYRILLIC CAPITAL LETTER HARD SIGN}"  # windows-1251:DA
        "\N{CYRILLIC CAPITAL LETTER YERU}"  # windows-1251:DB
        "\N{CYRILLIC CAPITAL LETTER SOFT SIGN}"  # windows-1251:DC
        "\N{CYRILLIC CAPITAL LETTER E}"  # windows-1251:DD
        "\N{CYRILLIC CAPITAL LETTER YU}"  # windows-1251:DE
        "\N{CYRILLIC CAPITAL LETTER YA}"  # windows-1251:DF
    ),
    # Letters that decode to 0xE0 - 0xEF in a Latin-1-like encoding
    "utf8_first_of_3": (
        "\N{LATIN SMALL LETTER A WITH ACUTE}"  # latin-1:E1
        "\N{LATIN SMALL LETTER A WITH BREVE}"  # windows-1250:E3
        "\N{LATIN SMALL LETTER A WITH CIRCUMFLEX}"  # latin-1:E2
        "\N{LATIN SMALL LETTER A WITH DIAERESIS}"  # latin-1:E4
        "\N{LATIN SMALL LETTER A WITH GRAVE}"  # latin-1:E0
        "\N{LATIN SMALL LETTER A WITH MACRON}"  # windows-1257:E2
        "\N{LATIN SMALL LETTER A WITH OGONEK}"  # windows-1257:E0
        "\N{LATIN SMALL LETTER A WITH RING ABOVE}"  # latin-1:E5
        "\N{LATIN SMALL LETTER A WITH TILDE}"  # latin-1:E3
        "\N{LATIN SMALL LETTER AE}"  # latin-1:E6
        "\N{LATIN SMALL LETTER C WITH ACUTE}"  # windows-1250:E6
        "\N{LATIN SMALL LETTER C WITH CARON}"  # windows-1250:E8
        "\N{LATIN SMALL LETTER C WITH CEDILLA}"  # latin-1:E7
        "\N{LATIN SMALL LETTER D WITH CARON}"  # windows-1250:EF
        "\N{LATIN SMALL LETTER E WITH ACUTE}"  # latin-1:E9
        "\N{LATIN SMALL LETTER E WITH CARON}"  # windows-1250:EC
        "\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}"  # latin-1:EA
        "\N{LATIN SMALL LETTER E WITH DIAERESIS}"  # latin-1:EB
        "\N{LATIN SMALL LETTER E WITH DOT ABOVE}"  # windows-1257:EB
        "\N{LATIN SMALL LETTER E WITH GRAVE}"  # latin-1:E8
        "\N{LATIN SMALL LETTER E WITH MACRON}"  # windows-1257:E7
        "\N{LATIN SMALL LETTER E WITH OGONEK}"  # windows-1250:EA
        "\N{LATIN SMALL LETTER G WITH CEDILLA}"  # windows-1257:EC
        "\N{LATIN SMALL LETTER I WITH ACUTE}"  # latin-1:ED
        "\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}"  # latin-1:EE
        "\N{LATIN SMALL LETTER I WITH DIAERESIS}"  # latin-1:EF
        "\N{LATIN SMALL LETTER I WITH GRAVE}"  # latin-1:EC
        "\N{LATIN SMALL LETTER I WITH MACRON}"  # windows-1257:EE
        "\N{LATIN SMALL LETTER I WITH OGONEK}"  # windows-1257:E1
        "\N{LATIN SMALL LETTER K WITH CEDILLA}"  # windows-1257:ED
        "\N{LATIN SMALL LETTER L WITH ACUTE}"  # windows-1250:E5
        "\N{LATIN SMALL LETTER L WITH CEDILLA}"  # windows-1257:EF
        "\N{LATIN SMALL LETTER R WITH ACUTE}"  # windows-1250:E0
        "\N{LATIN SMALL LETTER Z WITH ACUTE}"  # windows-1257:EA
        "\N{GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS}"  # windows-1253:E0
        "\N{GREEK SMALL LETTER ALPHA}"  # windows-1253:E1
        "\N{GREEK SMALL LETTER BETA}"  # windows-1253:E2
        "\N{GREEK SMALL LETTER GAMMA}"  # windows-1253:E3
        "\N{GREEK SMALL LETTER DELTA}"  # windows-1253:E4
        "\N{GREEK SMALL LETTER EPSILON}"  # windows-1253:E5
        "\N{GREEK SMALL LETTER ZETA}"  # windows-1253:E6
        "\N{GREEK SMALL LETTER ETA}"  # windows-1253:E7
        "\N{GREEK SMALL LETTER THETA}"  # windows-1253:E8
        "\N{GREEK SMALL LETTER IOTA}"  # windows-1253:E9
        "\N{GREEK SMALL LETTER KAPPA}"  # windows-1253:EA
        "\N{GREEK SMALL LETTER LAMDA}"  # windows-1253:EB
        "\N{GREEK SMALL LETTER MU}"  # windows-1253:EC
        "\N{GREEK SMALL LETTER NU}"  # windows-1253:ED
        "\N{GREEK SMALL LETTER XI}"  # windows-1253:EE
        "\N{GREEK SMALL LETTER OMICRON}"  # windows-1253:EF
        "\N{CYRILLIC SMALL LETTER A}"  # windows-1251:E0
        "\N{CYRILLIC SMALL LETTER BE}"  # windows-1251:E1
        "\N{CYRILLIC SMALL LETTER VE}"  # windows-1251:E2
        "\N{CYRILLIC SMALL LETTER GHE}"  # windows-1251:E3
        "\N{CYRILLIC SMALL LETTER DE}"  # windows-1251:E4
        "\N{CYRILLIC SMALL LETTER IE}"  # windows-1251:E5
        "\N{CYRILLIC SMALL LETTER ZHE}"  # windows-1251:E6
        "\N{CYRILLIC SMALL LETTER ZE}"  # windows-1251:E7
        "\N{CYRILLIC SMALL LETTER I}"  # windows-1251:E8
        "\N{CYRILLIC SMALL LETTER SHORT I}"  # windows-1251:E9
        "\N{CYRILLIC SMALL LETTER KA}"  # windows-1251:EA
        "\N{CYRILLIC SMALL LETTER EL}"  # windows-1251:EB
        "\N{CYRILLIC SMALL LETTER EM}"  # windows-1251:EC
        "\N{CYRILLIC SMALL LETTER EN}"  # windows-1251:ED
        "\N{CYRILLIC SMALL LETTER O}"  # windows-1251:EE
        "\N{CYRILLIC SMALL LETTER PE}"  # windows-1251:EF
    ),
    # Letters that decode to 0xF0 or 0xF3 in a Latin-1-like encoding.
    # (Other leading bytes correspond only to unassigned codepoints)
    "utf8_first_of_4": (
        "\N{LATIN SMALL LETTER D WITH STROKE}"  # windows-1250:F0
        "\N{LATIN SMALL LETTER ETH}"  # latin-1:F0
        "\N{LATIN SMALL LETTER G WITH BREVE}"  # windows-1254:F0
        "\N{LATIN SMALL LETTER O WITH ACUTE}"  # latin-1:F3
        "\N{LATIN SMALL LETTER S WITH CARON}"  # windows-1257:F0
        "\N{GREEK SMALL LETTER PI}"  # windows-1253:F0
        "\N{GREEK SMALL LETTER SIGMA}"  # windows-1253:F3
        "\N{CYRILLIC SMALL LETTER ER}"  # windows-1251:F0
        "\N{CYRILLIC SMALL LETTER U}"  # windows-1251:F3
    ),
    # Letters that decode to 0x80 - 0xBF in a Latin-1-like encoding,
    # including a space standing in for 0xA0
    "utf8_continuation": (
        "\x80-\xbf"
        "\N{SPACE}"  # modification of latin-1:A0, NO-BREAK SPACE
        "\N{LATIN CAPITAL LETTER A WITH OGONEK}"  # windows-1250:A5
        "\N{LATIN CAPITAL LETTER AE}"  # windows-1257:AF
        "\N{LATIN CAPITAL LETTER L WITH CARON}"  # windows-1250:BC
        "\N{LATIN CAPITAL LETTER L WITH STROKE}"  # windows-1250:A3
        "\N{LATIN CAPITAL LETTER O WITH STROKE}"  # windows-1257:A8
        "\N{LATIN CAPITAL LETTER R WITH CEDILLA}"  # windows-1257:AA
        "\N{LATIN CAPITAL LETTER S WITH ACUTE}"  # windows-1250:8C
        "\N{LATIN CAPITAL LETTER S WITH CARON}"  # windows-1252:8A
        "\N{LATIN CAPITAL LETTER S WITH CEDILLA}"  # windows-1250:AA
        "\N{LATIN CAPITAL LETTER T WITH CARON}"  # windows-1250:8D
        "\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}"  # windows-1252:9F
        "\N{LATIN CAPITAL LETTER Z WITH ACUTE}"  # windows-1250:8F
        "\N{LATIN CAPITAL LETTER Z WITH CARON}"  # windows-1252:8E
        "\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}"  # windows-1250:AF
        "\N{LATIN CAPITAL LIGATURE OE}"  # windows-1252:8C
        "\N{LATIN SMALL LETTER A WITH OGONEK}"  # windows-1250:B9
        "\N{LATIN SMALL LETTER AE}"  # windows-1257:BF
        "\N{LATIN SMALL LETTER F WITH HOOK}"  # windows-1252:83
        "\N{LATIN SMALL LETTER L WITH CARON}"  # windows-1250:BE
        "\N{LATIN SMALL LETTER L WITH STROKE}"  # windows-1250:B3
        "\N{LATIN SMALL LETTER O WITH STROKE}"  # windows-1257:B8
        "\N{LATIN SMALL LETTER R WITH CEDILLA}"  # windows-1257:BA
        "\N{LATIN SMALL LETTER S WITH ACUTE}"  # windows-1250:9C
        "\N{LATIN SMALL LETTER S WITH CARON}"  # windows-1252:9A
        "\N{LATIN SMALL LETTER S WITH CEDILLA}"  # windows-1250:BA
        "\N{LATIN SMALL LETTER T WITH CARON}"  # windows-1250:9D
        "\N{LATIN SMALL LETTER Z WITH ACUTE}"  # windows-1250:9F
        "\N{LATIN SMALL LETTER Z WITH CARON}"  # windows-1252:9E
        "\N{LATIN SMALL LETTER Z WITH DOT ABOVE}"  # windows-1250:BF
        "\N{LATIN SMALL LIGATURE OE}"  # windows-1252:9C
        "\N{MODIFIER LETTER CIRCUMFLEX ACCENT}"  # windows-1252:88
        "\N{CARON}"  # windows-1250:A1
        "\N{BREVE}"  # windows-1250:A2
        "\N{OGONEK}"  # windows-1250:B2
        "\N{SMALL TILDE}"  # windows-1252:98
        "\N{DOUBLE ACUTE ACCENT}"  # windows-1250:BD
        "\N{GREEK TONOS}"  # windows-1253:B4
        "\N{GREEK DIALYTIKA TONOS}"  # windows-1253:A1
        "\N{GREEK CAPITAL LETTER ALPHA WITH TONOS}"  # windows-1253:A2
        "\N{GREEK CAPITAL LETTER EPSILON WITH TONOS}"  # windows-1253:B8
        "\N{GREEK CAPITAL LETTER ETA WITH TONOS}"  # windows-1253:B9
        "\N{GREEK CAPITAL LETTER IOTA WITH TONOS}"  # windows-1253:BA
        "\N{GREEK CAPITAL LETTER OMICRON WITH TONOS}"  # windows-1253:BC
        "\N{GREEK CAPITAL LETTER UPSILON WITH TONOS}"  # windows-1253:BE
        "\N{GREEK CAPITAL LETTER OMEGA WITH TONOS}"  # windows-1253:BF
        "\N{CYRILLIC CAPITAL LETTER IO}"  # windows-1251:A8
        "\N{CYRILLIC CAPITAL LETTER DJE}"  # windows-1251:80
        "\N{CYRILLIC CAPITAL LETTER GJE}"  # windows-1251:81
        "\N{CYRILLIC CAPITAL LETTER UKRAINIAN IE}"  # windows-1251:AA
        "\N{CYRILLIC CAPITAL LETTER DZE}"  # windows-1251:BD
        "\N{CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I}"  # windows-1251:B2
        "\N{CYRILLIC CAPITAL LETTER YI}"  # windows-1251:AF
        "\N{CYRILLIC CAPITAL LETTER JE}"  # windows-1251:A3
        "\N{CYRILLIC CAPITAL LETTER LJE}"  # windows-1251:8A
        "\N{CYRILLIC CAPITAL LETTER NJE}"  # windows-1251:8C
        "\N{CYRILLIC CAPITAL LETTER TSHE}"  # windows-1251:8E
        "\N{CYRILLIC CAPITAL LETTER KJE}"  # windows-1251:8D
        "\N{CYRILLIC CAPITAL LETTER SHORT U}"  # windows-1251:A1
        "\N{CYRILLIC CAPITAL LETTER DZHE}"  # windows-1251:8F
        "\N{CYRILLIC SMALL LETTER IO}"  # windows-1251:B8
        "\N{CYRILLIC SMALL LETTER DJE}"  # windows-1251:90
        "\N{CYRILLIC SMALL LETTER GJE}"  # windows-1251:83
        "\N{CYRILLIC SMALL LETTER UKRAINIAN IE}"  # windows-1251:BA
        "\N{CYRILLIC SMALL LETTER DZE}"  # windows-1251:BE
        "\N{CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I}"  # windows-1251:B3
        "\N{CYRILLIC SMALL LETTER YI}"  # windows-1251:BF
        "\N{CYRILLIC SMALL LETTER JE}"  # windows-1251:BC
        "\N{CYRILLIC SMALL LETTER LJE}"  # windows-1251:9A
        "\N{CYRILLIC SMALL LETTER NJE}"  # windows-1251:9C
        "\N{CYRILLIC SMALL LETTER TSHE}"  # windows-1251:9E
        "\N{CYRILLIC SMALL LETTER KJE}"  # windows-1251:9D
        "\N{CYRILLIC SMALL LETTER SHORT U}"  # windows-1251:A2
        "\N{CYRILLIC SMALL LETTER DZHE}"  # windows-1251:9F
        "\N{CYRILLIC CAPITAL LETTER GHE WITH UPTURN}"  # windows-1251:A5
        "\N{CYRILLIC SMALL LETTER GHE WITH UPTURN}"  # windows-1251:B4
        "\N{EN DASH}"  # windows-1252:96
        "\N{EM DASH}"  # windows-1252:97
        "\N{HORIZONTAL BAR}"  # windows-1253:AF
        "\N{LEFT SINGLE QUOTATION MARK}"  # windows-1252:91
        "\N{RIGHT SINGLE QUOTATION MARK}"  # windows-1252:92
        "\N{SINGLE LOW-9 QUOTATION MARK}"  # windows-1252:82
        "\N{LEFT DOUBLE QUOTATION MARK}"  # windows-1252:93
        "\N{RIGHT DOUBLE QUOTATION MARK}"  # windows-1252:94
        "\N{DOUBLE LOW-9 QUOTATION MARK}"  # windows-1252:84
        "\N{DAGGER}"  # windows-1252:86
        "\N{DOUBLE DAGGER}"  # windows-1252:87
        "\N{BULLET}"  # windows-1252:95
        "\N{HORIZONTAL ELLIPSIS}"  # windows-1252:85
        "\N{PER MILLE SIGN}"  # windows-1252:89
        "\N{SINGLE LEFT-POINTING ANGLE QUOTATION MARK}"  # windows-1252:8B
        "\N{SINGLE RIGHT-POINTING ANGLE QUOTATION MARK}"  # windows-1252:9B
        "\N{EURO SIGN}"  # windows-1252:80
        "\N{NUMERO SIGN}"  # windows-1251:B9
        "\N{TRADE MARK SIGN}"  # windows-1252:99
    ),
    # Letters that decode to 0x80 - 0xBF in a Latin-1-like encoding,
    # and don't usually stand for themselves when adjacent to mojibake.
    # This excludes spaces, dashes, 'bullet', quotation marks, and ellipses.
    "utf8_continuation_strict": (
        "\x80-\xbf"
        "\N{LATIN CAPITAL LETTER A WITH OGONEK}"  # windows-1250:A5
        "\N{LATIN CAPITAL LETTER AE}"  # windows-1257:AF
        "\N{LATIN CAPITAL LETTER L WITH CARON}"  # windows-1250:BC
        "\N{LATIN CAPITAL LETTER L WITH STROKE}"  # windows-1250:A3
        "\N{LATIN CAPITAL LETTER O WITH STROKE}"  # windows-1257:A8
        "\N{LATIN CAPITAL LETTER R WITH CEDILLA}"  # windows-1257:AA
        "\N{LATIN CAPITAL LETTER S WITH ACUTE}"  # windows-1250:8C
        "\N{LATIN CAPITAL LETTER S WITH CARON}"  # windows-1252:8A
        "\N{LATIN CAPITAL LETTER S WITH CEDILLA}"  # windows-1250:AA
        "\N{LATIN CAPITAL LETTER T WITH CARON}"  # windows-1250:8D
        "\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}"  # windows-1252:9F
        "\N{LATIN CAPITAL LETTER Z WITH ACUTE}"  # windows-1250:8F
        "\N{LATIN CAPITAL LETTER Z WITH CARON}"  # windows-1252:8E
        "\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}"  # windows-1250:AF
        "\N{LATIN CAPITAL LIGATURE OE}"  # windows-1252:8C
        "\N{LATIN SMALL LETTER A WITH OGONEK}"  # windows-1250:B9
        "\N{LATIN SMALL LETTER AE}"  # windows-1257:BF
        "\N{LATIN SMALL LETTER F WITH HOOK}"  # windows-1252:83
        "\N{LATIN SMALL LETTER L WITH CARON}"  # windows-1250:BE
        "\N{LATIN SMALL LETTER L WITH STROKE}"  # windows-1250:B3
        "\N{LATIN SMALL LETTER O WITH STROKE}"  # windows-1257:B8
        "\N{LATIN SMALL LETTER R WITH CEDILLA}"  # windows-1257:BA
        "\N{LATIN SMALL LETTER S WITH ACUTE}"  # windows-1250:9C
        "\N{LATIN SMALL LETTER S WITH CARON}"  # windows-1252:9A
        "\N{LATIN SMALL LETTER S WITH CEDILLA}"  # windows-1250:BA
        "\N{LATIN SMALL LETTER T WITH CARON}"  # windows-1250:9D
        "\N{LATIN SMALL LETTER Z WITH ACUTE}"  # windows-1250:9F
        "\N{LATIN SMALL LETTER Z WITH CARON}"  # windows-1252:9E
        "\N{LATIN SMALL LETTER Z WITH DOT ABOVE}"  # windows-1250:BF
        "\N{LATIN SMALL LIGATURE OE}"  # windows-1252:9C
        "\N{MODIFIER LETTER CIRCUMFLEX ACCENT}"  # windows-1252:88
        "\N{CARON}"  # windows-1250:A1
        "\N{BREVE}"  # windows-1250:A2
        "\N{OGONEK}"  # windows-1250:B2
        "\N{SMALL TILDE}"  # windows-1252:98
        "\N{DOUBLE ACUTE ACCENT}"  # windows-1250:BD
        "\N{GREEK TONOS}"  # windows-1253:B4
        "\N{GREEK DIALYTIKA TONOS}"  # windows-1253:A1
        "\N{GREEK CAPITAL LETTER ALPHA WITH TONOS}"  # windows-1253:A2
        "\N{GREEK CAPITAL LETTER EPSILON WITH TONOS}"  # windows-1253:B8
        "\N{GREEK CAPITAL LETTER ETA WITH TONOS}"  # windows-1253:B9
        "\N{GREEK CAPITAL LETTER IOTA WITH TONOS}"  # windows-1253:BA
        "\N{GREEK CAPITAL LETTER OMICRON WITH TONOS}"  # windows-1253:BC
        "\N{GREEK CAPITAL LETTER UPSILON WITH TONOS}"  # windows-1253:BE
        "\N{GREEK CAPITAL LETTER OMEGA WITH TONOS}"  # windows-1253:BF
        "\N{CYRILLIC CAPITAL LETTER IO}"  # windows-1251:A8
        "\N{CYRILLIC CAPITAL LETTER DJE}"  # windows-1251:80
        "\N{CYRILLIC CAPITAL LETTER GJE}"  # windows-1251:81
        "\N{CYRILLIC CAPITAL LETTER UKRAINIAN IE}"  # windows-1251:AA
        "\N{CYRILLIC CAPITAL LETTER DZE}"  # windows-1251:BD
        "\N{CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I}"  # windows-1251:B2
        "\N{CYRILLIC CAPITAL LETTER YI}"  # windows-1251:AF
        "\N{CYRILLIC CAPITAL LETTER JE}"  # windows-1251:A3
        "\N{CYRILLIC CAPITAL LETTER LJE}"  # windows-1251:8A
        "\N{CYRILLIC CAPITAL LETTER NJE}"  # windows-1251:8C
        "\N{CYRILLIC CAPITAL LETTER TSHE}"  # windows-1251:8E
        "\N{CYRILLIC CAPITAL LETTER KJE}"  # windows-1251:8D
        "\N{CYRILLIC CAPITAL LETTER SHORT U}"  # windows-1251:A1
        "\N{CYRILLIC CAPITAL LETTER DZHE}"  # windows-1251:8F
        "\N{CYRILLIC SMALL LETTER IO}"  # windows-1251:B8
        "\N{CYRILLIC SMALL LETTER DJE}"  # windows-1251:90
        "\N{CYRILLIC SMALL LETTER GJE}"  # windows-1251:83
        "\N{CYRILLIC SMALL LETTER UKRAINIAN IE}"  # windows-1251:BA
        "\N{CYRILLIC SMALL LETTER DZE}"  # windows-1251:BE
        "\N{CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I}"  # windows-1251:B3
        "\N{CYRILLIC SMALL LETTER YI}"  # windows-1251:BF
        "\N{CYRILLIC SMALL LETTER JE}"  # windows-1251:BC
        "\N{CYRILLIC SMALL LETTER LJE}"  # windows-1251:9A
        "\N{CYRILLIC SMALL LETTER NJE}"  # windows-1251:9C
        "\N{CYRILLIC SMALL LETTER TSHE}"  # windows-1251:9E
        "\N{CYRILLIC SMALL LETTER KJE}"  # windows-1251:9D
        "\N{CYRILLIC SMALL LETTER SHORT U}"  # windows-1251:A2
        "\N{CYRILLIC SMALL LETTER DZHE}"  # windows-1251:9F
        "\N{CYRILLIC CAPITAL LETTER GHE WITH UPTURN}"  # windows-1251:A5
        "\N{CYRILLIC SMALL LETTER GHE WITH UPTURN}"  # windows-1251:B4
        "\N{DAGGER}"  # windows-1252:86
        "\N{DOUBLE DAGGER}"  # windows-1252:87
        "\N{PER MILLE SIGN}"  # windows-1252:89
        "\N{SINGLE LEFT-POINTING ANGLE QUOTATION MARK}"  # windows-1252:8B
        "\N{SINGLE RIGHT-POINTING ANGLE QUOTATION MARK}"  # windows-1252:9B
        "\N{EURO SIGN}"  # windows-1252:80
        "\N{NUMERO SIGN}"  # windows-1251:B9
        "\N{TRADE MARK SIGN}"  # windows-1252:99
    ),
}
|
| 661 |
+
|
| 662 |
+
# This regex uses UTF8_CLUES to find sequences of likely mojibake.
|
| 663 |
+
# It matches them with + so that several adjacent UTF-8-looking sequences
|
| 664 |
+
# get coalesced into one, allowing them to be fixed more efficiently
|
| 665 |
+
# and not requiring every individual subsequence to be detected as 'badness'.
|
| 666 |
+
#
|
| 667 |
+
# We accept spaces in place of "utf8_continuation", because spaces might have
|
| 668 |
+
# been intended to be U+A0 NO-BREAK SPACE.
|
| 669 |
+
#
|
| 670 |
+
# We do a lookbehind to make sure the previous character isn't a
|
| 671 |
+
# "utf8_continuation_strict" character, so that we don't fix just a few
|
| 672 |
+
# characters in a huge garble and make the situation worse.
|
| 673 |
+
#
|
| 674 |
+
# Unfortunately, the matches to this regular expression won't show their
|
| 675 |
+
# surrounding context, and including context would make the expression much
|
| 676 |
+
# less efficient. The 'badness' rules that require context, such as a preceding
|
| 677 |
+
# lowercase letter, will prevent some cases of inconsistent UTF-8 from being
|
| 678 |
+
# fixed when they don't see it.
|
| 679 |
+
# Compiled matcher for runs of embedded mojibake: one or more adjacent
# UTF-8-looking sequences (a lead-byte-like character followed by 1-3
# continuation-like characters), not preceded by a strict continuation
# character. The character classes are substituted in from UTF8_CLUES;
# the {{2}}/{{3}} braces are doubled so str.format leaves regex repetition
# counts intact. See the explanation comment above for the design rationale.
UTF8_DETECTOR_RE: "re.Pattern[str]" = re.compile(
    """
    (?<! [{utf8_continuation_strict}])
    (
        [{utf8_first_of_2}] [{utf8_continuation}]
        |
        [{utf8_first_of_3}] [{utf8_continuation}]{{2}}
        |
        [{utf8_first_of_4}] [{utf8_continuation}]{{3}}
    )+
    """.format(**UTF8_CLUES),
    re.VERBOSE,
)
|
evalkit_tf437/lib/python3.10/site-packages/ftfy/fixes.py
ADDED
|
@@ -0,0 +1,510 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The `ftfy.fixes` module contains the individual fixes that :func:`ftfy.fix_text`
|
| 3 |
+
can perform, and provides the functions that are named in "explanations"
|
| 4 |
+
such as the output of :func:`ftfy.fix_and_explain`.
|
| 5 |
+
|
| 6 |
+
Two of these functions are particularly useful on their own, as more robust
|
| 7 |
+
versions of functions in the Python standard library:
|
| 8 |
+
|
| 9 |
+
- :func:`ftfy.fixes.decode_escapes`
|
| 10 |
+
- :func:`ftfy.fixes.unescape_html`
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import codecs
|
| 14 |
+
import html
|
| 15 |
+
import re
|
| 16 |
+
import warnings
|
| 17 |
+
from re import Match
|
| 18 |
+
from typing import Any
|
| 19 |
+
|
| 20 |
+
import ftfy
|
| 21 |
+
from ftfy.badness import is_bad
|
| 22 |
+
from ftfy.chardata import (
|
| 23 |
+
ALTERED_UTF8_RE,
|
| 24 |
+
C1_CONTROL_RE,
|
| 25 |
+
CONTROL_CHARS,
|
| 26 |
+
DOUBLE_QUOTE_RE,
|
| 27 |
+
HTML_ENTITIES,
|
| 28 |
+
HTML_ENTITY_RE,
|
| 29 |
+
LIGATURES,
|
| 30 |
+
LOSSY_UTF8_RE,
|
| 31 |
+
SINGLE_QUOTE_RE,
|
| 32 |
+
UTF8_DETECTOR_RE,
|
| 33 |
+
WIDTH_MAP,
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def fix_encoding_and_explain(text: str) -> Any:
    """
    Deprecated alias kept for backward compatibility.

    The real implementation now lives in the top-level :mod:`ftfy` module;
    this wrapper emits a :class:`DeprecationWarning` and forwards the call.
    """
    warnings.warn(
        "`fix_encoding_and_explain()` has moved to the main module of ftfy.",
        DeprecationWarning,
        stacklevel=2,
    )
    return ftfy.fix_encoding_and_explain(text)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def fix_encoding(text: str) -> str:
    """
    Deprecated alias kept for backward compatibility.

    Warns with :class:`DeprecationWarning` and delegates to
    :func:`ftfy.fix_encoding`.
    """
    warnings.warn(
        "`fix_encoding()` has moved to the main module of ftfy.",
        DeprecationWarning,
        stacklevel=2,
    )
    return ftfy.fix_encoding(text)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def apply_plan(text: str, plan: list[tuple[str, str]]) -> str:
    """
    Deprecated alias kept for backward compatibility.

    Warns with :class:`DeprecationWarning` and delegates to
    :func:`ftfy.apply_plan`.
    """
    warnings.warn(
        "`apply_plan()` has moved to the main module of ftfy.",
        DeprecationWarning,
        stacklevel=2,
    )
    return ftfy.apply_plan(text, plan)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _unescape_fixup(match: Match[str]) -> str:
    """
    Translate one matched HTML entity into the character it stands for,
    returning the match unchanged when it can't be decoded cleanly.
    """
    entity = match.group(0)
    if entity in HTML_ENTITIES:
        return HTML_ENTITIES[entity]
    if not entity.startswith("&#"):
        # Not a known named entity and not a numeric reference: leave it.
        return entity
    decoded: str = html.unescape(entity)
    # If html.unescape decoded only part of the reference, a semicolon
    # survives in its output; treat that as a failure and keep the
    # original text, because a full decode must consume the semicolon.
    return entity if ";" in decoded else decoded
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def unescape_html(text: str) -> str:
    """
    Decode HTML entities and character references, including some
    nonstandard all-caps variants.

    Unlike Python's built-in `html.unescape`, this function only decodes
    the unambiguous escapes -- those listed in `html.entities.html5` that
    end with a semicolon -- so informal text such as "this&not that" is
    left alone instead of becoming "this¬ that". It additionally decodes
    all-caps forms of Latin letters and common symbols, so shouty database
    fields like 'P&EACUTE;REZ' come out as 'PÉREZ'.

    >>> unescape_html('&lt;tag&gt;')
    '<tag>'

    >>> unescape_html('&Jscr;ohn &HilbertSpace;ancock')
    '𝒥ohn ℋancock'

    >>> unescape_html('&checkmark;')
    '✓'

    >>> unescape_html('P&eacute;rez')
    'Pérez'

    >>> unescape_html('P&EACUTE;REZ')
    'PÉREZ'

    >>> unescape_html('BUNDESSTRA&SZLIG;E')
    'BUNDESSTRASSE'

    >>> unescape_html('&ntilde; &Ntilde; &NTILDE; &nTILDE;')
    'ñ Ñ Ñ &nTILDE;'
    """
    unescaped = HTML_ENTITY_RE.sub(_unescape_fixup, text)
    return unescaped
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
# An ANSI terminal escape sequence: ESC '[', optional numeric parameters
# separated by semicolons, then one letter naming the command.
ANSI_RE = re.compile("\033\\[((?:\\d|;)*)([a-zA-Z])")
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def remove_terminal_escapes(text: str) -> str:
    r"""
    Delete "ANSI" terminal escape sequences -- such as the color codes used
    on Unix terminals -- from `text`.

    >>> print(remove_terminal_escapes(
    ...     "\033[36;44mI'm blue, da ba dee da ba doo...\033[0m"
    ... ))
    I'm blue, da ba dee da ba doo...
    """
    cleaned = ANSI_RE.sub("", text)
    return cleaned
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def uncurl_quotes(text: str) -> str:
    r"""
    Replace curly (typographic) quotation marks with their straight ASCII
    equivalents.

    >>> print(uncurl_quotes('\u201chere\u2019s a test\u201d'))
    "here's a test"
    """
    straightened = DOUBLE_QUOTE_RE.sub('"', text)
    return SINGLE_QUOTE_RE.sub("'", straightened)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def fix_latin_ligatures(text: str) -> str:
    """
    Expand single-character ligatures of Latin letters, such as 'ﬁ', into
    the letters they contain, as in 'fi'.

    Ligature codepoints in text strings are usually copy-and-paste
    accidents (they belong in *rendered* text). Ligatures in non-Latin
    scripts are deliberately left alone, since removing them could lose
    information; use NFKC normalization if you want to take apart nearly
    all ligatures.

    >>> print(fix_latin_ligatures("ﬂuﬃest"))
    fluffiest
    """
    expanded = text.translate(LIGATURES)
    return expanded
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def fix_character_width(text: str) -> str:
    """
    Replace "halfwidth" and "fullwidth" variants of ASCII, katakana, and
    Hangul characters with their standard-width forms.

    These variants exist to make text line up in a grid; if you don't need
    that property, the standard forms are what you want. Note that this
    also maps the ideographic space (U+3000) to the ASCII space (U+20).

    >>> print(fix_character_width("ＬＯＵＤ　ＮＯＩＳＥＳ"))
    LOUD NOISES
    >>> print(fix_character_width("Ｕターン"))   # this means "U-turn"
    Uターン
    """
    normalized = text.translate(WIDTH_MAP)
    return normalized
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def fix_line_breaks(text: str) -> str:
    r"""
    Normalize every line-break convention in `text` to the Unix newline \n.

    The sequences converted are:

    - CRLF (\r\n), used on Windows and in some communication protocols
    - bare CR (\r), from Mac OS Classic and some modern Mac software
    - LINE SEPARATOR (\u2028) and PARAGRAPH SEPARATOR (\u2029)
    - NEXT LINE (\x85), a C1 control character -- note that when
      `fix_encoding` also runs, \x85 usually won't survive to this point,
      because it is very common mojibake for \u2026 (HORIZONTAL ELLIPSIS)

    CRLF is replaced first so it collapses to a single \n rather than two.

    >>> print(fix_line_breaks(
    ...     "This string is made of two things:\u2029"
    ...     "1. Unicode\u2028"
    ...     "2. Spite"
    ... ))
    This string is made of two things:
    1. Unicode
    2. Spite

    To see the control characters in escaped form:

    >>> def eprint(text):
    ...     print(text.encode('unicode-escape').decode('ascii'))

    >>> eprint(fix_line_breaks("Content-type: text/plain\r\n\r\nHi."))
    Content-type: text/plain\n\nHi.

    >>> eprint(fix_line_breaks("This is how Microsoft \r trolls Mac users"))
    This is how Microsoft \n trolls Mac users

    >>> eprint(fix_line_breaks("What is this \x85 I don't even"))
    What is this \n I don't even
    """
    # Order matters only for the first entry: "\r\n" must go before "\r".
    for sequence in ("\r\n", "\r", "\u2028", "\u2029", "\u0085"):
        text = text.replace(sequence, "\n")
    return text
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
# A lone UTF-16 surrogate codepoint, which is invalid on its own in a str.
SURROGATE_RE = re.compile("[\ud800-\udfff]")
# A correctly ordered high+low surrogate pair encoding one astral codepoint.
SURROGATE_PAIR_RE = re.compile("[\ud800-\udbff][\udc00-\udfff]")
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def convert_surrogate_pair(match: Match[str]) -> str:
    """
    Convert a matched UTF-16 surrogate pair to the single codepoint it
    represents.

    Implements the standard formula described at:
    http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
    """
    high, low = match.group(0)
    # (high - D800) gives the top 10 bits, (low - DC00) the bottom 10,
    # and the result is offset into the astral planes by 0x10000.
    combined = 0x10000 + ((ord(high) - 0xD800) << 10) + (ord(low) - 0xDC00)
    return chr(combined)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def fix_surrogates(text: str) -> str:
    """
    Replace 16-bit surrogate codepoints with the characters they represent
    (when properly paired), or with \ufffd otherwise.

    >>> high_surrogate = chr(0xd83d)
    >>> low_surrogate = chr(0xdca9)
    >>> print(fix_surrogates(high_surrogate + low_surrogate))
    💩
    >>> print(fix_surrogates(low_surrogate + high_surrogate))
    ��

    The doctest above is deliberately written without Unicode escapes of
    the surrogates in the docstring itself, because various tools choke on
    them -- which is a good illustration of why this fixer exists.
    """
    # Fast path: leave strings without surrogates completely untouched.
    if not SURROGATE_RE.search(text):
        return text
    paired = SURROGATE_PAIR_RE.sub(convert_surrogate_pair, text)
    return SURROGATE_RE.sub("\ufffd", paired)
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def remove_control_chars(text: str) -> str:
    """
    Remove control characters that were probably not intended to be in the
    text. Many of them appear in the table of "Characters not suitable for
    use with markup" at http://www.unicode.org/reports/tr20/tr20-9.html.

    Removed:

    - ASCII control characters except important whitespace
      (U+00 to U+08, U+0B, U+0E to U+1F, U+7F)
    - deprecated Arabic control characters (U+206A to U+206F)
    - interlinear annotation characters (U+FFF9 to U+FFFB)
    - the Object Replacement Character (U+FFFC)
    - the byte order mark (U+FEFF)

    Deliberately kept:

    - whitespace-producing controls (U+09, U+0A, U+0C, U+0D, U+2028, U+2029)
    - C1 controls (U+80 to U+9F), which are important mojibake clues
    - glyph-rendering controls such as joiners and right-to-left marks
      (U+200C to U+200F, U+202A to U+202E)
    - musical notation controls (U+1D173 to U+1D17A)
    - tag characters, which now appear in emoji sequences such as the
      flag of Wales
    """
    stripped = text.translate(CONTROL_CHARS)
    return stripped
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def remove_bom(text: str) -> str:
    r"""
    Remove a byte-order mark (U+FEFF) that was accidentally decoded as if
    it were part of the text.

    >>> print(remove_bom(chr(0xfeff) + "Where do you want to go today?"))
    Where do you want to go today?
    """
    return text.lstrip("\ufeff")
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
# Define a regex to match valid escape sequences in Python string literals.
# The alternatives are ordered longest-prefix-first (\U before \u before \x)
# so that an 8-digit escape can't be half-consumed as a shorter one.
ESCAPE_SEQUENCE_RE = re.compile(
    r"""
    ( \\U........      # 8-digit hex escapes
    | \\u....          # 4-digit hex escapes
    | \\x..            # 2-digit hex escapes
    | \\[0-7]{1,3}     # Octal escapes
    | \\N\{[^}]+\}     # Unicode characters by name
    | \\[\\'"abfnrtv]  # Single-character escapes
    )""",
    re.UNICODE | re.VERBOSE,
)
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def decode_escapes(text: str) -> str:
    r"""
    Decode backslashed escape sequences, including \\x, \\u, and \\U
    character references, even in the presence of other Unicode.

    This function must be called explicitly; ftfy never runs it
    automatically, because escaped text is not necessarily a mistake and
    there is no way to tell when it is.

    It does what Python's "string-escape" and "unicode-escape" codecs were
    meant to do, but actually works on mixed input: it decodes a string
    exactly the way the Python interpreter decodes its string literals.

    >>> factoid = '\\u20a1 is the currency symbol for the col\u00f3n.'
    >>> print(factoid[1:])
    u20a1 is the currency symbol for the colón.
    >>> print(decode_escapes(factoid))
    ₡ is the currency symbol for the colón.

    The trick is that the "unicode-escape" codec chokes on literal
    non-ASCII text (see http://stackoverflow.com/a/24519338/773754), so
    instead of feeding it the whole string, we find just the spans that
    are escape sequences -- which are pure ASCII -- and decode each one
    on its own, leaving everything else untouched.
    """

    def _decode_one(match: Match[str]) -> str:
        "Decode a single matched escape sequence."
        return codecs.decode(match.group(0), "unicode-escape")

    return ESCAPE_SEQUENCE_RE.sub(_decode_one, text)
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
# This regex implements an exception to restore_byte_a0, so we can decode the
|
| 394 |
+
# very common mojibake of (for example) "Ã la mode" as "à la mode", not "àla
|
| 395 |
+
# mode".
|
| 396 |
+
#
|
| 397 |
+
# If byte C3 appears with a single space after it -- most commonly this shows
|
| 398 |
+
# up as " Ã " appearing as an entire word -- we'll insert \xa0 while keeping
|
| 399 |
+
# the space. Without this change, we would decode "à" as the start of the next
|
| 400 |
+
# word, such as "àla". It's almost always intended to be a separate word, as in
|
| 401 |
+
# "à la", but when mojibake turns this into "Ã\xa0 la", the two kinds of spaces
|
| 402 |
+
# get coalesced into "Ã la".
|
| 403 |
+
#
|
| 404 |
+
# We make exceptions for the Portuguese words "às", "àquele", "àquela",
|
| 405 |
+
# "àquilo" and their plurals -- these are contractions of, for example, "a
|
| 406 |
+
# aquele" and are very common. Note that the final letter is important to
|
| 407 |
+
# distinguish this case from French "à quel point".
|
| 408 |
+
#
|
| 409 |
+
# Other instances in Portuguese, such as "àfrica", seem to be typos (intended
|
| 410 |
+
# to be "África" with the accent in the other direction).
|
| 411 |
+
#
|
| 412 |
+
# Unfortunately, "à" is a common letter in Catalan, and mojibake of words that
|
| 413 |
+
# contain it will end up with inserted spaces. We can't do the right thing with
|
| 414 |
+
# every word. The cost is that the mojibake text "fà cil" will be interpreted as
|
| 415 |
+
# "fà cil", not "fàcil".
|
| 416 |
+
A_GRAVE_WORD_RE = re.compile(b"\xc3 (?! |quele|quela|quilo|s )")
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
def restore_byte_a0(byts: bytes) -> bytes:
    """
    Undo a lossy transformation where some process decided "byte A0 is
    basically a space!" and replaced it with an ASCII space, breaking
    UTF-8 sequences that needed the A0.

    This finds byte sequences that would convincingly decode as UTF-8 if
    byte 20 were changed back to A0, and restores the A0. For the purpose
    of deciding whether that's a good idea, this step is assigned a cost
    of twice the number of bytes changed.

    Used as a step within `fix_encoding`.
    """
    # First, the "Ã " special case: keep the space so "Ã la mode" becomes
    # "à la mode" rather than "àla mode" (see A_GRAVE_WORD_RE above).
    byts = A_GRAVE_WORD_RE.sub(b"\xc3\xa0 ", byts)

    def _put_back_a0(match: Match[bytes]) -> bytes:
        "Swap every space byte in the matched sequence back to A0."
        return match.group(0).replace(b"\x20", b"\xa0")

    return ALTERED_UTF8_RE.sub(_put_back_a0, byts)
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
def replace_lossy_sequences(byts: bytes) -> bytes:
    """
    Collapse would-be UTF-8 sequences whose continuation bytes were lost
    to a "sloppy" codec (marked by byte 1A) into the UTF-8 encoding of
    U+FFFD.

    Background: a common kind of mojibake is text decoded as windows-1252
    where the unassigned bytes did NOT pass through -- they became U+FFFD
    (�), so the original byte values are unrecoverable. This shows up most
    often with curly quotation marks, e.g. ``â€œ like this â€�``.

    ftfy's sloppy codecs cooperate by encoding ``�`` as byte 1A (the old
    ASCII SUBSTITUTE control, which once served about the same purpose)
    instead of refusing to encode it. This fixer then looks for UTF-8-like
    sequences in which some continuation bytes are 1A and decodes the
    whole sequence as �; if a 1A isn't part of such a sequence, it simply
    turns back into � on its own.

    As a result, ``â€œ like this â€�`` decodes as ``“ like this �``.

    If U+1A genuinely appeared in the original string, the sloppy codecs
    won't be used and this function won't run, so a real SUBSTITUTE
    character is left alone (at the cost of forgoing this wacky fix).

    Used as a transcoder within `fix_encoding`.
    """
    # b"\xef\xbf\xbd" is "\ufffd".encode("utf-8").
    return LOSSY_UTF8_RE.sub(b"\xef\xbf\xbd", byts)
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def decode_inconsistent_utf8(text: str) -> str:
    """
    Fix text from one encoding that ended up embedded inside text from a
    different one -- a common enough situation that it needs handling.

    Used as a transcoder within `fix_encoding`.
    """

    def _fix_chunk(match: Match[str]) -> str:
        "Re-decode one UTF-8-looking span if it scores as mojibake."
        chunk = match.group(0)
        # Only recurse on strictly shorter substrings, so a string that is
        # itself one big UTF-8-looking sequence can't loop forever.
        if len(chunk) < len(text) and is_bad(chunk):
            return ftfy.fix_encoding(chunk)
        return chunk

    return UTF8_DETECTOR_RE.sub(_fix_chunk, text)
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
def _c1_fixer(match: Match[str]) -> str:
    # Reinterpret a run of C1 control characters as the windows-1252
    # characters occupying the same byte values (via ftfy's
    # "sloppy-windows-1252" codec, registered when ftfy is imported).
    return match.group(0).encode("latin-1").decode("sloppy-windows-1252")
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def fix_c1_controls(text: str) -> str:
    """
    Reinterpret any remaining C1 control characters as their Windows-1252
    equivalents, matching what Web browsers do.
    """
    fixed = C1_CONTROL_RE.sub(_c1_fixer, text)
    return fixed
|
evalkit_tf437/lib/python3.10/site-packages/ftfy/py.typed
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl

# Package-wide debug flag; off by default.
DEBUG = False

from openpyxl.compat.numbers import NUMPY
from openpyxl.xml import DEFUSEDXML, LXML
from openpyxl.workbook import Workbook
# NOTE(review): exporting load_workbook under the name `open` shadows the
# builtin for star-importers; kept as-is for backward compatibility.
from openpyxl.reader.excel import load_workbook as open
from openpyxl.reader.excel import load_workbook
import openpyxl._constants as constants

# Expose constants especially the version number

__author__ = constants.__author__
__author_email__ = constants.__author_email__
__license__ = constants.__license__
__maintainer_email__ = constants.__maintainer_email__
__url__ = constants.__url__
__version__ = constants.__version__
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/_constants.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl

"""
Package metadata
"""

__author__ = "See AUTHORS"
__author_email__ = "[email protected]"
__license__ = "MIT"
__maintainer_email__ = "[email protected]"
__url__ = "https://openpyxl.readthedocs.io"
__version__ = "3.1.5"
# Minimum Python version supported by this release.
__python__ = "3.8"
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
from .chartsheet import Chartsheet
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/__pycache__/custom.cpython-310.pyc
ADDED
|
Binary file (1.76 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/__pycache__/properties.cpython-310.pyc
ADDED
|
Binary file (923 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/__pycache__/relation.cpython-310.pyc
ADDED
|
Binary file (2.43 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/custom.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
from openpyxl.worksheet.header_footer import HeaderFooter
|
| 4 |
+
|
| 5 |
+
from openpyxl.descriptors import (
|
| 6 |
+
Bool,
|
| 7 |
+
Integer,
|
| 8 |
+
Set,
|
| 9 |
+
Typed,
|
| 10 |
+
Sequence
|
| 11 |
+
)
|
| 12 |
+
from openpyxl.descriptors.excel import Guid
|
| 13 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 14 |
+
from openpyxl.worksheet.page import (
|
| 15 |
+
PageMargins,
|
| 16 |
+
PrintPageSetup
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class CustomChartsheetView(Serialisable):
    """A user-defined view of a chartsheet (the <customSheetView> element).

    Attribute names mirror the OOXML schema; the Typed/Set/Bool class
    attributes are descriptors that validate assignments.
    """

    tagname = "customSheetView"

    guid = Guid()
    scale = Integer()
    # Sheet visibility in this custom view.
    state = Set(values=(['visible', 'hidden', 'veryHidden']))
    zoomToFit = Bool(allow_none=True)
    pageMargins = Typed(expected_type=PageMargins, allow_none=True)
    pageSetup = Typed(expected_type=PrintPageSetup, allow_none=True)
    headerFooter = Typed(expected_type=HeaderFooter, allow_none=True)

    # Child elements serialised, in schema order.
    __elements__ = ('pageMargins', 'pageSetup', 'headerFooter')

    def __init__(self,
                 guid=None,
                 scale=None,
                 state='visible',
                 zoomToFit=None,
                 pageMargins=None,
                 pageSetup=None,
                 headerFooter=None,
                ):
        self.guid = guid
        self.scale = scale
        self.state = state
        self.zoomToFit = zoomToFit
        self.pageMargins = pageMargins
        self.pageSetup = pageSetup
        self.headerFooter = headerFooter
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class CustomChartsheetViews(Serialisable):
    """Container for a sequence of CustomChartsheetView objects
    (the <customSheetViews> element)."""

    tagname = "customSheetViews"

    customSheetView = Sequence(expected_type=CustomChartsheetView, allow_none=True)

    __elements__ = ('customSheetView',)

    def __init__(self,
                 customSheetView=None,
                ):
        self.customSheetView = customSheetView
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/protection.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import hashlib
|
| 2 |
+
|
| 3 |
+
from openpyxl.descriptors import (Bool, Integer, String)
|
| 4 |
+
from openpyxl.descriptors.excel import Base64Binary
|
| 5 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 6 |
+
|
| 7 |
+
from openpyxl.worksheet.protection import (
|
| 8 |
+
hash_password,
|
| 9 |
+
_Protected
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ChartsheetProtection(Serialisable, _Protected):
    """Password-based protection settings for a chartsheet
    (the <sheetProtection> element)."""

    tagname = "sheetProtection"

    algorithmName = String(allow_none=True)
    hashValue = Base64Binary(allow_none=True)
    saltValue = Base64Binary(allow_none=True)
    spinCount = Integer(allow_none=True)
    content = Bool(allow_none=True)
    objects = Bool(allow_none=True)

    # XML attributes serialised on the element.
    __attrs__ = ("content", "objects", "password", "hashValue", "spinCount", "saltValue", "algorithmName")

    def __init__(self,
                 content=None,
                 objects=None,
                 hashValue=None,
                 spinCount=None,
                 saltValue=None,
                 algorithmName=None,
                 password=None,
                 ):
        self.content = content
        self.objects = objects
        self.hashValue = hashValue
        self.spinCount = spinCount
        self.saltValue = saltValue
        self.algorithmName = algorithmName
        if password is not None:
            # Presumably handled by the password property inherited from
            # _Protected (imported alongside hash_password) -- TODO confirm
            # it stores a hashed value rather than the plain text.
            self.password = password
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/chartsheet/relation.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
from openpyxl.descriptors import (
|
| 4 |
+
Integer,
|
| 5 |
+
Alias
|
| 6 |
+
)
|
| 7 |
+
from openpyxl.descriptors.excel import Relation
|
| 8 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class SheetBackgroundPicture(Serialisable):
    """Background picture of a sheet, referenced by relationship id
    (the <picture> element)."""

    tagname = "picture"
    # Relationship id ("r:id") pointing at the image part.
    id = Relation()

    def __init__(self, id):
        self.id = id
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class DrawingHF(Serialisable):
    """Header/footer drawing reference with per-page-region positions.

    Each terse attribute (e.g. ``lho``) follows the OOXML naming scheme
    <left|center|right><Header|Footer><Odd|Even|First> and is paired with
    a readable Alias (e.g. ``leftHeaderOddPages``).
    """

    # Relationship id of the drawing part.
    id = Relation()
    lho = Integer(allow_none=True)
    leftHeaderOddPages = Alias('lho')
    lhe = Integer(allow_none=True)
    leftHeaderEvenPages = Alias('lhe')
    lhf = Integer(allow_none=True)
    leftHeaderFirstPage = Alias('lhf')
    cho = Integer(allow_none=True)
    centerHeaderOddPages = Alias('cho')
    che = Integer(allow_none=True)
    centerHeaderEvenPages = Alias('che')
    chf = Integer(allow_none=True)
    centerHeaderFirstPage = Alias('chf')
    rho = Integer(allow_none=True)
    rightHeaderOddPages = Alias('rho')
    rhe = Integer(allow_none=True)
    rightHeaderEvenPages = Alias('rhe')
    rhf = Integer(allow_none=True)
    rightHeaderFirstPage = Alias('rhf')
    lfo = Integer(allow_none=True)
    leftFooterOddPages = Alias('lfo')
    lfe = Integer(allow_none=True)
    leftFooterEvenPages = Alias('lfe')
    lff = Integer(allow_none=True)
    leftFooterFirstPage = Alias('lff')
    cfo = Integer(allow_none=True)
    centerFooterOddPages = Alias('cfo')
    cfe = Integer(allow_none=True)
    centerFooterEvenPages = Alias('cfe')
    cff = Integer(allow_none=True)
    centerFooterFirstPage = Alias('cff')
    rfo = Integer(allow_none=True)
    rightFooterOddPages = Alias('rfo')
    rfe = Integer(allow_none=True)
    rightFooterEvenPages = Alias('rfe')
    rff = Integer(allow_none=True)
    rightFooterFirstPage = Alias('rff')

    def __init__(self,
                 id=None,
                 lho=None,
                 lhe=None,
                 lhf=None,
                 cho=None,
                 che=None,
                 chf=None,
                 rho=None,
                 rhe=None,
                 rhf=None,
                 lfo=None,
                 lfe=None,
                 lff=None,
                 cfo=None,
                 cfe=None,
                 cff=None,
                 rfo=None,
                 rfe=None,
                 rff=None,
                ):
        self.id = id
        self.lho = lho
        self.lhe = lhe
        self.lhf = lhf
        self.cho = cho
        self.che = che
        self.chf = chf
        self.rho = rho
        self.rhe = rhe
        self.rhf = rhf
        self.lfo = lfo
        self.lfe = lfe
        self.lff = lff
        self.cfo = cfo
        self.cfe = cfe
        self.cff = cff
        self.rfo = rfo
        self.rfe = rfe
        self.rff = rff
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/_read_only.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
""" Read worksheets on-demand
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from .worksheet import Worksheet
|
| 7 |
+
from openpyxl.cell.read_only import ReadOnlyCell, EMPTY_CELL
|
| 8 |
+
from openpyxl.utils import get_column_letter
|
| 9 |
+
|
| 10 |
+
from ._reader import WorkSheetParser
|
| 11 |
+
from openpyxl.workbook.defined_name import DefinedNameDict
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def read_dimension(source):
    """Return the declared worksheet boundaries from *source*, or None.

    Delegates to WorkSheetParser.parse_dimensions with an empty
    shared-strings list, since only the <dimension> element is needed.
    """
    return WorkSheetParser(source, []).parse_dimensions()
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class ReadOnlyWorksheet:
    """A worksheet that parses its XML source on demand instead of keeping
    cells in memory.  Rows are produced lazily by re-reading the worksheet
    part from the workbook archive each time they are requested.
    """

    # Default boundaries; _max_* stay None until a <dimension> element is
    # found or _calculate_dimension() is explicitly invoked.
    _min_column = 1
    _min_row = 1
    _max_column = _max_row = None

    # from Standard Worksheet
    # Methods from Worksheet
    # Reuse the standard Worksheet's read-side API unchanged; they all go
    # through _cells_by_row / _get_cell, which this class overrides.
    cell = Worksheet.cell
    iter_rows = Worksheet.iter_rows
    values = Worksheet.values
    rows = Worksheet.rows
    __getitem__ = Worksheet.__getitem__
    __iter__ = Worksheet.__iter__


    def __init__(self, parent_workbook, title, worksheet_path, shared_strings):
        """Bind the sheet to its workbook, remember where its XML part lives
        in the archive, and read the declared dimensions once up front."""
        self.parent = parent_workbook
        self.title = title
        self.sheet_state = 'visible'
        self._current_row = None
        self._worksheet_path = worksheet_path
        self._shared_strings = shared_strings
        self._get_size()
        self.defined_names = DefinedNameDict()


    def _get_size(self):
        # Open the source just long enough to read the <dimension> element.
        src = self._get_source()
        parser = WorkSheetParser(src, [])
        dimensions = parser.parse_dimensions()
        src.close()
        if dimensions is not None:
            self._min_column, self._min_row, self._max_column, self._max_row = dimensions


    def _get_source(self):
        """Parse xml source on demand, must close after use"""
        return self.parent._archive.open(self._worksheet_path)


    def _cells_by_row(self, min_col, min_row, max_col, max_row, values_only=False):
        """
        The source worksheet file may have columns or rows missing.
        Missing cells will be created.
        """
        # Placeholder used for absent cells: EMPTY_CELL objects normally,
        # plain None when only values are wanted.
        filler = EMPTY_CELL
        if values_only:
            filler = None

        max_col = max_col or self.max_column
        max_row = max_row or self.max_row
        empty_row = []
        if max_col is not None:
            empty_row = (filler,) * (max_col + 1 - min_col)

        # counter tracks the next row number we owe the caller; idx is the
        # row number actually seen in the XML stream.
        counter = min_row
        idx = 1
        with self._get_source() as src:
            parser = WorkSheetParser(src,
                                     self._shared_strings,
                                     data_only=self.parent.data_only,
                                     epoch=self.parent.epoch,
                                     date_formats=self.parent._date_formats,
                                     timedelta_formats=self.parent._timedelta_formats)

            for idx, row in parser.parse():
                if max_row is not None and idx > max_row:
                    break

                # some rows are missing
                for _ in range(counter, idx):
                    counter += 1
                    yield empty_row

                # return cells from a row
                if counter <= idx:
                    row = self._get_row(row, min_col, max_col, values_only)
                    counter += 1
                    yield row

        # Pad out to max_row if the stream ended early.
        if max_row is not None and max_row < idx:
            for _ in range(counter, max_row+1):
                yield empty_row


    def _get_row(self, row, min_col=1, max_col=None, values_only=False):
        """
        Make sure a row contains always the same number of cells or values
        """
        if not row and not max_col: # in case someone wants to force rows where there aren't any
            return ()

        # Without an explicit bound, size the row to its right-most cell.
        max_col = max_col or row[-1]['column']
        row_width = max_col + 1 - min_col

        new_row = [EMPTY_CELL] * row_width
        if values_only:
            new_row = [None] * row_width

        for cell in row:
            counter = cell['column']
            if min_col <= counter <= max_col:
                idx = counter - min_col # position in list of cells returned
                new_row[idx] = cell['value']
                if not values_only:
                    new_row[idx] = ReadOnlyCell(self, **cell)

        return tuple(new_row)


    def _get_cell(self, row, column):
        """Cells are returned by a generator which can be empty"""
        for row in self._cells_by_row(column, row, column, row):
            if row:
                return row[0]
        return EMPTY_CELL


    def calculate_dimension(self, force=False):
        """Return the sheet's range string (e.g. ``A1:C10``).

        Raises ValueError for unsized sheets unless *force* is True, in
        which case every row is scanned to discover the extent.
        """
        if not all([self.max_column, self.max_row]):
            if force:
                self._calculate_dimension()
            else:
                raise ValueError("Worksheet is unsized, use calculate_dimension(force=True)")
        return f"{get_column_letter(self.min_column)}{self.min_row}:{get_column_letter(self.max_column)}{self.max_row}"


    def _calculate_dimension(self):
        """
        Loop through all the cells to get the size of a worksheet.
        Do this only if it is explicitly requested.
        """

        # NOTE(review): `cell` is only bound inside the loop — presumably a
        # sheet always yields at least one non-empty row here; confirm, else
        # this raises UnboundLocalError for a completely empty sheet.
        max_col = 0
        for r in self.rows:
            if not r:
                continue
            cell = r[-1]
            max_col = max(max_col, cell.column)

        self._max_row = cell.row
        self._max_column = max_col


    def reset_dimensions(self):
        """
        Remove worksheet dimensions if these are incorrect in the worksheet source.
        NB. This probably indicates a bug in the library or application that created
        the workbook.
        """
        self._max_row = self._max_column = None


    @property
    def min_row(self):
        # Smallest row index declared by the source (defaults to 1).
        return self._min_row


    @property
    def max_row(self):
        # Largest row index, or None when the sheet is unsized.
        return self._max_row


    @property
    def min_column(self):
        # Smallest column index declared by the source (defaults to 1).
        return self._min_column


    @property
    def max_column(self):
        # Largest column index, or None when the sheet is unsized.
        return self._max_column
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/_reader.py
ADDED
|
@@ -0,0 +1,472 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
"""Reader for a single worksheet."""
|
| 4 |
+
from copy import copy
|
| 5 |
+
from warnings import warn
|
| 6 |
+
|
| 7 |
+
# compatibility imports
|
| 8 |
+
from openpyxl.xml.functions import iterparse
|
| 9 |
+
|
| 10 |
+
# package imports
|
| 11 |
+
from openpyxl.cell import Cell, MergedCell
|
| 12 |
+
from openpyxl.cell.text import Text
|
| 13 |
+
from openpyxl.worksheet.dimensions import (
|
| 14 |
+
ColumnDimension,
|
| 15 |
+
RowDimension,
|
| 16 |
+
SheetFormatProperties,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
from openpyxl.xml.constants import (
|
| 20 |
+
SHEET_MAIN_NS,
|
| 21 |
+
EXT_TYPES,
|
| 22 |
+
)
|
| 23 |
+
from openpyxl.formatting.formatting import ConditionalFormatting
|
| 24 |
+
from openpyxl.formula.translate import Translator
|
| 25 |
+
from openpyxl.utils import (
|
| 26 |
+
get_column_letter,
|
| 27 |
+
coordinate_to_tuple,
|
| 28 |
+
)
|
| 29 |
+
from openpyxl.utils.datetime import from_excel, from_ISO8601, WINDOWS_EPOCH
|
| 30 |
+
from openpyxl.descriptors.excel import ExtensionList
|
| 31 |
+
from openpyxl.cell.rich_text import CellRichText
|
| 32 |
+
|
| 33 |
+
from .formula import DataTableFormula, ArrayFormula
|
| 34 |
+
from .filters import AutoFilter
|
| 35 |
+
from .header_footer import HeaderFooter
|
| 36 |
+
from .hyperlink import HyperlinkList
|
| 37 |
+
from .merge import MergeCells
|
| 38 |
+
from .page import PageMargins, PrintOptions, PrintPageSetup
|
| 39 |
+
from .pagebreak import RowBreak, ColBreak
|
| 40 |
+
from .protection import SheetProtection
|
| 41 |
+
from .scenario import ScenarioList
|
| 42 |
+
from .views import SheetViewList
|
| 43 |
+
from .datavalidation import DataValidationList
|
| 44 |
+
from .table import TablePartList
|
| 45 |
+
from .properties import WorksheetProperties
|
| 46 |
+
from .dimensions import SheetDimension
|
| 47 |
+
from .related import Related
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# Fully-qualified XML tag names (Clark notation, "{namespace}local") for the
# worksheet part.  These are compared against element.tag while streaming
# through the document in WorkSheetParser.
CELL_TAG = '{%s}c' % SHEET_MAIN_NS
VALUE_TAG = '{%s}v' % SHEET_MAIN_NS
FORMULA_TAG = '{%s}f' % SHEET_MAIN_NS
MERGE_TAG = '{%s}mergeCells' % SHEET_MAIN_NS
INLINE_STRING = "{%s}is" % SHEET_MAIN_NS
COL_TAG = '{%s}col' % SHEET_MAIN_NS
ROW_TAG = '{%s}row' % SHEET_MAIN_NS
CF_TAG = '{%s}conditionalFormatting' % SHEET_MAIN_NS
LEGACY_TAG = '{%s}legacyDrawing' % SHEET_MAIN_NS
PROT_TAG = '{%s}sheetProtection' % SHEET_MAIN_NS
EXT_TAG = "{%s}extLst" % SHEET_MAIN_NS
HYPERLINK_TAG = "{%s}hyperlinks" % SHEET_MAIN_NS
TABLE_TAG = "{%s}tableParts" % SHEET_MAIN_NS
PRINT_TAG = '{%s}printOptions' % SHEET_MAIN_NS
MARGINS_TAG = '{%s}pageMargins' % SHEET_MAIN_NS
PAGE_TAG = '{%s}pageSetup' % SHEET_MAIN_NS
HEADER_TAG = '{%s}headerFooter' % SHEET_MAIN_NS
FILTER_TAG = '{%s}autoFilter' % SHEET_MAIN_NS
VALIDATION_TAG = '{%s}dataValidations' % SHEET_MAIN_NS
PROPERTIES_TAG = '{%s}sheetPr' % SHEET_MAIN_NS
VIEWS_TAG = '{%s}sheetViews' % SHEET_MAIN_NS
FORMAT_TAG = '{%s}sheetFormatPr' % SHEET_MAIN_NS
ROW_BREAK_TAG = '{%s}rowBreaks' % SHEET_MAIN_NS
COL_BREAK_TAG = '{%s}colBreaks' % SHEET_MAIN_NS
SCENARIOS_TAG = '{%s}scenarios' % SHEET_MAIN_NS
DATA_TAG = '{%s}sheetData' % SHEET_MAIN_NS
DIMENSION_TAG = '{%s}dimension' % SHEET_MAIN_NS
CUSTOM_VIEWS_TAG = '{%s}customSheetViews' % SHEET_MAIN_NS
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _cast_number(value):
|
| 81 |
+
"Convert numbers as string to an int or float"
|
| 82 |
+
if "." in value or "E" in value or "e" in value:
|
| 83 |
+
return float(value)
|
| 84 |
+
return int(value)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def parse_richtext_string(element):
    """
    Parse inline string and preserve rich text formatting
    """
    # Empty elements parse to a falsy CellRichText; normalise those to "".
    value = CellRichText.from_tree(element) or ""
    # A rich text container holding a single plain string is unwrapped so
    # unformatted inline strings come back as ordinary str objects.
    if len(value) == 1 and isinstance(value[0], str):
        value = value[0]
    return value
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class WorkSheetParser:
    """Streaming parser for a single worksheet XML part.

    Consumes the source with iterparse, yielding rows of cell dicts from
    parse() while accumulating non-cell content (dimensions, formatting,
    protection, breaks, ...) as attributes for later binding.
    """

    def __init__(self, src, shared_strings, data_only=False,
                 epoch=WINDOWS_EPOCH, date_formats=set(),
                 timedelta_formats=set(), rich_text=False):
        # NOTE(review): mutable default arguments (set()) are shared across
        # calls; safe here only because they are read, never mutated.
        self.min_row = self.min_col = None
        self.epoch = epoch                      # date system origin for serial dates
        self.source = src
        self.shared_strings = shared_strings    # indexable shared-string table
        self.data_only = data_only              # True: cached values instead of formulae
        self.shared_formulae = {}               # si -> Translator for shared formulae
        self.row_counter = self.col_counter = 0 # current position for cells missing "r"
        self.tables = TablePartList()
        self.date_formats = date_formats        # style ids to convert to datetimes
        self.timedelta_formats = timedelta_formats
        self.row_dimensions = {}
        self.column_dimensions = {}
        self.number_formats = []
        self.keep_vba = False
        self.hyperlinks = HyperlinkList()
        self.formatting = []                    # collected ConditionalFormatting
        self.legacy_drawing = None
        self.merged_cells = None
        self.row_breaks = RowBreak()
        self.col_breaks = ColBreak()
        self.rich_text = rich_text              # preserve inline rich text?


    def parse(self):
        """Iterate the XML stream; yield (row_number, cells) per row.

        Elements handled by a dispatcher have side effects on self;
        elements in the properties map are deserialised and stored under
        the named attribute.
        """
        # Tags requiring custom handling.
        dispatcher = {
            COL_TAG: self.parse_column_dimensions,
            PROT_TAG: self.parse_sheet_protection,
            EXT_TAG: self.parse_extensions,
            CF_TAG: self.parse_formatting,
            LEGACY_TAG: self.parse_legacy,
            ROW_BREAK_TAG: self.parse_row_breaks,
            COL_BREAK_TAG: self.parse_col_breaks,
            CUSTOM_VIEWS_TAG: self.parse_custom_views,
        }

        # Tags that map 1:1 to a serialisable object stored on self.
        properties = {
            PRINT_TAG: ('print_options', PrintOptions),
            MARGINS_TAG: ('page_margins', PageMargins),
            PAGE_TAG: ('page_setup', PrintPageSetup),
            HEADER_TAG: ('HeaderFooter', HeaderFooter),
            FILTER_TAG: ('auto_filter', AutoFilter),
            VALIDATION_TAG: ('data_validations', DataValidationList),
            PROPERTIES_TAG: ('sheet_properties', WorksheetProperties),
            VIEWS_TAG: ('views', SheetViewList),
            FORMAT_TAG: ('sheet_format', SheetFormatProperties),
            SCENARIOS_TAG: ('scenarios', ScenarioList),
            TABLE_TAG: ('tables', TablePartList),
            HYPERLINK_TAG: ('hyperlinks', HyperlinkList),
            MERGE_TAG: ('merged_cells', MergeCells),

        }

        it = iterparse(self.source) # add a finaliser to close the source when this becomes possible

        for _, element in it:
            tag_name = element.tag
            if tag_name in dispatcher:
                dispatcher[tag_name](element)
                element.clear()  # free memory as we stream
            elif tag_name in properties:
                prop = properties[tag_name]
                obj = prop[1].from_tree(element)
                setattr(self, prop[0], obj)
                element.clear()
            elif tag_name == ROW_TAG:
                row = self.parse_row(element)
                element.clear()
                yield row


    def parse_dimensions(self):
        """
        Get worksheet dimensions if they are provided.
        """
        it = iterparse(self.source)

        for _event, element in it:
            if element.tag == DIMENSION_TAG:
                dim = SheetDimension.from_tree(element)
                return dim.boundaries

            elif element.tag == DATA_TAG:
                # Dimensions missing
                break
            element.clear()


    def parse_cell(self, element):
        """Convert a <c> element to a dict of row/column/value/type/style."""
        data_type = element.get('t', 'n')  # default cell type is numeric
        coordinate = element.get('r')
        style_id = element.get('s', 0)
        if style_id:
            style_id = int(style_id)

        if data_type == "inlineStr":
            value = None  # value lives in the <is> child, handled below
        else:
            value = element.findtext(VALUE_TAG, None) or None

        if coordinate:
            row, column = coordinate_to_tuple(coordinate)
            self.col_counter = column
        else:
            # Coordinate omitted: advance from the previous cell position.
            self.col_counter += 1
            row, column = self.row_counter, self.col_counter

        if not self.data_only and element.find(FORMULA_TAG) is not None:
            data_type = 'f'
            value = self.parse_formula(element)

        elif value is not None:
            if data_type == 'n':
                value = _cast_number(value)
                # A numeric cell whose style is a date format is a serial date.
                if style_id in self.date_formats:
                    data_type = 'd'
                    try:
                        value = from_excel(
                            value, self.epoch, timedelta=style_id in self.timedelta_formats
                        )
                    except (OverflowError, ValueError):
                        msg = f"""Cell {coordinate} is marked as a date but the serial value {value} is outside the limits for dates. The cell will be treated as an error."""
                        warn(msg)
                        data_type = "e"
                        value = "#VALUE!"
            elif data_type == 's':
                # Shared string: value is an index into the shared-string table.
                value = self.shared_strings[int(value)]
            elif data_type == 'b':
                value = bool(int(value))
            elif data_type == "str":
                data_type = "s"
            elif data_type == 'd':
                value = from_ISO8601(value)

        elif data_type == 'inlineStr':
            child = element.find(INLINE_STRING)
            if child is not None:
                data_type = 's'
                if self.rich_text:
                    value = parse_richtext_string(child)
                else:
                    value = Text.from_tree(child).content

        return {'row':row, 'column':column, 'value':value, 'data_type':data_type, 'style_id':style_id}


    def parse_formula(self, element):
        """
        possible formulae types: shared, array, datatable
        """
        formula = element.find(FORMULA_TAG)
        formula_type = formula.get('t')
        coordinate = element.get('r')
        value = "="
        if formula.text is not None:
            value += formula.text

        if formula_type == "array":
            value = ArrayFormula(ref=formula.get('ref'), text=value)

        elif formula_type == "shared":
            # The first cell of a shared group carries the text; followers
            # reference it by "si" and get a translated copy.
            idx = formula.get('si')
            if idx in self.shared_formulae:
                trans = self.shared_formulae[idx]
                value = trans.translate_formula(coordinate)
            elif value != "=":
                self.shared_formulae[idx] = Translator(value, coordinate)

        elif formula_type == "dataTable":
            value = DataTableFormula(**formula.attrib)

        return value


    def parse_column_dimensions(self, col):
        # Store raw <col> attributes keyed by the column letter of "min".
        attrs = dict(col.attrib)
        column = get_column_letter(int(attrs['min']))
        attrs['index'] = column
        self.column_dimensions[column] = attrs


    def parse_row(self, row):
        """Parse one <row> element; return (row_number, list of cell dicts)."""
        attrs = dict(row.attrib)

        if "r" in attrs:
            try:
                self.row_counter = int(attrs['r'])
            except ValueError:
                # Some producers emit floats like "1.0"; accept if integral.
                val = float(attrs['r'])
                if val.is_integer():
                    self.row_counter = int(val)
                else:
                    raise ValueError(f"{attrs['r']} is not a valid row number")
        else:
            self.row_counter += 1
        self.col_counter = 0  # reset column tracking for the new row

        keys = {k for k in attrs if not k.startswith('{')}
        if keys - {'r', 'spans'}:
            # don't create dimension objects unless they have relevant information
            self.row_dimensions[str(self.row_counter)] = attrs

        cells = [self.parse_cell(el) for el in row]
        return self.row_counter, cells


    def parse_formatting(self, element):
        # Malformed rules are reported and skipped rather than aborting the load.
        try:
            cf = ConditionalFormatting.from_tree(element)
            self.formatting.append(cf)
        except TypeError as e:
            msg = f"Failed to load a conditional formatting rule. It will be discarded. Cause: {e}"
            warn(msg)


    def parse_sheet_protection(self, element):
        protection = SheetProtection.from_tree(element)
        password = element.get("password")
        if password is not None:
            protection.set_password(password, True)
        self.protection = protection


    def parse_extensions(self, element):
        # Extensions are not supported; warn once per extension found.
        extLst = ExtensionList.from_tree(element)
        for e in extLst.ext:
            ext_type = EXT_TYPES.get(e.uri.upper(), "Unknown")
            msg = "{0} extension is not supported and will be removed".format(ext_type)
            warn(msg)


    def parse_legacy(self, element):
        # Keep only the relationship id of the legacy (VML comment) drawing.
        obj = Related.from_tree(element)
        self.legacy_drawing = obj.id


    def parse_row_breaks(self, element):
        brk = RowBreak.from_tree(element)
        self.row_breaks = brk


    def parse_col_breaks(self, element):
        brk = ColBreak.from_tree(element)
        self.col_breaks = brk


    def parse_custom_views(self, element):
        # clear page_breaks to avoid duplication which Excel doesn't like
        # basically they're ignored in custom views
        self.row_breaks = RowBreak()
        self.col_breaks = ColBreak()
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
class WorksheetReader:
    """
    Create a parser and apply it to a workbook
    """

    def __init__(self, ws, xml_source, shared_strings, data_only, rich_text):
        # ws is a standard (in-memory) Worksheet; the parser streams the XML
        # and the bind_* methods copy its results onto ws.
        self.ws = ws
        self.parser = WorkSheetParser(xml_source, shared_strings,
                data_only, ws.parent.epoch, ws.parent._date_formats,
                ws.parent._timedelta_formats, rich_text)
        self.tables = []


    def bind_cells(self):
        # Materialise every parsed cell dict as a Cell bound to the sheet.
        for idx, row in self.parser.parse():
            for cell in row:
                style = self.ws.parent._cell_styles[cell['style_id']]
                c = Cell(self.ws, row=cell['row'], column=cell['column'], style_array=style)
                c._value = cell['value']
                c.data_type = cell['data_type']
                self.ws._cells[(cell['row'], cell['column'])] = c

        if self.ws._cells:
            self.ws._current_row = self.ws.max_row # use cells not row dimensions


    def bind_formatting(self):
        # Resolve dxf style references and attach rules to the sheet.
        for cf in self.parser.formatting:
            for rule in cf.rules:
                if rule.dxfId is not None:
                    rule.dxf = self.ws.parent._differential_styles[rule.dxfId]
                self.ws.conditional_formatting[cf] = rule


    def bind_tables(self):
        # Record table part targets; actual Table objects are attached later.
        for t in self.parser.tables.tablePart:
            rel = self.ws._rels.get(t.id)
            self.tables.append(rel.Target)


    def bind_merged_cells(self):
        # Imported locally to avoid a circular import at module load time.
        from openpyxl.worksheet.cell_range import MultiCellRange
        from openpyxl.worksheet.merge import MergedCellRange
        if not self.parser.merged_cells:
            return

        ranges = []
        for cr in self.parser.merged_cells.mergeCell:
            mcr = MergedCellRange(self.ws, cr.ref)
            self.ws._clean_merge_range(mcr)
            ranges.append(mcr)
        self.ws.merged_cells = MultiCellRange(ranges)


    def bind_hyperlinks(self):
        for link in self.parser.hyperlinks.hyperlink:
            if link.id:
                # External link: resolve the relationship target URL.
                rel = self.ws._rels.get(link.id)
                link.target = rel.Target
            if ":" in link.ref:
                # range of cells
                for row in self.ws[link.ref]:
                    for cell in row:
                        try:
                            cell.hyperlink = copy(link)
                        except AttributeError:
                            # MergedCells within the range cannot take links.
                            pass
            else:
                cell = self.ws[link.ref]
                if isinstance(cell, MergedCell):
                    cell = self.normalize_merged_cell_link(cell.coordinate)
                cell.hyperlink = link

    def normalize_merged_cell_link(self, coord):
        """
        Returns the appropriate cell to which a hyperlink, which references a merged cell at the specified coordinates,
        should be bound.
        """
        for rng in self.ws.merged_cells:
            if coord in rng:
                return self.ws.cell(*rng.top[0])

    def bind_col_dimensions(self):
        # Replace raw style indices with resolved style arrays.
        for col, cd in self.parser.column_dimensions.items():
            if 'style' in cd:
                key = int(cd['style'])
                cd['style'] = self.ws.parent._cell_styles[key]
            self.ws.column_dimensions[col] = ColumnDimension(self.ws, **cd)


    def bind_row_dimensions(self):
        for row, rd in self.parser.row_dimensions.items():
            if 's' in rd:
                key = int(rd['s'])
                rd['s'] = self.ws.parent._cell_styles[key]
            self.ws.row_dimensions[int(row)] = RowDimension(self.ws, **rd)


    def bind_properties(self):
        # Copy every parsed sheet-level property that was actually present.
        for k in ('print_options', 'page_margins', 'page_setup',
                  'HeaderFooter', 'auto_filter', 'data_validations',
                  'sheet_properties', 'views', 'sheet_format',
                  'row_breaks', 'col_breaks', 'scenarios', 'legacy_drawing',
                  'protection',
                  ):
            v = getattr(self.parser, k, None)
            if v is not None:
                setattr(self.ws, k, v)


    def bind_all(self):
        # Order matters: cells first, then structures that reference them.
        self.bind_cells()
        self.bind_merged_cells()
        self.bind_hyperlinks()
        self.bind_formatting()
        self.bind_col_dimensions()
        self.bind_row_dimensions()
        self.bind_tables()
        self.bind_properties()
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/_write_only.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""Write worksheets to xml representations in an optimized way"""
|
| 5 |
+
|
| 6 |
+
from inspect import isgenerator
|
| 7 |
+
|
| 8 |
+
from openpyxl.cell import Cell, WriteOnlyCell
|
| 9 |
+
from openpyxl.workbook.child import _WorkbookChild
|
| 10 |
+
from .worksheet import Worksheet
|
| 11 |
+
from openpyxl.utils.exceptions import WorkbookAlreadySaved
|
| 12 |
+
|
| 13 |
+
from ._writer import WorksheetWriter
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class WriteOnlyWorksheet(_WorkbookChild):
|
| 17 |
+
"""
|
| 18 |
+
Streaming worksheet. Optimised to reduce memory by writing rows just in
|
| 19 |
+
time.
|
| 20 |
+
Cells can be styled and have comments Styles for rows and columns
|
| 21 |
+
must be applied before writing cells
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
__saved = False
|
| 25 |
+
_writer = None
|
| 26 |
+
_rows = None
|
| 27 |
+
_rel_type = Worksheet._rel_type
|
| 28 |
+
_path = Worksheet._path
|
| 29 |
+
mime_type = Worksheet.mime_type
|
| 30 |
+
|
| 31 |
+
# copy methods from Standard worksheet
|
| 32 |
+
_add_row = Worksheet._add_row
|
| 33 |
+
_add_column = Worksheet._add_column
|
| 34 |
+
add_chart = Worksheet.add_chart
|
| 35 |
+
add_image = Worksheet.add_image
|
| 36 |
+
add_table = Worksheet.add_table
|
| 37 |
+
tables = Worksheet.tables
|
| 38 |
+
print_titles = Worksheet.print_titles
|
| 39 |
+
print_title_cols = Worksheet.print_title_cols
|
| 40 |
+
print_title_rows = Worksheet.print_title_rows
|
| 41 |
+
freeze_panes = Worksheet.freeze_panes
|
| 42 |
+
print_area = Worksheet.print_area
|
| 43 |
+
sheet_view = Worksheet.sheet_view
|
| 44 |
+
_setup = Worksheet._setup
|
| 45 |
+
|
| 46 |
+
    def __init__(self, parent, title):
        """Create a streaming sheet attached to *parent* with the given title."""
        super().__init__(parent, title)
        # Track extent as rows are appended; used when serialising dimensions.
        self._max_col = 0
        self._max_row = 0
        self._setup()
|
| 51 |
+
|
| 52 |
+
    @property
    def closed(self):
        # True once close() has flushed and sealed the sheet; further
        # appends are rejected after this.
        return self.__saved
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _write_rows(self):
|
| 58 |
+
"""
|
| 59 |
+
Send rows to the writer's stream
|
| 60 |
+
"""
|
| 61 |
+
try:
|
| 62 |
+
xf = self._writer.xf.send(True)
|
| 63 |
+
except StopIteration:
|
| 64 |
+
self._already_saved()
|
| 65 |
+
|
| 66 |
+
with xf.element("sheetData"):
|
| 67 |
+
row_idx = 1
|
| 68 |
+
try:
|
| 69 |
+
while True:
|
| 70 |
+
row = (yield)
|
| 71 |
+
row = self._values_to_row(row, row_idx)
|
| 72 |
+
self._writer.write_row(xf, row, row_idx)
|
| 73 |
+
row_idx += 1
|
| 74 |
+
except GeneratorExit:
|
| 75 |
+
pass
|
| 76 |
+
|
| 77 |
+
self._writer.xf.send(None)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _get_writer(self):
|
| 81 |
+
if self._writer is None:
|
| 82 |
+
self._writer = WorksheetWriter(self)
|
| 83 |
+
self._writer.write_top()
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def close(self):
|
| 87 |
+
if self.__saved:
|
| 88 |
+
self._already_saved()
|
| 89 |
+
|
| 90 |
+
self._get_writer()
|
| 91 |
+
|
| 92 |
+
if self._rows is None:
|
| 93 |
+
self._writer.write_rows()
|
| 94 |
+
else:
|
| 95 |
+
self._rows.close()
|
| 96 |
+
|
| 97 |
+
self._writer.write_tail()
|
| 98 |
+
|
| 99 |
+
self._writer.close()
|
| 100 |
+
self.__saved = True
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def append(self, row):
|
| 104 |
+
"""
|
| 105 |
+
:param row: iterable containing values to append
|
| 106 |
+
:type row: iterable
|
| 107 |
+
"""
|
| 108 |
+
|
| 109 |
+
if (not isgenerator(row) and
|
| 110 |
+
not isinstance(row, (list, tuple, range))
|
| 111 |
+
):
|
| 112 |
+
self._invalid_row(row)
|
| 113 |
+
|
| 114 |
+
self._get_writer()
|
| 115 |
+
|
| 116 |
+
if self._rows is None:
|
| 117 |
+
self._rows = self._write_rows()
|
| 118 |
+
next(self._rows)
|
| 119 |
+
|
| 120 |
+
self._rows.send(row)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _values_to_row(self, values, row_idx):
|
| 124 |
+
"""
|
| 125 |
+
Convert whatever has been appended into a form suitable for work_rows
|
| 126 |
+
"""
|
| 127 |
+
cell = WriteOnlyCell(self)
|
| 128 |
+
|
| 129 |
+
for col_idx, value in enumerate(values, 1):
|
| 130 |
+
if value is None:
|
| 131 |
+
continue
|
| 132 |
+
try:
|
| 133 |
+
cell.value = value
|
| 134 |
+
except ValueError:
|
| 135 |
+
if isinstance(value, Cell):
|
| 136 |
+
cell = value
|
| 137 |
+
else:
|
| 138 |
+
raise ValueError
|
| 139 |
+
|
| 140 |
+
cell.column = col_idx
|
| 141 |
+
cell.row = row_idx
|
| 142 |
+
|
| 143 |
+
if cell.hyperlink is not None:
|
| 144 |
+
cell.hyperlink.ref = cell.coordinate
|
| 145 |
+
|
| 146 |
+
yield cell
|
| 147 |
+
|
| 148 |
+
# reset cell if style applied
|
| 149 |
+
if cell.has_style or cell.hyperlink:
|
| 150 |
+
cell = WriteOnlyCell(self)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def _already_saved(self):
|
| 154 |
+
raise WorkbookAlreadySaved('Workbook has already been saved and cannot be modified or saved anymore.')
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def _invalid_row(self, iterable):
|
| 158 |
+
raise TypeError('Value must be a list, tuple, range or a generator Supplied value is {0}'.format(
|
| 159 |
+
type(iterable))
|
| 160 |
+
)
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/cell_range.py
ADDED
|
@@ -0,0 +1,512 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
from copy import copy
|
| 4 |
+
from operator import attrgetter
|
| 5 |
+
|
| 6 |
+
from openpyxl.descriptors import Strict
|
| 7 |
+
from openpyxl.descriptors import MinMax
|
| 8 |
+
from openpyxl.descriptors.sequence import UniqueSequence
|
| 9 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 10 |
+
|
| 11 |
+
from openpyxl.utils import (
|
| 12 |
+
range_boundaries,
|
| 13 |
+
range_to_tuple,
|
| 14 |
+
get_column_letter,
|
| 15 |
+
quote_sheetname,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
class CellRange(Serialisable):
    """
    Represents a range in a sheet: title and coordinates.

    This object is used to perform operations on ranges, like:

    - shift, expand or shrink
    - union/intersection with another sheet range,

    We can check whether a range is:

    - equal or not equal to another,
    - disjoint of another,
    - contained in another.

    We can get:

    - the size of a range.
    - the range bounds (vertices)
    - the coordinates,
    - the string representation,

    """

    # Excel hard limits: column XFD == 18278, 1048576 rows per sheet.
    min_col = MinMax(min=1, max=18278, expected_type=int)
    min_row = MinMax(min=1, max=1048576, expected_type=int)
    max_col = MinMax(min=1, max=18278, expected_type=int)
    max_row = MinMax(min=1, max=1048576, expected_type=int)


    def __init__(self, range_string=None, min_col=None, min_row=None,
                 max_col=None, max_row=None, title=None):
        if range_string is not None:
            if "!" in range_string:
                # Sheet-qualified reference, e.g. "'My Sheet'!A1:B2"
                title, (min_col, min_row, max_col, max_row) = range_to_tuple(range_string)
            else:
                min_col, min_row, max_col, max_row = range_boundaries(range_string)

        self.min_col = min_col
        self.min_row = min_row
        self.max_col = max_col
        self.max_row = max_row
        self.title = title

        if min_col > max_col:
            fmt = "{max_col} must be greater than {min_col}"
            raise ValueError(fmt.format(min_col=min_col, max_col=max_col))
        if min_row > max_row:
            fmt = "{max_row} must be greater than {min_row}"
            raise ValueError(fmt.format(min_row=min_row, max_row=max_row))


    @property
    def bounds(self):
        """
        Vertices of the range as a tuple
        """
        return self.min_col, self.min_row, self.max_col, self.max_row


    @property
    def coord(self):
        """
        Excel-style representation of the range
        """
        fmt = "{min_col}{min_row}:{max_col}{max_row}"
        if (self.min_col == self.max_col
            and self.min_row == self.max_row):
            # Single cell: "A1" rather than "A1:A1"
            fmt = "{min_col}{min_row}"

        return fmt.format(
            min_col=get_column_letter(self.min_col),
            min_row=self.min_row,
            max_col=get_column_letter(self.max_col),
            max_row=self.max_row
        )

    @property
    def rows(self):
        """
        Return cell coordinates as rows
        """
        for row in range(self.min_row, self.max_row+1):
            yield [(row, col) for col in range(self.min_col, self.max_col+1)]


    @property
    def cols(self):
        """
        Return cell coordinates as columns
        """
        for col in range(self.min_col, self.max_col+1):
            yield [(row, col) for row in range(self.min_row, self.max_row+1)]


    @property
    def cells(self):
        """(row, col) coordinates of every cell in the range, row-major."""
        from itertools import product
        return product(range(self.min_row, self.max_row+1), range(self.min_col, self.max_col+1))


    def _check_title(self, other):
        """
        Check whether comparisons between ranges are possible.
        Cannot compare ranges from different worksheets
        Skip if the range passed in has no title.
        """
        if not isinstance(other, CellRange):
            raise TypeError(repr(type(other)))

        if other.title and self.title != other.title:
            raise ValueError("Cannot work with ranges from different worksheets")


    def __repr__(self):
        fmt = u"<{cls} {coord}>"
        if self.title:
            fmt = u"<{cls} {title!r}!{coord}>"
        return fmt.format(cls=self.__class__.__name__, title=self.title, coord=self.coord)


    def __hash__(self):
        # NOTE: title is deliberately excluded, matching __ne__/__eq__ which
        # only compare titles when both are set.
        return hash((self.min_row, self.min_col, self.max_row, self.max_col))


    def __str__(self):
        fmt = "{coord}"
        title = self.title
        if title:
            fmt = u"{title}!{coord}"
            title = quote_sheetname(title)
        return fmt.format(title=title, coord=self.coord)


    def __copy__(self):
        return self.__class__(min_col=self.min_col, min_row=self.min_row,
                              max_col=self.max_col, max_row=self.max_row,
                              title=self.title)


    def shift(self, col_shift=0, row_shift=0):
        """
        Shift the focus of the range according to the shift values (*col_shift*, *row_shift*).

        :type col_shift: int
        :param col_shift: number of columns to be moved by, can be negative
        :type row_shift: int
        :param row_shift: number of rows to be moved by, can be negative
        :raise: :class:`ValueError` if any row or column index < 1
        """

        if (self.min_col + col_shift <= 0
            or self.min_row + row_shift <= 0):
            raise ValueError("Invalid shift value: col_shift={0}, row_shift={1}".format(col_shift, row_shift))
        self.min_col += col_shift
        self.min_row += row_shift
        self.max_col += col_shift
        self.max_row += row_shift


    def __ne__(self, other):
        """
        Test whether the ranges are not equal.

        :type other: openpyxl.worksheet.cell_range.CellRange
        :param other: Other sheet range
        :return: ``True`` if *range* != *other*.
        """
        try:
            self._check_title(other)
        except ValueError:
            # Different worksheets: the ranges cannot be equal.
            return True

        return (
            other.min_row != self.min_row
            or self.max_row != other.max_row
            or other.min_col != self.min_col
            or self.max_col != other.max_col
        )


    def __eq__(self, other):
        """
        Test whether the ranges are equal.

        :type other: openpyxl.worksheet.cell_range.CellRange
        :param other: Other sheet range
        :return: ``True`` if *range* == *other*.
        """
        return not self.__ne__(other)


    def issubset(self, other):
        """
        Test whether every cell in this range is also in *other*.

        :type other: openpyxl.worksheet.cell_range.CellRange
        :param other: Other sheet range
        :return: ``True`` if *range* <= *other*.
        """
        self._check_title(other)

        return other.__superset(self)

    __le__ = issubset


    def __lt__(self, other):
        """
        Test whether *other* contains every cell of this range, and more.

        :type other: openpyxl.worksheet.cell_range.CellRange
        :param other: Other sheet range
        :return: ``True`` if *range* < *other*.
        """
        return self.__le__(other) and self.__ne__(other)


    def __superset(self, other):
        # True when *other*'s bounding box lies entirely within ours.
        return (
            (self.min_row <= other.min_row <= other.max_row <= self.max_row)
            and
            (self.min_col <= other.min_col <= other.max_col <= self.max_col)
        )


    def issuperset(self, other):
        """
        Test whether every cell in *other* is in this range.

        :type other: openpyxl.worksheet.cell_range.CellRange
        :param other: Other sheet range
        :return: ``True`` if *range* >= *other* (or *other* in *range*).
        """
        self._check_title(other)

        return self.__superset(other)

    __ge__ = issuperset


    def __contains__(self, coord):
        """
        Check whether the range contains a particular cell coordinate
        """
        cr = self.__class__(coord)
        return self.__superset(cr)


    def __gt__(self, other):
        """
        Test whether this range contains every cell in *other*, and more.

        :type other: openpyxl.worksheet.cell_range.CellRange
        :param other: Other sheet range
        :return: ``True`` if *range* > *other*.
        """
        return self.__ge__(other) and self.__ne__(other)


    def isdisjoint(self, other):
        """
        Return ``True`` if this range has no cell in common with *other*.
        Ranges are disjoint if and only if their intersection is the empty range.

        :type other: openpyxl.worksheet.cell_range.CellRange
        :param other: Other sheet range.
        :return: ``True`` if the range has no cells in common with other.
        """
        self._check_title(other)

        # Sort by top-left vertex
        if self.bounds > other.bounds:
            self, other = other, self

        return (self.max_col < other.min_col
                or self.max_row < other.min_row
                or other.max_row < self.min_row)


    def intersection(self, other):
        """
        Return a new range with cells common to this range and *other*

        :type other: openpyxl.worksheet.cell_range.CellRange
        :param other: Other sheet range.
        :return: the intersecting sheet range.
        :raise: :class:`ValueError` if the *other* range doesn't intersect
            with this range.
        """
        if self.isdisjoint(other):
            # BUGFIX: second placeholder was {0}, so the message printed
            # *self* twice and never showed *other*.
            raise ValueError("Range {0} doesn't intersect {1}".format(self, other))

        min_row = max(self.min_row, other.min_row)
        max_row = min(self.max_row, other.max_row)
        min_col = max(self.min_col, other.min_col)
        max_col = min(self.max_col, other.max_col)

        return CellRange(min_col=min_col, min_row=min_row, max_col=max_col,
                         max_row=max_row)

    __and__ = intersection


    def union(self, other):
        """
        Return the minimal superset of this range and *other*. This new range
        will contain all cells from this range, *other*, and any additional
        cells required to form a rectangular ``CellRange``.

        :type other: openpyxl.worksheet.cell_range.CellRange
        :param other: Other sheet range.
        :return: a ``CellRange`` that is a superset of this and *other*.
        """
        self._check_title(other)

        min_row = min(self.min_row, other.min_row)
        max_row = max(self.max_row, other.max_row)
        min_col = min(self.min_col, other.min_col)
        max_col = max(self.max_col, other.max_col)
        return CellRange(min_col=min_col, min_row=min_row, max_col=max_col,
                         max_row=max_row, title=self.title)

    __or__ = union


    def __iter__(self):
        """
        For use as a dictionary elsewhere in the library.
        """
        for x in self.__attrs__:
            if x == "title":
                continue
            v = getattr(self, x)
            yield x, v


    def expand(self, right=0, down=0, left=0, up=0):
        """
        Expand the range by the dimensions provided.

        :type right: int
        :param right: expand range to the right by this number of cells
        :type down: int
        :param down: expand range down by this number of cells
        :type left: int
        :param left: expand range to the left by this number of cells
        :type up: int
        :param up: expand range up by this number of cells
        """
        self.min_col -= left
        self.min_row -= up
        self.max_col += right
        self.max_row += down


    def shrink(self, right=0, bottom=0, left=0, top=0):
        """
        Shrink the range by the dimensions provided.

        :type right: int
        :param right: shrink range from the right by this number of cells
        :type bottom: int
        :param bottom: shrink range from the bottom by this number of cells
        :type left: int
        :param left: shrink range from the left by this number of cells
        :type top: int
        :param top: shrink range from the top by this number of cells
        """
        self.min_col += left
        self.min_row += top
        self.max_col -= right
        self.max_row -= bottom


    @property
    def size(self):
        """ Return the size of the range as a dictionary of rows and columns. """
        cols = self.max_col + 1 - self.min_col
        rows = self.max_row + 1 - self.min_row
        return {'columns':cols, 'rows':rows}


    @property
    def top(self):
        """A list of cell coordinates that comprise the top of the range"""
        return [(self.min_row, col) for col in range(self.min_col, self.max_col+1)]


    @property
    def bottom(self):
        """A list of cell coordinates that comprise the bottom of the range"""
        return [(self.max_row, col) for col in range(self.min_col, self.max_col+1)]


    @property
    def left(self):
        """A list of cell coordinates that comprise the left-side of the range"""
        return [(row, self.min_col) for row in range(self.min_row, self.max_row+1)]


    @property
    def right(self):
        """A list of cell coordinates that comprise the right-side of the range"""
        return [(row, self.max_col) for row in range(self.min_row, self.max_row+1)]
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
class MultiCellRange(Strict):
    """
    An unordered collection of unique ``CellRange`` objects, e.g. the target
    of a data validation or conditional format.
    """

    ranges = UniqueSequence(expected_type=CellRange)


    def __init__(self, ranges=()):
        # BUGFIX: the default was the mutable ``set()``; an immutable empty
        # tuple avoids the shared-mutable-default pitfall and behaves
        # identically (``set(())`` == ``set()``).
        if isinstance(ranges, str):
            # Space-separated list of range strings, e.g. "A1:B2 D4:E5"
            ranges = [CellRange(r) for r in ranges.split()]
        self.ranges = set(ranges)


    def __contains__(self, coord):
        """Return True if *coord* (string or CellRange) lies within any member range."""
        if isinstance(coord, str):
            coord = CellRange(coord)
        for r in self.ranges:
            if coord <= r:
                return True
        return False


    def __repr__(self):
        ranges = " ".join([str(r) for r in self.sorted()])
        return f"<{self.__class__.__name__} [{ranges}]>"


    def __str__(self):
        ranges = u" ".join([str(r) for r in self.sorted()])
        return ranges


    def __hash__(self):
        # Based on the sorted string form so order of insertion is irrelevant.
        return hash(str(self))


    def sorted(self):
        """
        Return a sorted list of items
        """
        return sorted(self.ranges, key=attrgetter('min_col', 'min_row', 'max_col', 'max_row'))


    def add(self, coord):
        """
        Add a cell coordinate or CellRange
        """
        cr = coord
        if isinstance(coord, str):
            cr = CellRange(coord)
        elif not isinstance(coord, CellRange):
            raise ValueError("You can only add CellRanges")
        # Skip ranges already covered by an existing member.
        if cr not in self:
            self.ranges.add(cr)


    def __iadd__(self, coord):
        self.add(coord)
        return self


    def __eq__(self, other):
        if isinstance(other, str):
            other = self.__class__(other)
        return self.ranges == other.ranges


    def __ne__(self, other):
        return not self == other


    def __bool__(self):
        return bool(self.ranges)


    def remove(self, coord):
        """Remove a range; raises KeyError if it is not a member."""
        if not isinstance(coord, CellRange):
            coord = CellRange(coord)
        self.ranges.remove(coord)


    def __iter__(self):
        for cr in self.ranges:
            yield cr


    def __copy__(self):
        ranges = {copy(r) for r in self.ranges}
        return MultiCellRange(ranges)
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/header_footer.py
ADDED
|
@@ -0,0 +1,270 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
# Simplified implementation of headers and footers: let worksheets have separate items
|
| 4 |
+
|
| 5 |
+
import re
|
| 6 |
+
from warnings import warn
|
| 7 |
+
|
| 8 |
+
from openpyxl.descriptors import (
|
| 9 |
+
Alias,
|
| 10 |
+
Bool,
|
| 11 |
+
Strict,
|
| 12 |
+
String,
|
| 13 |
+
Integer,
|
| 14 |
+
MatchPattern,
|
| 15 |
+
Typed,
|
| 16 |
+
)
|
| 17 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
from openpyxl.xml.functions import Element
|
| 21 |
+
from openpyxl.utils.escape import escape, unescape
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
FONT_PATTERN = '&"(?P<font>.+)"'
|
| 25 |
+
COLOR_PATTERN = "&K(?P<color>[A-F0-9]{6})"
|
| 26 |
+
SIZE_REGEX = r"&(?P<size>\d+\s?)"
|
| 27 |
+
FORMAT_REGEX = re.compile("{0}|{1}|{2}".format(FONT_PATTERN, COLOR_PATTERN,
|
| 28 |
+
SIZE_REGEX)
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
def _split_string(text):
|
| 32 |
+
"""
|
| 33 |
+
Split the combined (decoded) string into left, center and right parts
|
| 34 |
+
|
| 35 |
+
# See http://stackoverflow.com/questions/27711175/regex-with-multiple-optional-groups for discussion
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
ITEM_REGEX = re.compile("""
|
| 39 |
+
(&L(?P<left>.+?))?
|
| 40 |
+
(&C(?P<center>.+?))?
|
| 41 |
+
(&R(?P<right>.+?))?
|
| 42 |
+
$""", re.VERBOSE | re.DOTALL)
|
| 43 |
+
|
| 44 |
+
m = ITEM_REGEX.match(text)
|
| 45 |
+
try:
|
| 46 |
+
parts = m.groupdict()
|
| 47 |
+
except AttributeError:
|
| 48 |
+
warn("""Cannot parse header or footer so it will be ignored""")
|
| 49 |
+
parts = {'left':'', 'right':'', 'center':''}
|
| 50 |
+
return parts
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class _HeaderFooterPart(Strict):

    """
    One left/center/right section of a header or footer.

    Do not use directly.

    Header & Footer ampersand codes:

    * &A   Inserts the worksheet name
    * &B   Toggles bold
    * &D or &[Date]   Inserts the current date
    * &E   Toggles double-underline
    * &F or &[File]   Inserts the workbook name
    * &I   Toggles italic
    * &N or &[Pages]   Inserts the total page count
    * &S   Toggles strikethrough
    * &T   Inserts the current time
    * &[Tab]   Inserts the worksheet name
    * &U   Toggles underline
    * &X   Toggles superscript
    * &Y   Toggles subscript
    * &P or &[Page]   Inserts the current page number
    * &P+n   Inserts the page number incremented by n
    * &P-n   Inserts the page number decremented by n
    * &[Path]   Inserts the workbook path
    * &&   Escapes the ampersand character
    * &"fontname"   Selects the named font
    * &nn   Selects the specified 2-digit font point size

    Colours are in RGB Hex
    """

    text = String(allow_none=True)
    font = String(allow_none=True)
    size = Integer(allow_none=True)
    RGB = ("^[A-Fa-f0-9]{6}$")
    color = MatchPattern(allow_none=True, pattern=RGB)


    def __init__(self, text=None, font=None, size=None, color=None):
        self.text = text
        self.font = font
        self.size = size
        self.color = color


    def __str__(self):
        """
        Serialise to the Excel mini-format, formatting codes first, then the
        text (the position code &L/&C/&R is added by the owning item).
        """
        pieces = []
        if self.font:
            pieces.append(f'&"{self.font}"')
        if self.size:
            # trailing space separates the size from any following digits
            pieces.append(f"&{self.size} ")
        if self.color:
            pieces.append(f"&K{self.color}")
        pieces.append(self.text)
        return u"".join(pieces)

    def __bool__(self):
        # A part counts as present only when it carries text.
        return bool(self.text)



    @classmethod
    def from_str(cls, text):
        """
        Parse the mini-format into a part: pull out font/color/size codes,
        whatever remains is the literal text.
        """
        attrs = {}
        for font, color, size in FORMAT_REGEX.findall(text):
            # later occurrences of a code overwrite earlier ones
            if font:
                attrs['font'] = font
            if color:
                attrs['color'] = color
            if size:
                attrs['size'] = size
        attrs['text'] = FORMAT_REGEX.sub('', text)
        return cls(**attrs)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class HeaderFooterItem(Strict):
    """
    Header or footer item

    Holds the three positional parts (left, center, right) of a single
    header or footer and converts between them and the packed Excel
    mini-format string.
    """

    left = Typed(expected_type=_HeaderFooterPart)
    center = Typed(expected_type=_HeaderFooterPart)
    centre = Alias("center")
    right = Typed(expected_type=_HeaderFooterPart)

    # Position codes prefixed to each part when packing (&L, &C, &R).
    # Name-mangled: accessed as self.__keys below.
    __keys = ('L', 'C', 'R')


    def __init__(self, left=None, right=None, center=None):
        # Default each missing part to an empty part so attribute access
        # never has to handle None.
        if left is None:
            left = _HeaderFooterPart()
        self.left = left
        if center is None:
            center = _HeaderFooterPart()
        self.center = center
        if right is None:
            right = _HeaderFooterPart()
        self.right = right


    def __str__(self):
        """
        Pack parts into a single string
        """
        # Expanded &[...] controls and their mini-format equivalents.
        TRANSFORM = {'&[Tab]': '&A', '&[Pages]': '&N', '&[Date]': '&D',
                     '&[Path]': '&Z', '&[Page]': '&P', '&[Time]': '&T', '&[File]': '&F',
                     '&[Picture]': '&G'}

        # escape keys and create regex
        SUBS_REGEX = re.compile("|".join(["({0})".format(re.escape(k))
                                          for k in TRANSFORM]))

        def replace(match):
            """
            Callback for re.sub
            Replace expanded control with mini-format equivalent
            """
            sub = match.group(0)
            return TRANSFORM[sub]

        # Concatenate only the parts that actually have text, each prefixed
        # with its position code.
        txt = []
        for key, part in zip(
            self.__keys, [self.left, self.center, self.right]):
            if part.text is not None:
                txt.append(u"&{0}{1}".format(key, str(part)))
        txt = "".join(txt)
        txt = SUBS_REGEX.sub(replace, txt)
        return escape(txt)


    def __bool__(self):
        # True when any of the three parts has text.
        return any([self.left, self.center, self.right])



    def to_tree(self, tagname):
        """
        Return as XML node
        """
        el = Element(tagname)
        el.text = str(self)
        return el


    @classmethod
    def from_tree(cls, node):
        # Empty node -> all parts default; otherwise unescape, split into
        # positional sections and parse each one.
        if node.text:
            text = unescape(node.text)
            parts = _split_string(text)
            for k, v in parts.items():
                if v is not None:
                    parts[k] = _HeaderFooterPart.from_str(v)
        self = cls(**parts)
        return self
| 212 |
+
|
| 213 |
+
|
| 214 |
+
class HeaderFooter(Serialisable):
    """Worksheet header/footer settings for odd, even and first pages."""

    tagname = "headerFooter"

    differentOddEven = Bool(allow_none=True)
    differentFirst = Bool(allow_none=True)
    scaleWithDoc = Bool(allow_none=True)
    alignWithMargins = Bool(allow_none=True)
    oddHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
    oddFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
    evenHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
    evenFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
    firstHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
    firstFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)

    __elements__ = ("oddHeader", "oddFooter", "evenHeader", "evenFooter", "firstHeader", "firstFooter")

    def __init__(self,
                 differentOddEven=None,
                 differentFirst=None,
                 scaleWithDoc=None,
                 alignWithMargins=None,
                 oddHeader=None,
                 oddFooter=None,
                 evenHeader=None,
                 evenFooter=None,
                 firstHeader=None,
                 firstFooter=None,
                 ):
        self.differentOddEven = differentOddEven
        self.differentFirst = differentFirst
        self.scaleWithDoc = scaleWithDoc
        self.alignWithMargins = alignWithMargins
        # Every header/footer slot defaults to an empty HeaderFooterItem so
        # attribute access never raises.
        for attr, item in (
            ("oddHeader", oddHeader),
            ("oddFooter", oddFooter),
            ("evenHeader", evenHeader),
            ("evenFooter", evenFooter),
            ("firstHeader", firstHeader),
            ("firstFooter", firstFooter),
        ):
            setattr(self, attr, HeaderFooterItem() if item is None else item)


    def __bool__(self):
        # Truthy when any flag or any header/footer item has content.
        return any(getattr(self, attr)
                   for attr in self.__attrs__ + self.__elements__)
|
| 270 |
+
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/ole.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 4 |
+
from openpyxl.descriptors import (
|
| 5 |
+
Typed,
|
| 6 |
+
Integer,
|
| 7 |
+
String,
|
| 8 |
+
Set,
|
| 9 |
+
Bool,
|
| 10 |
+
Sequence,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
from openpyxl.drawing.spreadsheet_drawing import AnchorMarker
|
| 14 |
+
from openpyxl.xml.constants import SHEET_DRAWING_NS
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ObjectAnchor(Serialisable):
    """
    Anchor describing where an embedded object is placed on the sheet.
    """

    tagname = "anchor"

    # ``_from`` avoids shadowing the ``from`` keyword; both markers live in
    # the spreadsheet-drawing namespace.
    _from = Typed(expected_type=AnchorMarker, namespace=SHEET_DRAWING_NS)
    to = Typed(expected_type=AnchorMarker, namespace=SHEET_DRAWING_NS)
    moveWithCells = Bool(allow_none=True)
    sizeWithCells = Bool(allow_none=True)
    # Serialised with a hyphenated attribute name ("z-order").
    z_order = Integer(allow_none=True, hyphenated=True)


    def __init__(self,
                 _from=None,
                 to=None,
                 moveWithCells=False,
                 sizeWithCells=False,
                 z_order=None,
                 ):
        self._from = _from
        self.to = to
        self.moveWithCells = moveWithCells
        self.sizeWithCells = sizeWithCells
        self.z_order = z_order
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class ObjectPr(Serialisable):
    """
    Display and behaviour properties of an embedded OLE object.
    """

    tagname = "objectPr"

    anchor = Typed(expected_type=ObjectAnchor, )
    locked = Bool(allow_none=True)
    defaultSize = Bool(allow_none=True)
    # Leading underscore avoids shadowing the ``print`` builtin.
    _print = Bool(allow_none=True)
    disabled = Bool(allow_none=True)
    uiObject = Bool(allow_none=True)
    autoFill = Bool(allow_none=True)
    autoLine = Bool(allow_none=True)
    autoPict = Bool(allow_none=True)
    macro = String()
    altText = String(allow_none=True)
    dde = Bool(allow_none=True)

    # Only the anchor is serialised as a child element; the rest are
    # XML attributes.
    __elements__ = ('anchor',)

    def __init__(self,
                 anchor=None,
                 locked=True,
                 defaultSize=True,
                 _print=True,
                 disabled=False,
                 uiObject=False,
                 autoFill=True,
                 autoLine=True,
                 autoPict=True,
                 macro=None,
                 altText=None,
                 dde=False,
                 ):
        self.anchor = anchor
        self.locked = locked
        self.defaultSize = defaultSize
        self._print = _print
        self.disabled = disabled
        self.uiObject = uiObject
        self.autoFill = autoFill
        self.autoLine = autoLine
        self.autoPict = autoPict
        self.macro = macro
        self.altText = altText
        self.dde = dde
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class OleObject(Serialisable):
    """
    A single embedded or linked OLE object on a worksheet.
    """

    tagname = "oleObject"

    objectPr = Typed(expected_type=ObjectPr, allow_none=True)
    progId = String(allow_none=True)
    dvAspect = Set(values=(['DVASPECT_CONTENT', 'DVASPECT_ICON']))
    link = String(allow_none=True)
    # NOTE(review): ``oleUpdate`` is a strict Set (not NoneSet) yet the
    # constructor default below is None, which is not among the allowed
    # values — constructing with defaults may be rejected by the
    # descriptor. Confirm before relying on OleObject().
    oleUpdate = Set(values=(['OLEUPDATE_ALWAYS', 'OLEUPDATE_ONCALL']))
    autoLoad = Bool(allow_none=True)
    shapeId = Integer()

    __elements__ = ('objectPr',)

    def __init__(self,
                 objectPr=None,
                 progId=None,
                 dvAspect='DVASPECT_CONTENT',
                 link=None,
                 oleUpdate=None,
                 autoLoad=False,
                 shapeId=None,
                 ):
        self.objectPr = objectPr
        self.progId = progId
        self.dvAspect = dvAspect
        self.link = link
        self.oleUpdate = oleUpdate
        self.autoLoad = autoLoad
        self.shapeId = shapeId
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class OleObjects(Serialisable):
    """
    Container for all OLE objects belonging to a worksheet.
    """

    tagname = "oleObjects"

    # Sequence descriptor validates each entry as an OleObject.
    oleObject = Sequence(expected_type=OleObject)

    __elements__ = ('oleObject',)

    def __init__(self,
                 oleObject=(),
                 ):
        self.oleObject = oleObject
|
| 133 |
+
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/picture.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#Autogenerated schema
|
| 2 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 3 |
+
|
| 4 |
+
# same as related
|
| 5 |
+
|
| 6 |
+
class SheetBackgroundPicture(Serialisable):
    """Placeholder for the sheet background picture element.

    Only the tag name is declared here; the relationship id is handled
    like the ``Related`` element (see the comment above).
    """

    tagname = "sheetBackgroundPicture"
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/related.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 4 |
+
from openpyxl.descriptors.excel import Relation
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Related(Serialisable):
    """Element that carries only a relationship id (r:id)."""

    id = Relation()


    def __init__(self, id=None):
        self.id = id


    def to_tree(self, tagname, idx=None):
        # ``idx`` is accepted for signature compatibility with other
        # serialisable elements but deliberately ignored.
        return super().to_tree(tagname)
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/scenario.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 4 |
+
from openpyxl.descriptors import (
|
| 5 |
+
String,
|
| 6 |
+
Integer,
|
| 7 |
+
Bool,
|
| 8 |
+
Sequence,
|
| 9 |
+
Convertible,
|
| 10 |
+
)
|
| 11 |
+
from .cell_range import MultiCellRange
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class InputCells(Serialisable):
    """
    A single changing-cell entry of a what-if scenario.
    """

    tagname = "inputCells"

    # r: cell reference; val: the value applied when the scenario is shown.
    r = String()
    deleted = Bool(allow_none=True)
    undone = Bool(allow_none=True)
    val = String()
    numFmtId = Integer(allow_none=True)

    def __init__(self,
                 r=None,
                 deleted=False,
                 undone=False,
                 val=None,
                 numFmtId=None,
                 ):
        self.r = r
        self.deleted = deleted
        self.undone = undone
        self.val = val
        self.numFmtId = numFmtId
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class Scenario(Serialisable):
    """
    A named what-if scenario: a set of input cells with stored values.
    """

    tagname = "scenario"

    inputCells = Sequence(expected_type=InputCells)
    name = String()
    locked = Bool(allow_none=True)
    hidden = Bool(allow_none=True)
    user = String(allow_none=True)
    comment = String(allow_none=True)

    __elements__ = ('inputCells',)
    # ``count`` is serialised as an attribute but computed from inputCells.
    __attrs__ = ('name', 'locked', 'hidden', 'user', 'comment', 'count')

    def __init__(self,
                 inputCells=(),
                 name=None,
                 locked=False,
                 hidden=False,
                 count=None,
                 user=None,
                 comment=None,
                 ):
        # ``count`` is accepted (it appears in the XML) but intentionally
        # discarded: the value is always derived from len(inputCells) via
        # the read-only property below.
        self.inputCells = inputCells
        self.name = name
        self.locked = locked
        self.hidden = hidden
        self.user = user
        self.comment = comment


    @property
    def count(self):
        # Number of input cells; kept in sync automatically.
        return len(self.inputCells)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class ScenarioList(Serialisable):
    """Container for the what-if scenarios defined on a worksheet."""

    tagname = "scenarios"

    scenario = Sequence(expected_type=Scenario)
    current = Integer(allow_none=True)
    show = Integer(allow_none=True)
    sqref = Convertible(expected_type=MultiCellRange, allow_none=True)

    __elements__ = ('scenario',)

    def __init__(self,
                 scenario=(),
                 current=None,
                 show=None,
                 sqref=None,
                 ):
        self.scenario = scenario
        self.current = current
        self.show = show
        self.sqref = sqref


    def append(self, scenario):
        """Add *scenario*, reassigning through the descriptor so the
        sequence is re-validated."""
        self.scenario = list(self.scenario) + [scenario]


    def __bool__(self):
        # Truthy when at least one scenario is present.
        return len(self.scenario) > 0
|
| 105 |
+
|
evalkit_tf437/lib/python3.10/site-packages/openpyxl/worksheet/views.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2010-2024 openpyxl
|
| 2 |
+
|
| 3 |
+
from openpyxl.descriptors import (
|
| 4 |
+
Bool,
|
| 5 |
+
Integer,
|
| 6 |
+
String,
|
| 7 |
+
Set,
|
| 8 |
+
Float,
|
| 9 |
+
Typed,
|
| 10 |
+
NoneSet,
|
| 11 |
+
Sequence,
|
| 12 |
+
)
|
| 13 |
+
from openpyxl.descriptors.excel import ExtensionList
|
| 14 |
+
from openpyxl.descriptors.serialisable import Serialisable
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class Pane(Serialisable):
    """Split/frozen pane settings of a sheet view."""

    # Split positions; for frozen panes these are column/row counts —
    # TODO confirm against the callers that set them.
    xSplit = Float(allow_none=True)
    ySplit = Float(allow_none=True)
    topLeftCell = String(allow_none=True)
    activePane = Set(values=("bottomRight", "topRight", "bottomLeft", "topLeft"))
    state = Set(values=("split", "frozen", "frozenSplit"))

    def __init__(self,
                 xSplit=None,
                 ySplit=None,
                 topLeftCell=None,
                 activePane="topLeft",
                 state="split"):
        self.xSplit = xSplit
        self.ySplit = ySplit
        self.topLeftCell = topLeftCell
        self.activePane = activePane
        self.state = state
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class Selection(Serialisable):
    """Current cell selection within one pane of a sheet view."""

    pane = NoneSet(values=("bottomRight", "topRight", "bottomLeft", "topLeft"))
    activeCell = String(allow_none=True)
    activeCellId = Integer(allow_none=True)
    # sqref may contain multiple space-separated ranges.
    sqref = String(allow_none=True)

    def __init__(self,
                 pane=None,
                 activeCell="A1",
                 activeCellId=None,
                 sqref="A1"):
        self.pane = pane
        self.activeCell = activeCell
        self.activeCellId = activeCellId
        self.sqref = sqref
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class SheetView(Serialisable):

    """Information about the visible portions of this sheet."""

    tagname = "sheetView"

    windowProtection = Bool(allow_none=True)
    showFormulas = Bool(allow_none=True)
    showGridLines = Bool(allow_none=True)
    showRowColHeaders = Bool(allow_none=True)
    showZeros = Bool(allow_none=True)
    rightToLeft = Bool(allow_none=True)
    tabSelected = Bool(allow_none=True)
    showRuler = Bool(allow_none=True)
    showOutlineSymbols = Bool(allow_none=True)
    defaultGridColor = Bool(allow_none=True)
    showWhiteSpace = Bool(allow_none=True)
    view = NoneSet(values=("normal", "pageBreakPreview", "pageLayout"))
    topLeftCell = String(allow_none=True)
    colorId = Integer(allow_none=True)
    zoomScale = Integer(allow_none=True)
    zoomScaleNormal = Integer(allow_none=True)
    zoomScaleSheetLayoutView = Integer(allow_none=True)
    zoomScalePageLayoutView = Integer(allow_none=True)
    zoomToFit = Bool(allow_none=True) # Chart sheets only
    workbookViewId = Integer()
    selection = Sequence(expected_type=Selection)
    pane = Typed(expected_type=Pane, allow_none=True)

    def __init__(self,
                 windowProtection=None,
                 showFormulas=None,
                 showGridLines=None,
                 showRowColHeaders=None,
                 showZeros=None,
                 rightToLeft=None,
                 tabSelected=None,
                 showRuler=None,
                 showOutlineSymbols=None,
                 defaultGridColor=None,
                 showWhiteSpace=None,
                 view=None,
                 topLeftCell=None,
                 colorId=None,
                 zoomScale=None,
                 zoomScaleNormal=None,
                 zoomScaleSheetLayoutView=None,
                 zoomScalePageLayoutView=None,
                 zoomToFit=None,
                 workbookViewId=0,
                 selection=None,
                 pane=None,):
        self.windowProtection = windowProtection
        self.showFormulas = showFormulas
        self.showGridLines = showGridLines
        self.showRowColHeaders = showRowColHeaders
        self.showZeros = showZeros
        self.rightToLeft = rightToLeft
        self.tabSelected = tabSelected
        self.showRuler = showRuler
        self.showOutlineSymbols = showOutlineSymbols
        self.defaultGridColor = defaultGridColor
        self.showWhiteSpace = showWhiteSpace
        self.view = view
        self.topLeftCell = topLeftCell
        self.colorId = colorId
        self.zoomScale = zoomScale
        self.zoomScaleNormal = zoomScaleNormal
        self.zoomScaleSheetLayoutView = zoomScaleSheetLayoutView
        self.zoomScalePageLayoutView = zoomScalePageLayoutView
        self.zoomToFit = zoomToFit
        self.workbookViewId = workbookViewId
        self.pane = pane
        # At least one selection is always present so ``selection[0]`` is safe.
        if selection is None:
            selection = (Selection(), )
        self.selection = selection
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class SheetViewList(Serialisable):
    """Container for the sheet views of a worksheet.

    Always holds at least one :class:`SheetView` so that ``active`` is safe
    to call.
    """

    tagname = "sheetViews"

    sheetView = Sequence(expected_type=SheetView, )
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = ('sheetView',)

    def __init__(self,
                 sheetView=None,
                 extLst=None,
                 ):
        if sheetView is None:
            sheetView = [SheetView()]
        self.sheetView = sheetView
        # Fix: the extLst parameter was previously accepted but never
        # stored, leaving the declared descriptor attribute unset.
        self.extLst = extLst


    @property
    def active(self):
        """
        Returns the first sheet view which is assumed to be active
        """
        return self.sheetView[0]
|
evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/AUTHORS
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
James Robert
|
| 2 |
+
github: jiaaro
|
| 3 |
+
twitter: @jiaaro
|
| 4 |
+
web: jiaaro.com
|
| 5 |
+
email: [email protected]
|
| 6 |
+
|
| 7 |
+
Marc Webbie
|
| 8 |
+
github: marcwebbie
|
| 9 |
+
|
| 10 |
+
Jean-philippe Serafin
|
| 11 |
+
github: jeanphix
|
| 12 |
+
|
| 13 |
+
Anurag Ramdasan
|
| 14 |
+
github: AnuragRamdasan
|
| 15 |
+
|
| 16 |
+
Choongmin Lee
|
| 17 |
+
github: clee704
|
| 18 |
+
|
| 19 |
+
Patrick Pittman
|
| 20 |
+
github: ptpittman
|
| 21 |
+
|
| 22 |
+
Hunter Lang
|
| 23 |
+
github: hunterlang
|
| 24 |
+
|
| 25 |
+
Alexey
|
| 26 |
+
github: nihisil
|
| 27 |
+
|
| 28 |
+
Jaymz Campbell
|
| 29 |
+
github: jaymzcd
|
| 30 |
+
|
| 31 |
+
Ross McFarland
|
| 32 |
+
github: ross
|
| 33 |
+
|
| 34 |
+
John McMellen
|
| 35 |
+
github: jmcmellen
|
| 36 |
+
|
| 37 |
+
Johan Lövgren
|
| 38 |
+
github: dashj
|
| 39 |
+
|
| 40 |
+
Joachim Krüger
|
| 41 |
+
github: jkrgr
|
| 42 |
+
|
| 43 |
+
Shichao An
|
| 44 |
+
github: shichao-an
|
| 45 |
+
|
| 46 |
+
Michael Bortnyck
|
| 47 |
+
github: mbortnyck
|
| 48 |
+
|
| 49 |
+
André Cloete
|
| 50 |
+
github: aj-cloete
|
| 51 |
+
|
| 52 |
+
David Acacio
|
| 53 |
+
github: dacacioa
|
| 54 |
+
|
| 55 |
+
Thiago Abdnur
|
| 56 |
+
github: bolaum
|
| 57 |
+
|
| 58 |
+
Aurélien Ooms
|
| 59 |
+
github: aureooms
|
| 60 |
+
|
| 61 |
+
Mike Mattozzi
|
| 62 |
+
github: mmattozzi
|
| 63 |
+
|
| 64 |
+
Marcio Mazza
|
| 65 |
+
github: marciomazza
|
| 66 |
+
|
| 67 |
+
Sungsu Lim
|
| 68 |
+
github: proflim
|
| 69 |
+
|
| 70 |
+
Evandro Myller
|
| 71 |
+
github: emyller
|
| 72 |
+
|
| 73 |
+
Sérgio Agostinho
|
| 74 |
+
github: SergioRAgostinho
|
| 75 |
+
|
| 76 |
+
Antonio Larrosa
|
| 77 |
+
github: antlarr
|
| 78 |
+
|
| 79 |
+
Aaron Craig
|
| 80 |
+
github: craigthelinguist
|
| 81 |
+
|
| 82 |
+
Carlos del Castillo
|
| 83 |
+
github: greyalien502
|
| 84 |
+
|
| 85 |
+
Yudong Sun
|
| 86 |
+
github: sunjerry019
|
| 87 |
+
|
| 88 |
+
Jorge Perianez
|
| 89 |
+
github: JPery
|
| 90 |
+
|
| 91 |
+
Chendi Luo
|
| 92 |
+
github: Creonalia
|
| 93 |
+
|
| 94 |
+
Daniel Lefevre
|
| 95 |
+
github: dplefevre
|
| 96 |
+
|
| 97 |
+
Grzegorz Kotfis
|
| 98 |
+
github: gkotfis
|
evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright (c) 2011 James Robert, http://jiaaro.com
|
| 2 |
+
|
| 3 |
+
Permission is hereby granted, free of charge, to any person obtaining
|
| 4 |
+
a copy of this software and associated documentation files (the
|
| 5 |
+
"Software"), to deal in the Software without restriction, including
|
| 6 |
+
without limitation the rights to use, copy, modify, merge, publish,
|
| 7 |
+
distribute, sublicense, and/or sell copies of the Software, and to
|
| 8 |
+
permit persons to whom the Software is furnished to do so, subject to
|
| 9 |
+
the following conditions:
|
| 10 |
+
|
| 11 |
+
The above copyright notice and this permission notice shall be
|
| 12 |
+
included in all copies or substantial portions of the Software.
|
| 13 |
+
|
| 14 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 15 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 16 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
| 17 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
| 18 |
+
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
| 19 |
+
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
| 20 |
+
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/RECORD
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
pydub-0.25.1.dist-info/AUTHORS,sha256=AyY2PS9I2enOyBnUnxcpeAX-NnMNWLQT4yDtg8IIy78,1250
|
| 2 |
+
pydub-0.25.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 3 |
+
pydub-0.25.1.dist-info/LICENSE,sha256=roVlNiJMx6OJ6Wh3H8XyWYFL3Q2mNTnPcigq2672iXo,1074
|
| 4 |
+
pydub-0.25.1.dist-info/METADATA,sha256=f0M8_ZVtbiYoUI9ejXIeJ03Jo9A5Nbi-0V1bVqs5iYk,1406
|
| 5 |
+
pydub-0.25.1.dist-info/RECORD,,
|
| 6 |
+
pydub-0.25.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 7 |
+
pydub-0.25.1.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
|
| 8 |
+
pydub-0.25.1.dist-info/top_level.txt,sha256=PHhiDCQVZdycZxfKL2lQozruBT6ZhvyZAwqjRrw3t0w,6
|
| 9 |
+
pydub/__init__.py,sha256=w1Xv1awbaR3fMhTNE1-grnfswgARTNQrKpBzfZ--VBA,39
|
| 10 |
+
pydub/__pycache__/__init__.cpython-310.pyc,,
|
| 11 |
+
pydub/__pycache__/audio_segment.cpython-310.pyc,,
|
| 12 |
+
pydub/__pycache__/effects.cpython-310.pyc,,
|
| 13 |
+
pydub/__pycache__/exceptions.cpython-310.pyc,,
|
| 14 |
+
pydub/__pycache__/generators.cpython-310.pyc,,
|
| 15 |
+
pydub/__pycache__/logging_utils.cpython-310.pyc,,
|
| 16 |
+
pydub/__pycache__/playback.cpython-310.pyc,,
|
| 17 |
+
pydub/__pycache__/pyaudioop.cpython-310.pyc,,
|
| 18 |
+
pydub/__pycache__/scipy_effects.cpython-310.pyc,,
|
| 19 |
+
pydub/__pycache__/silence.cpython-310.pyc,,
|
| 20 |
+
pydub/__pycache__/utils.cpython-310.pyc,,
|
| 21 |
+
pydub/audio_segment.py,sha256=Nf5VkHGY1v9Jqb7NtEYfwRpLrfqusfBdPGOZsi7R5Cg,49185
|
| 22 |
+
pydub/effects.py,sha256=1HUMzhefrwG_E1rTnzvbl-P0-KNuwHklCnu8QCGS7jA,11507
|
| 23 |
+
pydub/exceptions.py,sha256=osgXoUujwpH8K6hr80iYpW30CMBDFwqyaRD-5d7ZpKs,455
|
| 24 |
+
pydub/generators.py,sha256=u6q7J8JLOY-uEZqMPUTzakxyua3XNQcPiDsuiK2-lLA,4045
|
| 25 |
+
pydub/logging_utils.py,sha256=WuSqfzn4zyT7PxXHGV-PXMDynufeM6sC6eSmVlGX2RU,374
|
| 26 |
+
pydub/playback.py,sha256=zFngVclUL_7oDipjzKC8b7jToPNV11DV28rGyH8pio0,1987
|
| 27 |
+
pydub/pyaudioop.py,sha256=Dp_cQgAyYjD4OV2ZHuxtKI2KABuPi9YYNRUF8giR80Q,13094
|
| 28 |
+
pydub/scipy_effects.py,sha256=U2p8AQuVreTp5MrtUAzRbWgOHUc6Dwq0TAG_RtEg-7g,6637
|
| 29 |
+
pydub/silence.py,sha256=F6MV0VlaO6mtuisjLGks_UR-GVmzO1v87_NKvzwRc30,6457
|
| 30 |
+
pydub/utils.py,sha256=W71pgJFbbNP3adH63yn0Eo0CLLVgzXG7WHYSXpWvdyc,12368
|
evalkit_tf437/lib/python3.10/site-packages/pydub-0.25.1.dist-info/REQUESTED
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE.APACHE2
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
Apache License
|
| 3 |
+
Version 2.0, January 2004
|
| 4 |
+
http://www.apache.org/licenses/
|
| 5 |
+
|
| 6 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 7 |
+
|
| 8 |
+
1. Definitions.
|
| 9 |
+
|
| 10 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 11 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 12 |
+
|
| 13 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 14 |
+
the copyright owner that is granting the License.
|
| 15 |
+
|
| 16 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 17 |
+
other entities that control, are controlled by, or are under common
|
| 18 |
+
control with that entity. For the purposes of this definition,
|
| 19 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 20 |
+
direction or management of such entity, whether by contract or
|
| 21 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 22 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 23 |
+
|
| 24 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 25 |
+
exercising permissions granted by this License.
|
| 26 |
+
|
| 27 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 28 |
+
including but not limited to software source code, documentation
|
| 29 |
+
source, and configuration files.
|
| 30 |
+
|
| 31 |
+
"Object" form shall mean any form resulting from mechanical
|
| 32 |
+
transformation or translation of a Source form, including but
|
| 33 |
+
not limited to compiled object code, generated documentation,
|
| 34 |
+
and conversions to other media types.
|
| 35 |
+
|
| 36 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 37 |
+
Object form, made available under the License, as indicated by a
|
| 38 |
+
copyright notice that is included in or attached to the work
|
| 39 |
+
(an example is provided in the Appendix below).
|
| 40 |
+
|
| 41 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 42 |
+
form, that is based on (or derived from) the Work and for which the
|
| 43 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 44 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 45 |
+
of this License, Derivative Works shall not include works that remain
|
| 46 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 47 |
+
the Work and Derivative Works thereof.
|
| 48 |
+
|
| 49 |
+
"Contribution" shall mean any work of authorship, including
|
| 50 |
+
the original version of the Work and any modifications or additions
|
| 51 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 52 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 53 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 54 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 55 |
+
means any form of electronic, verbal, or written communication sent
|
| 56 |
+
to the Licensor or its representatives, including but not limited to
|
| 57 |
+
communication on electronic mailing lists, source code control systems,
|
| 58 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 59 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 60 |
+
excluding communication that is conspicuously marked or otherwise
|
| 61 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 62 |
+
|
| 63 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 64 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 65 |
+
subsequently incorporated within the Work.
|
| 66 |
+
|
| 67 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 68 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 69 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 70 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 71 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 72 |
+
Work and such Derivative Works in Source or Object form.
|
| 73 |
+
|
| 74 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 75 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 76 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 77 |
+
(except as stated in this section) patent license to make, have made,
|
| 78 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 79 |
+
where such license applies only to those patent claims licensable
|
| 80 |
+
by such Contributor that are necessarily infringed by their
|
| 81 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 82 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 83 |
+
institute patent litigation against any entity (including a
|
| 84 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 85 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 86 |
+
or contributory patent infringement, then any patent licenses
|
| 87 |
+
granted to You under this License for that Work shall terminate
|
| 88 |
+
as of the date such litigation is filed.
|
| 89 |
+
|
| 90 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 91 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 92 |
+
modifications, and in Source or Object form, provided that You
|
| 93 |
+
meet the following conditions:
|
| 94 |
+
|
| 95 |
+
(a) You must give any other recipients of the Work or
|
| 96 |
+
Derivative Works a copy of this License; and
|
| 97 |
+
|
| 98 |
+
(b) You must cause any modified files to carry prominent notices
|
| 99 |
+
stating that You changed the files; and
|
| 100 |
+
|
| 101 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 102 |
+
that You distribute, all copyright, patent, trademark, and
|
| 103 |
+
attribution notices from the Source form of the Work,
|
| 104 |
+
excluding those notices that do not pertain to any part of
|
| 105 |
+
the Derivative Works; and
|
| 106 |
+
|
| 107 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 108 |
+
distribution, then any Derivative Works that You distribute must
|
| 109 |
+
include a readable copy of the attribution notices contained
|
| 110 |
+
within such NOTICE file, excluding those notices that do not
|
| 111 |
+
pertain to any part of the Derivative Works, in at least one
|
| 112 |
+
of the following places: within a NOTICE text file distributed
|
| 113 |
+
as part of the Derivative Works; within the Source form or
|
| 114 |
+
documentation, if provided along with the Derivative Works; or,
|
| 115 |
+
within a display generated by the Derivative Works, if and
|
| 116 |
+
wherever such third-party notices normally appear. The contents
|
| 117 |
+
of the NOTICE file are for informational purposes only and
|
| 118 |
+
do not modify the License. You may add Your own attribution
|
| 119 |
+
notices within Derivative Works that You distribute, alongside
|
| 120 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 121 |
+
that such additional attribution notices cannot be construed
|
| 122 |
+
as modifying the License.
|
| 123 |
+
|
| 124 |
+
You may add Your own copyright statement to Your modifications and
|
| 125 |
+
may provide additional or different license terms and conditions
|
| 126 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 127 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 128 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 129 |
+
the conditions stated in this License.
|
| 130 |
+
|
| 131 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 132 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 133 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 134 |
+
this License, without any additional terms or conditions.
|
| 135 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 136 |
+
the terms of any separate license agreement you may have executed
|
| 137 |
+
with Licensor regarding such Contributions.
|
| 138 |
+
|
| 139 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 140 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 141 |
+
except as required for reasonable and customary use in describing the
|
| 142 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 143 |
+
|
| 144 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 145 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 146 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 147 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 148 |
+
implied, including, without limitation, any warranties or conditions
|
| 149 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 150 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 151 |
+
appropriateness of using or redistributing the Work and assume any
|
| 152 |
+
risks associated with Your exercise of permissions under this License.
|
| 153 |
+
|
| 154 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 155 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 156 |
+
unless required by applicable law (such as deliberate and grossly
|
| 157 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 158 |
+
liable to You for damages, including any direct, indirect, special,
|
| 159 |
+
incidental, or consequential damages of any character arising as a
|
| 160 |
+
result of this License or out of the use or inability to use the
|
| 161 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 162 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 163 |
+
other commercial damages or losses), even if such Contributor
|
| 164 |
+
has been advised of the possibility of such damages.
|
| 165 |
+
|
| 166 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 167 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 168 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 169 |
+
or other liability obligations and/or rights consistent with this
|
| 170 |
+
License. However, in accepting such obligations, You may act only
|
| 171 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 172 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 173 |
+
defend, and hold each Contributor harmless for any liability
|
| 174 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 175 |
+
of your accepting any such warranty or additional liability.
|
| 176 |
+
|
| 177 |
+
END OF TERMS AND CONDITIONS
|
| 178 |
+
|
| 179 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 180 |
+
|
| 181 |
+
To apply the Apache License to your work, attach the following
|
| 182 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 183 |
+
replaced with your own identifying information. (Don't include
|
| 184 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 185 |
+
comment syntax for the file format. We also recommend that a
|
| 186 |
+
file or class name and description of purpose be included on the
|
| 187 |
+
same "printed page" as the copyright notice for easier
|
| 188 |
+
identification within third-party archives.
|
| 189 |
+
|
| 190 |
+
Copyright [yyyy] [name of copyright owner]
|
| 191 |
+
|
| 192 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 193 |
+
you may not use this file except in compliance with the License.
|
| 194 |
+
You may obtain a copy of the License at
|
| 195 |
+
|
| 196 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 197 |
+
|
| 198 |
+
Unless required by applicable law or agreed to in writing, software
|
| 199 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 200 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 201 |
+
See the License for the specific language governing permissions and
|
| 202 |
+
limitations under the License.
|
evalkit_tf437/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/RECORD
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
sniffio-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
sniffio-1.3.1.dist-info/LICENSE,sha256=ZSyHhIjRRWNh4Iw_hgf9e6WYkqFBA9Fczk_5PIW1zIs,185
|
| 3 |
+
sniffio-1.3.1.dist-info/LICENSE.APACHE2,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
|
| 4 |
+
sniffio-1.3.1.dist-info/LICENSE.MIT,sha256=Pm2uVV65J4f8gtHUg1Vnf0VMf2Wus40_nnK_mj2vA0s,1046
|
| 5 |
+
sniffio-1.3.1.dist-info/METADATA,sha256=CzGLVwmO3sz1heYKiJprantcQIbzqapi7_dqHTzuEtk,3875
|
| 6 |
+
sniffio-1.3.1.dist-info/RECORD,,
|
| 7 |
+
sniffio-1.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 8 |
+
sniffio-1.3.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
|
| 9 |
+
sniffio-1.3.1.dist-info/top_level.txt,sha256=v9UJXGs5CyddCVeAqXkQiWOrpp6Wtx6GeRrPt9-jjHg,8
|
| 10 |
+
sniffio/__init__.py,sha256=9WJEJlXu7yluP0YtI5SQ9M9OTQfbNHkadarK1vXGDPM,335
|
| 11 |
+
sniffio/__pycache__/__init__.cpython-310.pyc,,
|
| 12 |
+
sniffio/__pycache__/_impl.cpython-310.pyc,,
|
| 13 |
+
sniffio/__pycache__/_version.cpython-310.pyc,,
|
| 14 |
+
sniffio/_impl.py,sha256=UmUFMZpiuOrcjnuHhuYiYMxeCNWfqu9kBlaPf0xk6X8,2843
|
| 15 |
+
sniffio/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 16 |
+
sniffio/_tests/__pycache__/__init__.cpython-310.pyc,,
|
| 17 |
+
sniffio/_tests/__pycache__/test_sniffio.cpython-310.pyc,,
|
| 18 |
+
sniffio/_tests/test_sniffio.py,sha256=MMJZZJjQrUi95RANNM-a_55BZquA_gv4rHU1pevcTCM,2058
|
| 19 |
+
sniffio/_version.py,sha256=iVes5xwsHeRzQDexBaAhyx_taNt2ucfA7CWAo4QDt6Q,89
|
| 20 |
+
sniffio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
evalkit_tf437/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/REQUESTED
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/__init__.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from tomlkit.api import TOMLDocument
|
| 2 |
+
from tomlkit.api import aot
|
| 3 |
+
from tomlkit.api import array
|
| 4 |
+
from tomlkit.api import boolean
|
| 5 |
+
from tomlkit.api import comment
|
| 6 |
+
from tomlkit.api import date
|
| 7 |
+
from tomlkit.api import datetime
|
| 8 |
+
from tomlkit.api import document
|
| 9 |
+
from tomlkit.api import dump
|
| 10 |
+
from tomlkit.api import dumps
|
| 11 |
+
from tomlkit.api import float_
|
| 12 |
+
from tomlkit.api import inline_table
|
| 13 |
+
from tomlkit.api import integer
|
| 14 |
+
from tomlkit.api import item
|
| 15 |
+
from tomlkit.api import key
|
| 16 |
+
from tomlkit.api import key_value
|
| 17 |
+
from tomlkit.api import load
|
| 18 |
+
from tomlkit.api import loads
|
| 19 |
+
from tomlkit.api import nl
|
| 20 |
+
from tomlkit.api import parse
|
| 21 |
+
from tomlkit.api import register_encoder
|
| 22 |
+
from tomlkit.api import string
|
| 23 |
+
from tomlkit.api import table
|
| 24 |
+
from tomlkit.api import time
|
| 25 |
+
from tomlkit.api import unregister_encoder
|
| 26 |
+
from tomlkit.api import value
|
| 27 |
+
from tomlkit.api import ws
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
__version__ = "0.12.0"
|
| 31 |
+
__all__ = [
|
| 32 |
+
"aot",
|
| 33 |
+
"array",
|
| 34 |
+
"boolean",
|
| 35 |
+
"comment",
|
| 36 |
+
"date",
|
| 37 |
+
"datetime",
|
| 38 |
+
"document",
|
| 39 |
+
"dump",
|
| 40 |
+
"dumps",
|
| 41 |
+
"float_",
|
| 42 |
+
"inline_table",
|
| 43 |
+
"integer",
|
| 44 |
+
"item",
|
| 45 |
+
"key",
|
| 46 |
+
"key_value",
|
| 47 |
+
"load",
|
| 48 |
+
"loads",
|
| 49 |
+
"nl",
|
| 50 |
+
"parse",
|
| 51 |
+
"string",
|
| 52 |
+
"table",
|
| 53 |
+
"time",
|
| 54 |
+
"TOMLDocument",
|
| 55 |
+
"value",
|
| 56 |
+
"ws",
|
| 57 |
+
"register_encoder",
|
| 58 |
+
"unregister_encoder",
|
| 59 |
+
]
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.16 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (4.14 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/exceptions.cpython-310.pyc
ADDED
|
Binary file (8.1 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/items.cpython-310.pyc
ADDED
|
Binary file (56.7 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/source.cpython-310.pyc
ADDED
|
Binary file (6.19 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/__pycache__/toml_file.cpython-310.pyc
ADDED
|
Binary file (1.76 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/_compat.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import contextlib
|
| 4 |
+
import sys
|
| 5 |
+
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
PY38 = sys.version_info >= (3, 8)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def decode(string: Any, encodings: list[str] | None = None):
|
| 13 |
+
if not isinstance(string, bytes):
|
| 14 |
+
return string
|
| 15 |
+
|
| 16 |
+
encodings = encodings or ["utf-8", "latin1", "ascii"]
|
| 17 |
+
|
| 18 |
+
for encoding in encodings:
|
| 19 |
+
with contextlib.suppress(UnicodeEncodeError, UnicodeDecodeError):
|
| 20 |
+
return string.decode(encoding)
|
| 21 |
+
|
| 22 |
+
return string.decode(encodings[0], errors="ignore")
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/_utils.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
|
| 5 |
+
from collections.abc import Mapping
|
| 6 |
+
from datetime import date
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from datetime import time
|
| 9 |
+
from datetime import timedelta
|
| 10 |
+
from datetime import timezone
|
| 11 |
+
from typing import Collection
|
| 12 |
+
|
| 13 |
+
from tomlkit._compat import decode
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
RFC_3339_LOOSE = re.compile(
|
| 17 |
+
"^"
|
| 18 |
+
r"(([0-9]+)-(\d{2})-(\d{2}))?" # Date
|
| 19 |
+
"("
|
| 20 |
+
"([Tt ])?" # Separator
|
| 21 |
+
r"(\d{2}):(\d{2}):(\d{2})(\.([0-9]+))?" # Time
|
| 22 |
+
r"(([Zz])|([\+|\-]([01][0-9]|2[0-3]):([0-5][0-9])))?" # Timezone
|
| 23 |
+
")?"
|
| 24 |
+
"$"
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
RFC_3339_DATETIME = re.compile(
|
| 28 |
+
"^"
|
| 29 |
+
"([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])" # Date
|
| 30 |
+
"[Tt ]" # Separator
|
| 31 |
+
r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.([0-9]+))?" # Time
|
| 32 |
+
r"(([Zz])|([\+|\-]([01][0-9]|2[0-3]):([0-5][0-9])))?" # Timezone
|
| 33 |
+
"$"
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
RFC_3339_DATE = re.compile("^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$")
|
| 37 |
+
|
| 38 |
+
RFC_3339_TIME = re.compile(
|
| 39 |
+
r"^([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.([0-9]+))?$"
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
_utc = timezone(timedelta(), "UTC")
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def parse_rfc3339(string: str) -> datetime | date | time:
|
| 46 |
+
m = RFC_3339_DATETIME.match(string)
|
| 47 |
+
if m:
|
| 48 |
+
year = int(m.group(1))
|
| 49 |
+
month = int(m.group(2))
|
| 50 |
+
day = int(m.group(3))
|
| 51 |
+
hour = int(m.group(4))
|
| 52 |
+
minute = int(m.group(5))
|
| 53 |
+
second = int(m.group(6))
|
| 54 |
+
microsecond = 0
|
| 55 |
+
|
| 56 |
+
if m.group(7):
|
| 57 |
+
microsecond = int((f"{m.group(8):<06s}")[:6])
|
| 58 |
+
|
| 59 |
+
if m.group(9):
|
| 60 |
+
# Timezone
|
| 61 |
+
tz = m.group(9)
|
| 62 |
+
if tz.upper() == "Z":
|
| 63 |
+
tzinfo = _utc
|
| 64 |
+
else:
|
| 65 |
+
sign = m.group(11)[0]
|
| 66 |
+
hour_offset, minute_offset = int(m.group(12)), int(m.group(13))
|
| 67 |
+
offset = timedelta(seconds=hour_offset * 3600 + minute_offset * 60)
|
| 68 |
+
if sign == "-":
|
| 69 |
+
offset = -offset
|
| 70 |
+
|
| 71 |
+
tzinfo = timezone(offset, f"{sign}{m.group(12)}:{m.group(13)}")
|
| 72 |
+
|
| 73 |
+
return datetime(
|
| 74 |
+
year, month, day, hour, minute, second, microsecond, tzinfo=tzinfo
|
| 75 |
+
)
|
| 76 |
+
else:
|
| 77 |
+
return datetime(year, month, day, hour, minute, second, microsecond)
|
| 78 |
+
|
| 79 |
+
m = RFC_3339_DATE.match(string)
|
| 80 |
+
if m:
|
| 81 |
+
year = int(m.group(1))
|
| 82 |
+
month = int(m.group(2))
|
| 83 |
+
day = int(m.group(3))
|
| 84 |
+
|
| 85 |
+
return date(year, month, day)
|
| 86 |
+
|
| 87 |
+
m = RFC_3339_TIME.match(string)
|
| 88 |
+
if m:
|
| 89 |
+
hour = int(m.group(1))
|
| 90 |
+
minute = int(m.group(2))
|
| 91 |
+
second = int(m.group(3))
|
| 92 |
+
microsecond = 0
|
| 93 |
+
|
| 94 |
+
if m.group(4):
|
| 95 |
+
microsecond = int((f"{m.group(5):<06s}")[:6])
|
| 96 |
+
|
| 97 |
+
return time(hour, minute, second, microsecond)
|
| 98 |
+
|
| 99 |
+
raise ValueError("Invalid RFC 339 string")
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
# https://toml.io/en/v1.0.0#string
|
| 103 |
+
CONTROL_CHARS = frozenset(chr(c) for c in range(0x20)) | {chr(0x7F)}
|
| 104 |
+
_escaped = {
|
| 105 |
+
"b": "\b",
|
| 106 |
+
"t": "\t",
|
| 107 |
+
"n": "\n",
|
| 108 |
+
"f": "\f",
|
| 109 |
+
"r": "\r",
|
| 110 |
+
'"': '"',
|
| 111 |
+
"\\": "\\",
|
| 112 |
+
}
|
| 113 |
+
_compact_escapes = {
|
| 114 |
+
**{v: f"\\{k}" for k, v in _escaped.items()},
|
| 115 |
+
'"""': '""\\"',
|
| 116 |
+
}
|
| 117 |
+
_basic_escapes = CONTROL_CHARS | {'"', "\\"}
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def _unicode_escape(seq: str) -> str:
|
| 121 |
+
return "".join(f"\\u{ord(c):04x}" for c in seq)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def escape_string(s: str, escape_sequences: Collection[str] = _basic_escapes) -> str:
|
| 125 |
+
s = decode(s)
|
| 126 |
+
|
| 127 |
+
res = []
|
| 128 |
+
start = 0
|
| 129 |
+
|
| 130 |
+
def flush(inc=1):
|
| 131 |
+
if start != i:
|
| 132 |
+
res.append(s[start:i])
|
| 133 |
+
|
| 134 |
+
return i + inc
|
| 135 |
+
|
| 136 |
+
found_sequences = {seq for seq in escape_sequences if seq in s}
|
| 137 |
+
|
| 138 |
+
i = 0
|
| 139 |
+
while i < len(s):
|
| 140 |
+
for seq in found_sequences:
|
| 141 |
+
seq_len = len(seq)
|
| 142 |
+
if s[i:].startswith(seq):
|
| 143 |
+
start = flush(seq_len)
|
| 144 |
+
res.append(_compact_escapes.get(seq) or _unicode_escape(seq))
|
| 145 |
+
i += seq_len - 1 # fast-forward escape sequence
|
| 146 |
+
i += 1
|
| 147 |
+
|
| 148 |
+
flush()
|
| 149 |
+
|
| 150 |
+
return "".join(res)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def merge_dicts(d1: dict, d2: dict) -> dict:
|
| 154 |
+
for k, v in d2.items():
|
| 155 |
+
if k in d1 and isinstance(d1[k], dict) and isinstance(v, Mapping):
|
| 156 |
+
merge_dicts(d1[k], v)
|
| 157 |
+
else:
|
| 158 |
+
d1[k] = d2[k]
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/api.py
ADDED
|
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import contextlib
|
| 4 |
+
import datetime as _datetime
|
| 5 |
+
|
| 6 |
+
from collections.abc import Mapping
|
| 7 |
+
from typing import IO
|
| 8 |
+
from typing import Iterable
|
| 9 |
+
from typing import TypeVar
|
| 10 |
+
|
| 11 |
+
from tomlkit._utils import parse_rfc3339
|
| 12 |
+
from tomlkit.container import Container
|
| 13 |
+
from tomlkit.exceptions import UnexpectedCharError
|
| 14 |
+
from tomlkit.items import CUSTOM_ENCODERS
|
| 15 |
+
from tomlkit.items import AoT
|
| 16 |
+
from tomlkit.items import Array
|
| 17 |
+
from tomlkit.items import Bool
|
| 18 |
+
from tomlkit.items import Comment
|
| 19 |
+
from tomlkit.items import Date
|
| 20 |
+
from tomlkit.items import DateTime
|
| 21 |
+
from tomlkit.items import DottedKey
|
| 22 |
+
from tomlkit.items import Encoder
|
| 23 |
+
from tomlkit.items import Float
|
| 24 |
+
from tomlkit.items import InlineTable
|
| 25 |
+
from tomlkit.items import Integer
|
| 26 |
+
from tomlkit.items import Item as _Item
|
| 27 |
+
from tomlkit.items import Key
|
| 28 |
+
from tomlkit.items import SingleKey
|
| 29 |
+
from tomlkit.items import String
|
| 30 |
+
from tomlkit.items import StringType as _StringType
|
| 31 |
+
from tomlkit.items import Table
|
| 32 |
+
from tomlkit.items import Time
|
| 33 |
+
from tomlkit.items import Trivia
|
| 34 |
+
from tomlkit.items import Whitespace
|
| 35 |
+
from tomlkit.items import item
|
| 36 |
+
from tomlkit.parser import Parser
|
| 37 |
+
from tomlkit.toml_document import TOMLDocument
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def loads(string: str | bytes) -> TOMLDocument:
|
| 41 |
+
"""
|
| 42 |
+
Parses a string into a TOMLDocument.
|
| 43 |
+
|
| 44 |
+
Alias for parse().
|
| 45 |
+
"""
|
| 46 |
+
return parse(string)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def dumps(data: Mapping, sort_keys: bool = False) -> str:
|
| 50 |
+
"""
|
| 51 |
+
Dumps a TOMLDocument into a string.
|
| 52 |
+
"""
|
| 53 |
+
if not isinstance(data, Container) and isinstance(data, Mapping):
|
| 54 |
+
data = item(dict(data), _sort_keys=sort_keys)
|
| 55 |
+
|
| 56 |
+
try:
|
| 57 |
+
# data should be a `Container` (and therefore implement `as_string`)
|
| 58 |
+
# for all type safe invocations of this function
|
| 59 |
+
return data.as_string() # type: ignore[attr-defined]
|
| 60 |
+
except AttributeError as ex:
|
| 61 |
+
msg = f"Expecting Mapping or TOML Container, {type(data)} given"
|
| 62 |
+
raise TypeError(msg) from ex
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def load(fp: IO[str] | IO[bytes]) -> TOMLDocument:
|
| 66 |
+
"""
|
| 67 |
+
Load toml document from a file-like object.
|
| 68 |
+
"""
|
| 69 |
+
return parse(fp.read())
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def dump(data: Mapping, fp: IO[str], *, sort_keys: bool = False) -> None:
|
| 73 |
+
"""
|
| 74 |
+
Dump a TOMLDocument into a writable file stream.
|
| 75 |
+
|
| 76 |
+
:param data: a dict-like object to dump
|
| 77 |
+
:param sort_keys: if true, sort the keys in alphabetic order
|
| 78 |
+
"""
|
| 79 |
+
fp.write(dumps(data, sort_keys=sort_keys))
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def parse(string: str | bytes) -> TOMLDocument:
|
| 83 |
+
"""
|
| 84 |
+
Parses a string or bytes into a TOMLDocument.
|
| 85 |
+
"""
|
| 86 |
+
return Parser(string).parse()
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def document() -> TOMLDocument:
|
| 90 |
+
"""
|
| 91 |
+
Returns a new TOMLDocument instance.
|
| 92 |
+
"""
|
| 93 |
+
return TOMLDocument()
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# Items
|
| 97 |
+
def integer(raw: str | int) -> Integer:
|
| 98 |
+
"""Create an integer item from a number or string."""
|
| 99 |
+
return item(int(raw))
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def float_(raw: str | float) -> Float:
|
| 103 |
+
"""Create an float item from a number or string."""
|
| 104 |
+
return item(float(raw))
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def boolean(raw: str) -> Bool:
|
| 108 |
+
"""Turn `true` or `false` into a boolean item."""
|
| 109 |
+
return item(raw == "true")
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def string(
|
| 113 |
+
raw: str,
|
| 114 |
+
*,
|
| 115 |
+
literal: bool = False,
|
| 116 |
+
multiline: bool = False,
|
| 117 |
+
escape: bool = True,
|
| 118 |
+
) -> String:
|
| 119 |
+
"""Create a string item.
|
| 120 |
+
|
| 121 |
+
By default, this function will create *single line basic* strings, but
|
| 122 |
+
boolean flags (e.g. ``literal=True`` and/or ``multiline=True``)
|
| 123 |
+
can be used for personalization.
|
| 124 |
+
|
| 125 |
+
For more information, please check the spec: `<https://toml.io/en/v1.0.0#string>`__.
|
| 126 |
+
|
| 127 |
+
Common escaping rules will be applied for basic strings.
|
| 128 |
+
This can be controlled by explicitly setting ``escape=False``.
|
| 129 |
+
Please note that, if you disable escaping, you will have to make sure that
|
| 130 |
+
the given strings don't contain any forbidden character or sequence.
|
| 131 |
+
"""
|
| 132 |
+
type_ = _StringType.select(literal, multiline)
|
| 133 |
+
return String.from_raw(raw, type_, escape)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def date(raw: str) -> Date:
|
| 137 |
+
"""Create a TOML date."""
|
| 138 |
+
value = parse_rfc3339(raw)
|
| 139 |
+
if not isinstance(value, _datetime.date):
|
| 140 |
+
raise ValueError("date() only accepts date strings.")
|
| 141 |
+
|
| 142 |
+
return item(value)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def time(raw: str) -> Time:
|
| 146 |
+
"""Create a TOML time."""
|
| 147 |
+
value = parse_rfc3339(raw)
|
| 148 |
+
if not isinstance(value, _datetime.time):
|
| 149 |
+
raise ValueError("time() only accepts time strings.")
|
| 150 |
+
|
| 151 |
+
return item(value)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def datetime(raw: str) -> DateTime:
|
| 155 |
+
"""Create a TOML datetime."""
|
| 156 |
+
value = parse_rfc3339(raw)
|
| 157 |
+
if not isinstance(value, _datetime.datetime):
|
| 158 |
+
raise ValueError("datetime() only accepts datetime strings.")
|
| 159 |
+
|
| 160 |
+
return item(value)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def array(raw: str = None) -> Array:
|
| 164 |
+
"""Create an array item for its string representation.
|
| 165 |
+
|
| 166 |
+
:Example:
|
| 167 |
+
|
| 168 |
+
>>> array("[1, 2, 3]") # Create from a string
|
| 169 |
+
[1, 2, 3]
|
| 170 |
+
>>> a = array()
|
| 171 |
+
>>> a.extend([1, 2, 3]) # Create from a list
|
| 172 |
+
>>> a
|
| 173 |
+
[1, 2, 3]
|
| 174 |
+
"""
|
| 175 |
+
if raw is None:
|
| 176 |
+
raw = "[]"
|
| 177 |
+
|
| 178 |
+
return value(raw)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def table(is_super_table: bool | None = None) -> Table:
|
| 182 |
+
"""Create an empty table.
|
| 183 |
+
|
| 184 |
+
:param is_super_table: if true, the table is a super table
|
| 185 |
+
|
| 186 |
+
:Example:
|
| 187 |
+
|
| 188 |
+
>>> doc = document()
|
| 189 |
+
>>> foo = table(True)
|
| 190 |
+
>>> bar = table()
|
| 191 |
+
>>> bar.update({'x': 1})
|
| 192 |
+
>>> foo.append('bar', bar)
|
| 193 |
+
>>> doc.append('foo', foo)
|
| 194 |
+
>>> print(doc.as_string())
|
| 195 |
+
[foo.bar]
|
| 196 |
+
x = 1
|
| 197 |
+
"""
|
| 198 |
+
return Table(Container(), Trivia(), False, is_super_table)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def inline_table() -> InlineTable:
|
| 202 |
+
"""Create an inline table.
|
| 203 |
+
|
| 204 |
+
:Example:
|
| 205 |
+
|
| 206 |
+
>>> table = inline_table()
|
| 207 |
+
>>> table.update({'x': 1, 'y': 2})
|
| 208 |
+
>>> print(table.as_string())
|
| 209 |
+
{x = 1, y = 2}
|
| 210 |
+
"""
|
| 211 |
+
return InlineTable(Container(), Trivia(), new=True)
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def aot() -> AoT:
|
| 215 |
+
"""Create an array of table.
|
| 216 |
+
|
| 217 |
+
:Example:
|
| 218 |
+
|
| 219 |
+
>>> doc = document()
|
| 220 |
+
>>> aot = aot()
|
| 221 |
+
>>> aot.append(item({'x': 1}))
|
| 222 |
+
>>> doc.append('foo', aot)
|
| 223 |
+
>>> print(doc.as_string())
|
| 224 |
+
[[foo]]
|
| 225 |
+
x = 1
|
| 226 |
+
"""
|
| 227 |
+
return AoT([])
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def key(k: str | Iterable[str]) -> Key:
|
| 231 |
+
"""Create a key from a string. When a list of string is given,
|
| 232 |
+
it will create a dotted key.
|
| 233 |
+
|
| 234 |
+
:Example:
|
| 235 |
+
|
| 236 |
+
>>> doc = document()
|
| 237 |
+
>>> doc.append(key('foo'), 1)
|
| 238 |
+
>>> doc.append(key(['bar', 'baz']), 2)
|
| 239 |
+
>>> print(doc.as_string())
|
| 240 |
+
foo = 1
|
| 241 |
+
bar.baz = 2
|
| 242 |
+
"""
|
| 243 |
+
if isinstance(k, str):
|
| 244 |
+
return SingleKey(k)
|
| 245 |
+
return DottedKey([key(_k) for _k in k])
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def value(raw: str) -> _Item:
|
| 249 |
+
"""Parse a simple value from a string.
|
| 250 |
+
|
| 251 |
+
:Example:
|
| 252 |
+
|
| 253 |
+
>>> value("1")
|
| 254 |
+
1
|
| 255 |
+
>>> value("true")
|
| 256 |
+
True
|
| 257 |
+
>>> value("[1, 2, 3]")
|
| 258 |
+
[1, 2, 3]
|
| 259 |
+
"""
|
| 260 |
+
parser = Parser(raw)
|
| 261 |
+
v = parser._parse_value()
|
| 262 |
+
if not parser.end():
|
| 263 |
+
raise parser.parse_error(UnexpectedCharError, char=parser._current)
|
| 264 |
+
return v
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def key_value(src: str) -> tuple[Key, _Item]:
|
| 268 |
+
"""Parse a key-value pair from a string.
|
| 269 |
+
|
| 270 |
+
:Example:
|
| 271 |
+
|
| 272 |
+
>>> key_value("foo = 1")
|
| 273 |
+
(Key('foo'), 1)
|
| 274 |
+
"""
|
| 275 |
+
return Parser(src)._parse_key_value()
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def ws(src: str) -> Whitespace:
    """Create a whitespace item from a string (marked as fixed)."""
    return Whitespace(src, fixed=True)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def nl() -> Whitespace:
    """Create a newline item (a fixed LF whitespace)."""
    return ws("\n")
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def comment(string: str) -> Comment:
    """Create a comment item; a ``# `` prefix is prepended to *string*."""
    trivia = Trivia(comment_ws=" ", comment=f"# {string}")
    return Comment(trivia)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
# Type variable bound to Encoder, so register_encoder returns its argument's type.
E = TypeVar("E", bound=Encoder)
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def register_encoder(encoder: E) -> E:
    """Add a custom encoder, which should be a function that will be called
    if the value can't otherwise be converted. It should take a single value
    and return a TOMLKit item or raise a ``TypeError``.

    Returns the encoder unchanged, so it can be used as a decorator.
    """
    CUSTOM_ENCODERS.append(encoder)
    return encoder
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def unregister_encoder(encoder: Encoder) -> None:
    """Unregister a custom encoder; a no-op if it was never registered."""
    try:
        CUSTOM_ENCODERS.remove(encoder)
    except ValueError:
        pass
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/container.py
ADDED
|
@@ -0,0 +1,866 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import copy
|
| 4 |
+
|
| 5 |
+
from typing import Any
|
| 6 |
+
from typing import Iterator
|
| 7 |
+
|
| 8 |
+
from tomlkit._compat import decode
|
| 9 |
+
from tomlkit._types import _CustomDict
|
| 10 |
+
from tomlkit._utils import merge_dicts
|
| 11 |
+
from tomlkit.exceptions import KeyAlreadyPresent
|
| 12 |
+
from tomlkit.exceptions import NonExistentKey
|
| 13 |
+
from tomlkit.exceptions import TOMLKitError
|
| 14 |
+
from tomlkit.items import AoT
|
| 15 |
+
from tomlkit.items import Comment
|
| 16 |
+
from tomlkit.items import Item
|
| 17 |
+
from tomlkit.items import Key
|
| 18 |
+
from tomlkit.items import Null
|
| 19 |
+
from tomlkit.items import SingleKey
|
| 20 |
+
from tomlkit.items import Table
|
| 21 |
+
from tomlkit.items import Trivia
|
| 22 |
+
from tomlkit.items import Whitespace
|
| 23 |
+
from tomlkit.items import item as _item
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Sentinel distinguishing "no argument supplied" from an explicit ``None``.
_NOT_SET = object()
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class Container(_CustomDict):
|
| 30 |
+
"""
|
| 31 |
+
A container for items within a TOMLDocument.
|
| 32 |
+
|
| 33 |
+
This class implements the `dict` interface with copy/deepcopy protocol.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
    def __init__(self, parsed: bool = False) -> None:
        """Initialize an empty container.

        :param parsed: whether this container is being filled by the parser;
            when True, cosmetic whitespace insertion on append is suppressed.
        """
        # Maps a key to its index in ``_body``; a tuple of indices marks an
        # out-of-order table split across several body entries.
        self._map: dict[SingleKey, int | tuple[int, ...]] = {}
        # Ordered body of (key, item) pairs; key is None for comments/whitespace.
        self._body: list[tuple[Key | None, Item]] = []
        self._parsed = parsed
        # Keys of tables appended so far, in order of appearance.
        self._table_keys: list[Key | None] = []

    @property
    def body(self) -> list[tuple[Key | None, Item]]:
        """The ordered (key, item) pairs making up this container."""
        return self._body
|
| 45 |
+
|
| 46 |
+
def unwrap(self) -> dict[str, Any]:
|
| 47 |
+
unwrapped = {}
|
| 48 |
+
for k, v in self.items():
|
| 49 |
+
if k is None:
|
| 50 |
+
continue
|
| 51 |
+
|
| 52 |
+
if isinstance(k, Key):
|
| 53 |
+
k = k.key
|
| 54 |
+
|
| 55 |
+
if hasattr(v, "unwrap"):
|
| 56 |
+
v = v.unwrap()
|
| 57 |
+
|
| 58 |
+
if k in unwrapped:
|
| 59 |
+
merge_dicts(unwrapped[k], v)
|
| 60 |
+
else:
|
| 61 |
+
unwrapped[k] = v
|
| 62 |
+
|
| 63 |
+
return unwrapped
|
| 64 |
+
|
| 65 |
+
    @property
    def value(self) -> dict[str, Any]:
        """Plain-dict view of the container (one level of item unwrapping)."""
        d = {}
        for k, v in self._body:
            if k is None:
                continue

            k = k.key
            v = v.value

            if isinstance(v, Container):
                v = v.value

            if k in d:
                # Duplicate keys come from out-of-order tables: merge them.
                merge_dicts(d[k], v)
            else:
                d[k] = v

        return d

    def parsing(self, parsing: bool) -> None:
        """Toggle parsing mode recursively on this container and nested tables."""
        self._parsed = parsing

        for _, v in self._body:
            if isinstance(v, Table):
                v.value.parsing(parsing)
            elif isinstance(v, AoT):
                for t in v.body:
                    t.value.parsing(parsing)
|
| 94 |
+
|
| 95 |
+
    def add(self, key: Key | Item | str, item: Item | None = None) -> Container:
        """
        Adds an item to the current Container.

        When called with a single argument, it must be a comment, whitespace
        or newline item, which is appended without a key.

        :Example:

        >>> # add a key-value pair
        >>> doc.add('key', 'value')
        >>> # add a comment or whitespace or newline
        >>> doc.add(comment('# comment'))

        :raises ValueError: if a single non-comment/whitespace argument is given.
        """
        if item is None:
            if not isinstance(key, (Comment, Whitespace)):
                raise ValueError(
                    "Non comment/whitespace items must have an associated key"
                )

            # The lone argument is actually the item; it gets no key.
            key, item = None, key

        return self.append(key, item)

    def _handle_dotted_key(self, key: Key, value: Item) -> None:
        """Expand a dotted key ``a.b.c`` into nested super-tables ending in *value*.

        :raises TOMLKitError: if *value* is a table/AoT (not allowed under a
            dotted key).
        """
        if isinstance(value, (Table, AoT)):
            raise TOMLKitError("Can't add a table to a dotted key")
        name, *mid, last = key
        name._dotted = True
        table = current = Table(Container(True), Trivia(), False, is_super_table=True)
        for _name in mid:
            # Each intermediate part becomes a nested invisible super-table.
            _name._dotted = True
            new_table = Table(Container(True), Trivia(), False, is_super_table=True)
            current.append(_name, new_table)
            current = new_table

        last.sep = key.sep
        current.append(last, value)

        self.append(name, table)
        return

    def _get_last_index_before_table(self) -> int:
        """Return the insertion index just after the last non-table entry."""
        last_index = -1
        for i, (k, v) in enumerate(self._body):
            if isinstance(v, Null):
                continue  # Null elements are inserted after deletion

            if isinstance(v, Whitespace) and not v.is_fixed():
                continue

            if isinstance(v, (Table, AoT)) and not k.is_dotted():
                break
            last_index = i
        return last_index + 1
|
| 147 |
+
|
| 148 |
+
    def append(self, key: Key | str | None, item: Item) -> Container:
        """Similar to :meth:`add` but both key and value must be given.

        Handles dotted keys, merging of tables/AoTs appended under an existing
        key, and cosmetic newline insertion when not in parsing mode.

        :raises KeyAlreadyPresent: if *key* exists and the new item cannot be
            merged with the existing one.
        :raises TOMLKitError: on redefinition of an existing (dotted) table.
        """
        if not isinstance(key, Key) and key is not None:
            key = SingleKey(key)

        if not isinstance(item, Item):
            item = _item(item)

        if key is not None and key.is_multi():
            # Dotted keys are expanded into nested super-tables.
            self._handle_dotted_key(key, item)
            return self

        if isinstance(item, (AoT, Table)) and item.name is None:
            item.name = key.key

        prev = self._previous_item()
        prev_ws = isinstance(prev, Whitespace) or ends_with_whitespace(prev)
        if isinstance(item, Table):
            if not self._parsed:
                item.invalidate_display_name()
            # Cosmetic blank line before a new table, unless parsing or one exists.
            if (
                self._body
                and not (self._parsed or item.trivia.indent or prev_ws)
                and not key.is_dotted()
            ):
                item.trivia.indent = "\n"

        if isinstance(item, AoT) and self._body and not self._parsed:
            item.invalidate_display_name()
            if item and not ("\n" in item[0].trivia.indent or prev_ws):
                item[0].trivia.indent = "\n" + item[0].trivia.indent

        if key is not None and key in self:
            # The key already exists: try to merge instead of raising.
            current_idx = self._map[key]
            if isinstance(current_idx, tuple):
                current_body_element = self._body[current_idx[-1]]
            else:
                current_body_element = self._body[current_idx]

            current = current_body_element[1]

            if isinstance(item, Table):
                if not isinstance(current, (Table, AoT)):
                    raise KeyAlreadyPresent(key)

                if item.is_aot_element():
                    # New AoT element found later on
                    # Adding it to the current AoT
                    if not isinstance(current, AoT):
                        current = AoT([current, item], parsed=self._parsed)

                        self._replace(key, key, current)
                    else:
                        current.append(item)

                    return self
                elif current.is_aot():
                    if not item.is_aot_element():
                        # Tried to define a table after an AoT with the same name.
                        raise KeyAlreadyPresent(key)

                    current.append(item)

                    return self
                elif current.is_super_table():
                    if item.is_super_table():
                        # We need to merge both super tables
                        if (
                            self._table_keys[-1] != current_body_element[0]
                            or key.is_dotted()
                            or current_body_element[0].is_dotted()
                        ):
                            if key.is_dotted() and not self._parsed:
                                idx = self._get_last_index_before_table()
                            else:
                                idx = len(self._body)

                            if idx < len(self._body):
                                self._insert_at(idx, key, item)
                            else:
                                self._raw_append(key, item)

                            # Building a temporary proxy to check for errors
                            OutOfOrderTableProxy(self, self._map[key])

                            return self

                        # Create a new element to replace the old one
                        current = copy.deepcopy(current)
                        for k, v in item.value.body:
                            current.append(k, v)
                        self._body[
                            current_idx[-1]
                            if isinstance(current_idx, tuple)
                            else current_idx
                        ] = (current_body_element[0], current)

                        return self
                    elif current_body_element[0].is_dotted():
                        raise TOMLKitError("Redefinition of an existing table")
                elif not item.is_super_table():
                    raise KeyAlreadyPresent(key)
            elif isinstance(item, AoT):
                if not isinstance(current, AoT):
                    # Tried to define an AoT after a table with the same name.
                    raise KeyAlreadyPresent(key)

                for table in item.body:
                    current.append(table)

                return self
            else:
                raise KeyAlreadyPresent(key)

        is_table = isinstance(item, (Table, AoT))
        if (
            key is not None
            and self._body
            and not self._parsed
            and (not is_table or key.is_dotted())
        ):
            # If there is already at least one table in the current container
            # and the given item is not a table, we need to find the last
            # item that is not a table and insert after it
            # If no such item exists, insert at the top of the table
            last_index = self._get_last_index_before_table()

            if last_index < len(self._body):
                return self._insert_at(last_index, key, item)
            else:
                previous_item = self._body[-1][1]
                if not (
                    isinstance(previous_item, Whitespace)
                    or ends_with_whitespace(previous_item)
                    or "\n" in previous_item.trivia.trail
                ):
                    previous_item.trivia.trail += "\n"

        self._raw_append(key, item)
        return self
|
| 288 |
+
|
| 289 |
+
def _raw_append(self, key: Key, item: Item) -> None:
|
| 290 |
+
if key in self._map:
|
| 291 |
+
current_idx = self._map[key]
|
| 292 |
+
if not isinstance(current_idx, tuple):
|
| 293 |
+
current_idx = (current_idx,)
|
| 294 |
+
|
| 295 |
+
current = self._body[current_idx[-1]][1]
|
| 296 |
+
if key is not None and not isinstance(current, Table):
|
| 297 |
+
raise KeyAlreadyPresent(key)
|
| 298 |
+
|
| 299 |
+
self._map[key] = current_idx + (len(self._body),)
|
| 300 |
+
else:
|
| 301 |
+
self._map[key] = len(self._body)
|
| 302 |
+
|
| 303 |
+
self._body.append((key, item))
|
| 304 |
+
if item.is_table():
|
| 305 |
+
self._table_keys.append(key)
|
| 306 |
+
|
| 307 |
+
if key is not None:
|
| 308 |
+
dict.__setitem__(self, key.key, item.value)
|
| 309 |
+
|
| 310 |
+
return self
|
| 311 |
+
|
| 312 |
+
    def _remove_at(self, idx: int) -> None:
        """Remove the single body entry at *idx*, replacing it with a Null.

        :raises NonExistentKey: if the entry's key is not in the index map.
        """
        key = self._body[idx][0]
        index = self._map.get(key)
        if index is None:
            raise NonExistentKey(key)
        # Replace with Null instead of deleting so other indices stay valid.
        self._body[idx] = (None, Null())

        if isinstance(index, tuple):
            # Out-of-order table: drop just this occurrence from the tuple.
            index = list(index)
            index.remove(idx)
            if len(index) == 1:
                index = index.pop()
            else:
                index = tuple(index)
            self._map[key] = index
        else:
            dict.__delitem__(self, key.key)
            self._map.pop(key)

    def remove(self, key: Key | str) -> Container:
        """Remove a key from the container.

        :raises NonExistentKey: if *key* is not present.
        """
        if not isinstance(key, Key):
            key = SingleKey(key)

        idx = self._map.pop(key, None)
        if idx is None:
            raise NonExistentKey(key)

        if isinstance(idx, tuple):
            for i in idx:
                self._body[i] = (None, Null())
        else:
            self._body[idx] = (None, Null())

        dict.__delitem__(self, key.key)

        return self
|
| 349 |
+
|
| 350 |
+
    def _insert_after(
        self, key: Key | str, other_key: Key | str, item: Any
    ) -> Container:
        """Insert ``other_key = item`` immediately after the entry for *key*.

        :raises ValueError: if *key* is None.
        :raises NonExistentKey: if *key* is not present.
        """
        if key is None:
            raise ValueError("Key cannot be null in insert_after()")

        if key not in self:
            raise NonExistentKey(key)

        if not isinstance(key, Key):
            key = SingleKey(key)

        if not isinstance(other_key, Key):
            other_key = SingleKey(other_key)

        item = _item(item)

        idx = self._map[key]
        # Insert after the max index if there are many.
        if isinstance(idx, tuple):
            idx = max(idx)
        current_item = self._body[idx][1]
        if "\n" not in current_item.trivia.trail:
            current_item.trivia.trail += "\n"

        # Increment indices after the current index
        for k, v in self._map.items():
            if isinstance(v, tuple):
                new_indices = []
                for v_ in v:
                    if v_ > idx:
                        v_ = v_ + 1

                    new_indices.append(v_)

                self._map[k] = tuple(new_indices)
            elif v > idx:
                self._map[k] = v + 1

        self._map[other_key] = idx + 1
        self._body.insert(idx + 1, (other_key, item))

        if key is not None:
            dict.__setitem__(self, other_key.key, item.value)

        return self

    def _insert_at(self, idx: int, key: Key | str, item: Any) -> Container:
        """Insert ``key = item`` at body position *idx*, shifting later indices.

        :raises ValueError: if *idx* is past the end of the body.
        """
        if idx > len(self._body) - 1:
            raise ValueError(f"Unable to insert at position {idx}")

        if not isinstance(key, Key):
            key = SingleKey(key)

        item = _item(item)

        if idx > 0:
            # Make sure the previous entry ends with a newline for valid TOML.
            previous_item = self._body[idx - 1][1]
            if not (
                isinstance(previous_item, Whitespace)
                or ends_with_whitespace(previous_item)
                or isinstance(item, (AoT, Table))
                or "\n" in previous_item.trivia.trail
            ):
                previous_item.trivia.trail += "\n"

        # Increment indices after the current index
        for k, v in self._map.items():
            if isinstance(v, tuple):
                new_indices = []
                for v_ in v:
                    if v_ >= idx:
                        v_ = v_ + 1

                    new_indices.append(v_)

                self._map[k] = tuple(new_indices)
            elif v >= idx:
                self._map[k] = v + 1

        if key in self._map:
            # Existing key becomes (or extends) an out-of-order index tuple.
            current_idx = self._map[key]
            if not isinstance(current_idx, tuple):
                current_idx = (current_idx,)
            self._map[key] = current_idx + (idx,)
        else:
            self._map[key] = idx
        self._body.insert(idx, (key, item))

        dict.__setitem__(self, key.key, item.value)

        return self
|
| 442 |
+
|
| 443 |
+
    def item(self, key: Key | str) -> Item:
        """Get an item for the given key.

        :raises NonExistentKey: if *key* is not present.
        """
        if not isinstance(key, Key):
            key = SingleKey(key)

        idx = self._map.get(key)
        if idx is None:
            raise NonExistentKey(key)

        if isinstance(idx, tuple):
            # The item we are getting is an out of order table
            # so we need a proxy to retrieve the proper objects
            # from the parent container
            return OutOfOrderTableProxy(self, idx)

        return self._body[idx][1]

    def last_item(self) -> Item | None:
        """Get the last item, or ``None`` if the container is empty."""
        if self._body:
            return self._body[-1][1]
|
| 464 |
+
|
| 465 |
+
    def as_string(self) -> str:
        """Render as TOML string."""
        s = ""
        for k, v in self._body:
            if k is not None:
                if isinstance(v, Table):
                    s += self._render_table(k, v)
                elif isinstance(v, AoT):
                    s += self._render_aot(k, v)
                else:
                    s += self._render_simple_item(k, v)
            else:
                # Keyless entries (comments, whitespace) render their raw text.
                s += self._render_simple_item(k, v)

        return s

    def _render_table(self, key: Key, table: Table, prefix: str | None = None) -> str:
        """Render a table (header plus body), recursing into nested tables.

        :param prefix: dotted path of enclosing tables, prepended to the name.
        """
        cur = ""

        if table.display_name is not None:
            _key = table.display_name
        else:
            _key = key.as_string()

        if prefix is not None:
            _key = prefix + "." + _key

        # Super-tables with only table children get no header of their own.
        if not table.is_super_table() or (
            any(
                not isinstance(v, (Table, AoT, Whitespace, Null))
                for _, v in table.value.body
            )
            and not key.is_dotted()
        ):
            open_, close = "[", "]"
            if table.is_aot_element():
                open_, close = "[[", "]]"

            newline_in_table_trivia = (
                "\n" if "\n" not in table.trivia.trail and len(table.value) > 0 else ""
            )
            cur += (
                f"{table.trivia.indent}"
                f"{open_}"
                f"{decode(_key)}"
                f"{close}"
                f"{table.trivia.comment_ws}"
                f"{decode(table.trivia.comment)}"
                f"{table.trivia.trail}"
                f"{newline_in_table_trivia}"
            )
        elif table.trivia.indent == "\n":
            cur += table.trivia.indent

        for k, v in table.value.body:
            if isinstance(v, Table):
                if v.is_super_table():
                    if k.is_dotted() and not key.is_dotted():
                        # Dotted key inside table
                        cur += self._render_table(k, v)
                    else:
                        cur += self._render_table(k, v, prefix=_key)
                else:
                    cur += self._render_table(k, v, prefix=_key)
            elif isinstance(v, AoT):
                cur += self._render_aot(k, v, prefix=_key)
            else:
                cur += self._render_simple_item(
                    k, v, prefix=_key if key.is_dotted() else None
                )

        return cur
|
| 537 |
+
|
| 538 |
+
    def _render_aot(self, key, aot, prefix=None):
        """Render an array of tables as a sequence of ``[[name]]`` tables."""
        _key = key.as_string()
        if prefix is not None:
            _key = prefix + "." + _key

        cur = ""
        _key = decode(_key)
        for table in aot.body:
            cur += self._render_aot_table(table, prefix=_key)

        return cur

    def _render_aot_table(self, table: Table, prefix: str | None = None) -> str:
        """Render one ``[[name]]`` element of an array of tables."""
        cur = ""
        _key = prefix or ""
        open_, close = "[[", "]]"

        cur += (
            f"{table.trivia.indent}"
            f"{open_}"
            f"{decode(_key)}"
            f"{close}"
            f"{table.trivia.comment_ws}"
            f"{decode(table.trivia.comment)}"
            f"{table.trivia.trail}"
        )

        for k, v in table.value.body:
            if isinstance(v, Table):
                if v.is_super_table():
                    if k.is_dotted():
                        # Dotted key inside table
                        cur += self._render_table(k, v)
                    else:
                        cur += self._render_table(k, v, prefix=_key)
                else:
                    cur += self._render_table(k, v, prefix=_key)
            elif isinstance(v, AoT):
                cur += self._render_aot(k, v, prefix=_key)
            else:
                cur += self._render_simple_item(k, v)

        return cur

    def _render_simple_item(self, key, item, prefix=None):
        """Render a ``key = value`` line (or raw text for keyless items)."""
        if key is None:
            return item.as_string()

        _key = key.as_string()
        if prefix is not None:
            _key = prefix + "." + _key

        return (
            f"{item.trivia.indent}"
            f"{decode(_key)}"
            f"{key.sep}"
            f"{decode(item.as_string())}"
            f"{item.trivia.comment_ws}"
            f"{decode(item.trivia.comment)}"
            f"{item.trivia.trail}"
        )
|
| 599 |
+
|
| 600 |
+
    def __len__(self) -> int:
        """Number of distinct keys, per the underlying dict view."""
        return dict.__len__(self)

    def __iter__(self) -> Iterator[str]:
        """Iterate over the container's string keys."""
        return iter(dict.keys(self))

    # Dictionary methods
    def __getitem__(self, key: Key | str) -> Item | Container:
        """Return the value for *key*.

        :raises NonExistentKey: if *key* is not present.
        """
        if not isinstance(key, Key):
            key = SingleKey(key)

        idx = self._map.get(key)
        if idx is None:
            raise NonExistentKey(key)

        if isinstance(idx, tuple):
            # The item we are getting is an out of order table
            # so we need a proxy to retrieve the proper objects
            # from the parent container
            return OutOfOrderTableProxy(self, idx)

        item = self._body[idx][1]
        # Booleans are returned as plain bools rather than wrapped items.
        if item.is_boolean():
            return item.value

        return item
|
| 626 |
+
|
| 627 |
+
    def __setitem__(self, key: Key | str, value: Any) -> None:
        """Set *key* to *value*, replacing any existing entry in place."""
        if key is not None and key in self:
            # Reuse the stored key object so separators/formatting carry over.
            old_key = next(filter(lambda k: k == key, self._map))
            self._replace(old_key, key, value)
        else:
            self.append(key, value)

    def __delitem__(self, key: Key | str) -> None:
        """Delete *key* from the container."""
        self.remove(key)

    def setdefault(self, key: Key | str, default: Any) -> Any:
        """Insert *default* under *key* if absent; return the stored value."""
        super().setdefault(key, default=default)
        return self[key]
|
| 640 |
+
|
| 641 |
+
    def _replace(self, key: Key | str, new_key: Key | str, value: Item) -> None:
        """Replace the entry at *key* with ``new_key = value``.

        :raises NonExistentKey: if *key* is not present.
        """
        if not isinstance(key, Key):
            key = SingleKey(key)

        idx = self._map.get(key)
        if idx is None:
            raise NonExistentKey(key)

        self._replace_at(idx, new_key, value)

    def _replace_at(
        self, idx: int | tuple[int], new_key: Key | str, value: Item
    ) -> None:
        """Replace the body entry/entries at *idx* with ``new_key = value``."""
        value = _item(value)

        if isinstance(idx, tuple):
            # Out-of-order table: null all but the first occurrence.
            for i in idx[1:]:
                self._body[i] = (None, Null())

            idx = idx[0]

        k, v = self._body[idx]
        if not isinstance(new_key, Key):
            if (
                isinstance(value, (AoT, Table)) != isinstance(v, (AoT, Table))
                or new_key != k.key
            ):
                new_key = SingleKey(new_key)
            else:  # Inherit the sep of the old key
                new_key = k

        del self._map[k]
        self._map[new_key] = idx
        if new_key != k:
            dict.__delitem__(self, k)

        if isinstance(value, (AoT, Table)) != isinstance(v, (AoT, Table)):
            # new tables should appear after all non-table values
            self.remove(k)
            for i in range(idx, len(self._body)):
                if isinstance(self._body[i][1], (AoT, Table)):
                    self._insert_at(i, new_key, value)
                    idx = i
                    break
            else:
                idx = -1
                self.append(new_key, value)
        else:
            # Copying trivia
            if not isinstance(value, (Whitespace, AoT)):
                value.trivia.indent = v.trivia.indent
                value.trivia.comment_ws = value.trivia.comment_ws or v.trivia.comment_ws
                value.trivia.comment = value.trivia.comment or v.trivia.comment
                value.trivia.trail = v.trivia.trail
            self._body[idx] = (new_key, value)

        if hasattr(value, "invalidate_display_name"):
            value.invalidate_display_name()  # type: ignore[attr-defined]

        if isinstance(value, Table):
            # Insert a cosmetic new line for tables if:
            # - it does not have it yet OR is not followed by one
            # - it is not the last item
            last, _ = self._previous_item_with_index()
            idx = last if idx < 0 else idx
            has_ws = ends_with_whitespace(value)
            next_ws = idx < last and isinstance(self._body[idx + 1][1], Whitespace)
            if idx < last and not (next_ws or has_ws):
                value.append(None, Whitespace("\n"))

        dict.__setitem__(self, new_key.key, value.value)
|
| 712 |
+
|
| 713 |
+
    def __str__(self) -> str:
        """String form of the plain-dict view."""
        return str(self.value)

    def __repr__(self) -> str:
        """Repr of the plain-dict view."""
        return repr(self.value)

    def __eq__(self, other: dict) -> bool:
        """Compare against any dict via the plain-dict view."""
        if not isinstance(other, dict):
            return NotImplemented

        return self.value == other
|
| 724 |
+
|
| 725 |
+
def _getstate(self, protocol):
|
| 726 |
+
return (self._parsed,)
|
| 727 |
+
|
| 728 |
+
def __reduce__(self):
|
| 729 |
+
return self.__reduce_ex__(2)
|
| 730 |
+
|
| 731 |
+
def __reduce_ex__(self, protocol):
|
| 732 |
+
return (
|
| 733 |
+
self.__class__,
|
| 734 |
+
self._getstate(protocol),
|
| 735 |
+
(self._map, self._body, self._parsed, self._table_keys),
|
| 736 |
+
)
|
| 737 |
+
|
| 738 |
+
def __setstate__(self, state):
    """Restore pickled state, then rebuild the plain-dict entries."""
    self._map = state[0]
    self._body = state[1]
    self._parsed = state[2]
    self._table_keys = state[3]

    # Re-populate the underlying dict from the restored body; entries
    # with a None key (whitespace/comments) carry no dict entry.
    for key, item in self._body:
        if key is None:
            continue
        dict.__setitem__(self, key.key, item.value)
|
| 747 |
+
|
| 748 |
+
def copy(self) -> Container:
    """Return a shallow copy of this container (dispatches to ``__copy__``)."""
    return copy.copy(self)
|
| 750 |
+
|
| 751 |
+
def __copy__(self) -> Container:
    """Shallow copy: duplicate the dict entries, share body items and map."""
    clone = self.__class__(self._parsed)
    for key, val in dict.items(self):
        dict.__setitem__(clone, key, val)

    # Body items and index-map entries are shared, not deep-copied.
    clone._body += self.body
    clone._map.update(self._map)

    return clone
|
| 760 |
+
|
| 761 |
+
def _previous_item_with_index(
    self, idx: int | None = None, ignore=(Null,)
) -> tuple[int, Item] | None:
    """Return ``(index, item)`` for the closest item preceding ``idx``,
    skipping items of the types in ``ignore``; ``None`` if there is none.
    """
    body = self._body
    if idx is None or idx > len(body):
        idx = len(body)

    # Scan backwards from idx - 1 down to 0.
    i = idx - 1
    while i >= 0:
        candidate = body[i][-1]
        if not isinstance(candidate, ignore):
            return i, candidate
        i -= 1
    return None
|
| 772 |
+
|
| 773 |
+
def _previous_item(self, idx: int | None = None, ignore=(Null,)) -> Item | None:
    """Return the closest item preceding index ``idx`` (the last item when
    ``idx`` is omitted), skipping items of the types in ``ignore``.
    """
    found = self._previous_item_with_index(idx, ignore)
    if found is None:
        return None
    return found[-1]
|
| 779 |
+
|
| 780 |
+
|
| 781 |
+
class OutOfOrderTableProxy(_CustomDict):
    """Dict-like view merging several out-of-order ``Table`` fragments of the
    parent :class:`Container` into one logical table.

    Keeps four structures in sync: the parent container, an internal merged
    ``Container``, the list of backing tables, and a key -> table-index map.
    """

    def __init__(self, container: Container, indices: tuple[int, ...]) -> None:
        # Parent container and the body indices of the fragments to merge.
        self._container = container
        # Merged view of all fragments (constructed as already "parsed").
        self._internal_container = Container(True)
        # Backing Table fragments, in the order given by ``indices``.
        self._tables = []
        # Maps each body key to the index (in self._tables) of its fragment.
        self._tables_map = {}

        for i in indices:
            _, item = self._container._body[i]

            if isinstance(item, Table):
                self._tables.append(item)
                table_idx = len(self._tables) - 1
                for k, v in item.value.body:
                    self._internal_container.append(k, v)
                    self._tables_map[k] = table_idx
                    # None keys are whitespace/comments: no dict entry.
                    if k is not None:
                        dict.__setitem__(self, k.key, v)

    def unwrap(self) -> str:
        # NOTE(review): annotation says ``str`` but Container.unwrap
        # presumably returns a plain dict — verify against Container.
        return self._internal_container.unwrap()

    @property
    def value(self):
        # Plain-Python value of the merged view.
        return self._internal_container.value

    def __getitem__(self, key: Key | str) -> Any:
        if key not in self._internal_container:
            raise NonExistentKey(key)

        return self._internal_container[key]

    def __setitem__(self, key: Key | str, item: Any) -> None:
        if key in self._tables_map:
            # Key already belongs to a fragment: update it in place.
            table = self._tables[self._tables_map[key]]
            table[key] = item
        elif self._tables:
            # New key: append to the first fragment.
            table = self._tables[0]
            table[key] = item
        else:
            # No fragments at all: fall back to the parent container.
            self._container[key] = item

        # Mirror the change into the merged view and the dict itself.
        self._internal_container[key] = item
        if key is not None:
            dict.__setitem__(self, key, item)

    def _remove_table(self, table: Table) -> None:
        """Remove table from the parent container"""
        self._tables.remove(table)
        for idx, item in enumerate(self._container._body):
            if item[1] is table:
                self._container._remove_at(idx)
                break

    def __delitem__(self, key: Key | str) -> None:
        if key in self._tables_map:
            table = self._tables[self._tables_map[key]]
            del table[key]
            # Drop a fragment that became empty, but never the last one.
            if not table and len(self._tables) > 1:
                self._remove_table(table)
                del self._tables_map[key]
        else:
            raise NonExistentKey(key)

        del self._internal_container[key]
        if key is not None:
            dict.__delitem__(self, key)

    def __iter__(self) -> Iterator[str]:
        return iter(dict.keys(self))

    def __len__(self) -> int:
        return dict.__len__(self)

    def setdefault(self, key: Key | str, default: Any) -> Any:
        # Delegate insertion to the base class, then re-read through
        # __getitem__ so the stored (possibly wrapped) item is returned.
        super().setdefault(key, default=default)
        return self[key]
|
| 858 |
+
|
| 859 |
+
|
| 860 |
+
def ends_with_whitespace(it: Any) -> bool:
    """Return ``True`` when ``it`` is a ``Table`` or ``AoT`` whose last
    meaningful item is a ``Whitespace``.
    """
    if isinstance(it, Table):
        return isinstance(it.value._previous_item(), Whitespace)
    if isinstance(it, AoT):
        return len(it) > 0 and isinstance(it[-1], Whitespace)
    return False
|
evalkit_tf437/lib/python3.10/site-packages/tomlkit/exceptions.py
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Collection
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TOMLKitError(Exception):
    """Root of the tomlkit exception hierarchy."""
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ParseError(ValueError, TOMLKitError):
    """Raised when the parser hits a TOML syntax error.

    Carries the line and column within the line where the error
    was encountered.
    """

    def __init__(self, line: int, col: int, message: str | None = None) -> None:
        self._line = line
        self._col = col

        text = "TOML parse error" if message is None else message
        super().__init__(f"{text} at line {self._line} col {self._col}")

    @property
    def line(self):
        # 1-based line number of the error.
        return self._line

    @property
    def col(self):
        # Column within the line of the error.
        return self._col
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class MixedArrayTypesError(ParseError):
    """An array was found holding two or more element types."""

    def __init__(self, line: int, col: int) -> None:
        super().__init__(line, col, message="Mixed types found in array")
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class InvalidNumberError(ParseError):
    """A numeric field was not a valid TOML number."""

    def __init__(self, line: int, col: int) -> None:
        super().__init__(line, col, message="Invalid number")
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class InvalidDateTimeError(ParseError):
    """A datetime field was not a valid TOML datetime."""

    def __init__(self, line: int, col: int) -> None:
        super().__init__(line, col, message="Invalid datetime")
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class InvalidDateError(ParseError):
    """A date field was not a valid TOML date."""

    def __init__(self, line: int, col: int) -> None:
        super().__init__(line, col, message="Invalid date")
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class InvalidTimeError(ParseError):
    """
    A time field was improperly specified.
    """

    # Fixed copy-pasted docstring: it previously said "A date field",
    # duplicating InvalidDateError's docstring.
    def __init__(self, line: int, col: int) -> None:
        message = "Invalid time"

        super().__init__(line, col, message=message)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class InvalidNumberOrDateError(ParseError):
    """A field that should be a number or date was improperly specified."""

    def __init__(self, line: int, col: int) -> None:
        super().__init__(line, col, message="Invalid number or date format")
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class InvalidUnicodeValueError(ParseError):
    """A unicode escape code was improperly specified."""

    def __init__(self, line: int, col: int) -> None:
        super().__init__(line, col, message="Invalid unicode value")
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class UnexpectedCharError(ParseError):
    """An unexpected character was encountered during parsing."""

    def __init__(self, line: int, col: int, char: str) -> None:
        super().__init__(line, col, message=f"Unexpected character: {char!r}")
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class EmptyKeyError(ParseError):
    """An empty key was encountered during parsing."""

    def __init__(self, line: int, col: int) -> None:
        super().__init__(line, col, message="Empty key")
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
class EmptyTableNameError(ParseError):
    """An empty table name was encountered during parsing."""

    def __init__(self, line: int, col: int) -> None:
        super().__init__(line, col, message="Empty table name")
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class InvalidCharInStringError(ParseError):
    """The string being parsed contains a character it may not hold."""

    def __init__(self, line: int, col: int, char: str) -> None:
        super().__init__(
            line, col, message=f"Invalid character {char!r} in string"
        )
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
class UnexpectedEofError(ParseError):
    """The input ended before the end of a statement."""

    def __init__(self, line: int, col: int) -> None:
        super().__init__(line, col, message="Unexpected end of file")
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class InternalParserError(ParseError):
    """Signals a bug in the parser itself, not in the input."""

    def __init__(self, line: int, col: int, message: str | None = None) -> None:
        msg = "Internal parser error"
        if message:
            msg = f"{msg} ({message})"
        super().__init__(line, col, message=msg)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
class NonExistentKey(KeyError, TOMLKitError):
    """A key was looked up that does not exist."""

    def __init__(self, key):
        super().__init__(f'Key "{key}" does not exist.')
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
class KeyAlreadyPresent(TOMLKitError):
    """A key was added that already exists."""

    def __init__(self, key):
        # ``key`` may be a Key object; report its bare string form.
        key = getattr(key, "key", key)
        super().__init__(f'Key "{key}" already exists.')
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
class InvalidControlChar(ParseError):
    """A control character appeared where TOML forbids one."""

    def __init__(self, line: int, col: int, char: int, type: str) -> None:
        # Build the \u00XX escape sequence the user should write instead,
        # zero-padding single-digit hex codes.
        suffix = hex(char)[2:]
        if char < 16:
            suffix = "0" + suffix
        display_code = "\\u00" + suffix

        super().__init__(
            line,
            col,
            message=(
                "Control characters (codes less than 0x1f and 0x7f)"
                f" are not allowed in {type}, "
                f"use {display_code} instead"
            ),
        )
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
class InvalidStringError(ValueError, TOMLKitError):
    """The string contains character sequences its delimiter cannot hold."""

    def __init__(self, value: str, invalid_sequences: Collection[str], delimiter: str):
        # Strip the surrounding quotes repr() adds; the TOML delimiter
        # is shown instead.
        body = repr(value)[1:-1]
        super().__init__(
            f"Invalid string: {delimiter}{body}{delimiter}. "
            f"The character sequences {invalid_sequences} are invalid."
        )
|