diff --git a/.gitattributes b/.gitattributes
index efe83fdd4078dc0ad54fd40818d40cedafc63e45..8dd6964923f985676f7dd6df7392eca9b7100aac 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1076,3 +1076,8 @@ infer_4_33_0/lib/python3.10/site-packages/matplotlib/axes/__pycache__/_base.cpyt
infer_4_33_0/lib/python3.10/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans-Oblique.ttf filter=lfs diff=lfs merge=lfs -text
infer_4_33_0/lib/python3.10/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerif-Bold.ttf filter=lfs diff=lfs merge=lfs -text
infer_4_37_2/lib/libquadmath.so.0 filter=lfs diff=lfs merge=lfs -text
+infer_4_37_2/lib/libtinfo.so.6.4 filter=lfs diff=lfs merge=lfs -text
+infer_4_37_2/lib/libitm.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
+emu3/bin/python3.10 filter=lfs diff=lfs merge=lfs -text
+infer_4_37_2/lib/libitm.so filter=lfs diff=lfs merge=lfs -text
+infer_4_37_2/lib/libquadmath.so filter=lfs diff=lfs merge=lfs -text
diff --git a/emu3/bin/python3.10 b/emu3/bin/python3.10
new file mode 100644
index 0000000000000000000000000000000000000000..73a440a7bb23746ee40ae9e6b0f68ba1947cc9ec
--- /dev/null
+++ b/emu3/bin/python3.10
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab5b2de8a504f2de6cbed5d6ebb4df8723139c629a0e5ad95a48a4fe06c982a0
+size 17225608
diff --git a/infer_4_37_2/lib/libitm.so b/infer_4_37_2/lib/libitm.so
new file mode 100644
index 0000000000000000000000000000000000000000..f5eb8be8f056fa3817339919908a70a570eab6fa
--- /dev/null
+++ b/infer_4_37_2/lib/libitm.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70a7a1a8352b39da726e026874f1854096cdd1c60e80ea5cf97a4e38055ea7c1
+size 1018904
diff --git a/infer_4_37_2/lib/libitm.so.1.0.0 b/infer_4_37_2/lib/libitm.so.1.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..f5eb8be8f056fa3817339919908a70a570eab6fa
--- /dev/null
+++ b/infer_4_37_2/lib/libitm.so.1.0.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70a7a1a8352b39da726e026874f1854096cdd1c60e80ea5cf97a4e38055ea7c1
+size 1018904
diff --git a/infer_4_37_2/lib/libquadmath.so b/infer_4_37_2/lib/libquadmath.so
new file mode 100644
index 0000000000000000000000000000000000000000..cd082c15339041642f25b85e32bd65b9e0393619
--- /dev/null
+++ b/infer_4_37_2/lib/libquadmath.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10c6fadba4c2f6d77e836a50aadbd92e95b137a85eb01b1ca183b50d8f39a2c6
+size 1009408
diff --git a/infer_4_37_2/lib/libtinfo.so.6.4 b/infer_4_37_2/lib/libtinfo.so.6.4
new file mode 100644
index 0000000000000000000000000000000000000000..65d1ab454bd249c20d15981aed66fc737eacc9ac
--- /dev/null
+++ b/infer_4_37_2/lib/libtinfo.so.6.4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:075ef6f9b969b7b8ad35b328fead4d4824eaa53bf24cd3659efdbd9b1ecc2d3f
+size 287080
diff --git a/infer_4_37_2/lib/python3.10/LICENSE.txt b/infer_4_37_2/lib/python3.10/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f26bcf4d2de6eb136e31006ca3ab447d5e488adf
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/LICENSE.txt
@@ -0,0 +1,279 @@
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC. Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see https://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team. In October of the same
+year, the PythonLabs team moved to Digital Creations, which became
+Zope Corporation. In 2001, the Python Software Foundation (PSF, see
+https://www.python.org/psf/) was formed, a non-profit organization
+created specifically to own Python-related Intellectual Property.
+Zope Corporation was a sponsoring member of the PSF.
+
+All Python releases are Open Source (see https://opensource.org for
+the Open Source Definition). Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+ Release Derived Year Owner GPL-
+ from compatible? (1)
+
+ 0.9.0 thru 1.2 1991-1995 CWI yes
+ 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
+ 1.6 1.5.2 2000 CNRI no
+ 2.0 1.6 2000 BeOpen.com no
+ 1.6.1 1.6 2001 CNRI yes (2)
+ 2.1 2.0+1.6.1 2001 PSF no
+ 2.0.1 2.0+1.6.1 2001 PSF yes
+ 2.1.1 2.1+2.0.1 2001 PSF yes
+ 2.1.2 2.1.1 2002 PSF yes
+ 2.1.3 2.1.2 2002 PSF yes
+ 2.2 and above 2.1.1 2001-now PSF yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+ the GPL. All Python licenses, unlike the GPL, let you distribute
+ a modified version without making your changes open source. The
+ GPL-compatible licenses make it possible to combine Python with
+ other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+ because its license has a choice of law clause. According to
+ CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+ is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+Python software and documentation are licensed under the
+Python Software Foundation License Version 2.
+
+Starting with Python 3.8.6, examples, recipes, and other code in
+the documentation are dual licensed under the PSF License Version 2
+and the Zero-Clause BSD license.
+
+Some software incorporated into Python is under different licenses.
+The licenses are listed with code falling under that license.
+
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation;
+All Rights Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions. Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee. This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party. As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee. Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement. This Agreement together with
+Python 1.6.1 may be located on the internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013. This
+Agreement may also be obtained from a proxy server on the internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
+License Agreement shall be deemed to create any relationship of
+agency, partnership, or joint venture between CNRI and Licensee. This
+License Agreement does not grant permission to use CNRI trademarks or
+trade name in a trademark sense to endorse or promote products or
+services of Licensee, or any third party.
+
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
+installing or otherwise using Python 1.6.1, Licensee agrees to be
+bound by the terms and conditions of this License Agreement.
+
+ ACCEPT
+
+
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
+--------------------------------------------------
+
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
+The Netherlands. All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of Stichting Mathematisch
+Centrum or CWI not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION
+----------------------------------------------------------------------
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
diff --git a/infer_4_37_2/lib/python3.10/__future__.py b/infer_4_37_2/lib/python3.10/__future__.py
new file mode 100644
index 0000000000000000000000000000000000000000..97dc90c6e4644a71cd19683e31c8624ea3184824
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/__future__.py
@@ -0,0 +1,147 @@
+"""Record of phased-in incompatible language changes.
+
+Each line is of the form:
+
+ FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
+ CompilerFlag ")"
+
+where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
+of the same form as sys.version_info:
+
+ (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
+ PY_MINOR_VERSION, # the 1; an int
+ PY_MICRO_VERSION, # the 0; an int
+ PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
+ PY_RELEASE_SERIAL # the 3; an int
+ )
+
+OptionalRelease records the first release in which
+
+ from __future__ import FeatureName
+
+was accepted.
+
+In the case of MandatoryReleases that have not yet occurred,
+MandatoryRelease predicts the release in which the feature will become part
+of the language.
+
+Else MandatoryRelease records when the feature became part of the language;
+in releases at or after that, modules no longer need
+
+ from __future__ import FeatureName
+
+to use the feature in question, but may continue to use such imports.
+
+MandatoryRelease may also be None, meaning that a planned feature got
+dropped.
+
+Instances of class _Feature have two corresponding methods,
+.getOptionalRelease() and .getMandatoryRelease().
+
+CompilerFlag is the (bitfield) flag that should be passed in the fourth
+argument to the builtin function compile() to enable the feature in
+dynamically compiled code. This flag is stored in the .compiler_flag
+attribute on _Feature instances. These values must match the appropriate
+#defines of CO_xxx flags in Include/cpython/compile.h.
+
+No feature line is ever to be deleted from this file.
+"""
+
+all_feature_names = [
+ "nested_scopes",
+ "generators",
+ "division",
+ "absolute_import",
+ "with_statement",
+ "print_function",
+ "unicode_literals",
+ "barry_as_FLUFL",
+ "generator_stop",
+ "annotations",
+]
+
+__all__ = ["all_feature_names"] + all_feature_names
+
+# The CO_xxx symbols are defined here under the same names defined in
+# code.h and used by compile.h, so that an editor search will find them here.
+# However, they're not exported in __all__, because they don't really belong to
+# this module.
+CO_NESTED = 0x0010 # nested_scopes
+CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
+CO_FUTURE_DIVISION = 0x20000 # division
+CO_FUTURE_ABSOLUTE_IMPORT = 0x40000 # perform absolute imports by default
+CO_FUTURE_WITH_STATEMENT = 0x80000 # with statement
+CO_FUTURE_PRINT_FUNCTION = 0x100000 # print function
+CO_FUTURE_UNICODE_LITERALS = 0x200000 # unicode string literals
+CO_FUTURE_BARRY_AS_BDFL = 0x400000
+CO_FUTURE_GENERATOR_STOP = 0x800000 # StopIteration becomes RuntimeError in generators
+CO_FUTURE_ANNOTATIONS = 0x1000000 # annotations become strings at runtime
+
+
+class _Feature:
+
+ def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
+ self.optional = optionalRelease
+ self.mandatory = mandatoryRelease
+ self.compiler_flag = compiler_flag
+
+ def getOptionalRelease(self):
+ """Return first release in which this feature was recognized.
+
+ This is a 5-tuple, of the same form as sys.version_info.
+ """
+ return self.optional
+
+ def getMandatoryRelease(self):
+ """Return release in which this feature will become mandatory.
+
+ This is a 5-tuple, of the same form as sys.version_info, or, if
+ the feature was dropped, is None.
+ """
+ return self.mandatory
+
+ def __repr__(self):
+ return "_Feature" + repr((self.optional,
+ self.mandatory,
+ self.compiler_flag))
+
+
+nested_scopes = _Feature((2, 1, 0, "beta", 1),
+ (2, 2, 0, "alpha", 0),
+ CO_NESTED)
+
+generators = _Feature((2, 2, 0, "alpha", 1),
+ (2, 3, 0, "final", 0),
+ CO_GENERATOR_ALLOWED)
+
+division = _Feature((2, 2, 0, "alpha", 2),
+ (3, 0, 0, "alpha", 0),
+ CO_FUTURE_DIVISION)
+
+absolute_import = _Feature((2, 5, 0, "alpha", 1),
+ (3, 0, 0, "alpha", 0),
+ CO_FUTURE_ABSOLUTE_IMPORT)
+
+with_statement = _Feature((2, 5, 0, "alpha", 1),
+ (2, 6, 0, "alpha", 0),
+ CO_FUTURE_WITH_STATEMENT)
+
+print_function = _Feature((2, 6, 0, "alpha", 2),
+ (3, 0, 0, "alpha", 0),
+ CO_FUTURE_PRINT_FUNCTION)
+
+unicode_literals = _Feature((2, 6, 0, "alpha", 2),
+ (3, 0, 0, "alpha", 0),
+ CO_FUTURE_UNICODE_LITERALS)
+
+barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
+ (4, 0, 0, "alpha", 0),
+ CO_FUTURE_BARRY_AS_BDFL)
+
+generator_stop = _Feature((3, 5, 0, "beta", 1),
+ (3, 7, 0, "alpha", 0),
+ CO_FUTURE_GENERATOR_STOP)
+
+annotations = _Feature((3, 7, 0, "beta", 1),
+ (3, 11, 0, "alpha", 0),
+ CO_FUTURE_ANNOTATIONS)
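
The module docstring above notes that each feature's compiler_flag can be
passed as the flags argument of the built-in compile() to enable the feature
in dynamically compiled code. A minimal sketch of that mechanism, using only
what this file defines (the snippet and the names inside it are illustrative):

    import __future__

    # Compile a string as if it began with "from __future__ import annotations":
    # under PEP 563 the annotation is stored as a string and never evaluated.
    code = compile("def f(x: NotDefinedAnywhere): pass", "<demo>", "exec",
                   flags=__future__.annotations.compiler_flag)
    ns = {}
    exec(code, ns)
    print(ns["f"].__annotations__)   # {'x': 'NotDefinedAnywhere'}
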
diff --git a/infer_4_37_2/lib/python3.10/__phello__.foo.py b/infer_4_37_2/lib/python3.10/__phello__.foo.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e8623ee1daacbd61475bb84a840813dd99da18d
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/__phello__.foo.py
@@ -0,0 +1 @@
+# This file exists as a helper for the test.test_frozen module.
diff --git a/infer_4_37_2/lib/python3.10/_bootsubprocess.py b/infer_4_37_2/lib/python3.10/_bootsubprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..014782f616c823bae543909e3b17dad3dccc8cd0
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/_bootsubprocess.py
@@ -0,0 +1,97 @@
+"""
+Basic subprocess implementation for POSIX which only uses os functions. Only
+implement features required by setup.py to build C extension modules when
+subprocess is unavailable. setup.py is not used on Windows.
+"""
+import os
+
+
+# distutils.spawn used by distutils.command.build_ext
+# calls subprocess.Popen().wait()
+class Popen:
+ def __init__(self, cmd, env=None):
+ self._cmd = cmd
+ self._env = env
+ self.returncode = None
+
+ def wait(self):
+ pid = os.fork()
+ if pid == 0:
+ # Child process
+ try:
+ if self._env is not None:
+ os.execve(self._cmd[0], self._cmd, self._env)
+ else:
+ os.execv(self._cmd[0], self._cmd)
+ finally:
+ os._exit(1)
+ else:
+ # Parent process
+ _, status = os.waitpid(pid, 0)
+ self.returncode = os.waitstatus_to_exitcode(status)
+
+ return self.returncode
+
+
+def _check_cmd(cmd):
+ # Use regex [a-zA-Z0-9./-]+: reject empty string, space, etc.
+ safe_chars = []
+ for first, last in (("a", "z"), ("A", "Z"), ("0", "9")):
+ for ch in range(ord(first), ord(last) + 1):
+ safe_chars.append(chr(ch))
+ safe_chars.append("./-")
+ safe_chars = ''.join(safe_chars)
+
+ if isinstance(cmd, (tuple, list)):
+ check_strs = cmd
+ elif isinstance(cmd, str):
+ check_strs = [cmd]
+ else:
+ return False
+
+ for arg in check_strs:
+ if not isinstance(arg, str):
+ return False
+ if not arg:
+ # reject empty string
+ return False
+ for ch in arg:
+ if ch not in safe_chars:
+ return False
+
+ return True
+
+
+# _aix_support used by distutil.util calls subprocess.check_output()
+def check_output(cmd, **kwargs):
+ if kwargs:
+ raise NotImplementedError(repr(kwargs))
+
+ if not _check_cmd(cmd):
+ raise ValueError(f"unsupported command: {cmd!r}")
+
+ tmp_filename = "check_output.tmp"
+ if not isinstance(cmd, str):
+ cmd = " ".join(cmd)
+ cmd = f"{cmd} >{tmp_filename}"
+
+ try:
+ # system() spawns a shell
+ status = os.system(cmd)
+ exitcode = os.waitstatus_to_exitcode(status)
+ if exitcode:
+ raise ValueError(f"Command {cmd!r} returned non-zero "
+ f"exit status {exitcode!r}")
+
+ try:
+ with open(tmp_filename, "rb") as fp:
+ stdout = fp.read()
+ except FileNotFoundError:
+ stdout = b''
+ finally:
+ try:
+ os.unlink(tmp_filename)
+ except OSError:
+ pass
+
+ return stdout
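
Taken together: check_output() refuses anything _check_cmd() cannot verify
against the [a-zA-Z0-9./-]+ whitelist, then runs the command through
os.system() with stdout redirected to a temporary file. A hedged usage sketch
(the command shown is illustrative, not part of the module):

    import _bootsubprocess

    out = _bootsubprocess.check_output(["/bin/uname", "-m"])
    print(out)   # e.g. b'x86_64\n'

    # Arguments containing spaces or shell metacharacters are rejected
    # before any shell is spawned:
    # _bootsubprocess.check_output(["sh", "-c", "id; ls"])  # raises ValueError
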
diff --git a/infer_4_37_2/lib/python3.10/_collections_abc.py b/infer_4_37_2/lib/python3.10/_collections_abc.py
new file mode 100644
index 0000000000000000000000000000000000000000..72fd633cf9ac2f94327b5c48c45b6344e8dad5aa
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/_collections_abc.py
@@ -0,0 +1,1171 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
+
+Unit tests are in test_collections.
+"""
+
+from abc import ABCMeta, abstractmethod
+import sys
+
+GenericAlias = type(list[int])
+EllipsisType = type(...)
+def _f(): pass
+FunctionType = type(_f)
+del _f
+
+__all__ = ["Awaitable", "Coroutine",
+ "AsyncIterable", "AsyncIterator", "AsyncGenerator",
+ "Hashable", "Iterable", "Iterator", "Generator", "Reversible",
+ "Sized", "Container", "Callable", "Collection",
+ "Set", "MutableSet",
+ "Mapping", "MutableMapping",
+ "MappingView", "KeysView", "ItemsView", "ValuesView",
+ "Sequence", "MutableSequence",
+ "ByteString",
+ ]
+
+# This module has been renamed from collections.abc to _collections_abc to
+# speed up interpreter startup. Some of the types such as MutableMapping are
+# required early but collections module imports a lot of other modules.
+# See issue #19218
+__name__ = "collections.abc"
+
+# Private list of types that we want to register with the various ABCs
+# so that they will pass tests like:
+# it = iter(somebytearray)
+# assert isinstance(it, Iterable)
+# Note: in other implementations, these types might not be distinct
+# and they may have their own implementation specific types that
+# are not included on this list.
+bytes_iterator = type(iter(b''))
+bytearray_iterator = type(iter(bytearray()))
+#callable_iterator = ???
+dict_keyiterator = type(iter({}.keys()))
+dict_valueiterator = type(iter({}.values()))
+dict_itemiterator = type(iter({}.items()))
+list_iterator = type(iter([]))
+list_reverseiterator = type(iter(reversed([])))
+range_iterator = type(iter(range(0)))
+longrange_iterator = type(iter(range(1 << 1000)))
+set_iterator = type(iter(set()))
+str_iterator = type(iter(""))
+tuple_iterator = type(iter(()))
+zip_iterator = type(iter(zip()))
+## views ##
+dict_keys = type({}.keys())
+dict_values = type({}.values())
+dict_items = type({}.items())
+## misc ##
+mappingproxy = type(type.__dict__)
+generator = type((lambda: (yield))())
+## coroutine ##
+async def _coro(): pass
+_coro = _coro()
+coroutine = type(_coro)
+_coro.close() # Prevent ResourceWarning
+del _coro
+## asynchronous generator ##
+async def _ag(): yield
+_ag = _ag()
+async_generator = type(_ag)
+del _ag
+
+
+### ONE-TRICK PONIES ###
+
+def _check_methods(C, *methods):
+ mro = C.__mro__
+ for method in methods:
+ for B in mro:
+ if method in B.__dict__:
+ if B.__dict__[method] is None:
+ return NotImplemented
+ break
+ else:
+ return NotImplemented
+ return True
+
+class Hashable(metaclass=ABCMeta):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __hash__(self):
+ return 0
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Hashable:
+ return _check_methods(C, "__hash__")
+ return NotImplemented
+
+
+class Awaitable(metaclass=ABCMeta):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __await__(self):
+ yield
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Awaitable:
+ return _check_methods(C, "__await__")
+ return NotImplemented
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+
+class Coroutine(Awaitable):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def send(self, value):
+ """Send a value into the coroutine.
+ Return next yielded value or raise StopIteration.
+ """
+ raise StopIteration
+
+ @abstractmethod
+ def throw(self, typ, val=None, tb=None):
+ """Raise an exception in the coroutine.
+ Return next yielded value or raise StopIteration.
+ """
+ if val is None:
+ if tb is None:
+ raise typ
+ val = typ()
+ if tb is not None:
+ val = val.with_traceback(tb)
+ raise val
+
+ def close(self):
+ """Raise GeneratorExit inside coroutine.
+ """
+ try:
+ self.throw(GeneratorExit)
+ except (GeneratorExit, StopIteration):
+ pass
+ else:
+ raise RuntimeError("coroutine ignored GeneratorExit")
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Coroutine:
+ return _check_methods(C, '__await__', 'send', 'throw', 'close')
+ return NotImplemented
+
+
+Coroutine.register(coroutine)
+
+
+class AsyncIterable(metaclass=ABCMeta):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __aiter__(self):
+ return AsyncIterator()
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is AsyncIterable:
+ return _check_methods(C, "__aiter__")
+ return NotImplemented
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+
+class AsyncIterator(AsyncIterable):
+
+ __slots__ = ()
+
+ @abstractmethod
+ async def __anext__(self):
+ """Return the next item or raise StopAsyncIteration when exhausted."""
+ raise StopAsyncIteration
+
+ def __aiter__(self):
+ return self
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is AsyncIterator:
+ return _check_methods(C, "__anext__", "__aiter__")
+ return NotImplemented
+
+
+class AsyncGenerator(AsyncIterator):
+
+ __slots__ = ()
+
+ async def __anext__(self):
+ """Return the next item from the asynchronous generator.
+ When exhausted, raise StopAsyncIteration.
+ """
+ return await self.asend(None)
+
+ @abstractmethod
+ async def asend(self, value):
+ """Send a value into the asynchronous generator.
+ Return next yielded value or raise StopAsyncIteration.
+ """
+ raise StopAsyncIteration
+
+ @abstractmethod
+ async def athrow(self, typ, val=None, tb=None):
+ """Raise an exception in the asynchronous generator.
+ Return next yielded value or raise StopAsyncIteration.
+ """
+ if val is None:
+ if tb is None:
+ raise typ
+ val = typ()
+ if tb is not None:
+ val = val.with_traceback(tb)
+ raise val
+
+ async def aclose(self):
+ """Raise GeneratorExit inside coroutine.
+ """
+ try:
+ await self.athrow(GeneratorExit)
+ except (GeneratorExit, StopAsyncIteration):
+ pass
+ else:
+ raise RuntimeError("asynchronous generator ignored GeneratorExit")
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is AsyncGenerator:
+ return _check_methods(C, '__aiter__', '__anext__',
+ 'asend', 'athrow', 'aclose')
+ return NotImplemented
+
+
+AsyncGenerator.register(async_generator)
+
+
+class Iterable(metaclass=ABCMeta):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __iter__(self):
+ while False:
+ yield None
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Iterable:
+ return _check_methods(C, "__iter__")
+ return NotImplemented
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+
+class Iterator(Iterable):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __next__(self):
+ 'Return the next item from the iterator. When exhausted, raise StopIteration'
+ raise StopIteration
+
+ def __iter__(self):
+ return self
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Iterator:
+ return _check_methods(C, '__iter__', '__next__')
+ return NotImplemented
+
+
+Iterator.register(bytes_iterator)
+Iterator.register(bytearray_iterator)
+#Iterator.register(callable_iterator)
+Iterator.register(dict_keyiterator)
+Iterator.register(dict_valueiterator)
+Iterator.register(dict_itemiterator)
+Iterator.register(list_iterator)
+Iterator.register(list_reverseiterator)
+Iterator.register(range_iterator)
+Iterator.register(longrange_iterator)
+Iterator.register(set_iterator)
+Iterator.register(str_iterator)
+Iterator.register(tuple_iterator)
+Iterator.register(zip_iterator)
+
+
+class Reversible(Iterable):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __reversed__(self):
+ while False:
+ yield None
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Reversible:
+ return _check_methods(C, "__reversed__", "__iter__")
+ return NotImplemented
+
+
+class Generator(Iterator):
+
+ __slots__ = ()
+
+ def __next__(self):
+ """Return the next item from the generator.
+ When exhausted, raise StopIteration.
+ """
+ return self.send(None)
+
+ @abstractmethod
+ def send(self, value):
+ """Send a value into the generator.
+ Return next yielded value or raise StopIteration.
+ """
+ raise StopIteration
+
+ @abstractmethod
+ def throw(self, typ, val=None, tb=None):
+ """Raise an exception in the generator.
+ Return next yielded value or raise StopIteration.
+ """
+ if val is None:
+ if tb is None:
+ raise typ
+ val = typ()
+ if tb is not None:
+ val = val.with_traceback(tb)
+ raise val
+
+ def close(self):
+ """Raise GeneratorExit inside generator.
+ """
+ try:
+ self.throw(GeneratorExit)
+ except (GeneratorExit, StopIteration):
+ pass
+ else:
+ raise RuntimeError("generator ignored GeneratorExit")
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Generator:
+ return _check_methods(C, '__iter__', '__next__',
+ 'send', 'throw', 'close')
+ return NotImplemented
+
+
+Generator.register(generator)
+
+
+class Sized(metaclass=ABCMeta):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __len__(self):
+ return 0
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Sized:
+ return _check_methods(C, "__len__")
+ return NotImplemented
+
+
+class Container(metaclass=ABCMeta):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __contains__(self, x):
+ return False
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Container:
+ return _check_methods(C, "__contains__")
+ return NotImplemented
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+
+class Collection(Sized, Iterable, Container):
+
+ __slots__ = ()
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Collection:
+ return _check_methods(C, "__len__", "__iter__", "__contains__")
+ return NotImplemented
+
+
+class _CallableGenericAlias(GenericAlias):
+ """ Represent `Callable[argtypes, resulttype]`.
+
+ This sets ``__args__`` to a tuple containing the flattened ``argtypes``
+ followed by ``resulttype``.
+
+ Example: ``Callable[[int, str], float]`` sets ``__args__`` to
+ ``(int, str, float)``.
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, origin, args):
+ if not (isinstance(args, tuple) and len(args) == 2):
+ raise TypeError(
+ "Callable must be used as Callable[[arg, ...], result].")
+ t_args, t_result = args
+ if isinstance(t_args, list):
+ args = (*t_args, t_result)
+ elif not _is_param_expr(t_args):
+ raise TypeError(f"Expected a list of types, an ellipsis, "
+ f"ParamSpec, or Concatenate. Got {t_args}")
+ return super().__new__(cls, origin, args)
+
+ @property
+ def __parameters__(self):
+ params = []
+ for arg in self.__args__:
+ if isinstance(arg, type) and not isinstance(arg, GenericAlias):
+ continue
+ # Looks like a genericalias
+ if hasattr(arg, "__parameters__") and isinstance(arg.__parameters__, tuple):
+ params.extend(arg.__parameters__)
+ else:
+ if _is_typevarlike(arg):
+ params.append(arg)
+ return tuple(dict.fromkeys(params))
+
+ def __repr__(self):
+ if len(self.__args__) == 2 and _is_param_expr(self.__args__[0]):
+ return super().__repr__()
+ return (f'collections.abc.Callable'
+ f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], '
+ f'{_type_repr(self.__args__[-1])}]')
+
+ def __reduce__(self):
+ args = self.__args__
+ if not (len(args) == 2 and _is_param_expr(args[0])):
+ args = list(args[:-1]), args[-1]
+ return _CallableGenericAlias, (Callable, args)
+
+ def __getitem__(self, item):
+ # Called during TypeVar substitution, returns the custom subclass
+ # rather than the default types.GenericAlias object. Most of the
+ # code is copied from typing's _GenericAlias and the builtin
+ # types.GenericAlias.
+
+ # A special case in PEP 612 where if X = Callable[P, int],
+ # then X[int, str] == X[[int, str]].
+ param_len = len(self.__parameters__)
+ if param_len == 0:
+ raise TypeError(f'{self} is not a generic class')
+ if not isinstance(item, tuple):
+ item = (item,)
+ if (param_len == 1 and _is_param_expr(self.__parameters__[0])
+ and item and not _is_param_expr(item[0])):
+ item = (list(item),)
+ item_len = len(item)
+ if item_len != param_len:
+ raise TypeError(f'Too {"many" if item_len > param_len else "few"}'
+ f' arguments for {self};'
+ f' actual {item_len}, expected {param_len}')
+ subst = dict(zip(self.__parameters__, item))
+ new_args = []
+ for arg in self.__args__:
+ if isinstance(arg, type) and not isinstance(arg, GenericAlias):
+ new_args.append(arg)
+ continue
+ if _is_typevarlike(arg):
+ if _is_param_expr(arg):
+ arg = subst[arg]
+ if not _is_param_expr(arg):
+ raise TypeError(f"Expected a list of types, an ellipsis, "
+ f"ParamSpec, or Concatenate. Got {arg}")
+ else:
+ arg = subst[arg]
+ # Looks like a GenericAlias
+ elif hasattr(arg, '__parameters__') and isinstance(arg.__parameters__, tuple):
+ subparams = arg.__parameters__
+ if subparams:
+ subargs = tuple(subst[x] for x in subparams)
+ arg = arg[subargs]
+ if isinstance(arg, tuple):
+ new_args.extend(arg)
+ else:
+ new_args.append(arg)
+
+ # args[0] occurs due to things like Z[[int, str, bool]] from PEP 612
+ if not isinstance(new_args[0], list):
+ t_result = new_args[-1]
+ t_args = new_args[:-1]
+ new_args = (t_args, t_result)
+ return _CallableGenericAlias(Callable, tuple(new_args))
+
+
+def _is_typevarlike(arg):
+ obj = type(arg)
+ # looks like a TypeVar/ParamSpec
+ return (obj.__module__ == 'typing'
+ and obj.__name__ in {'ParamSpec', 'TypeVar'})
+
+def _is_param_expr(obj):
+ """Checks if obj matches either a list of types, ``...``, ``ParamSpec`` or
+ ``_ConcatenateGenericAlias`` from typing.py
+ """
+ if obj is Ellipsis:
+ return True
+ if isinstance(obj, list):
+ return True
+ obj = type(obj)
+ names = ('ParamSpec', '_ConcatenateGenericAlias')
+ return obj.__module__ == 'typing' and any(obj.__name__ == name for name in names)
+
+def _type_repr(obj):
+ """Return the repr() of an object, special-casing types (internal helper).
+
+ Copied from :mod:`typing` since collections.abc
+ shouldn't depend on that module.
+ """
+ if isinstance(obj, GenericAlias):
+ return repr(obj)
+ if isinstance(obj, type):
+ if obj.__module__ == 'builtins':
+ return obj.__qualname__
+ return f'{obj.__module__}.{obj.__qualname__}'
+ if obj is Ellipsis:
+ return '...'
+ if isinstance(obj, FunctionType):
+ return obj.__name__
+ return repr(obj)
+
+
+class Callable(metaclass=ABCMeta):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __call__(self, *args, **kwds):
+ return False
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is Callable:
+ return _check_methods(C, "__call__")
+ return NotImplemented
+
+ __class_getitem__ = classmethod(_CallableGenericAlias)
+
+
+### SETS ###
+
+
+class Set(Collection):
+ """A set is a finite, iterable container.
+
+ This class provides concrete generic implementations of all
+ methods except for __contains__, __iter__ and __len__.
+
+ To override the comparisons (presumably for speed, as the
+ semantics are fixed), redefine __le__ and __ge__,
+ then the other operations will automatically follow suit.
+ """
+
+ __slots__ = ()
+
+ def __le__(self, other):
+ if not isinstance(other, Set):
+ return NotImplemented
+ if len(self) > len(other):
+ return False
+ for elem in self:
+ if elem not in other:
+ return False
+ return True
+
+ def __lt__(self, other):
+ if not isinstance(other, Set):
+ return NotImplemented
+ return len(self) < len(other) and self.__le__(other)
+
+ def __gt__(self, other):
+ if not isinstance(other, Set):
+ return NotImplemented
+ return len(self) > len(other) and self.__ge__(other)
+
+ def __ge__(self, other):
+ if not isinstance(other, Set):
+ return NotImplemented
+ if len(self) < len(other):
+ return False
+ for elem in other:
+ if elem not in self:
+ return False
+ return True
+
+ def __eq__(self, other):
+ if not isinstance(other, Set):
+ return NotImplemented
+ return len(self) == len(other) and self.__le__(other)
+
+ @classmethod
+ def _from_iterable(cls, it):
+ '''Construct an instance of the class from any iterable input.
+
+ Must override this method if the class constructor signature
+ does not accept an iterable for an input.
+ '''
+ return cls(it)
+
+ def __and__(self, other):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ return self._from_iterable(value for value in other if value in self)
+
+ __rand__ = __and__
+
+ def isdisjoint(self, other):
+ 'Return True if two sets have a null intersection.'
+ for value in other:
+ if value in self:
+ return False
+ return True
+
+ def __or__(self, other):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ chain = (e for s in (self, other) for e in s)
+ return self._from_iterable(chain)
+
+ __ror__ = __or__
+
+ def __sub__(self, other):
+ if not isinstance(other, Set):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ other = self._from_iterable(other)
+ return self._from_iterable(value for value in self
+ if value not in other)
+
+ def __rsub__(self, other):
+ if not isinstance(other, Set):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ other = self._from_iterable(other)
+ return self._from_iterable(value for value in other
+ if value not in self)
+
+ def __xor__(self, other):
+ if not isinstance(other, Set):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ other = self._from_iterable(other)
+ return (self - other) | (other - self)
+
+ __rxor__ = __xor__
+
+ def _hash(self):
+ """Compute the hash value of a set.
+
+ Note that we don't define __hash__: not all sets are hashable.
+ But if you define a hashable set type, its __hash__ should
+ call this function.
+
+        This must be compatible with __eq__.
+
+ All sets ought to compare equal if they contain the same
+ elements, regardless of how they are implemented, and
+ regardless of the order of the elements; so there's not much
+ freedom for __eq__ or __hash__. We match the algorithm used
+ by the built-in frozenset type.
+ """
+ MAX = sys.maxsize
+ MASK = 2 * MAX + 1
+ n = len(self)
+ h = 1927868237 * (n + 1)
+ h &= MASK
+ for x in self:
+ hx = hash(x)
+ h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
+ h &= MASK
+ h ^= (h >> 11) ^ (h >> 25)
+ h = h * 69069 + 907133923
+ h &= MASK
+ if h > MAX:
+ h -= MASK + 1
+ if h == -1:
+ h = 590923713
+ return h
+
+
+Set.register(frozenset)
+
+
+class MutableSet(Set):
+ """A mutable set is a finite, iterable container.
+
+ This class provides concrete generic implementations of all
+ methods except for __contains__, __iter__, __len__,
+ add(), and discard().
+
+ To override the comparisons (presumably for speed, as the
+ semantics are fixed), all you have to do is redefine __le__ and
+ then the other operations will automatically follow suit.
+ """
+
+ __slots__ = ()
+
+ @abstractmethod
+ def add(self, value):
+ """Add an element."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def discard(self, value):
+ """Remove an element. Do not raise an exception if absent."""
+ raise NotImplementedError
+
+ def remove(self, value):
+ """Remove an element. If not a member, raise a KeyError."""
+ if value not in self:
+ raise KeyError(value)
+ self.discard(value)
+
+ def pop(self):
+ """Return the popped value. Raise KeyError if empty."""
+ it = iter(self)
+ try:
+ value = next(it)
+ except StopIteration:
+ raise KeyError from None
+ self.discard(value)
+ return value
+
+ def clear(self):
+ """This is slow (creates N new iterators!) but effective."""
+ try:
+ while True:
+ self.pop()
+ except KeyError:
+ pass
+
+ def __ior__(self, it):
+ for value in it:
+ self.add(value)
+ return self
+
+ def __iand__(self, it):
+ for value in (self - it):
+ self.discard(value)
+ return self
+
+ def __ixor__(self, it):
+ if it is self:
+ self.clear()
+ else:
+ if not isinstance(it, Set):
+ it = self._from_iterable(it)
+ for value in it:
+ if value in self:
+ self.discard(value)
+ else:
+ self.add(value)
+ return self
+
+ def __isub__(self, it):
+ if it is self:
+ self.clear()
+ else:
+ for value in it:
+ self.discard(value)
+ return self
+
+
+MutableSet.register(set)
+
+
+### MAPPINGS ###
+
+class Mapping(Collection):
+ """A Mapping is a generic container for associating key/value
+ pairs.
+
+ This class provides concrete generic implementations of all
+ methods except for __getitem__, __iter__, and __len__.
+ """
+
+ __slots__ = ()
+
+ # Tell ABCMeta.__new__ that this class should have TPFLAGS_MAPPING set.
+ __abc_tpflags__ = 1 << 6 # Py_TPFLAGS_MAPPING
+
+ @abstractmethod
+ def __getitem__(self, key):
+ raise KeyError
+
+ def get(self, key, default=None):
+ 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __contains__(self, key):
+ try:
+ self[key]
+ except KeyError:
+ return False
+ else:
+ return True
+
+ def keys(self):
+ "D.keys() -> a set-like object providing a view on D's keys"
+ return KeysView(self)
+
+ def items(self):
+ "D.items() -> a set-like object providing a view on D's items"
+ return ItemsView(self)
+
+ def values(self):
+ "D.values() -> an object providing a view on D's values"
+ return ValuesView(self)
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping):
+ return NotImplemented
+ return dict(self.items()) == dict(other.items())
+
+ __reversed__ = None
+
+Mapping.register(mappingproxy)
+
+
+class MappingView(Sized):
+
+ __slots__ = '_mapping',
+
+ def __init__(self, mapping):
+ self._mapping = mapping
+
+ def __len__(self):
+ return len(self._mapping)
+
+ def __repr__(self):
+ return '{0.__class__.__name__}({0._mapping!r})'.format(self)
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+
+class KeysView(MappingView, Set):
+
+ __slots__ = ()
+
+ @classmethod
+ def _from_iterable(cls, it):
+ return set(it)
+
+ def __contains__(self, key):
+ return key in self._mapping
+
+ def __iter__(self):
+ yield from self._mapping
+
+
+KeysView.register(dict_keys)
+
+
+class ItemsView(MappingView, Set):
+
+ __slots__ = ()
+
+ @classmethod
+ def _from_iterable(cls, it):
+ return set(it)
+
+ def __contains__(self, item):
+ key, value = item
+ try:
+ v = self._mapping[key]
+ except KeyError:
+ return False
+ else:
+ return v is value or v == value
+
+ def __iter__(self):
+ for key in self._mapping:
+ yield (key, self._mapping[key])
+
+
+ItemsView.register(dict_items)
+
+
+class ValuesView(MappingView, Collection):
+
+ __slots__ = ()
+
+ def __contains__(self, value):
+ for key in self._mapping:
+ v = self._mapping[key]
+ if v is value or v == value:
+ return True
+ return False
+
+ def __iter__(self):
+ for key in self._mapping:
+ yield self._mapping[key]
+
+
+ValuesView.register(dict_values)
+
+
+class MutableMapping(Mapping):
+ """A MutableMapping is a generic container for associating
+ key/value pairs.
+
+ This class provides concrete generic implementations of all
+ methods except for __getitem__, __setitem__, __delitem__,
+ __iter__, and __len__.
+ """
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __setitem__(self, key, value):
+ raise KeyError
+
+ @abstractmethod
+ def __delitem__(self, key):
+ raise KeyError
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ '''
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def popitem(self):
+ '''D.popitem() -> (k, v), remove and return some (key, value) pair
+ as a 2-tuple; but raise KeyError if D is empty.
+ '''
+ try:
+ key = next(iter(self))
+ except StopIteration:
+ raise KeyError from None
+ value = self[key]
+ del self[key]
+ return key, value
+
+ def clear(self):
+ 'D.clear() -> None. Remove all items from D.'
+ try:
+ while True:
+ self.popitem()
+ except KeyError:
+ pass
+
+ def update(self, other=(), /, **kwds):
+ ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
+ If E present and has a .keys() method, does: for k in E: D[k] = E[k]
+ If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
+ In either case, this is followed by: for k, v in F.items(): D[k] = v
+ '''
+ if isinstance(other, Mapping):
+ for key in other:
+ self[key] = other[key]
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self[key] = other[key]
+ else:
+ for key, value in other:
+ self[key] = value
+ for key, value in kwds.items():
+ self[key] = value
+
+ def setdefault(self, key, default=None):
+ 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+
+MutableMapping.register(dict)
+
+
+### SEQUENCES ###
+
+class Sequence(Reversible, Collection):
+ """All the operations on a read-only sequence.
+
+ Concrete subclasses must override __new__ or __init__,
+ __getitem__, and __len__.
+ """
+
+ __slots__ = ()
+
+ # Tell ABCMeta.__new__ that this class should have TPFLAGS_SEQUENCE set.
+ __abc_tpflags__ = 1 << 5 # Py_TPFLAGS_SEQUENCE
+
+ @abstractmethod
+ def __getitem__(self, index):
+ raise IndexError
+
+ def __iter__(self):
+ i = 0
+ try:
+ while True:
+ v = self[i]
+ yield v
+ i += 1
+ except IndexError:
+ return
+
+ def __contains__(self, value):
+ for v in self:
+ if v is value or v == value:
+ return True
+ return False
+
+ def __reversed__(self):
+ for i in reversed(range(len(self))):
+ yield self[i]
+
+ def index(self, value, start=0, stop=None):
+ '''S.index(value, [start, [stop]]) -> integer -- return first index of value.
+ Raises ValueError if the value is not present.
+
+ Supporting start and stop arguments is optional, but
+ recommended.
+ '''
+ if start is not None and start < 0:
+ start = max(len(self) + start, 0)
+ if stop is not None and stop < 0:
+ stop += len(self)
+
+ i = start
+ while stop is None or i < stop:
+ try:
+ v = self[i]
+ if v is value or v == value:
+ return i
+ except IndexError:
+ break
+ i += 1
+ raise ValueError
+
+ def count(self, value):
+ 'S.count(value) -> integer -- return number of occurrences of value'
+ return sum(1 for v in self if v is value or v == value)
+
+Sequence.register(tuple)
+Sequence.register(str)
+Sequence.register(range)
+Sequence.register(memoryview)
+
+
+class ByteString(Sequence):
+ """This unifies bytes and bytearray.
+
+ XXX Should add all their methods.
+ """
+
+ __slots__ = ()
+
+ByteString.register(bytes)
+ByteString.register(bytearray)
+
+
+class MutableSequence(Sequence):
+ """All the operations on a read-write sequence.
+
+ Concrete subclasses must provide __new__ or __init__,
+ __getitem__, __setitem__, __delitem__, __len__, and insert().
+ """
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __setitem__(self, index, value):
+ raise IndexError
+
+ @abstractmethod
+ def __delitem__(self, index):
+ raise IndexError
+
+ @abstractmethod
+ def insert(self, index, value):
+ 'S.insert(index, value) -- insert value before index'
+ raise IndexError
+
+ def append(self, value):
+ 'S.append(value) -- append value to the end of the sequence'
+ self.insert(len(self), value)
+
+ def clear(self):
+ 'S.clear() -> None -- remove all items from S'
+ try:
+ while True:
+ self.pop()
+ except IndexError:
+ pass
+
+ def reverse(self):
+ 'S.reverse() -- reverse *IN PLACE*'
+ n = len(self)
+ for i in range(n//2):
+ self[i], self[n-i-1] = self[n-i-1], self[i]
+
+ def extend(self, values):
+ 'S.extend(iterable) -- extend sequence by appending elements from the iterable'
+ if values is self:
+ values = list(values)
+ for v in values:
+ self.append(v)
+
+ def pop(self, index=-1):
+ '''S.pop([index]) -> item -- remove and return item at index (default last).
+ Raise IndexError if list is empty or index is out of range.
+ '''
+ v = self[index]
+ del self[index]
+ return v
+
+ def remove(self, value):
+ '''S.remove(value) -- remove first occurrence of value.
+ Raise ValueError if the value is not present.
+ '''
+ del self[self.index(value)]
+
+ def __iadd__(self, values):
+ self.extend(values)
+ return self
+
+
+MutableSequence.register(list)
+MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
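
As the Mapping docstring above says, a concrete subclass only needs
__getitem__, __iter__ and __len__; get(), keys(), items(), values(),
__contains__ and __eq__ then come for free as mixin methods. A minimal
illustration (the class is invented for the example, not part of the module):

    from collections.abc import Mapping

    class FrozenEnv(Mapping):
        """Read-only mapping over a snapshot of a plain dict."""
        def __init__(self, data):
            self._data = dict(data)
        def __getitem__(self, key):
            return self._data[key]
        def __iter__(self):
            return iter(self._data)
        def __len__(self):
            return len(self._data)

    env = FrozenEnv({"HOME": "/root"})
    assert env.get("PATH", "/usr/bin") == "/usr/bin"   # inherited get()
    assert "HOME" in env                               # inherited __contains__
    assert env == FrozenEnv({"HOME": "/root"})         # inherited __eq__
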
diff --git a/infer_4_37_2/lib/python3.10/_markupbase.py b/infer_4_37_2/lib/python3.10/_markupbase.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ad7e279960f7e1f2bf79d89fe9b905e53f6a12b
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/_markupbase.py
@@ -0,0 +1,396 @@
+"""Shared support for scanning document type declarations in HTML and XHTML.
+
+This module is used as a foundation for the html.parser module. It has no
+documented public API and should not be used directly.
+
+"""
+
+import re
+
+_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
+_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
+_commentclose = re.compile(r'--\s*>')
+_markedsectionclose = re.compile(r']\s*]\s*>')
+
+# An analysis of the MS-Word extensions is available at
+# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
+
+_msmarkedsectionclose = re.compile(r']\s*>')
+
+del re
+
+
+class ParserBase:
+ """Parser base class which provides some common support methods used
+ by the SGML/HTML and XHTML parsers."""
+
+ def __init__(self):
+ if self.__class__ is ParserBase:
+ raise RuntimeError(
+ "_markupbase.ParserBase must be subclassed")
+
+ def reset(self):
+ self.lineno = 1
+ self.offset = 0
+
+ def getpos(self):
+ """Return current line number and offset."""
+ return self.lineno, self.offset
+
+ # Internal -- update line number and offset. This should be
+ # called for each piece of data exactly once, in order -- in other
+ # words the concatenation of all the input strings to this
+ # function should be exactly the entire input.
+ def updatepos(self, i, j):
+ if i >= j:
+ return j
+ rawdata = self.rawdata
+ nlines = rawdata.count("\n", i, j)
+ if nlines:
+ self.lineno = self.lineno + nlines
+ pos = rawdata.rindex("\n", i, j) # Should not fail
+ self.offset = j-(pos+1)
+ else:
+ self.offset = self.offset + j-i
+ return j
+
+ _decl_otherchars = ''
+
+ # Internal -- parse declaration (for use by subclasses).
+ def parse_declaration(self, i):
+ # This is some sort of declaration; in "HTML as
+ # deployed," this should only be the document type
+ # declaration ("").
+ # ISO 8879:1986, however, has more complex
+        # declaration syntax for elements in <!...>, including:
+ # --comment--
+ # [marked section]
+ # name in the following list: ENTITY, DOCTYPE, ELEMENT,
+ # ATTLIST, NOTATION, SHORTREF, USEMAP,
+ # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
+ rawdata = self.rawdata
+ j = i + 2
+ assert rawdata[i:j] == "":
+ # the empty comment
+ return j + 1
+ if rawdata[j:j+1] in ("-", ""):
+ # Start of comment followed by buffer boundary,
+ # or just a buffer boundary.
+ return -1
+ # A simple, practical version could look like: ((name|stringlit) S*) + '>'
+ n = len(rawdata)
+ if rawdata[j:j+2] == '--': #comment
+ # Locate --.*-- as the body of the comment
+ return self.parse_comment(i)
+ elif rawdata[j] == '[': #marked section
+ # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
+ # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
+ # Note that this is extended by Microsoft Office "Save as Web" function
+ # to include [if...] and [endif].
+ return self.parse_marked_section(i)
+ else: #all other declaration elements
+ decltype, j = self._scan_name(j, i)
+ if j < 0:
+ return j
+ if decltype == "doctype":
+ self._decl_otherchars = ''
+ while j < n:
+ c = rawdata[j]
+ if c == ">":
+ # end of declaration syntax
+ data = rawdata[i+2:j]
+ if decltype == "doctype":
+ self.handle_decl(data)
+ else:
+ # According to the HTML5 specs sections "8.2.4.44 Bogus
+ # comment state" and "8.2.4.45 Markup declaration open
+ # state", a comment token should be emitted.
+ # Calling unknown_decl provides more flexibility though.
+ self.unknown_decl(data)
+ return j + 1
+ if c in "\"'":
+ m = _declstringlit_match(rawdata, j)
+ if not m:
+ return -1 # incomplete
+ j = m.end()
+ elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
+ name, j = self._scan_name(j, i)
+ elif c in self._decl_otherchars:
+ j = j + 1
+ elif c == "[":
+ # this could be handled in a separate doctype parser
+ if decltype == "doctype":
+ j = self._parse_doctype_subset(j + 1, i)
+ elif decltype in {"attlist", "linktype", "link", "element"}:
+ # must tolerate []'d groups in a content model in an element declaration
+ # also in data attribute specifications of attlist declaration
+ # also link type declaration subsets in linktype declarations
+ # also link attribute specification lists in link declarations
+ raise AssertionError("unsupported '[' char in %s declaration" % decltype)
+ else:
+ raise AssertionError("unexpected '[' char in declaration")
+ else:
+ raise AssertionError("unexpected %r char in declaration" % rawdata[j])
+ if j < 0:
+ return j
+ return -1 # incomplete
+
+ # Internal -- parse a marked section
+ # Override this to handle MS-word extension syntax content
+ def parse_marked_section(self, i, report=1):
+ rawdata= self.rawdata
+        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
+        sectName, j = self._scan_name( i+3, i )
+        if j < 0:
+            return j
+        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
+            # look for standard ]]> ending
+ match= _markedsectionclose.search(rawdata, i+3)
+ elif sectName in {"if", "else", "endif"}:
+ # look for MS Office ]> ending
+ match= _msmarkedsectionclose.search(rawdata, i+3)
+ else:
+ raise AssertionError(
+ 'unknown status keyword %r in marked section' % rawdata[i+3:j]
+ )
+ if not match:
+ return -1
+ if report:
+ j = match.start(0)
+ self.unknown_decl(rawdata[i+3: j])
+ return match.end(0)
+
+ # Internal -- parse comment, return length or -1 if not terminated
+ def parse_comment(self, i, report=1):
+ rawdata = self.rawdata
+        if rawdata[i:i+4] != '<!--':
+            raise AssertionError('unexpected call to parse_comment()')
+        match = _commentclose.search(rawdata, i+4)
+        if not match:
+            return -1
+        if report:
+            j = match.start(0)
+            self.handle_comment(rawdata[i+4: j])
+        return match.end(0)
+
+def html(einfo, context=5):
+    """Return a nice HTML document describing a given traceback."""
+    etype, evalue, etb = einfo
+    if isinstance(etype, type):
+        etype = etype.__name__
+    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+    date = time.ctime(time.time())
+    head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
+        '<big><big>%s</big></big>' %
+        strong(pydoc.html.escape(str(etype))),
+        '#ffffff', '#6622aa', pyver + '<br>' + date) + '''
+<p>A problem occurred in a Python script.  Here is the sequence of
+function calls leading up to the error, in the order they occurred.</p>'''
+
+    indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>'
+    frames = []
+    records = inspect.getinnerframes(etb, context)
+    for frame, file, lnum, func, lines, index in records:
+        if file:
+            file = os.path.abspath(file)
+            link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
+        else:
+            file = link = '?'
+        args, varargs, varkw, locals = inspect.getargvalues(frame)
+        call = ''
+        if func != '?':
+            call = 'in ' + strong(pydoc.html.escape(func))
+            if func != "<module>":
+ call += inspect.formatargvalues(args, varargs, varkw, locals,
+ formatvalue=lambda value: '=' + pydoc.html.repr(value))
+
+ highlight = {}
+ def reader(lnum=[lnum]):
+ highlight[lnum[0]] = 1
+ try: return linecache.getline(file, lnum[0])
+ finally: lnum[0] += 1
+ vars = scanvars(reader, frame, locals)
+
+        rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
+                ('<big>&nbsp;</big>', link, call)]
+ if index is not None:
+ i = lnum - index
+ for line in lines:
+                num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;'
+ if i in highlight:
+                    line = '<tt>=&gt;%s%s</tt>' % (num, pydoc.html.preformat(line))
+                    rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
+                else:
+                    line = '<tt>&nbsp;&nbsp;%s%s</tt>' % (num, pydoc.html.preformat(line))
+                    rows.append('<tr><td>%s</td></tr>' % grey(line))
+ i += 1
+
+ done, dump = {}, []
+ for name, where, value in vars:
+ if name in done: continue
+ done[name] = 1
+ if value is not __UNDEF__:
+ if where in ('global', 'builtin'):
+                    name = ('<em>%s</em> ' % where) + strong(name)
+ elif where == 'local':
+ name = strong(name)
+ else:
+ name = where + strong(name.split('.')[-1])
+                dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value)))
+ else:
+                dump.append(name + ' <em>undefined</em>')
+
+        rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
+        frames.append('''
+<table width="100%%" cellspacing=0 cellpadding=0 border=0>
+%s</table>''' % '\n'.join(rows))
+
+    exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))),
+ pydoc.html.escape(str(evalue)))]
+ for name in dir(evalue):
+ if name[:1] == '_': continue
+ value = pydoc.html.repr(getattr(evalue, name))
+        exception.append('\n<br>%s%s&nbsp;=\n%s' % (indent, name, value))
+
+    return head + ''.join(frames) + ''.join(exception) + '''
+
+
+<!-- The above is a description of an error in a Python program, formatted
+     for a Web browser because the 'cgitb' module was enabled.  In case you
+     are not reading this in a Web browser, here is the original traceback:
+
+%s
+-->
+''' % pydoc.html.escape(
+          ''.join(traceback.format_exception(etype, evalue, etb)))
+
+def text(einfo, context=5):
+ """Return a plain text document describing a given traceback."""
+ etype, evalue, etb = einfo
+ if isinstance(etype, type):
+ etype = etype.__name__
+ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+ date = time.ctime(time.time())
+ head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
+A problem occurred in a Python script. Here is the sequence of
+function calls leading up to the error, in the order they occurred.
+'''
+
+ frames = []
+ records = inspect.getinnerframes(etb, context)
+ for frame, file, lnum, func, lines, index in records:
+ file = file and os.path.abspath(file) or '?'
+ args, varargs, varkw, locals = inspect.getargvalues(frame)
+ call = ''
+ if func != '?':
+ call = 'in ' + func
+ if func != "":
+ call += inspect.formatargvalues(args, varargs, varkw, locals,
+ formatvalue=lambda value: '=' + pydoc.text.repr(value))
+
+ highlight = {}
+ def reader(lnum=[lnum]):
+ highlight[lnum[0]] = 1
+ try: return linecache.getline(file, lnum[0])
+ finally: lnum[0] += 1
+ vars = scanvars(reader, frame, locals)
+
+ rows = [' %s %s' % (file, call)]
+ if index is not None:
+ i = lnum - index
+ for line in lines:
+ num = '%5d ' % i
+ rows.append(num+line.rstrip())
+ i += 1
+
+ done, dump = {}, []
+ for name, where, value in vars:
+ if name in done: continue
+ done[name] = 1
+ if value is not __UNDEF__:
+ if where == 'global': name = 'global ' + name
+ elif where != 'local': name = where + name.split('.')[-1]
+ dump.append('%s = %s' % (name, pydoc.text.repr(value)))
+ else:
+ dump.append(name + ' undefined')
+
+ rows.append('\n'.join(dump))
+ frames.append('\n%s\n' % '\n'.join(rows))
+
+ exception = ['%s: %s' % (str(etype), str(evalue))]
+ for name in dir(evalue):
+ value = pydoc.text.repr(getattr(evalue, name))
+ exception.append('\n%s%s = %s' % (" "*4, name, value))
+
+ return head + ''.join(frames) + ''.join(exception) + '''
+
+The above is a description of an error in a Python program. Here is
+the original traceback:
+
+%s
+''' % ''.join(traceback.format_exception(etype, evalue, etb))
+
+class Hook:
+ """A hook to replace sys.excepthook that shows tracebacks in HTML."""
+
+ def __init__(self, display=1, logdir=None, context=5, file=None,
+ format="html"):
+ self.display = display # send tracebacks to browser if true
+ self.logdir = logdir # log tracebacks to files if not None
+ self.context = context # number of source code lines per frame
+ self.file = file or sys.stdout # place to send the output
+ self.format = format
+
+ def __call__(self, etype, evalue, etb):
+ self.handle((etype, evalue, etb))
+
+ def handle(self, info=None):
+ info = info or sys.exc_info()
+ if self.format == "html":
+ self.file.write(reset())
+
+ formatter = (self.format=="html") and html or text
+ plain = False
+ try:
+ doc = formatter(info, self.context)
+ except: # just in case something goes wrong
+ doc = ''.join(traceback.format_exception(*info))
+ plain = True
+
+ if self.display:
+ if plain:
+ doc = pydoc.html.escape(doc)
+                self.file.write('<pre>' + doc + '</pre>\n')
+ else:
+ self.file.write(doc + '\n')
+ else:
+ self.file.write('A problem occurred in a Python script.\n')
+
+ if self.logdir is not None:
+ suffix = ['.txt', '.html'][self.format=="html"]
+ (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
+
+ try:
+ with os.fdopen(fd, 'w') as file:
+ file.write(doc)
+ msg = '%s contains the description of this error.' % path
+ except:
+ msg = 'Tried to save traceback to %s, but failed.' % path
+
+ if self.format == 'html':
+                self.file.write('<p>%s</p>\n' % msg)
+ else:
+ self.file.write(msg + '\n')
+ try:
+ self.file.flush()
+ except: pass
+
+handler = Hook().handle
+def enable(display=1, logdir=None, context=5, format="html"):
+ """Install an exception handler that formats tracebacks as HTML.
+
+ The optional argument 'display' can be set to 0 to suppress sending the
+ traceback to the browser, and 'logdir' can be set to a directory to cause
+ tracebacks to be written to files there."""
+ sys.excepthook = Hook(display=display, logdir=logdir,
+ context=context, format=format)
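A usage sketch for the module as a whole (the 1/0 below is just a stand-in crash; any unhandled exception triggers the hook):

import cgitb

cgitb.enable(format="text")   # install Hook as sys.excepthook

1 / 0   # the hook renders the annotated report via text() instead of the default traceback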
diff --git a/infer_4_37_2/lib/python3.10/codecs.py b/infer_4_37_2/lib/python3.10/codecs.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b173b612101e7c635303a2129e3fe363791e2eb
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/codecs.py
@@ -0,0 +1,1127 @@
+""" codecs -- Python Codec Registry, API and helpers.
+
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+
+import builtins
+import sys
+
+### Registry and builtin stateless codec functions
+
+try:
+ from _codecs import *
+except ImportError as why:
+ raise SystemError('Failed to load the builtin codecs: %s' % why)
+
+__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
+ "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
+ "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
+ "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
+ "CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder",
+ "StreamReader", "StreamWriter",
+ "StreamReaderWriter", "StreamRecoder",
+ "getencoder", "getdecoder", "getincrementalencoder",
+ "getincrementaldecoder", "getreader", "getwriter",
+ "encode", "decode", "iterencode", "iterdecode",
+ "strict_errors", "ignore_errors", "replace_errors",
+ "xmlcharrefreplace_errors",
+ "backslashreplace_errors", "namereplace_errors",
+ "register_error", "lookup_error"]
+
+### Constants
+
+#
+# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
+# and its possible byte string values
+# for UTF8/UTF16/UTF32 output and little/big endian machines
+#
+
+# UTF-8
+BOM_UTF8 = b'\xef\xbb\xbf'
+
+# UTF-16, little endian
+BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
+
+# UTF-16, big endian
+BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
+
+# UTF-32, little endian
+BOM_UTF32_LE = b'\xff\xfe\x00\x00'
+
+# UTF-32, big endian
+BOM_UTF32_BE = b'\x00\x00\xfe\xff'
+
+if sys.byteorder == 'little':
+
+ # UTF-16, native endianness
+ BOM = BOM_UTF16 = BOM_UTF16_LE
+
+ # UTF-32, native endianness
+ BOM_UTF32 = BOM_UTF32_LE
+
+else:
+
+ # UTF-16, native endianness
+ BOM = BOM_UTF16 = BOM_UTF16_BE
+
+ # UTF-32, native endianness
+ BOM_UTF32 = BOM_UTF32_BE
+
+# Old broken names (don't use in new code)
+BOM32_LE = BOM_UTF16_LE
+BOM32_BE = BOM_UTF16_BE
+BOM64_LE = BOM_UTF32_LE
+BOM64_BE = BOM_UTF32_BE
+
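A minimal sketch of how these constants are typically used to sniff an encoding from a file's first bytes (sniff_bom is a hypothetical helper, not part of this module). The UTF-32 BOMs must be tested first, since BOM_UTF32_LE begins with the same two bytes as BOM_UTF16_LE:

import codecs

def sniff_bom(raw: bytes) -> str | None:
    # Hypothetical helper: longest BOMs first, so UTF-32 wins over UTF-16.
    for bom, name in ((codecs.BOM_UTF32_LE, 'utf-32-le'),
                      (codecs.BOM_UTF32_BE, 'utf-32-be'),
                      (codecs.BOM_UTF8, 'utf-8-sig'),
                      (codecs.BOM_UTF16_LE, 'utf-16-le'),
                      (codecs.BOM_UTF16_BE, 'utf-16-be')):
        if raw.startswith(bom):
            return name
    return None

assert sniff_bom(b'\xff\xfe\x00\x00data') == 'utf-32-le'
assert sniff_bom(b'\xff\xfedata') == 'utf-16-le'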
+
+### Codec base classes (defining the API)
+
+class CodecInfo(tuple):
+ """Codec details when looking up the codec registry"""
+
+ # Private API to allow Python 3.4 to denylist the known non-Unicode
+ # codecs in the standard library. A more general mechanism to
+ # reliably distinguish test encodings from other codecs will hopefully
+ # be defined for Python 3.5
+ #
+ # See http://bugs.python.org/issue19619
+ _is_text_encoding = True # Assume codecs are text encodings by default
+
+ def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
+ incrementalencoder=None, incrementaldecoder=None, name=None,
+ *, _is_text_encoding=None):
+ self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
+ self.name = name
+ self.encode = encode
+ self.decode = decode
+ self.incrementalencoder = incrementalencoder
+ self.incrementaldecoder = incrementaldecoder
+ self.streamwriter = streamwriter
+ self.streamreader = streamreader
+ if _is_text_encoding is not None:
+ self._is_text_encoding = _is_text_encoding
+ return self
+
+ def __repr__(self):
+ return "<%s.%s object for encoding %s at %#x>" % \
+ (self.__class__.__module__, self.__class__.__qualname__,
+ self.name, id(self))
+
+class Codec:
+
+ """ Defines the interface for stateless encoders/decoders.
+
+ The .encode()/.decode() methods may use different error
+ handling schemes by providing the errors argument. These
+ string values are predefined:
+
+ 'strict' - raise a ValueError error (or a subclass)
+ 'ignore' - ignore the character and continue with the next
+ 'replace' - replace with a suitable replacement character;
+ Python will use the official U+FFFD REPLACEMENT
+ CHARACTER for the builtin Unicode codecs on
+ decoding and '?' on encoding.
+ 'surrogateescape' - replace with private code points U+DCnn.
+ 'xmlcharrefreplace' - Replace with the appropriate XML
+ character reference (only for encoding).
+ 'backslashreplace' - Replace with backslashed escape sequences.
+ 'namereplace' - Replace with \\N{...} escape sequences
+ (only for encoding).
+
+ The set of allowed values can be extended via register_error.
+
+ """
+ def encode(self, input, errors='strict'):
+
+ """ Encodes the object input and returns a tuple (output
+ object, length consumed).
+
+ errors defines the error handling to apply. It defaults to
+ 'strict' handling.
+
+ The method may not store state in the Codec instance. Use
+ StreamWriter for codecs which have to keep state in order to
+ make encoding efficient.
+
+ The encoder must be able to handle zero length input and
+ return an empty object of the output object type in this
+ situation.
+
+ """
+ raise NotImplementedError
+
+ def decode(self, input, errors='strict'):
+
+ """ Decodes the object input and returns a tuple (output
+ object, length consumed).
+
+ input must be an object which provides the bf_getreadbuf
+ buffer slot. Python strings, buffer objects and memory
+ mapped files are examples of objects providing this slot.
+
+ errors defines the error handling to apply. It defaults to
+ 'strict' handling.
+
+ The method may not store state in the Codec instance. Use
+ StreamReader for codecs which have to keep state in order to
+ make decoding efficient.
+
+ The decoder must be able to handle zero length input and
+ return an empty object of the output object type in this
+ situation.
+
+ """
+ raise NotImplementedError
+
+class IncrementalEncoder(object):
+ """
+ An IncrementalEncoder encodes an input in multiple steps. The input can
+ be passed piece by piece to the encode() method. The IncrementalEncoder
+ remembers the state of the encoding process between calls to encode().
+ """
+ def __init__(self, errors='strict'):
+ """
+ Creates an IncrementalEncoder instance.
+
+ The IncrementalEncoder may use different error handling schemes by
+ providing the errors keyword argument. See the module docstring
+ for a list of possible values.
+ """
+ self.errors = errors
+ self.buffer = ""
+
+ def encode(self, input, final=False):
+ """
+ Encodes input and returns the resulting object.
+ """
+ raise NotImplementedError
+
+ def reset(self):
+ """
+ Resets the encoder to the initial state.
+ """
+
+ def getstate(self):
+ """
+ Return the current state of the encoder.
+ """
+ return 0
+
+ def setstate(self, state):
+ """
+ Set the current state of the encoder. state must have been
+ returned by getstate().
+ """
+
+class BufferedIncrementalEncoder(IncrementalEncoder):
+ """
+ This subclass of IncrementalEncoder can be used as the baseclass for an
+ incremental encoder if the encoder must keep some of the output in a
+ buffer between calls to encode().
+ """
+ def __init__(self, errors='strict'):
+ IncrementalEncoder.__init__(self, errors)
+ # unencoded input that is kept between calls to encode()
+ self.buffer = ""
+
+ def _buffer_encode(self, input, errors, final):
+ # Overwrite this method in subclasses: It must encode input
+ # and return an (output, length consumed) tuple
+ raise NotImplementedError
+
+ def encode(self, input, final=False):
+ # encode input (taking the buffer into account)
+ data = self.buffer + input
+ (result, consumed) = self._buffer_encode(data, self.errors, final)
+ # keep unencoded input until the next call
+ self.buffer = data[consumed:]
+ return result
+
+ def reset(self):
+ IncrementalEncoder.reset(self)
+ self.buffer = ""
+
+ def getstate(self):
+ return self.buffer or 0
+
+ def setstate(self, state):
+ self.buffer = state or ""
+
+class IncrementalDecoder(object):
+ """
+ An IncrementalDecoder decodes an input in multiple steps. The input can
+ be passed piece by piece to the decode() method. The IncrementalDecoder
+ remembers the state of the decoding process between calls to decode().
+ """
+ def __init__(self, errors='strict'):
+ """
+ Create an IncrementalDecoder instance.
+
+ The IncrementalDecoder may use different error handling schemes by
+ providing the errors keyword argument. See the module docstring
+ for a list of possible values.
+ """
+ self.errors = errors
+
+ def decode(self, input, final=False):
+ """
+ Decode input and returns the resulting object.
+ """
+ raise NotImplementedError
+
+ def reset(self):
+ """
+ Reset the decoder to the initial state.
+ """
+
+ def getstate(self):
+ """
+ Return the current state of the decoder.
+
+ This must be a (buffered_input, additional_state_info) tuple.
+ buffered_input must be a bytes object containing bytes that
+ were passed to decode() that have not yet been converted.
+ additional_state_info must be a non-negative integer
+ representing the state of the decoder WITHOUT yet having
+ processed the contents of buffered_input. In the initial state
+ and after reset(), getstate() must return (b"", 0).
+ """
+ return (b"", 0)
+
+ def setstate(self, state):
+ """
+ Set the current state of the decoder.
+
+ state must have been returned by getstate(). The effect of
+ setstate((b"", 0)) must be equivalent to reset().
+ """
+
+class BufferedIncrementalDecoder(IncrementalDecoder):
+ """
+ This subclass of IncrementalDecoder can be used as the baseclass for an
+ incremental decoder if the decoder must be able to handle incomplete
+ byte sequences.
+ """
+ def __init__(self, errors='strict'):
+ IncrementalDecoder.__init__(self, errors)
+ # undecoded input that is kept between calls to decode()
+ self.buffer = b""
+
+ def _buffer_decode(self, input, errors, final):
+ # Overwrite this method in subclasses: It must decode input
+ # and return an (output, length consumed) tuple
+ raise NotImplementedError
+
+ def decode(self, input, final=False):
+ # decode input (taking the buffer into account)
+ data = self.buffer + input
+ (result, consumed) = self._buffer_decode(data, self.errors, final)
+ # keep undecoded input until the next call
+ self.buffer = data[consumed:]
+ return result
+
+ def reset(self):
+ IncrementalDecoder.reset(self)
+ self.buffer = b""
+
+ def getstate(self):
+ # additional state info is always 0
+ return (self.buffer, 0)
+
+ def setstate(self, state):
+ # ignore additional state info
+ self.buffer = state[0]
+
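A short sketch of the incremental pattern these classes implement: a multi-byte sequence split across chunks is buffered between calls instead of failing mid-character (getincrementaldecoder is defined further down in this module):

import codecs

dec = codecs.getincrementaldecoder('utf-8')()
out = dec.decode(b'caf\xc3')             # trailing b'\xc3' is incomplete and buffered
out += dec.decode(b'\xa9', final=True)   # completes U+00E9
assert out == 'café'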
+#
+# The StreamWriter and StreamReader classes provide generic working
+# interfaces which can be used to implement new encoding submodules
+# very easily. See encodings/utf_8.py for an example on how this is
+# done.
+#
+
+class StreamWriter(Codec):
+
+ def __init__(self, stream, errors='strict'):
+
+ """ Creates a StreamWriter instance.
+
+ stream must be a file-like object open for writing.
+
+ The StreamWriter may use different error handling
+ schemes by providing the errors keyword argument. These
+ parameters are predefined:
+
+ 'strict' - raise a ValueError (or a subclass)
+ 'ignore' - ignore the character and continue with the next
+ 'replace'- replace with a suitable replacement character
+ 'xmlcharrefreplace' - Replace with the appropriate XML
+ character reference.
+ 'backslashreplace' - Replace with backslashed escape
+ sequences.
+ 'namereplace' - Replace with \\N{...} escape sequences.
+
+ The set of allowed parameter values can be extended via
+ register_error.
+ """
+ self.stream = stream
+ self.errors = errors
+
+ def write(self, object):
+
+ """ Writes the object's contents encoded to self.stream.
+ """
+ data, consumed = self.encode(object, self.errors)
+ self.stream.write(data)
+
+ def writelines(self, list):
+
+ """ Writes the concatenated list of strings to the stream
+ using .write().
+ """
+ self.write(''.join(list))
+
+ def reset(self):
+
+ """ Resets the codec buffers used for keeping internal state.
+
+ Calling this method should ensure that the data on the
+ output is put into a clean state, that allows appending
+ of new fresh data without having to rescan the whole
+ stream to recover state.
+
+ """
+ pass
+
+ def seek(self, offset, whence=0):
+ self.stream.seek(offset, whence)
+ if whence == 0 and offset == 0:
+ self.reset()
+
+ def __getattr__(self, name,
+ getattr=getattr):
+
+ """ Inherit all other methods from the underlying stream.
+ """
+ return getattr(self.stream, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self.stream.close()
+
+###
+
+class StreamReader(Codec):
+
+ charbuffertype = str
+
+ def __init__(self, stream, errors='strict'):
+
+ """ Creates a StreamReader instance.
+
+ stream must be a file-like object open for reading.
+
+ The StreamReader may use different error handling
+ schemes by providing the errors keyword argument. These
+ parameters are predefined:
+
+ 'strict' - raise a ValueError (or a subclass)
+ 'ignore' - ignore the character and continue with the next
+ 'replace'- replace with a suitable replacement character
+ 'backslashreplace' - Replace with backslashed escape sequences;
+
+ The set of allowed parameter values can be extended via
+ register_error.
+ """
+ self.stream = stream
+ self.errors = errors
+ self.bytebuffer = b""
+ self._empty_charbuffer = self.charbuffertype()
+ self.charbuffer = self._empty_charbuffer
+ self.linebuffer = None
+
+ def decode(self, input, errors='strict'):
+ raise NotImplementedError
+
+ def read(self, size=-1, chars=-1, firstline=False):
+
+ """ Decodes data from the stream self.stream and returns the
+ resulting object.
+
+ chars indicates the number of decoded code points or bytes to
+ return. read() will never return more data than requested,
+ but it might return less, if there is not enough available.
+
+ size indicates the approximate maximum number of decoded
+ bytes or code points to read for decoding. The decoder
+ can modify this setting as appropriate. The default value
+ -1 indicates to read and decode as much as possible. size
+ is intended to prevent having to decode huge files in one
+ step.
+
+ If firstline is true, and a UnicodeDecodeError happens
+ after the first line terminator in the input only the first line
+ will be returned, the rest of the input will be kept until the
+ next call to read().
+
+ The method should use a greedy read strategy, meaning that
+ it should read as much data as is allowed within the
+ definition of the encoding and the given size, e.g. if
+ optional encoding endings or state markers are available
+ on the stream, these should be read too.
+ """
+ # If we have lines cached, first merge them back into characters
+ if self.linebuffer:
+ self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
+ self.linebuffer = None
+
+ if chars < 0:
+ # For compatibility with other read() methods that take a
+ # single argument
+ chars = size
+
+ # read until we get the required number of characters (if available)
+ while True:
+ # can the request be satisfied from the character buffer?
+ if chars >= 0:
+ if len(self.charbuffer) >= chars:
+ break
+ # we need more data
+ if size < 0:
+ newdata = self.stream.read()
+ else:
+ newdata = self.stream.read(size)
+ # decode bytes (those remaining from the last call included)
+ data = self.bytebuffer + newdata
+ if not data:
+ break
+ try:
+ newchars, decodedbytes = self.decode(data, self.errors)
+ except UnicodeDecodeError as exc:
+ if firstline:
+ newchars, decodedbytes = \
+ self.decode(data[:exc.start], self.errors)
+ lines = newchars.splitlines(keepends=True)
+ if len(lines)<=1:
+ raise
+ else:
+ raise
+ # keep undecoded bytes until the next call
+ self.bytebuffer = data[decodedbytes:]
+ # put new characters in the character buffer
+ self.charbuffer += newchars
+ # there was no data available
+ if not newdata:
+ break
+ if chars < 0:
+ # Return everything we've got
+ result = self.charbuffer
+ self.charbuffer = self._empty_charbuffer
+ else:
+ # Return the first chars characters
+ result = self.charbuffer[:chars]
+ self.charbuffer = self.charbuffer[chars:]
+ return result
+
+ def readline(self, size=None, keepends=True):
+
+ """ Read one line from the input stream and return the
+ decoded data.
+
+ size, if given, is passed as size argument to the
+ read() method.
+
+ """
+ # If we have lines cached from an earlier read, return
+ # them unconditionally
+ if self.linebuffer:
+ line = self.linebuffer[0]
+ del self.linebuffer[0]
+ if len(self.linebuffer) == 1:
+ # revert to charbuffer mode; we might need more data
+ # next time
+ self.charbuffer = self.linebuffer[0]
+ self.linebuffer = None
+ if not keepends:
+ line = line.splitlines(keepends=False)[0]
+ return line
+
+ readsize = size or 72
+ line = self._empty_charbuffer
+ # If size is given, we call read() only once
+ while True:
+ data = self.read(readsize, firstline=True)
+ if data:
+ # If we're at a "\r" read one extra character (which might
+ # be a "\n") to get a proper line ending. If the stream is
+ # temporarily exhausted we return the wrong line ending.
+ if (isinstance(data, str) and data.endswith("\r")) or \
+ (isinstance(data, bytes) and data.endswith(b"\r")):
+ data += self.read(size=1, chars=1)
+
+ line += data
+ lines = line.splitlines(keepends=True)
+ if lines:
+ if len(lines) > 1:
+ # More than one line result; the first line is a full line
+ # to return
+ line = lines[0]
+ del lines[0]
+ if len(lines) > 1:
+ # cache the remaining lines
+ lines[-1] += self.charbuffer
+ self.linebuffer = lines
+ self.charbuffer = None
+ else:
+ # only one remaining line, put it back into charbuffer
+ self.charbuffer = lines[0] + self.charbuffer
+ if not keepends:
+ line = line.splitlines(keepends=False)[0]
+ break
+ line0withend = lines[0]
+ line0withoutend = lines[0].splitlines(keepends=False)[0]
+ if line0withend != line0withoutend: # We really have a line end
+ # Put the rest back together and keep it until the next call
+ self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
+ self.charbuffer
+ if keepends:
+ line = line0withend
+ else:
+ line = line0withoutend
+ break
+ # we didn't get anything or this was our only try
+ if not data or size is not None:
+ if line and not keepends:
+ line = line.splitlines(keepends=False)[0]
+ break
+ if readsize < 8000:
+ readsize *= 2
+ return line
+
+ def readlines(self, sizehint=None, keepends=True):
+
+ """ Read all lines available on the input stream
+ and return them as a list.
+
+ Line breaks are implemented using the codec's decoder
+ method and are included in the list entries.
+
+ sizehint, if given, is ignored since there is no efficient
+        way of finding the true end-of-line.
+
+ """
+ data = self.read()
+ return data.splitlines(keepends)
+
+ def reset(self):
+
+ """ Resets the codec buffers used for keeping internal state.
+
+ Note that no stream repositioning should take place.
+ This method is primarily intended to be able to recover
+ from decoding errors.
+
+ """
+ self.bytebuffer = b""
+ self.charbuffer = self._empty_charbuffer
+ self.linebuffer = None
+
+ def seek(self, offset, whence=0):
+ """ Set the input stream's current position.
+
+ Resets the codec buffers used for keeping state.
+ """
+ self.stream.seek(offset, whence)
+ self.reset()
+
+ def __next__(self):
+
+ """ Return the next decoded line from the input stream."""
+ line = self.readline()
+ if line:
+ return line
+ raise StopIteration
+
+ def __iter__(self):
+ return self
+
+ def __getattr__(self, name,
+ getattr=getattr):
+
+ """ Inherit all other methods from the underlying stream.
+ """
+ return getattr(self.stream, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self.stream.close()
+
+###
+
+class StreamReaderWriter:
+
+ """ StreamReaderWriter instances allow wrapping streams which
+ work in both read and write modes.
+
+ The design is such that one can use the factory functions
+ returned by the codec.lookup() function to construct the
+ instance.
+
+ """
+ # Optional attributes set by the file wrappers below
+ encoding = 'unknown'
+
+ def __init__(self, stream, Reader, Writer, errors='strict'):
+
+ """ Creates a StreamReaderWriter instance.
+
+ stream must be a Stream-like object.
+
+ Reader, Writer must be factory functions or classes
+ providing the StreamReader, StreamWriter interface resp.
+
+ Error handling is done in the same way as defined for the
+ StreamWriter/Readers.
+
+ """
+ self.stream = stream
+ self.reader = Reader(stream, errors)
+ self.writer = Writer(stream, errors)
+ self.errors = errors
+
+ def read(self, size=-1):
+
+ return self.reader.read(size)
+
+ def readline(self, size=None):
+
+ return self.reader.readline(size)
+
+ def readlines(self, sizehint=None):
+
+ return self.reader.readlines(sizehint)
+
+ def __next__(self):
+
+ """ Return the next decoded line from the input stream."""
+ return next(self.reader)
+
+ def __iter__(self):
+ return self
+
+ def write(self, data):
+
+ return self.writer.write(data)
+
+ def writelines(self, list):
+
+ return self.writer.writelines(list)
+
+ def reset(self):
+
+ self.reader.reset()
+ self.writer.reset()
+
+ def seek(self, offset, whence=0):
+ self.stream.seek(offset, whence)
+ self.reader.reset()
+ if whence == 0 and offset == 0:
+ self.writer.reset()
+
+ def __getattr__(self, name,
+ getattr=getattr):
+
+ """ Inherit all other methods from the underlying stream.
+ """
+ return getattr(self.stream, name)
+
+ # these are needed to make "with StreamReaderWriter(...)" work properly
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self.stream.close()
+
+###
+
+class StreamRecoder:
+
+ """ StreamRecoder instances translate data from one encoding to another.
+
+ They use the complete set of APIs returned by the
+ codecs.lookup() function to implement their task.
+
+ Data written to the StreamRecoder is first decoded into an
+ intermediate format (depending on the "decode" codec) and then
+ written to the underlying stream using an instance of the provided
+ Writer class.
+
+ In the other direction, data is read from the underlying stream using
+ a Reader instance and then encoded and returned to the caller.
+
+ """
+ # Optional attributes set by the file wrappers below
+ data_encoding = 'unknown'
+ file_encoding = 'unknown'
+
+ def __init__(self, stream, encode, decode, Reader, Writer,
+ errors='strict'):
+
+ """ Creates a StreamRecoder instance which implements a two-way
+ conversion: encode and decode work on the frontend (the
+ data visible to .read() and .write()) while Reader and Writer
+ work on the backend (the data in stream).
+
+ You can use these objects to do transparent
+ transcodings from e.g. latin-1 to utf-8 and back.
+
+ stream must be a file-like object.
+
+ encode and decode must adhere to the Codec interface; Reader and
+ Writer must be factory functions or classes providing the
+ StreamReader and StreamWriter interfaces resp.
+
+ Error handling is done in the same way as defined for the
+ StreamWriter/Readers.
+
+ """
+ self.stream = stream
+ self.encode = encode
+ self.decode = decode
+ self.reader = Reader(stream, errors)
+ self.writer = Writer(stream, errors)
+ self.errors = errors
+
+ def read(self, size=-1):
+
+ data = self.reader.read(size)
+ data, bytesencoded = self.encode(data, self.errors)
+ return data
+
+ def readline(self, size=None):
+
+ if size is None:
+ data = self.reader.readline()
+ else:
+ data = self.reader.readline(size)
+ data, bytesencoded = self.encode(data, self.errors)
+ return data
+
+ def readlines(self, sizehint=None):
+
+ data = self.reader.read()
+ data, bytesencoded = self.encode(data, self.errors)
+ return data.splitlines(keepends=True)
+
+ def __next__(self):
+
+ """ Return the next decoded line from the input stream."""
+ data = next(self.reader)
+ data, bytesencoded = self.encode(data, self.errors)
+ return data
+
+ def __iter__(self):
+ return self
+
+ def write(self, data):
+
+ data, bytesdecoded = self.decode(data, self.errors)
+ return self.writer.write(data)
+
+ def writelines(self, list):
+
+ data = b''.join(list)
+ data, bytesdecoded = self.decode(data, self.errors)
+ return self.writer.write(data)
+
+ def reset(self):
+
+ self.reader.reset()
+ self.writer.reset()
+
+ def seek(self, offset, whence=0):
+ # Seeks must be propagated to both the readers and writers
+ # as they might need to reset their internal buffers.
+ self.reader.seek(offset, whence)
+ self.writer.seek(offset, whence)
+
+ def __getattr__(self, name,
+ getattr=getattr):
+
+ """ Inherit all other methods from the underlying stream.
+ """
+ return getattr(self.stream, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self.stream.close()
+
+### Shortcuts
+
+def open(filename, mode='r', encoding=None, errors='strict', buffering=-1):
+
+ """ Open an encoded file using the given mode and return
+ a wrapped version providing transparent encoding/decoding.
+
+ Note: The wrapped version will only accept the object format
+ defined by the codecs, i.e. Unicode objects for most builtin
+ codecs. Output is also codec dependent and will usually be
+ Unicode as well.
+
+ If encoding is not None, then the
+ underlying encoded files are always opened in binary mode.
+ The default file mode is 'r', meaning to open the file in read mode.
+
+ encoding specifies the encoding which is to be used for the
+ file.
+
+ errors may be given to define the error handling. It defaults
+ to 'strict' which causes ValueErrors to be raised in case an
+ encoding error occurs.
+
+ buffering has the same meaning as for the builtin open() API.
+ It defaults to -1 which means that the default buffer size will
+ be used.
+
+ The returned wrapped file object provides an extra attribute
+ .encoding which allows querying the used encoding. This
+ attribute is only available if an encoding was specified as
+ parameter.
+
+ """
+ if encoding is not None and \
+ 'b' not in mode:
+ # Force opening of the file in binary mode
+ mode = mode + 'b'
+ file = builtins.open(filename, mode, buffering)
+ if encoding is None:
+ return file
+
+ try:
+ info = lookup(encoding)
+ srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
+ # Add attributes to simplify introspection
+ srw.encoding = encoding
+ return srw
+ except:
+ file.close()
+ raise
+
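A usage sketch for open() ('/tmp/demo.txt' is an arbitrary example path):

import codecs

with codecs.open('/tmp/demo.txt', 'w', encoding='utf-8') as f:
    f.write('héllo\n')                 # mode is forced to 'wb' underneath
with codecs.open('/tmp/demo.txt', encoding='utf-8') as f:
    assert f.encoding == 'utf-8'       # the extra introspection attribute
    assert f.read() == 'héllo\n'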
+def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
+
+ """ Return a wrapped version of file which provides transparent
+ encoding translation.
+
+ Data written to the wrapped file is decoded according
+ to the given data_encoding and then encoded to the underlying
+ file using file_encoding. The intermediate data type
+ will usually be Unicode but depends on the specified codecs.
+
+ Bytes read from the file are decoded using file_encoding and then
+ passed back to the caller encoded using data_encoding.
+
+ If file_encoding is not given, it defaults to data_encoding.
+
+ errors may be given to define the error handling. It defaults
+ to 'strict' which causes ValueErrors to be raised in case an
+ encoding error occurs.
+
+ The returned wrapped file object provides two extra attributes
+ .data_encoding and .file_encoding which reflect the given
+ parameters of the same name. The attributes can be used for
+ introspection by Python programs.
+
+ """
+ if file_encoding is None:
+ file_encoding = data_encoding
+ data_info = lookup(data_encoding)
+ file_info = lookup(file_encoding)
+ sr = StreamRecoder(file, data_info.encode, data_info.decode,
+ file_info.streamreader, file_info.streamwriter, errors)
+ # Add attributes to simplify introspection
+ sr.data_encoding = data_encoding
+ sr.file_encoding = file_encoding
+ return sr
+
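A self-contained sketch of the transcoding described above, using an in-memory buffer:

import codecs, io

buf = io.BytesIO()
f = codecs.EncodedFile(buf, data_encoding='latin-1', file_encoding='utf-8')
f.write('é'.encode('latin-1'))          # b'\xe9' goes in as latin-1 ...
assert buf.getvalue() == b'\xc3\xa9'    # ... and lands in the buffer as UTF-8
assert (f.data_encoding, f.file_encoding) == ('latin-1', 'utf-8')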
+### Helpers for codec lookup
+
+def getencoder(encoding):
+
+ """ Lookup up the codec for the given encoding and return
+ its encoder function.
+
+ Raises a LookupError in case the encoding cannot be found.
+
+ """
+ return lookup(encoding).encode
+
+def getdecoder(encoding):
+
+ """ Lookup up the codec for the given encoding and return
+ its decoder function.
+
+ Raises a LookupError in case the encoding cannot be found.
+
+ """
+ return lookup(encoding).decode
+
+def getincrementalencoder(encoding):
+
+ """ Lookup up the codec for the given encoding and return
+ its IncrementalEncoder class or factory function.
+
+ Raises a LookupError in case the encoding cannot be found
+        or the codec doesn't provide an incremental encoder.
+
+ """
+ encoder = lookup(encoding).incrementalencoder
+ if encoder is None:
+ raise LookupError(encoding)
+ return encoder
+
+def getincrementaldecoder(encoding):
+
+ """ Lookup up the codec for the given encoding and return
+ its IncrementalDecoder class or factory function.
+
+ Raises a LookupError in case the encoding cannot be found
+        or the codec doesn't provide an incremental decoder.
+
+ """
+ decoder = lookup(encoding).incrementaldecoder
+ if decoder is None:
+ raise LookupError(encoding)
+ return decoder
+
+def getreader(encoding):
+
+ """ Lookup up the codec for the given encoding and return
+ its StreamReader class or factory function.
+
+ Raises a LookupError in case the encoding cannot be found.
+
+ """
+ return lookup(encoding).streamreader
+
+def getwriter(encoding):
+
+ """ Lookup up the codec for the given encoding and return
+ its StreamWriter class or factory function.
+
+ Raises a LookupError in case the encoding cannot be found.
+
+ """
+ return lookup(encoding).streamwriter
+
+def iterencode(iterator, encoding, errors='strict', **kwargs):
+ """
+ Encoding iterator.
+
+ Encodes the input strings from the iterator using an IncrementalEncoder.
+
+ errors and kwargs are passed through to the IncrementalEncoder
+ constructor.
+ """
+ encoder = getincrementalencoder(encoding)(errors, **kwargs)
+ for input in iterator:
+ output = encoder.encode(input)
+ if output:
+ yield output
+ output = encoder.encode("", True)
+ if output:
+ yield output
+
+def iterdecode(iterator, encoding, errors='strict', **kwargs):
+ """
+ Decoding iterator.
+
+ Decodes the input strings from the iterator using an IncrementalDecoder.
+
+ errors and kwargs are passed through to the IncrementalDecoder
+ constructor.
+ """
+ decoder = getincrementaldecoder(encoding)(errors, **kwargs)
+ for input in iterator:
+ output = decoder.decode(input)
+ if output:
+ yield output
+ output = decoder.decode(b"", True)
+ if output:
+ yield output
+
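A round-trip sketch: the chunks are encoded lazily and iterdecode() reassembles them, regardless of where the chunk boundaries fall:

import codecs

chunks = ['stream of ', 'text with ', 'café']
encoded = list(codecs.iterencode(chunks, 'utf-8'))
assert ''.join(codecs.iterdecode(encoded, 'utf-8')) == 'stream of text with café'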
+### Helpers for charmap-based codecs
+
+def make_identity_dict(rng):
+
+ """ make_identity_dict(rng) -> dict
+
+ Return a dictionary where elements of the rng sequence are
+ mapped to themselves.
+
+ """
+ return {i:i for i in rng}
+
+def make_encoding_map(decoding_map):
+
+ """ Creates an encoding map from a decoding map.
+
+ If a target mapping in the decoding map occurs multiple
+ times, then that target is mapped to None (undefined mapping),
+ causing an exception when encountered by the charmap codec
+ during translation.
+
+ One example where this happens is cp875.py which decodes
+    multiple characters to \\u001a.
+
+ """
+ m = {}
+ for k,v in decoding_map.items():
+ if not v in m:
+ m[v] = k
+ else:
+ m[v] = None
+ return m
+
+### error handlers
+
+try:
+ strict_errors = lookup_error("strict")
+ ignore_errors = lookup_error("ignore")
+ replace_errors = lookup_error("replace")
+ xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
+ backslashreplace_errors = lookup_error("backslashreplace")
+ namereplace_errors = lookup_error("namereplace")
+except LookupError:
+    # In --disable-unicode builds, these error handlers are missing
+ strict_errors = None
+ ignore_errors = None
+ replace_errors = None
+ xmlcharrefreplace_errors = None
+ backslashreplace_errors = None
+ namereplace_errors = None
+
+# Tell modulefinder that using codecs probably needs the encodings
+# package
+_false = 0
+if _false:
+ import encodings
+
+### Tests
+
+if __name__ == '__main__':
+
+ # Make stdout translate Latin-1 output into UTF-8 output
+ sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
+
+ # Have stdin translate Latin-1 input into UTF-8 input
+ sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
diff --git a/infer_4_37_2/lib/python3.10/colorsys.py b/infer_4_37_2/lib/python3.10/colorsys.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f52512a67d87c571835467b411ec8ec4e691230
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/colorsys.py
@@ -0,0 +1,165 @@
+"""Conversion functions between RGB and other color systems.
+
+This module provides two functions for each color system ABC:
+
+ rgb_to_abc(r, g, b) --> a, b, c
+ abc_to_rgb(a, b, c) --> r, g, b
+
+All inputs and outputs are triples of floats in the range [0.0...1.0]
+(with the exception of I and Q, which cover a slightly larger range).
+Inputs outside the valid range may cause exceptions or invalid outputs.
+
+Supported color systems:
+RGB: Red, Green, Blue components
+YIQ: Luminance, Chrominance (used by composite video signals)
+HLS: Hue, Luminance, Saturation
+HSV: Hue, Saturation, Value
+"""
+
+# References:
+# http://en.wikipedia.org/wiki/YIQ
+# http://en.wikipedia.org/wiki/HLS_color_space
+# http://en.wikipedia.org/wiki/HSV_color_space
+
+__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
+ "rgb_to_hsv","hsv_to_rgb"]
+
+# Some floating point constants
+
+ONE_THIRD = 1.0/3.0
+ONE_SIXTH = 1.0/6.0
+TWO_THIRD = 2.0/3.0
+
+# YIQ: used by composite video signals (linear combinations of RGB)
+# Y: perceived grey level (0.0 == black, 1.0 == white)
+# I, Q: color components
+#
+# There are a great many versions of the constants used in these formulae.
+# The ones in this library use constants from the FCC version of NTSC.
+
+def rgb_to_yiq(r, g, b):
+ y = 0.30*r + 0.59*g + 0.11*b
+ i = 0.74*(r-y) - 0.27*(b-y)
+ q = 0.48*(r-y) + 0.41*(b-y)
+ return (y, i, q)
+
+def yiq_to_rgb(y, i, q):
+ # r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48)
+ # b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48)
+ # g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59
+
+ r = y + 0.9468822170900693*i + 0.6235565819861433*q
+ g = y - 0.27478764629897834*i - 0.6356910791873801*q
+ b = y - 1.1085450346420322*i + 1.7090069284064666*q
+
+ if r < 0.0:
+ r = 0.0
+ if g < 0.0:
+ g = 0.0
+ if b < 0.0:
+ b = 0.0
+ if r > 1.0:
+ r = 1.0
+ if g > 1.0:
+ g = 1.0
+ if b > 1.0:
+ b = 1.0
+ return (r, g, b)
+
+
+# HLS: Hue, Luminance, Saturation
+# H: position in the spectrum
+# L: color lightness
+# S: color saturation
+
+def rgb_to_hls(r, g, b):
+ maxc = max(r, g, b)
+ minc = min(r, g, b)
+ sumc = (maxc+minc)
+ rangec = (maxc-minc)
+ l = sumc/2.0
+ if minc == maxc:
+ return 0.0, l, 0.0
+ if l <= 0.5:
+ s = rangec / sumc
+ else:
+ s = rangec / (2.0-sumc)
+ rc = (maxc-r) / rangec
+ gc = (maxc-g) / rangec
+ bc = (maxc-b) / rangec
+ if r == maxc:
+ h = bc-gc
+ elif g == maxc:
+ h = 2.0+rc-bc
+ else:
+ h = 4.0+gc-rc
+ h = (h/6.0) % 1.0
+ return h, l, s
+
+def hls_to_rgb(h, l, s):
+ if s == 0.0:
+ return l, l, l
+ if l <= 0.5:
+ m2 = l * (1.0+s)
+ else:
+ m2 = l+s-(l*s)
+ m1 = 2.0*l - m2
+ return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
+
+def _v(m1, m2, hue):
+ hue = hue % 1.0
+ if hue < ONE_SIXTH:
+ return m1 + (m2-m1)*hue*6.0
+ if hue < 0.5:
+ return m2
+ if hue < TWO_THIRD:
+ return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
+ return m1
+
+
+# HSV: Hue, Saturation, Value
+# H: position in the spectrum
+# S: color saturation ("purity")
+# V: color brightness
+
+def rgb_to_hsv(r, g, b):
+ maxc = max(r, g, b)
+ minc = min(r, g, b)
+ v = maxc
+ if minc == maxc:
+ return 0.0, 0.0, v
+ s = (maxc-minc) / maxc
+ rc = (maxc-r) / (maxc-minc)
+ gc = (maxc-g) / (maxc-minc)
+ bc = (maxc-b) / (maxc-minc)
+ if r == maxc:
+ h = bc-gc
+ elif g == maxc:
+ h = 2.0+rc-bc
+ else:
+ h = 4.0+gc-rc
+ h = (h/6.0) % 1.0
+ return h, s, v
+
+def hsv_to_rgb(h, s, v):
+ if s == 0.0:
+ return v, v, v
+ i = int(h*6.0) # XXX assume int() truncates!
+ f = (h*6.0) - i
+ p = v*(1.0 - s)
+ q = v*(1.0 - s*f)
+ t = v*(1.0 - s*(1.0-f))
+ i = i%6
+ if i == 0:
+ return v, t, p
+ if i == 1:
+ return q, v, p
+ if i == 2:
+ return p, v, t
+ if i == 3:
+ return p, q, v
+ if i == 4:
+ return t, p, v
+ if i == 5:
+ return v, p, q
+ # Cannot get here
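A round-trip sketch for the module; every channel is a float in [0.0, 1.0]:

import colorsys

h, s, v = colorsys.rgb_to_hsv(0.2, 0.4, 0.4)   # a dark cyan
r, g, b = colorsys.hsv_to_rgb(h, s, v)
assert (round(r, 9), round(g, 9), round(b, 9)) == (0.2, 0.4, 0.4)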
diff --git a/infer_4_37_2/lib/python3.10/compileall.py b/infer_4_37_2/lib/python3.10/compileall.py
new file mode 100644
index 0000000000000000000000000000000000000000..50183ea85468aa74c367576a6666c490e4d4de2f
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/compileall.py
@@ -0,0 +1,463 @@
+"""Module/script to byte-compile all .py files to .pyc files.
+
+When called as a script with arguments, this compiles the directories
+given as arguments recursively; the -l option prevents it from
+recursing into directories.
+
+Without arguments, it compiles all modules on sys.path, without
+recursing into subdirectories. (Even though it should do so for
+packages -- for now, you'll have to deal with packages separately.)
+
+See module py_compile for details of the actual byte-compilation.
+"""
+import os
+import sys
+import importlib.util
+import py_compile
+import struct
+import filecmp
+
+from functools import partial
+from pathlib import Path
+
+__all__ = ["compile_dir","compile_file","compile_path"]
+
+def _walk_dir(dir, maxlevels, quiet=0):
+ if quiet < 2 and isinstance(dir, os.PathLike):
+ dir = os.fspath(dir)
+ if not quiet:
+ print('Listing {!r}...'.format(dir))
+ try:
+ names = os.listdir(dir)
+ except OSError:
+ if quiet < 2:
+ print("Can't list {!r}".format(dir))
+ names = []
+ names.sort()
+ for name in names:
+ if name == '__pycache__':
+ continue
+ fullname = os.path.join(dir, name)
+ if not os.path.isdir(fullname):
+ yield fullname
+ elif (maxlevels > 0 and name != os.curdir and name != os.pardir and
+ os.path.isdir(fullname) and not os.path.islink(fullname)):
+ yield from _walk_dir(fullname, maxlevels=maxlevels - 1,
+ quiet=quiet)
+
+def compile_dir(dir, maxlevels=None, ddir=None, force=False,
+ rx=None, quiet=0, legacy=False, optimize=-1, workers=1,
+ invalidation_mode=None, *, stripdir=None,
+ prependdir=None, limit_sl_dest=None, hardlink_dupes=False):
+ """Byte-compile all modules in the given directory tree.
+
+ Arguments (only dir is required):
+
+ dir: the directory to byte-compile
+ maxlevels: maximum recursion level (default `sys.getrecursionlimit()`)
+ ddir: the directory that will be prepended to the path to the
+ file as it is compiled into each byte-code file.
+ force: if True, force compilation, even if timestamps are up-to-date
+ quiet: full output with False or 0, errors only with 1,
+ no output with 2
+ legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
+ optimize: int or list of optimization levels or -1 for level of
+ the interpreter. Multiple levels leads to multiple compiled
+ files each with one optimization level.
+ workers: maximum number of parallel workers
+ invalidation_mode: how the up-to-dateness of the pyc will be checked
+ stripdir: part of path to left-strip from source file path
+ prependdir: path to prepend to beginning of original file path, applied
+ after stripdir
+ limit_sl_dest: ignore symlinks if they are pointing outside of
+ the defined path
+ hardlink_dupes: hardlink duplicated pyc files
+ """
+ ProcessPoolExecutor = None
+ if ddir is not None and (stripdir is not None or prependdir is not None):
+ raise ValueError(("Destination dir (ddir) cannot be used "
+ "in combination with stripdir or prependdir"))
+ if ddir is not None:
+ stripdir = dir
+ prependdir = ddir
+ ddir = None
+ if workers < 0:
+ raise ValueError('workers must be greater or equal to 0')
+ if workers != 1:
+ # Check if this is a system where ProcessPoolExecutor can function.
+ from concurrent.futures.process import _check_system_limits
+ try:
+ _check_system_limits()
+ except NotImplementedError:
+ workers = 1
+ else:
+ from concurrent.futures import ProcessPoolExecutor
+ if maxlevels is None:
+ maxlevels = sys.getrecursionlimit()
+ files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels)
+ success = True
+ if workers != 1 and ProcessPoolExecutor is not None:
+ # If workers == 0, let ProcessPoolExecutor choose
+ workers = workers or None
+ with ProcessPoolExecutor(max_workers=workers) as executor:
+ results = executor.map(partial(compile_file,
+ ddir=ddir, force=force,
+ rx=rx, quiet=quiet,
+ legacy=legacy,
+ optimize=optimize,
+ invalidation_mode=invalidation_mode,
+ stripdir=stripdir,
+ prependdir=prependdir,
+ limit_sl_dest=limit_sl_dest,
+ hardlink_dupes=hardlink_dupes),
+ files)
+ success = min(results, default=True)
+ else:
+ for file in files:
+ if not compile_file(file, ddir, force, rx, quiet,
+ legacy, optimize, invalidation_mode,
+ stripdir=stripdir, prependdir=prependdir,
+ limit_sl_dest=limit_sl_dest,
+ hardlink_dupes=hardlink_dupes):
+ success = False
+ return success
+
+def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0,
+ legacy=False, optimize=-1,
+ invalidation_mode=None, *, stripdir=None, prependdir=None,
+ limit_sl_dest=None, hardlink_dupes=False):
+ """Byte-compile one file.
+
+ Arguments (only fullname is required):
+
+ fullname: the file to byte-compile
+ ddir: if given, the directory name compiled in to the
+ byte-code file.
+ force: if True, force compilation, even if timestamps are up-to-date
+ quiet: full output with False or 0, errors only with 1,
+ no output with 2
+ legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
+ optimize: int or list of optimization levels or -1 for level of
+ the interpreter. Multiple levels leads to multiple compiled
+ files each with one optimization level.
+ invalidation_mode: how the up-to-dateness of the pyc will be checked
+ stripdir: part of path to left-strip from source file path
+ prependdir: path to prepend to beginning of original file path, applied
+ after stripdir
+ limit_sl_dest: ignore symlinks if they are pointing outside of
+ the defined path.
+ hardlink_dupes: hardlink duplicated pyc files
+ """
+
+ if ddir is not None and (stripdir is not None or prependdir is not None):
+ raise ValueError(("Destination dir (ddir) cannot be used "
+ "in combination with stripdir or prependdir"))
+
+ success = True
+ fullname = os.fspath(fullname)
+ stripdir = os.fspath(stripdir) if stripdir is not None else None
+ name = os.path.basename(fullname)
+
+ dfile = None
+
+ if ddir is not None:
+ dfile = os.path.join(ddir, name)
+
+ if stripdir is not None:
+ fullname_parts = fullname.split(os.path.sep)
+ stripdir_parts = stripdir.split(os.path.sep)
+ ddir_parts = list(fullname_parts)
+
+ for spart, opart in zip(stripdir_parts, fullname_parts):
+ if spart == opart:
+ ddir_parts.remove(spart)
+
+ dfile = os.path.join(*ddir_parts)
+
+ if prependdir is not None:
+ if dfile is None:
+ dfile = os.path.join(prependdir, fullname)
+ else:
+ dfile = os.path.join(prependdir, dfile)
+
+ if isinstance(optimize, int):
+ optimize = [optimize]
+
+ # Use set() to remove duplicates.
+ # Use sorted() to create pyc files in a deterministic order.
+ optimize = sorted(set(optimize))
+
+ if hardlink_dupes and len(optimize) < 2:
+ raise ValueError("Hardlinking of duplicated bytecode makes sense "
+ "only for more than one optimization level")
+
+ if rx is not None:
+ mo = rx.search(fullname)
+ if mo:
+ return success
+
+ if limit_sl_dest is not None and os.path.islink(fullname):
+ if Path(limit_sl_dest).resolve() not in Path(fullname).resolve().parents:
+ return success
+
+ opt_cfiles = {}
+
+ if os.path.isfile(fullname):
+ for opt_level in optimize:
+ if legacy:
+ opt_cfiles[opt_level] = fullname + 'c'
+ else:
+ if opt_level >= 0:
+ opt = opt_level if opt_level >= 1 else ''
+ cfile = (importlib.util.cache_from_source(
+ fullname, optimization=opt))
+ opt_cfiles[opt_level] = cfile
+ else:
+ cfile = importlib.util.cache_from_source(fullname)
+ opt_cfiles[opt_level] = cfile
+
+ head, tail = name[:-3], name[-3:]
+ if tail == '.py':
+ if not force:
+ try:
+ mtime = int(os.stat(fullname).st_mtime)
+ expect = struct.pack('<4sLL', importlib.util.MAGIC_NUMBER,
+ 0, mtime & 0xFFFF_FFFF)
+ for cfile in opt_cfiles.values():
+ with open(cfile, 'rb') as chandle:
+ actual = chandle.read(12)
+ if expect != actual:
+ break
+ else:
+ return success
+ except OSError:
+ pass
+ if not quiet:
+ print('Compiling {!r}...'.format(fullname))
+ try:
+ for index, opt_level in enumerate(optimize):
+ cfile = opt_cfiles[opt_level]
+ ok = py_compile.compile(fullname, cfile, dfile, True,
+ optimize=opt_level,
+ invalidation_mode=invalidation_mode)
+ if index > 0 and hardlink_dupes:
+ previous_cfile = opt_cfiles[optimize[index - 1]]
+ if filecmp.cmp(cfile, previous_cfile, shallow=False):
+ os.unlink(cfile)
+ os.link(previous_cfile, cfile)
+ except py_compile.PyCompileError as err:
+ success = False
+ if quiet >= 2:
+ return success
+ elif quiet:
+ print('*** Error compiling {!r}...'.format(fullname))
+ else:
+ print('*** ', end='')
+ # escape non-printable characters in msg
+ encoding = sys.stdout.encoding or sys.getdefaultencoding()
+ msg = err.msg.encode(encoding, errors='backslashreplace').decode(encoding)
+ print(msg)
+ except (SyntaxError, UnicodeError, OSError) as e:
+ success = False
+ if quiet >= 2:
+ return success
+ elif quiet:
+ print('*** Error compiling {!r}...'.format(fullname))
+ else:
+ print('*** ', end='')
+ print(e.__class__.__name__ + ':', e)
+ else:
+ if ok == 0:
+ success = False
+ return success
+
+def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0,
+ legacy=False, optimize=-1,
+ invalidation_mode=None):
+ """Byte-compile all module on sys.path.
+
+ Arguments (all optional):
+
+ skip_curdir: if true, skip current directory (default True)
+ maxlevels: max recursion level (default 0)
+ force: as for compile_dir() (default False)
+ quiet: as for compile_dir() (default 0)
+ legacy: as for compile_dir() (default False)
+ optimize: as for compile_dir() (default -1)
+    invalidation_mode: as for compile_dir()
+ """
+ success = True
+ for dir in sys.path:
+ if (not dir or dir == os.curdir) and skip_curdir:
+ if quiet < 2:
+ print('Skipping current directory')
+ else:
+ success = success and compile_dir(
+ dir,
+ maxlevels,
+ None,
+ force,
+ quiet=quiet,
+ legacy=legacy,
+ optimize=optimize,
+ invalidation_mode=invalidation_mode,
+ )
+ return success
+
+
+def main():
+ """Script main program."""
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ description='Utilities to support installing Python libraries.')
+ parser.add_argument('-l', action='store_const', const=0,
+ default=None, dest='maxlevels',
+ help="don't recurse into subdirectories")
+ parser.add_argument('-r', type=int, dest='recursion',
+ help=('control the maximum recursion level. '
+ 'if `-l` and `-r` options are specified, '
+ 'then `-r` takes precedence.'))
+ parser.add_argument('-f', action='store_true', dest='force',
+ help='force rebuild even if timestamps are up to date')
+ parser.add_argument('-q', action='count', dest='quiet', default=0,
+ help='output only error messages; -qq will suppress '
+ 'the error messages as well.')
+ parser.add_argument('-b', action='store_true', dest='legacy',
+ help='use legacy (pre-PEP3147) compiled file locations')
+ parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None,
+ help=('directory to prepend to file paths for use in '
+ 'compile-time tracebacks and in runtime '
+ 'tracebacks in cases where the source file is '
+ 'unavailable'))
+ parser.add_argument('-s', metavar='STRIPDIR', dest='stripdir',
+ default=None,
+ help=('part of path to left-strip from path '
+ 'to source file - for example buildroot. '
+ '`-d` and `-s` options cannot be '
+ 'specified together.'))
+ parser.add_argument('-p', metavar='PREPENDDIR', dest='prependdir',
+ default=None,
+ help=('path to add as prefix to path '
+ 'to source file - for example / to make '
+ 'it absolute when some part is removed '
+ 'by `-s` option. '
+ '`-d` and `-p` options cannot be '
+ 'specified together.'))
+ parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
+ help=('skip files matching the regular expression; '
+ 'the regexp is searched for in the full path '
+ 'of each file considered for compilation'))
+ parser.add_argument('-i', metavar='FILE', dest='flist',
+ help=('add all the files and directories listed in '
+ 'FILE to the list considered for compilation; '
+ 'if "-", names are read from stdin'))
+ parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*',
+ help=('zero or more file and directory names '
+ 'to compile; if no arguments given, defaults '
+ 'to the equivalent of -l sys.path'))
+ parser.add_argument('-j', '--workers', default=1,
+ type=int, help='Run compileall concurrently')
+ invalidation_modes = [mode.name.lower().replace('_', '-')
+ for mode in py_compile.PycInvalidationMode]
+ parser.add_argument('--invalidation-mode',
+ choices=sorted(invalidation_modes),
+ help=('set .pyc invalidation mode; defaults to '
+ '"checked-hash" if the SOURCE_DATE_EPOCH '
+ 'environment variable is set, and '
+ '"timestamp" otherwise.'))
+ parser.add_argument('-o', action='append', type=int, dest='opt_levels',
+ help=('Optimization levels to run compilation with. '
+ 'Default is -1 which uses the optimization level '
+ 'of the Python interpreter itself (see -O).'))
+ parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest',
+                        help='Ignore symlinks pointing outside of the DIR')
+ parser.add_argument('--hardlink-dupes', action='store_true',
+ dest='hardlink_dupes',
+ help='Hardlink duplicated pyc files')
+
+ args = parser.parse_args()
+ compile_dests = args.compile_dest
+
+ if args.rx:
+ import re
+ args.rx = re.compile(args.rx)
+
+ if args.limit_sl_dest == "":
+ args.limit_sl_dest = None
+
+ if args.recursion is not None:
+ maxlevels = args.recursion
+ else:
+ maxlevels = args.maxlevels
+
+ if args.opt_levels is None:
+ args.opt_levels = [-1]
+
+ if len(args.opt_levels) == 1 and args.hardlink_dupes:
+ parser.error(("Hardlinking of duplicated bytecode makes sense "
+ "only for more than one optimization level."))
+
+ if args.ddir is not None and (
+ args.stripdir is not None or args.prependdir is not None
+ ):
+ parser.error("-d cannot be used in combination with -s or -p")
+
+ # if flist is provided then load it
+ if args.flist:
+ try:
+ with (sys.stdin if args.flist=='-' else
+ open(args.flist, encoding="utf-8")) as f:
+ for line in f:
+ compile_dests.append(line.strip())
+ except OSError:
+ if args.quiet < 2:
+ print("Error reading file list {}".format(args.flist))
+ return False
+
+ if args.invalidation_mode:
+ ivl_mode = args.invalidation_mode.replace('-', '_').upper()
+ invalidation_mode = py_compile.PycInvalidationMode[ivl_mode]
+ else:
+ invalidation_mode = None
+
+ success = True
+ try:
+ if compile_dests:
+ for dest in compile_dests:
+ if os.path.isfile(dest):
+ if not compile_file(dest, args.ddir, args.force, args.rx,
+ args.quiet, args.legacy,
+ invalidation_mode=invalidation_mode,
+ stripdir=args.stripdir,
+ prependdir=args.prependdir,
+ optimize=args.opt_levels,
+ limit_sl_dest=args.limit_sl_dest,
+ hardlink_dupes=args.hardlink_dupes):
+ success = False
+ else:
+ if not compile_dir(dest, maxlevels, args.ddir,
+ args.force, args.rx, args.quiet,
+ args.legacy, workers=args.workers,
+ invalidation_mode=invalidation_mode,
+ stripdir=args.stripdir,
+ prependdir=args.prependdir,
+ optimize=args.opt_levels,
+ limit_sl_dest=args.limit_sl_dest,
+ hardlink_dupes=args.hardlink_dupes):
+ success = False
+ return success
+ else:
+ return compile_path(legacy=args.legacy, force=args.force,
+ quiet=args.quiet,
+ invalidation_mode=invalidation_mode)
+ except KeyboardInterrupt:
+ if args.quiet < 2:
+ print("\n[interrupted]")
+ return False
+ return True
+
+
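+# Example invocations of this command-line interface (illustrative only; the
+# flags are defined above and pkg/ is a placeholder path):
+#
+#   python -m compileall -q .              # compile the tree quietly
+#   python -m compileall -f -j 4 pkg/      # force rebuild with 4 workers
+#   python -m compileall -o 1 -o 2 --hardlink-dupes pkg/
+#                                          # two optimization levels, with
+#                                          # duplicate pyc files hardlinked
+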
+if __name__ == '__main__':
+ exit_status = int(not main())
+ sys.exit(exit_status)
diff --git a/infer_4_37_2/lib/python3.10/csv.py b/infer_4_37_2/lib/python3.10/csv.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb3ee269ae7931f700efad706cbc3ed40c8be071
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/csv.py
@@ -0,0 +1,444 @@
+
+"""
+csv.py - read/write/investigate CSV files
+"""
+
+import re
+from _csv import Error, __version__, writer, reader, register_dialect, \
+ unregister_dialect, get_dialect, list_dialects, \
+ field_size_limit, \
+ QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
+ __doc__
+from _csv import Dialect as _Dialect
+
+from io import StringIO
+
+__all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
+ "Error", "Dialect", "__doc__", "excel", "excel_tab",
+ "field_size_limit", "reader", "writer",
+ "register_dialect", "get_dialect", "list_dialects", "Sniffer",
+ "unregister_dialect", "__version__", "DictReader", "DictWriter",
+ "unix_dialect"]
+
+class Dialect:
+ """Describe a CSV dialect.
+
+ This must be subclassed (see csv.excel). Valid attributes are:
+ delimiter, quotechar, escapechar, doublequote, skipinitialspace,
+ lineterminator, quoting.
+
+ """
+ _name = ""
+ _valid = False
+ # placeholders
+ delimiter = None
+ quotechar = None
+ escapechar = None
+ doublequote = None
+ skipinitialspace = None
+ lineterminator = None
+ quoting = None
+
+ def __init__(self):
+ if self.__class__ != Dialect:
+ self._valid = True
+ self._validate()
+
+ def _validate(self):
+ try:
+ _Dialect(self)
+ except TypeError as e:
+ # We do this for compatibility with py2.3
+ raise Error(str(e))
+
+class excel(Dialect):
+ """Describe the usual properties of Excel-generated CSV files."""
+ delimiter = ','
+ quotechar = '"'
+ doublequote = True
+ skipinitialspace = False
+ lineterminator = '\r\n'
+ quoting = QUOTE_MINIMAL
+register_dialect("excel", excel)
+
+class excel_tab(excel):
+ """Describe the usual properties of Excel-generated TAB-delimited files."""
+ delimiter = '\t'
+register_dialect("excel-tab", excel_tab)
+
+class unix_dialect(Dialect):
+ """Describe the usual properties of Unix-generated CSV files."""
+ delimiter = ','
+ quotechar = '"'
+ doublequote = True
+ skipinitialspace = False
+ lineterminator = '\n'
+ quoting = QUOTE_ALL
+register_dialect("unix", unix_dialect)
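+
+# A minimal sketch of defining and registering a further dialect in the same
+# style as the classes above (the "pipes" dialect is made up for
+# illustration):
+#
+#     class pipes(Dialect):
+#         delimiter = '|'
+#         quotechar = '"'
+#         doublequote = True
+#         skipinitialspace = False
+#         lineterminator = '\r\n'
+#         quoting = QUOTE_MINIMAL
+#     register_dialect("pipes", pipes)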
+
+
+class DictReader:
+ def __init__(self, f, fieldnames=None, restkey=None, restval=None,
+ dialect="excel", *args, **kwds):
+ self._fieldnames = fieldnames # list of keys for the dict
+ self.restkey = restkey # key to catch long rows
+ self.restval = restval # default value for short rows
+ self.reader = reader(f, dialect, *args, **kwds)
+ self.dialect = dialect
+ self.line_num = 0
+
+ def __iter__(self):
+ return self
+
+ @property
+ def fieldnames(self):
+ if self._fieldnames is None:
+ try:
+ self._fieldnames = next(self.reader)
+ except StopIteration:
+ pass
+ self.line_num = self.reader.line_num
+ return self._fieldnames
+
+ @fieldnames.setter
+ def fieldnames(self, value):
+ self._fieldnames = value
+
+ def __next__(self):
+ if self.line_num == 0:
+ # Used only for its side effect.
+ self.fieldnames
+ row = next(self.reader)
+ self.line_num = self.reader.line_num
+
+ # unlike the basic reader, we prefer not to return blanks,
+ # because we will typically wind up with a dict full of None
+ # values
+ while row == []:
+ row = next(self.reader)
+ d = dict(zip(self.fieldnames, row))
+ lf = len(self.fieldnames)
+ lr = len(row)
+ if lf < lr:
+ d[self.restkey] = row[lf:]
+ elif lf > lr:
+ for key in self.fieldnames[lr:]:
+ d[key] = self.restval
+ return d
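+
+    # Usage sketch (illustrative data; 'rest' is an arbitrary restkey):
+    # extra fields are collected under restkey, short rows are padded
+    # with restval.
+    #
+    #     rows = StringIO("a,b\r\n1,2,3\r\n4\r\n")
+    #     for d in DictReader(rows, restkey='rest', restval=''):
+    #         print(d)
+    #     # {'a': '1', 'b': '2', 'rest': ['3']}
+    #     # {'a': '4', 'b': ''}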
+
+
+class DictWriter:
+ def __init__(self, f, fieldnames, restval="", extrasaction="raise",
+ dialect="excel", *args, **kwds):
+ self.fieldnames = fieldnames # list of keys for the dict
+ self.restval = restval # for writing short dicts
+ if extrasaction.lower() not in ("raise", "ignore"):
+ raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
+ % extrasaction)
+ self.extrasaction = extrasaction
+ self.writer = writer(f, dialect, *args, **kwds)
+
+ def writeheader(self):
+ header = dict(zip(self.fieldnames, self.fieldnames))
+ return self.writerow(header)
+
+ def _dict_to_list(self, rowdict):
+ if self.extrasaction == "raise":
+ wrong_fields = rowdict.keys() - self.fieldnames
+ if wrong_fields:
+ raise ValueError("dict contains fields not in fieldnames: "
+ + ", ".join([repr(x) for x in wrong_fields]))
+ return (rowdict.get(key, self.restval) for key in self.fieldnames)
+
+ def writerow(self, rowdict):
+ return self.writer.writerow(self._dict_to_list(rowdict))
+
+ def writerows(self, rowdicts):
+ return self.writer.writerows(map(self._dict_to_list, rowdicts))
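+
+    # Usage sketch (illustrative):
+    #
+    #     out = StringIO()
+    #     w = DictWriter(out, fieldnames=['a', 'b'], restval='?')
+    #     w.writeheader()
+    #     w.writerow({'a': 1})       # 'b' falls back to restval
+    #     out.getvalue()             # -> 'a,b\r\n1,?\r\n'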
+
+# Guard Sniffer's type checking against builds that exclude complex()
+try:
+ complex
+except NameError:
+ complex = float
+
+class Sniffer:
+ '''
+ "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
+ Returns a Dialect object.
+ '''
+ def __init__(self):
+ # in case there is more than one possible delimiter
+ self.preferred = [',', '\t', ';', ' ', ':']
+
+
+ def sniff(self, sample, delimiters=None):
+ """
+ Returns a dialect (or None) corresponding to the sample
+ """
+
+ quotechar, doublequote, delimiter, skipinitialspace = \
+ self._guess_quote_and_delimiter(sample, delimiters)
+ if not delimiter:
+ delimiter, skipinitialspace = self._guess_delimiter(sample,
+ delimiters)
+
+ if not delimiter:
+ raise Error("Could not determine delimiter")
+
+ class dialect(Dialect):
+ _name = "sniffed"
+ lineterminator = '\r\n'
+ quoting = QUOTE_MINIMAL
+ # escapechar = ''
+
+ dialect.doublequote = doublequote
+ dialect.delimiter = delimiter
+ # _csv.reader won't accept a quotechar of ''
+ dialect.quotechar = quotechar or '"'
+ dialect.skipinitialspace = skipinitialspace
+
+ return dialect
+
+
+ def _guess_quote_and_delimiter(self, data, delimiters):
+ """
+ Looks for text enclosed between two identical quotes
+ (the probable quotechar) which are preceded and followed
+ by the same character (the probable delimiter).
+ For example:
+ ,'some text',
+ The quote with the most wins, same with the delimiter.
+ If there is no quotechar the delimiter can't be determined
+ this way.
+ """
+
+ matches = []
+        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
+                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   #  ".*?",
+                      r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
+                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):                            #  ".*?" (no delim, no space)
+ regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
+ matches = regexp.findall(data)
+ if matches:
+ break
+
+ if not matches:
+ # (quotechar, doublequote, delimiter, skipinitialspace)
+ return ('', False, None, 0)
+ quotes = {}
+ delims = {}
+ spaces = 0
+ groupindex = regexp.groupindex
+ for m in matches:
+ n = groupindex['quote'] - 1
+ key = m[n]
+ if key:
+ quotes[key] = quotes.get(key, 0) + 1
+ try:
+ n = groupindex['delim'] - 1
+ key = m[n]
+ except KeyError:
+ continue
+ if key and (delimiters is None or key in delimiters):
+ delims[key] = delims.get(key, 0) + 1
+ try:
+ n = groupindex['space'] - 1
+ except KeyError:
+ continue
+ if m[n]:
+ spaces += 1
+
+ quotechar = max(quotes, key=quotes.get)
+
+ if delims:
+ delim = max(delims, key=delims.get)
+ skipinitialspace = delims[delim] == spaces
+ if delim == '\n': # most likely a file with a single column
+ delim = ''
+ else:
+ # there is *no* delimiter, it's a single column of quoted data
+ delim = ''
+ skipinitialspace = 0
+
+ # if we see an extra quote between delimiters, we've got a
+ # double quoted format
+ dq_regexp = re.compile(
+ r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
+ {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
+
+
+
+ if dq_regexp.search(data):
+ doublequote = True
+ else:
+ doublequote = False
+
+ return (quotechar, doublequote, delim, skipinitialspace)
+
+
+ def _guess_delimiter(self, data, delimiters):
+ """
+ The delimiter /should/ occur the same number of times on
+ each row. However, due to malformed data, it may not. We don't want
+ an all or nothing approach, so we allow for small variations in this
+ number.
+ 1) build a table of the frequency of each character on every line.
+ 2) build a table of frequencies of this frequency (meta-frequency?),
+ e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
+ 7 times in 2 rows'
+ 3) use the mode of the meta-frequency to determine the /expected/
+ frequency for that character
+ 4) find out how often the character actually meets that goal
+ 5) the character that best meets its goal is the delimiter
+ For performance reasons, the data is evaluated in chunks, so it can
+ try and evaluate the smallest portion of the data possible, evaluating
+ additional chunks as necessary.
+ """
+
+ data = list(filter(None, data.split('\n')))
+
+ ascii = [chr(c) for c in range(127)] # 7-bit ASCII
+
+ # build frequency tables
+ chunkLength = min(10, len(data))
+ iteration = 0
+ charFrequency = {}
+ modes = {}
+ delims = {}
+ start, end = 0, chunkLength
+ while start < len(data):
+ iteration += 1
+ for line in data[start:end]:
+ for char in ascii:
+ metaFrequency = charFrequency.get(char, {})
+ # must count even if frequency is 0
+ freq = line.count(char)
+ # value is the mode
+ metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
+ charFrequency[char] = metaFrequency
+
+ for char in charFrequency.keys():
+ items = list(charFrequency[char].items())
+ if len(items) == 1 and items[0][0] == 0:
+ continue
+ # get the mode of the frequencies
+ if len(items) > 1:
+ modes[char] = max(items, key=lambda x: x[1])
+ # adjust the mode - subtract the sum of all
+ # other frequencies
+ items.remove(modes[char])
+ modes[char] = (modes[char][0], modes[char][1]
+ - sum(item[1] for item in items))
+ else:
+ modes[char] = items[0]
+
+ # build a list of possible delimiters
+ modeList = modes.items()
+ total = float(min(chunkLength * iteration, len(data)))
+ # (rows of consistent data) / (number of rows) = 100%
+ consistency = 1.0
+ # minimum consistency threshold
+ threshold = 0.9
+ while len(delims) == 0 and consistency >= threshold:
+ for k, v in modeList:
+ if v[0] > 0 and v[1] > 0:
+ if ((v[1]/total) >= consistency and
+ (delimiters is None or k in delimiters)):
+ delims[k] = v
+ consistency -= 0.01
+
+ if len(delims) == 1:
+ delim = list(delims.keys())[0]
+ skipinitialspace = (data[0].count(delim) ==
+ data[0].count("%c " % delim))
+ return (delim, skipinitialspace)
+
+ # analyze another chunkLength lines
+ start = end
+ end += chunkLength
+
+ if not delims:
+ return ('', 0)
+
+ # if there's more than one, fall back to a 'preferred' list
+ if len(delims) > 1:
+ for d in self.preferred:
+ if d in delims.keys():
+ skipinitialspace = (data[0].count(d) ==
+ data[0].count("%c " % d))
+ return (d, skipinitialspace)
+
+ # nothing else indicates a preference, pick the character that
+ # dominates(?)
+ items = [(v,k) for (k,v) in delims.items()]
+ items.sort()
+ delim = items[-1][1]
+
+ skipinitialspace = (data[0].count(delim) ==
+ data[0].count("%c " % delim))
+ return (delim, skipinitialspace)
+
+
+ def has_header(self, sample):
+ # Creates a dictionary of types of data in each column. If any
+ # column is of a single type (say, integers), *except* for the first
+ # row, then the first row is presumed to be labels. If the type
+ # can't be determined, it is assumed to be a string in which case
+ # the length of the string is the determining factor: if all of the
+ # rows except for the first are the same length, it's a header.
+ # Finally, a 'vote' is taken at the end for each column, adding or
+ # subtracting from the likelihood of the first row being a header.
+
+ rdr = reader(StringIO(sample), self.sniff(sample))
+
+ header = next(rdr) # assume first row is header
+
+ columns = len(header)
+ columnTypes = {}
+ for i in range(columns): columnTypes[i] = None
+
+ checked = 0
+ for row in rdr:
+ # arbitrary number of rows to check, to keep it sane
+ if checked > 20:
+ break
+ checked += 1
+
+ if len(row) != columns:
+ continue # skip rows that have irregular number of columns
+
+ for col in list(columnTypes.keys()):
+ thisType = complex
+ try:
+ thisType(row[col])
+ except (ValueError, OverflowError):
+ # fallback to length of string
+ thisType = len(row[col])
+
+ if thisType != columnTypes[col]:
+ if columnTypes[col] is None: # add new column type
+ columnTypes[col] = thisType
+ else:
+ # type is inconsistent, remove column from
+ # consideration
+ del columnTypes[col]
+
+ # finally, compare results against first row and "vote"
+ # on whether it's a header
+ hasHeader = 0
+ for col, colType in columnTypes.items():
+ if type(colType) == type(0): # it's a length
+ if len(header[col]) != colType:
+ hasHeader += 1
+ else:
+ hasHeader -= 1
+ else: # attempt typecast
+ try:
+ colType(header[col])
+ except (ValueError, TypeError):
+ hasHeader += 1
+ else:
+ hasHeader -= 1
+
+ return hasHeader > 0
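+
+# Usage sketch for Sniffer (illustrative; 'example.csv' is a hypothetical
+# file):
+#
+#     sample = open('example.csv', newline='').read(1024)
+#     dialect = Sniffer().sniff(sample)          # may raise Error
+#     has_hdr = Sniffer().has_header(sample)     # heuristic vote
+#     rows = reader(StringIO(sample), dialect)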
diff --git a/infer_4_37_2/lib/python3.10/dis.py b/infer_4_37_2/lib/python3.10/dis.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe5d24e88058f745b7df7ddab4c5efc23e7bb27e
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/dis.py
@@ -0,0 +1,540 @@
+"""Disassembler of Python byte code into mnemonics."""
+
+import sys
+import types
+import collections
+import io
+
+from opcode import *
+from opcode import __all__ as _opcodes_all
+
+__all__ = ["code_info", "dis", "disassemble", "distb", "disco",
+ "findlinestarts", "findlabels", "show_code",
+ "get_instructions", "Instruction", "Bytecode"] + _opcodes_all
+del _opcodes_all
+
+_have_code = (types.MethodType, types.FunctionType, types.CodeType,
+ classmethod, staticmethod, type)
+
+FORMAT_VALUE = opmap['FORMAT_VALUE']
+FORMAT_VALUE_CONVERTERS = (
+ (None, ''),
+ (str, 'str'),
+ (repr, 'repr'),
+ (ascii, 'ascii'),
+)
+MAKE_FUNCTION = opmap['MAKE_FUNCTION']
+MAKE_FUNCTION_FLAGS = ('defaults', 'kwdefaults', 'annotations', 'closure')
+
+
+def _try_compile(source, name):
+ """Attempts to compile the given source, first as an expression and
+ then as a statement if the first approach fails.
+
+ Utility function to accept strings in functions that otherwise
+ expect code objects
+ """
+ try:
+ c = compile(source, name, 'eval')
+ except SyntaxError:
+ c = compile(source, name, 'exec')
+ return c
+
+def dis(x=None, *, file=None, depth=None):
+ """Disassemble classes, methods, functions, and other compiled objects.
+
+ With no argument, disassemble the last traceback.
+
+ Compiled objects currently include generator objects, async generator
+ objects, and coroutine objects, all of which store their code object
+ in a special attribute.
+ """
+ if x is None:
+ distb(file=file)
+ return
+ # Extract functions from methods.
+ if hasattr(x, '__func__'):
+ x = x.__func__
+ # Extract compiled code objects from...
+ if hasattr(x, '__code__'): # ...a function, or
+ x = x.__code__
+ elif hasattr(x, 'gi_code'): #...a generator object, or
+ x = x.gi_code
+ elif hasattr(x, 'ag_code'): #...an asynchronous generator object, or
+ x = x.ag_code
+ elif hasattr(x, 'cr_code'): #...a coroutine.
+ x = x.cr_code
+ # Perform the disassembly.
+ if hasattr(x, '__dict__'): # Class or module
+ items = sorted(x.__dict__.items())
+ for name, x1 in items:
+ if isinstance(x1, _have_code):
+ print("Disassembly of %s:" % name, file=file)
+ try:
+ dis(x1, file=file, depth=depth)
+ except TypeError as msg:
+ print("Sorry:", msg, file=file)
+ print(file=file)
+ elif hasattr(x, 'co_code'): # Code object
+ _disassemble_recursive(x, file=file, depth=depth)
+ elif isinstance(x, (bytes, bytearray)): # Raw bytecode
+ _disassemble_bytes(x, file=file)
+ elif isinstance(x, str): # Source code
+ _disassemble_str(x, file=file, depth=depth)
+ else:
+ raise TypeError("don't know how to disassemble %s objects" %
+ type(x).__name__)
+
+def distb(tb=None, *, file=None):
+ """Disassemble a traceback (default: last traceback)."""
+ if tb is None:
+ try:
+ tb = sys.last_traceback
+ except AttributeError:
+ raise RuntimeError("no last traceback to disassemble") from None
+ while tb.tb_next: tb = tb.tb_next
+ disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file)
+
+# The inspect module interrogates this dictionary to build its
+# list of CO_* constants. It is also used by pretty_flags to
+# turn the co_flags field into a human readable list.
+COMPILER_FLAG_NAMES = {
+ 1: "OPTIMIZED",
+ 2: "NEWLOCALS",
+ 4: "VARARGS",
+ 8: "VARKEYWORDS",
+ 16: "NESTED",
+ 32: "GENERATOR",
+ 64: "NOFREE",
+ 128: "COROUTINE",
+ 256: "ITERABLE_COROUTINE",
+ 512: "ASYNC_GENERATOR",
+}
+
+def pretty_flags(flags):
+ """Return pretty representation of code flags."""
+ names = []
+ for i in range(32):
+        flag = 1<<i
+        if flags & flag:
+            names.append(COMPILER_FLAG_NAMES.get(flag, hex(flag)))
+            flags ^= flag
+            if not flags:
+                break
+    else:
+        names.append(hex(flags))
+    return ", ".join(names)
+
+def _get_code_object(x):
+    """Helper to handle methods, compiled or raw code objects, and strings."""
+    # Extract functions from methods.
+    if hasattr(x, '__func__'):
+        x = x.__func__
+    # Extract compiled code objects from...
+    if hasattr(x, '__code__'):  # ...a function, or
+        x = x.__code__
+    elif hasattr(x, 'gi_code'):  #...a generator object, or
+        x = x.gi_code
+    elif hasattr(x, 'ag_code'):  #...an asynchronous generator object, or
+        x = x.ag_code
+    elif hasattr(x, 'cr_code'):  #...a coroutine.
+        x = x.cr_code
+    # Handle source code.
+    if isinstance(x, str):
+        x = _try_compile(x, "<disassembly>")
+ # By now, if we don't have a code object, we can't disassemble x.
+ if hasattr(x, 'co_code'):
+ return x
+ raise TypeError("don't know how to disassemble %s objects" %
+ type(x).__name__)
+
+def code_info(x):
+ """Formatted details of methods, functions, or code."""
+ return _format_code_info(_get_code_object(x))
+
+def _format_code_info(co):
+ lines = []
+ lines.append("Name: %s" % co.co_name)
+ lines.append("Filename: %s" % co.co_filename)
+ lines.append("Argument count: %s" % co.co_argcount)
+ lines.append("Positional-only arguments: %s" % co.co_posonlyargcount)
+ lines.append("Kw-only arguments: %s" % co.co_kwonlyargcount)
+ lines.append("Number of locals: %s" % co.co_nlocals)
+ lines.append("Stack size: %s" % co.co_stacksize)
+ lines.append("Flags: %s" % pretty_flags(co.co_flags))
+ if co.co_consts:
+ lines.append("Constants:")
+ for i_c in enumerate(co.co_consts):
+ lines.append("%4d: %r" % i_c)
+ if co.co_names:
+ lines.append("Names:")
+ for i_n in enumerate(co.co_names):
+ lines.append("%4d: %s" % i_n)
+ if co.co_varnames:
+ lines.append("Variable names:")
+ for i_n in enumerate(co.co_varnames):
+ lines.append("%4d: %s" % i_n)
+ if co.co_freevars:
+ lines.append("Free variables:")
+ for i_n in enumerate(co.co_freevars):
+ lines.append("%4d: %s" % i_n)
+ if co.co_cellvars:
+ lines.append("Cell variables:")
+ for i_n in enumerate(co.co_cellvars):
+ lines.append("%4d: %s" % i_n)
+ return "\n".join(lines)
+
+def show_code(co, *, file=None):
+ """Print details of methods, functions, or code to *file*.
+
+ If *file* is not provided, the output is printed on stdout.
+ """
+ print(code_info(co), file=file)
+
+_Instruction = collections.namedtuple("_Instruction",
+ "opname opcode arg argval argrepr offset starts_line is_jump_target")
+
+_Instruction.opname.__doc__ = "Human readable name for operation"
+_Instruction.opcode.__doc__ = "Numeric code for operation"
+_Instruction.arg.__doc__ = "Numeric argument to operation (if any), otherwise None"
+_Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg"
+_Instruction.argrepr.__doc__ = "Human readable description of operation argument"
+_Instruction.offset.__doc__ = "Start index of operation within bytecode sequence"
+_Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None"
+_Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False"
+
+_OPNAME_WIDTH = 20
+_OPARG_WIDTH = 5
+
+class Instruction(_Instruction):
+ """Details for a bytecode operation
+
+ Defined fields:
+ opname - human readable name for operation
+ opcode - numeric code for operation
+ arg - numeric argument to operation (if any), otherwise None
+ argval - resolved arg value (if known), otherwise same as arg
+ argrepr - human readable description of operation argument
+ offset - start index of operation within bytecode sequence
+ starts_line - line started by this opcode (if any), otherwise None
+ is_jump_target - True if other code jumps to here, otherwise False
+ """
+
+ def _disassemble(self, lineno_width=3, mark_as_current=False, offset_width=4):
+ """Format instruction details for inclusion in disassembly output
+
+ *lineno_width* sets the width of the line number field (0 omits it)
+ *mark_as_current* inserts a '-->' marker arrow as part of the line
+ *offset_width* sets the width of the instruction offset field
+ """
+ fields = []
+ # Column: Source code line number
+ if lineno_width:
+ if self.starts_line is not None:
+ lineno_fmt = "%%%dd" % lineno_width
+ fields.append(lineno_fmt % self.starts_line)
+ else:
+ fields.append(' ' * lineno_width)
+ # Column: Current instruction indicator
+ if mark_as_current:
+ fields.append('-->')
+ else:
+ fields.append(' ')
+ # Column: Jump target marker
+ if self.is_jump_target:
+ fields.append('>>')
+ else:
+ fields.append(' ')
+ # Column: Instruction offset from start of code sequence
+ fields.append(repr(self.offset).rjust(offset_width))
+ # Column: Opcode name
+ fields.append(self.opname.ljust(_OPNAME_WIDTH))
+ # Column: Opcode argument
+ if self.arg is not None:
+ fields.append(repr(self.arg).rjust(_OPARG_WIDTH))
+ # Column: Opcode argument details
+ if self.argrepr:
+ fields.append('(' + self.argrepr + ')')
+ return ' '.join(fields).rstrip()
+
+
+def get_instructions(x, *, first_line=None):
+ """Iterator for the opcodes in methods, functions or code
+
+ Generates a series of Instruction named tuples giving the details of
+    each operation in the supplied code.
+
+ If *first_line* is not None, it indicates the line number that should
+ be reported for the first source line in the disassembled code.
+ Otherwise, the source line information (if any) is taken directly from
+ the disassembled code object.
+ """
+ co = _get_code_object(x)
+ cell_names = co.co_cellvars + co.co_freevars
+ linestarts = dict(findlinestarts(co))
+ if first_line is not None:
+ line_offset = first_line - co.co_firstlineno
+ else:
+ line_offset = 0
+ return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
+ co.co_consts, cell_names, linestarts,
+ line_offset)
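+
+# Usage sketch (illustrative):
+#
+#     def f(x):
+#         return x + 1
+#
+#     for ins in get_instructions(f):
+#         print(ins.offset, ins.opname, ins.argrepr)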
+
+def _get_const_info(const_index, const_list):
+ """Helper to get optional details about const references
+
+ Returns the dereferenced constant and its repr if the constant
+ list is defined.
+ Otherwise returns the constant index and its repr().
+ """
+ argval = const_index
+ if const_list is not None:
+ argval = const_list[const_index]
+ return argval, repr(argval)
+
+def _get_name_info(name_index, name_list):
+ """Helper to get optional details about named references
+
+ Returns the dereferenced name as both value and repr if the name
+ list is defined.
+ Otherwise returns the name index and its repr().
+ """
+ argval = name_index
+ if name_list is not None:
+ argval = name_list[name_index]
+ argrepr = argval
+ else:
+ argrepr = repr(argval)
+ return argval, argrepr
+
+
+def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
+ cells=None, linestarts=None, line_offset=0):
+ """Iterate over the instructions in a bytecode string.
+
+ Generates a sequence of Instruction namedtuples giving the details of each
+ opcode. Additional information about the code's runtime environment
+ (e.g. variable names, constants) can be specified using optional
+ arguments.
+
+ """
+ labels = findlabels(code)
+ starts_line = None
+ for offset, op, arg in _unpack_opargs(code):
+ if linestarts is not None:
+ starts_line = linestarts.get(offset, None)
+ if starts_line is not None:
+ starts_line += line_offset
+ is_jump_target = offset in labels
+ argval = None
+ argrepr = ''
+ if arg is not None:
+ # Set argval to the dereferenced value of the argument when
+ # available, and argrepr to the string representation of argval.
+ # _disassemble_bytes needs the string repr of the
+ # raw name index for LOAD_GLOBAL, LOAD_CONST, etc.
+ argval = arg
+ if op in hasconst:
+ argval, argrepr = _get_const_info(arg, constants)
+ elif op in hasname:
+ argval, argrepr = _get_name_info(arg, names)
+ elif op in hasjabs:
+ argval = arg*2
+ argrepr = "to " + repr(argval)
+ elif op in hasjrel:
+ argval = offset + 2 + arg*2
+ argrepr = "to " + repr(argval)
+ elif op in haslocal:
+ argval, argrepr = _get_name_info(arg, varnames)
+ elif op in hascompare:
+ argval = cmp_op[arg]
+ argrepr = argval
+ elif op in hasfree:
+ argval, argrepr = _get_name_info(arg, cells)
+ elif op == FORMAT_VALUE:
+ argval, argrepr = FORMAT_VALUE_CONVERTERS[arg & 0x3]
+ argval = (argval, bool(arg & 0x4))
+ if argval[1]:
+ if argrepr:
+ argrepr += ', '
+ argrepr += 'with format'
+ elif op == MAKE_FUNCTION:
+ argrepr = ', '.join(s for i, s in enumerate(MAKE_FUNCTION_FLAGS)
+                                        if arg & (1<<i))
+        yield Instruction(opname[op], op,
+                          arg, argval, argrepr,
+                          offset, starts_line, is_jump_target)
+
+def disassemble(co, lasti=-1, *, file=None):
+    """Disassemble a code object."""
+    cell_names = co.co_cellvars + co.co_freevars
+    linestarts = dict(findlinestarts(co))
+    _disassemble_bytes(co.co_code, lasti, co.co_varnames, co.co_names,
+                       co.co_consts, cell_names, linestarts, file=file)
+
+def _disassemble_recursive(co, *, file=None, depth=None):
+    disassemble(co, file=file)
+    if depth is None or depth > 0:
+ if depth is not None:
+ depth = depth - 1
+ for x in co.co_consts:
+ if hasattr(x, 'co_code'):
+ print(file=file)
+ print("Disassembly of %r:" % (x,), file=file)
+ _disassemble_recursive(x, file=file, depth=depth)
+
+def _disassemble_bytes(code, lasti=-1, varnames=None, names=None,
+ constants=None, cells=None, linestarts=None,
+ *, file=None, line_offset=0):
+ # Omit the line number column entirely if we have no line number info
+ show_lineno = bool(linestarts)
+ if show_lineno:
+ maxlineno = max(linestarts.values()) + line_offset
+ if maxlineno >= 1000:
+ lineno_width = len(str(maxlineno))
+ else:
+ lineno_width = 3
+ else:
+ lineno_width = 0
+ maxoffset = len(code) - 2
+ if maxoffset >= 10000:
+ offset_width = len(str(maxoffset))
+ else:
+ offset_width = 4
+ for instr in _get_instructions_bytes(code, varnames, names,
+ constants, cells, linestarts,
+ line_offset=line_offset):
+ new_source_line = (show_lineno and
+ instr.starts_line is not None and
+ instr.offset > 0)
+ if new_source_line:
+ print(file=file)
+ is_current_instr = instr.offset == lasti
+ print(instr._disassemble(lineno_width, is_current_instr, offset_width),
+ file=file)
+
+def _disassemble_str(source, **kwargs):
+ """Compile the source string, then disassemble the code object."""
+    _disassemble_recursive(_try_compile(source, '<dis>'), **kwargs)
+
+disco = disassemble # XXX For backwards compatibility
+
+def _unpack_opargs(code):
+ extended_arg = 0
+ for i in range(0, len(code), 2):
+ op = code[i]
+ if op >= HAVE_ARGUMENT:
+ arg = code[i+1] | extended_arg
+ extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
+ else:
+ arg = None
+ extended_arg = 0
+ yield (i, op, arg)
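+
+# Worked example (illustrative): each instruction word carries one argument
+# byte, so an argument of 0x1234 is encoded as EXTENDED_ARG with byte 0x12
+# followed by the real opcode with byte 0x34; the loop above folds the two
+# words together and yields arg == 0x1234 for the second word.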
+
+def findlabels(code):
+ """Detect all offsets in a byte code which are jump targets.
+
+ Return the list of offsets.
+
+ """
+ labels = []
+ for offset, op, arg in _unpack_opargs(code):
+ if arg is not None:
+ if op in hasjrel:
+ label = offset + 2 + arg*2
+ elif op in hasjabs:
+ label = arg*2
+ else:
+ continue
+ if label not in labels:
+ labels.append(label)
+ return labels
+
+def findlinestarts(code):
+ """Find the offsets in a byte code which are start of lines in the source.
+
+ Generate pairs (offset, lineno)
+ """
+ lastline = None
+ for start, end, line in code.co_lines():
+ if line is not None and line != lastline:
+ lastline = line
+ yield start, line
+ return
+
+
+class Bytecode:
+ """The bytecode operations of a piece of code
+
+ Instantiate this with a function, method, other compiled object, string of
+ code, or a code object (as returned by compile()).
+
+ Iterating over this yields the bytecode operations as Instruction instances.
+ """
+ def __init__(self, x, *, first_line=None, current_offset=None):
+ self.codeobj = co = _get_code_object(x)
+ if first_line is None:
+ self.first_line = co.co_firstlineno
+ self._line_offset = 0
+ else:
+ self.first_line = first_line
+ self._line_offset = first_line - co.co_firstlineno
+ self._cell_names = co.co_cellvars + co.co_freevars
+ self._linestarts = dict(findlinestarts(co))
+ self._original_object = x
+ self.current_offset = current_offset
+
+ def __iter__(self):
+ co = self.codeobj
+ return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
+ co.co_consts, self._cell_names,
+ self._linestarts,
+ line_offset=self._line_offset)
+
+ def __repr__(self):
+ return "{}({!r})".format(self.__class__.__name__,
+ self._original_object)
+
+ @classmethod
+ def from_traceback(cls, tb):
+ """ Construct a Bytecode from the given traceback """
+ while tb.tb_next:
+ tb = tb.tb_next
+ return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)
+
+ def info(self):
+ """Return formatted information about the code object."""
+ return _format_code_info(self.codeobj)
+
+ def dis(self):
+ """Return a formatted view of the bytecode operations."""
+ co = self.codeobj
+ if self.current_offset is not None:
+ offset = self.current_offset
+ else:
+ offset = -1
+ with io.StringIO() as output:
+ _disassemble_bytes(co.co_code, varnames=co.co_varnames,
+ names=co.co_names, constants=co.co_consts,
+ cells=self._cell_names,
+ linestarts=self._linestarts,
+ line_offset=self._line_offset,
+ file=output,
+ lasti=offset)
+ return output.getvalue()
+
+
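+# Usage sketch (illustrative):
+#
+#     bc = Bytecode("x = 1")
+#     print(bc.info())     # code-object details, as in show_code()
+#     print(bc.dis())      # formatted disassembly, returned as a string
+
+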
+def _test():
+ """Simple test program to disassemble a file."""
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('infile', type=argparse.FileType('rb'), nargs='?', default='-')
+ args = parser.parse_args()
+ with args.infile as infile:
+ source = infile.read()
+ code = compile(source, args.infile.name, "exec")
+ dis(code)
+
+if __name__ == "__main__":
+ _test()
diff --git a/infer_4_37_2/lib/python3.10/fractions.py b/infer_4_37_2/lib/python3.10/fractions.py
new file mode 100644
index 0000000000000000000000000000000000000000..96047beb4546a5384e23e2963f45abe5186dd9cc
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/fractions.py
@@ -0,0 +1,748 @@
+# Originally contributed by Sjoerd Mullender.
+# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
+
+"""Fraction, infinite-precision, real numbers."""
+
+from decimal import Decimal
+import math
+import numbers
+import operator
+import re
+import sys
+
+__all__ = ['Fraction']
+
+
+# Constants related to the hash implementation; hash(x) is based
+# on the reduction of x modulo the prime _PyHASH_MODULUS.
+_PyHASH_MODULUS = sys.hash_info.modulus
+# Value to be used for rationals that reduce to infinity modulo
+# _PyHASH_MODULUS.
+_PyHASH_INF = sys.hash_info.inf
+
+_RATIONAL_FORMAT = re.compile(r"""
+    \A\s*                      # optional whitespace at the start, then
+    (?P<sign>[-+]?)            # an optional sign, then
+    (?=\d|\.\d)                # lookahead for digit or .digit
+    (?P<num>\d*)               # numerator (possibly empty)
+    (?:                        # followed by
+       (?:/(?P<denom>\d+))?    # an optional denominator
+    |                          # or
+       (?:\.(?P<decimal>\d*))? # an optional fractional part
+       (?:E(?P<exp>[-+]?\d+))? # and optional exponent
+    )
+    \s*\Z                      # and optional whitespace to finish
+""", re.VERBOSE | re.IGNORECASE)
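+# Illustrative examples of strings the pattern accepts (the named groups are
+# consumed in Fraction.__new__ below): '3/2', '-2.3', '1e10', '+.5E-2'.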
+
+
+class Fraction(numbers.Rational):
+ """This class implements rational numbers.
+
+ In the two-argument form of the constructor, Fraction(8, 6) will
+ produce a rational number equivalent to 4/3. Both arguments must
+ be Rational. The numerator defaults to 0 and the denominator
+ defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
+
+ Fractions can also be constructed from:
+
+ - numeric strings similar to those accepted by the
+ float constructor (for example, '-2.3' or '1e10')
+
+ - strings of the form '123/456'
+
+ - float and Decimal instances
+
+ - other Rational instances (including integers)
+
+ """
+
+ __slots__ = ('_numerator', '_denominator')
+
+ # We're immutable, so use __new__ not __init__
+ def __new__(cls, numerator=0, denominator=None, *, _normalize=True):
+ """Constructs a Rational.
+
+ Takes a string like '3/2' or '1.5', another Rational instance, a
+ numerator/denominator pair, or a float.
+
+ Examples
+ --------
+
+ >>> Fraction(10, -8)
+ Fraction(-5, 4)
+ >>> Fraction(Fraction(1, 7), 5)
+ Fraction(1, 35)
+ >>> Fraction(Fraction(1, 7), Fraction(2, 3))
+ Fraction(3, 14)
+ >>> Fraction('314')
+ Fraction(314, 1)
+ >>> Fraction('-35/4')
+ Fraction(-35, 4)
+ >>> Fraction('3.1415') # conversion from numeric string
+ Fraction(6283, 2000)
+ >>> Fraction('-47e-2') # string may include a decimal exponent
+ Fraction(-47, 100)
+ >>> Fraction(1.47) # direct construction from float (exact conversion)
+ Fraction(6620291452234629, 4503599627370496)
+ >>> Fraction(2.25)
+ Fraction(9, 4)
+ >>> Fraction(Decimal('1.47'))
+ Fraction(147, 100)
+
+ """
+ self = super(Fraction, cls).__new__(cls)
+
+ if denominator is None:
+ if type(numerator) is int:
+ self._numerator = numerator
+ self._denominator = 1
+ return self
+
+ elif isinstance(numerator, numbers.Rational):
+ self._numerator = numerator.numerator
+ self._denominator = numerator.denominator
+ return self
+
+ elif isinstance(numerator, (float, Decimal)):
+ # Exact conversion
+ self._numerator, self._denominator = numerator.as_integer_ratio()
+ return self
+
+ elif isinstance(numerator, str):
+ # Handle construction from strings.
+ m = _RATIONAL_FORMAT.match(numerator)
+ if m is None:
+ raise ValueError('Invalid literal for Fraction: %r' %
+ numerator)
+ numerator = int(m.group('num') or '0')
+ denom = m.group('denom')
+ if denom:
+ denominator = int(denom)
+ else:
+ denominator = 1
+ decimal = m.group('decimal')
+ if decimal:
+ scale = 10**len(decimal)
+ numerator = numerator * scale + int(decimal)
+ denominator *= scale
+ exp = m.group('exp')
+ if exp:
+ exp = int(exp)
+ if exp >= 0:
+ numerator *= 10**exp
+ else:
+ denominator *= 10**-exp
+ if m.group('sign') == '-':
+ numerator = -numerator
+
+ else:
+ raise TypeError("argument should be a string "
+ "or a Rational instance")
+
+ elif type(numerator) is int is type(denominator):
+ pass # *very* normal case
+
+ elif (isinstance(numerator, numbers.Rational) and
+ isinstance(denominator, numbers.Rational)):
+ numerator, denominator = (
+ numerator.numerator * denominator.denominator,
+ denominator.numerator * numerator.denominator
+ )
+ else:
+ raise TypeError("both arguments should be "
+ "Rational instances")
+
+ if denominator == 0:
+ raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
+ if _normalize:
+ g = math.gcd(numerator, denominator)
+ if denominator < 0:
+ g = -g
+ numerator //= g
+ denominator //= g
+ self._numerator = numerator
+ self._denominator = denominator
+ return self
+
+ @classmethod
+ def from_float(cls, f):
+ """Converts a finite float to a rational number, exactly.
+
+ Beware that Fraction.from_float(0.3) != Fraction(3, 10).
+
+ """
+ if isinstance(f, numbers.Integral):
+ return cls(f)
+ elif not isinstance(f, float):
+ raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
+ (cls.__name__, f, type(f).__name__))
+ return cls(*f.as_integer_ratio())
+
+ @classmethod
+ def from_decimal(cls, dec):
+ """Converts a finite Decimal instance to a rational number, exactly."""
+ from decimal import Decimal
+ if isinstance(dec, numbers.Integral):
+ dec = Decimal(int(dec))
+ elif not isinstance(dec, Decimal):
+ raise TypeError(
+ "%s.from_decimal() only takes Decimals, not %r (%s)" %
+ (cls.__name__, dec, type(dec).__name__))
+ return cls(*dec.as_integer_ratio())
+
+ def as_integer_ratio(self):
+ """Return the integer ratio as a tuple.
+
+ Return a tuple of two integers, whose ratio is equal to the
+ Fraction and with a positive denominator.
+ """
+ return (self._numerator, self._denominator)
+
+ def limit_denominator(self, max_denominator=1000000):
+ """Closest Fraction to self with denominator at most max_denominator.
+
+ >>> Fraction('3.141592653589793').limit_denominator(10)
+ Fraction(22, 7)
+ >>> Fraction('3.141592653589793').limit_denominator(100)
+ Fraction(311, 99)
+ >>> Fraction(4321, 8765).limit_denominator(10000)
+ Fraction(4321, 8765)
+
+ """
+ # Algorithm notes: For any real number x, define a *best upper
+ # approximation* to x to be a rational number p/q such that:
+ #
+ # (1) p/q >= x, and
+ # (2) if p/q > r/s >= x then s > q, for any rational r/s.
+ #
+ # Define *best lower approximation* similarly. Then it can be
+ # proved that a rational number is a best upper or lower
+ # approximation to x if, and only if, it is a convergent or
+ # semiconvergent of the (unique shortest) continued fraction
+ # associated to x.
+ #
+ # To find a best rational approximation with denominator <= M,
+ # we find the best upper and lower approximations with
+ # denominator <= M and take whichever of these is closer to x.
+ # In the event of a tie, the bound with smaller denominator is
+ # chosen. If both denominators are equal (which can happen
+ # only when max_denominator == 1 and self is midway between
+ # two integers) the lower bound---i.e., the floor of self, is
+ # taken.
+
+ if max_denominator < 1:
+ raise ValueError("max_denominator should be at least 1")
+ if self._denominator <= max_denominator:
+ return Fraction(self)
+
+ p0, q0, p1, q1 = 0, 1, 1, 0
+ n, d = self._numerator, self._denominator
+ while True:
+ a = n//d
+ q2 = q0+a*q1
+ if q2 > max_denominator:
+ break
+ p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
+ n, d = d, n-a*d
+
+ k = (max_denominator-q0)//q1
+ bound1 = Fraction(p0+k*p1, q0+k*q1)
+ bound2 = Fraction(p1, q1)
+ if abs(bound2 - self) <= abs(bound1-self):
+ return bound2
+ else:
+ return bound1
+
+ @property
+ def numerator(a):
+ return a._numerator
+
+ @property
+ def denominator(a):
+ return a._denominator
+
+ def __repr__(self):
+ """repr(self)"""
+ return '%s(%s, %s)' % (self.__class__.__name__,
+ self._numerator, self._denominator)
+
+ def __str__(self):
+ """str(self)"""
+ if self._denominator == 1:
+ return str(self._numerator)
+ else:
+ return '%s/%s' % (self._numerator, self._denominator)
+
+ def _operator_fallbacks(monomorphic_operator, fallback_operator):
+ """Generates forward and reverse operators given a purely-rational
+ operator and a function from the operator module.
+
+ Use this like:
+ __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)
+
+ In general, we want to implement the arithmetic operations so
+ that mixed-mode operations either call an implementation whose
+ author knew about the types of both arguments, or convert both
+ to the nearest built in type and do the operation there. In
+ Fraction, that means that we define __add__ and __radd__ as:
+
+ def __add__(self, other):
+ # Both types have numerators/denominator attributes,
+ # so do the operation directly
+ if isinstance(other, (int, Fraction)):
+ return Fraction(self.numerator * other.denominator +
+ other.numerator * self.denominator,
+ self.denominator * other.denominator)
+ # float and complex don't have those operations, but we
+ # know about those types, so special case them.
+ elif isinstance(other, float):
+ return float(self) + other
+ elif isinstance(other, complex):
+ return complex(self) + other
+ # Let the other type take over.
+ return NotImplemented
+
+ def __radd__(self, other):
+ # radd handles more types than add because there's
+ # nothing left to fall back to.
+ if isinstance(other, numbers.Rational):
+ return Fraction(self.numerator * other.denominator +
+ other.numerator * self.denominator,
+ self.denominator * other.denominator)
+ elif isinstance(other, Real):
+ return float(other) + float(self)
+ elif isinstance(other, Complex):
+ return complex(other) + complex(self)
+ return NotImplemented
+
+
+ There are 5 different cases for a mixed-type addition on
+ Fraction. I'll refer to all of the above code that doesn't
+ refer to Fraction, float, or complex as "boilerplate". 'r'
+ will be an instance of Fraction, which is a subtype of
+ Rational (r : Fraction <: Rational), and b : B <:
+ Complex. The first three involve 'r + b':
+
+ 1. If B <: Fraction, int, float, or complex, we handle
+ that specially, and all is well.
+ 2. If Fraction falls back to the boilerplate code, and it
+ were to return a value from __add__, we'd miss the
+ possibility that B defines a more intelligent __radd__,
+ so the boilerplate should return NotImplemented from
+ __add__. In particular, we don't handle Rational
+ here, even though we could get an exact answer, in case
+ the other type wants to do something special.
+ 3. If B <: Fraction, Python tries B.__radd__ before
+ Fraction.__add__. This is ok, because it was
+ implemented with knowledge of Fraction, so it can
+ handle those instances before delegating to Real or
+ Complex.
+
+ The next two situations describe 'b + r'. We assume that b
+ didn't know about Fraction in its implementation, and that it
+ uses similar boilerplate code:
+
+        4. If B <: Rational, then __radd__ converts both to the
+ builtin rational type (hey look, that's us) and
+ proceeds.
+ 5. Otherwise, __radd__ tries to find the nearest common
+ base ABC, and fall back to its builtin type. Since this
+ class doesn't subclass a concrete type, there's no
+ implementation to fall back to, so we need to try as
+ hard as possible to return an actual value, or the user
+ will get a TypeError.
+
+ """
+ def forward(a, b):
+ if isinstance(b, (int, Fraction)):
+ return monomorphic_operator(a, b)
+ elif isinstance(b, float):
+ return fallback_operator(float(a), b)
+ elif isinstance(b, complex):
+ return fallback_operator(complex(a), b)
+ else:
+ return NotImplemented
+ forward.__name__ = '__' + fallback_operator.__name__ + '__'
+ forward.__doc__ = monomorphic_operator.__doc__
+
+ def reverse(b, a):
+ if isinstance(a, numbers.Rational):
+ # Includes ints.
+ return monomorphic_operator(a, b)
+ elif isinstance(a, numbers.Real):
+ return fallback_operator(float(a), float(b))
+ elif isinstance(a, numbers.Complex):
+ return fallback_operator(complex(a), complex(b))
+ else:
+ return NotImplemented
+ reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
+ reverse.__doc__ = monomorphic_operator.__doc__
+
+ return forward, reverse
+
+ # Rational arithmetic algorithms: Knuth, TAOCP, Volume 2, 4.5.1.
+ #
+ # Assume input fractions a and b are normalized.
+ #
+ # 1) Consider addition/subtraction.
+ #
+ # Let g = gcd(da, db). Then
+ #
+ # na nb na*db ± nb*da
+ # a ± b == -- ± -- == ------------- ==
+ # da db da*db
+ #
+ # na*(db//g) ± nb*(da//g) t
+ # == ----------------------- == -
+ # (da*db)//g d
+ #
+ # Now, if g > 1, we're working with smaller integers.
+ #
+    # Note that t, (da//g) and (db//g) are pairwise coprime.
+ #
+ # Indeed, (da//g) and (db//g) share no common factors (they were
+ # removed) and da is coprime with na (since input fractions are
+ # normalized), hence (da//g) and na are coprime. By symmetry,
+ # (db//g) and nb are coprime too. Then,
+ #
+ # gcd(t, da//g) == gcd(na*(db//g), da//g) == 1
+ # gcd(t, db//g) == gcd(nb*(da//g), db//g) == 1
+ #
+    # The above allows us to optimize reduction of the result to lowest
+ # terms. Indeed,
+ #
+ # g2 = gcd(t, d) == gcd(t, (da//g)*(db//g)*g) == gcd(t, g)
+ #
+ # t//g2 t//g2
+ # a ± b == ----------------------- == ----------------
+ # (da//g)*(db//g)*(g//g2) (da//g)*(db//g2)
+ #
+ # is a normalized fraction. This is useful because the unnormalized
+ # denominator d could be much larger than g.
+ #
+ # We should special-case g == 1 (and g2 == 1), since 60.8% of
+ # randomly-chosen integers are coprime:
+ # https://en.wikipedia.org/wiki/Coprime_integers#Probability_of_coprimality
+    # Note that g2 == 1 always for fractions obtained from floats: here
+ # g is a power of 2 and the unnormalized numerator t is an odd integer.
+ #
+ # 2) Consider multiplication
+ #
+ # Let g1 = gcd(na, db) and g2 = gcd(nb, da), then
+ #
+ # na*nb na*nb (na//g1)*(nb//g2)
+ # a*b == ----- == ----- == -----------------
+ # da*db db*da (db//g1)*(da//g2)
+ #
+    # Note that after divisions we're multiplying smaller integers.
+ #
+ # Also, the resulting fraction is normalized, because each of
+ # two factors in the numerator is coprime to each of the two factors
+ # in the denominator.
+ #
+ # Indeed, pick (na//g1). It's coprime with (da//g2), because input
+ # fractions are normalized. It's also coprime with (db//g1), because
+ # common factors are removed by g1 == gcd(na, db).
+ #
+ # As for addition/subtraction, we should special-case g1 == 1
+    # and g2 == 1 for the same reason. That also happens when multiplying
+    # rationals obtained from floats.
+
+ def _add(a, b):
+ """a + b"""
+ na, da = a.numerator, a.denominator
+ nb, db = b.numerator, b.denominator
+ g = math.gcd(da, db)
+ if g == 1:
+ return Fraction(na * db + da * nb, da * db, _normalize=False)
+ s = da // g
+ t = na * (db // g) + nb * s
+ g2 = math.gcd(t, g)
+ if g2 == 1:
+ return Fraction(t, s * db, _normalize=False)
+ return Fraction(t // g2, s * (db // g2), _normalize=False)
+
+ __add__, __radd__ = _operator_fallbacks(_add, operator.add)
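+
+    # Worked example for _add (illustrative): 1/6 + 1/10 has g = gcd(6, 10)
+    # = 2, t = 1*(10//2) + 1*(6//2) = 8 and g2 = gcd(8, 2) = 2, giving
+    # Fraction(8//2, 3*(10//2)) = Fraction(4, 15), already in lowest terms.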
+
+ def _sub(a, b):
+ """a - b"""
+ na, da = a.numerator, a.denominator
+ nb, db = b.numerator, b.denominator
+ g = math.gcd(da, db)
+ if g == 1:
+ return Fraction(na * db - da * nb, da * db, _normalize=False)
+ s = da // g
+ t = na * (db // g) - nb * s
+ g2 = math.gcd(t, g)
+ if g2 == 1:
+ return Fraction(t, s * db, _normalize=False)
+ return Fraction(t // g2, s * (db // g2), _normalize=False)
+
+ __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
+
+ def _mul(a, b):
+ """a * b"""
+ na, da = a.numerator, a.denominator
+ nb, db = b.numerator, b.denominator
+ g1 = math.gcd(na, db)
+ if g1 > 1:
+ na //= g1
+ db //= g1
+ g2 = math.gcd(nb, da)
+ if g2 > 1:
+ nb //= g2
+ da //= g2
+ return Fraction(na * nb, db * da, _normalize=False)
+
+ __mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
+
+ def _div(a, b):
+ """a / b"""
+        # Same as _mul(), with b inverted.
+ na, da = a.numerator, a.denominator
+ nb, db = b.numerator, b.denominator
+ g1 = math.gcd(na, nb)
+ if g1 > 1:
+ na //= g1
+ nb //= g1
+ g2 = math.gcd(db, da)
+ if g2 > 1:
+ da //= g2
+ db //= g2
+ n, d = na * db, nb * da
+ if d < 0:
+ n, d = -n, -d
+ return Fraction(n, d, _normalize=False)
+
+ __truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
+
+ def _floordiv(a, b):
+ """a // b"""
+ return (a.numerator * b.denominator) // (a.denominator * b.numerator)
+
+ __floordiv__, __rfloordiv__ = _operator_fallbacks(_floordiv, operator.floordiv)
+
+ def _divmod(a, b):
+ """(a // b, a % b)"""
+ da, db = a.denominator, b.denominator
+ div, n_mod = divmod(a.numerator * db, da * b.numerator)
+ return div, Fraction(n_mod, da * db)
+
+ __divmod__, __rdivmod__ = _operator_fallbacks(_divmod, divmod)
+
+ def _mod(a, b):
+ """a % b"""
+ da, db = a.denominator, b.denominator
+ return Fraction((a.numerator * db) % (b.numerator * da), da * db)
+
+ __mod__, __rmod__ = _operator_fallbacks(_mod, operator.mod)
+
+ def __pow__(a, b):
+ """a ** b
+
+ If b is not an integer, the result will be a float or complex
+ since roots are generally irrational. If b is an integer, the
+ result will be rational.
+
+ """
+ if isinstance(b, numbers.Rational):
+ if b.denominator == 1:
+ power = b.numerator
+ if power >= 0:
+ return Fraction(a._numerator ** power,
+ a._denominator ** power,
+ _normalize=False)
+ elif a._numerator >= 0:
+ return Fraction(a._denominator ** -power,
+ a._numerator ** -power,
+ _normalize=False)
+ else:
+ return Fraction((-a._denominator) ** -power,
+ (-a._numerator) ** -power,
+ _normalize=False)
+ else:
+ # A fractional power will generally produce an
+ # irrational number.
+ return float(a) ** float(b)
+ else:
+ return float(a) ** b
+
+ def __rpow__(b, a):
+ """a ** b"""
+ if b._denominator == 1 and b._numerator >= 0:
+ # If a is an int, keep it that way if possible.
+ return a ** b._numerator
+
+ if isinstance(a, numbers.Rational):
+ return Fraction(a.numerator, a.denominator) ** b
+
+ if b._denominator == 1:
+ return a ** b._numerator
+
+ return a ** float(b)
+
+ def __pos__(a):
+ """+a: Coerces a subclass instance to Fraction"""
+ return Fraction(a._numerator, a._denominator, _normalize=False)
+
+ def __neg__(a):
+ """-a"""
+ return Fraction(-a._numerator, a._denominator, _normalize=False)
+
+ def __abs__(a):
+ """abs(a)"""
+ return Fraction(abs(a._numerator), a._denominator, _normalize=False)
+
+ def __trunc__(a):
+ """trunc(a)"""
+ if a._numerator < 0:
+ return -(-a._numerator // a._denominator)
+ else:
+ return a._numerator // a._denominator
+
+ def __floor__(a):
+ """math.floor(a)"""
+ return a.numerator // a.denominator
+
+ def __ceil__(a):
+ """math.ceil(a)"""
+ # The negations cleverly convince floordiv to return the ceiling.
+ return -(-a.numerator // a.denominator)
+
+ def __round__(self, ndigits=None):
+ """round(self, ndigits)
+
+ Rounds half toward even.
+ """
+ if ndigits is None:
+ floor, remainder = divmod(self.numerator, self.denominator)
+ if remainder * 2 < self.denominator:
+ return floor
+ elif remainder * 2 > self.denominator:
+ return floor + 1
+ # Deal with the half case:
+ elif floor % 2 == 0:
+ return floor
+ else:
+ return floor + 1
+ shift = 10**abs(ndigits)
+ # See _operator_fallbacks.forward to check that the results of
+ # these operations will always be Fraction and therefore have
+ # round().
+ if ndigits > 0:
+ return Fraction(round(self * shift), shift)
+ else:
+ return Fraction(round(self / shift) * shift)
+
+ def __hash__(self):
+ """hash(self)"""
+
+ # To make sure that the hash of a Fraction agrees with the hash
+ # of a numerically equal integer, float or Decimal instance, we
+ # follow the rules for numeric hashes outlined in the
+ # documentation. (See library docs, 'Built-in Types').
+
+ try:
+ dinv = pow(self._denominator, -1, _PyHASH_MODULUS)
+ except ValueError:
+ # ValueError means there is no modular inverse.
+ hash_ = _PyHASH_INF
+ else:
+ # The general algorithm now specifies that the absolute value of
+ # the hash is
+ # (|N| * dinv) % P
+ # where N is self._numerator and P is _PyHASH_MODULUS. That's
+ # optimized here in two ways: first, for a non-negative int i,
+ # hash(i) == i % P, but the int hash implementation doesn't need
+ # to divide, and is faster than doing % P explicitly. So we do
+ # hash(|N| * dinv)
+ # instead. Second, N is unbounded, so its product with dinv may
+ # be arbitrarily expensive to compute. The final answer is the
+ # same if we use the bounded |N| % P instead, which can again
+ # be done with an int hash() call. If 0 <= i < P, hash(i) == i,
+ # so this nested hash() call wastes a bit of time making a
+ # redundant copy when |N| < P, but can save an arbitrarily large
+ # amount of computation for large |N|.
+ hash_ = hash(hash(abs(self._numerator)) * dinv)
+ result = hash_ if self._numerator >= 0 else -hash_
+ return -2 if result == -1 else result
+
+ def __eq__(a, b):
+ """a == b"""
+ if type(b) is int:
+ return a._numerator == b and a._denominator == 1
+ if isinstance(b, numbers.Rational):
+ return (a._numerator == b.numerator and
+ a._denominator == b.denominator)
+ if isinstance(b, numbers.Complex) and b.imag == 0:
+ b = b.real
+ if isinstance(b, float):
+ if math.isnan(b) or math.isinf(b):
+ # comparisons with an infinity or nan should behave in
+ # the same way for any finite a, so treat a as zero.
+ return 0.0 == b
+ else:
+ return a == a.from_float(b)
+ else:
+ # Since a doesn't know how to compare with b, let's give b
+ # a chance to compare itself with a.
+ return NotImplemented
+
+ def _richcmp(self, other, op):
+ """Helper for comparison operators, for internal use only.
+
+ Implement comparison between a Rational instance `self`, and
+ either another Rational instance or a float `other`. If
+ `other` is not a Rational instance or a float, return
+ NotImplemented. `op` should be one of the six standard
+ comparison operators.
+
+ """
+ # convert other to a Rational instance where reasonable.
+ if isinstance(other, numbers.Rational):
+ return op(self._numerator * other.denominator,
+ self._denominator * other.numerator)
+ if isinstance(other, float):
+ if math.isnan(other) or math.isinf(other):
+ return op(0.0, other)
+ else:
+ return op(self, self.from_float(other))
+ else:
+ return NotImplemented
+
+ def __lt__(a, b):
+ """a < b"""
+ return a._richcmp(b, operator.lt)
+
+ def __gt__(a, b):
+ """a > b"""
+ return a._richcmp(b, operator.gt)
+
+ def __le__(a, b):
+ """a <= b"""
+ return a._richcmp(b, operator.le)
+
+ def __ge__(a, b):
+ """a >= b"""
+ return a._richcmp(b, operator.ge)
+
+ def __bool__(a):
+ """a != 0"""
+ # bpo-39274: Use bool() because (a._numerator != 0) can return an
+ # object which is not a bool.
+ return bool(a._numerator)
+
+ # support for pickling, copy, and deepcopy
+
+ def __reduce__(self):
+ return (self.__class__, (str(self),))
+
+ def __copy__(self):
+ if type(self) == Fraction:
+ return self # I'm immutable; therefore I am my own clone
+ return self.__class__(self._numerator, self._denominator)
+
+ def __deepcopy__(self, memo):
+ if type(self) == Fraction:
+ return self # My components are also immutable
+ return self.__class__(self._numerator, self._denominator)
diff --git a/infer_4_37_2/lib/python3.10/ftplib.py b/infer_4_37_2/lib/python3.10/ftplib.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c5a50715f6dc691443ba89bfef07bbbea7fdc1d
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/ftplib.py
@@ -0,0 +1,981 @@
+"""An FTP client class and some helper functions.
+
+Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
+
+Example:
+
+>>> from ftplib import FTP
+>>> ftp = FTP('ftp.python.org') # connect to host, default port
+>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
+'230 Guest login ok, access restrictions apply.'
+>>> ftp.retrlines('LIST') # list directory contents
+total 9
+drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
+drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
+drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
+drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
+d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
+drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
+drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
+drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
+-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
+'226 Transfer complete.'
+>>> ftp.quit()
+'221 Goodbye.'
+>>>
+
+A nice test that reveals some of the network dialogue would be:
+python ftplib.py -d localhost -l -p -l
+"""
+
+#
+# Changes and improvements suggested by Steve Majewski.
+# Modified by Jack to work on the mac.
+# Modified by Siebren to support docstrings and PASV.
+# Modified by Phil Schwartz to add storbinary and storlines callbacks.
+# Modified by Giampaolo Rodola' to add TLS support.
+#
+
+import sys
+import socket
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+
+__all__ = ["FTP", "error_reply", "error_temp", "error_perm", "error_proto",
+ "all_errors"]
+
+# Magic number from <socket.h>
+MSG_OOB = 0x1 # Process data out of band
+
+
+# The standard FTP server control port
+FTP_PORT = 21
+# The sizehint parameter passed to readline() calls
+MAXLINE = 8192
+
+
+# Exception raised when an error or invalid response is received
+class Error(Exception): pass
+class error_reply(Error): pass # unexpected [123]xx reply
+class error_temp(Error): pass # 4xx errors
+class error_perm(Error): pass # 5xx errors
+class error_proto(Error): pass # response does not begin with [1-5]
+
+
+# All exceptions (hopefully) that may be raised here and that aren't
+# (always) programming errors on our side
+all_errors = (Error, OSError, EOFError)
+
+
+# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
+CRLF = '\r\n'
+B_CRLF = b'\r\n'
+
+# The class itself
+class FTP:
+ '''An FTP client class.
+
+ To create a connection, call the class using these arguments:
+ host, user, passwd, acct, timeout, source_address, encoding
+
+ The first four arguments are all strings, and have default value ''.
+    The parameter 'timeout' must be numeric and defaults to None if not
+    passed, meaning that no timeout will be set on any ftp socket(s).
+ If a timeout is passed, then this is now the default timeout for all ftp
+ socket operations for this instance.
+ The last parameter is the encoding of filenames, which defaults to utf-8.
+
+ Then use self.connect() with optional host and port argument.
+
+ To download a file, use ftp.retrlines('RETR ' + filename),
+ or ftp.retrbinary() with slightly different arguments.
+ To upload a file, use ftp.storlines() or ftp.storbinary(),
+ which have an open file as argument (see their definitions
+ below for details).
+ The download/upload functions first issue appropriate TYPE
+ and PORT or PASV commands.
+ '''
+
+ debugging = 0
+ host = ''
+ port = FTP_PORT
+ maxline = MAXLINE
+ sock = None
+ file = None
+ welcome = None
+ passiveserver = True
+ # Disables https://bugs.python.org/issue43285 security if set to True.
+ trust_server_pasv_ipv4_address = False
+
+ def __init__(self, host='', user='', passwd='', acct='',
+ timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *,
+ encoding='utf-8'):
+ """Initialization method (called by class instantiation).
+ Initialize host to localhost, port to standard ftp port.
+ Optional arguments are host (for connect()),
+ and user, passwd, acct (for login()).
+ """
+ self.encoding = encoding
+ self.source_address = source_address
+ self.timeout = timeout
+ if host:
+ self.connect(host)
+ if user:
+ self.login(user, passwd, acct)
+
+ def __enter__(self):
+ return self
+
+ # Context management protocol: try to quit() if active
+ def __exit__(self, *args):
+ if self.sock is not None:
+ try:
+ self.quit()
+ except (OSError, EOFError):
+ pass
+ finally:
+ if self.sock is not None:
+ self.close()
+
+ def connect(self, host='', port=0, timeout=-999, source_address=None):
+ '''Connect to host. Arguments are:
+ - host: hostname to connect to (string, default previous host)
+ - port: port to connect to (integer, default previous port)
+ - timeout: the timeout to set against the ftp socket(s)
+ - source_address: a 2-tuple (host, port) for the socket to bind
+ to as its source address before connecting.
+ '''
+ if host != '':
+ self.host = host
+ if port > 0:
+ self.port = port
+ if timeout != -999:
+ self.timeout = timeout
+ if self.timeout is not None and not self.timeout:
+ raise ValueError('Non-blocking socket (timeout=0) is not supported')
+ if source_address is not None:
+ self.source_address = source_address
+ sys.audit("ftplib.connect", self, self.host, self.port)
+ self.sock = socket.create_connection((self.host, self.port), self.timeout,
+ source_address=self.source_address)
+ self.af = self.sock.family
+ self.file = self.sock.makefile('r', encoding=self.encoding)
+ self.welcome = self.getresp()
+ return self.welcome
+
+ def getwelcome(self):
+ '''Get the welcome message from the server.
+ (this is read and squirreled away by connect())'''
+ if self.debugging:
+ print('*welcome*', self.sanitize(self.welcome))
+ return self.welcome
+
+ def set_debuglevel(self, level):
+ '''Set the debugging level.
+ The required argument level means:
+ 0: no debugging output (default)
+ 1: print commands and responses but not body text etc.
+ 2: also print raw lines read and sent before stripping CR/LF'''
+ self.debugging = level
+ debug = set_debuglevel
+
+ def set_pasv(self, val):
+ '''Use passive or active mode for data transfers.
+ With a false argument, use the normal PORT mode,
+ With a true argument, use the PASV command.'''
+ self.passiveserver = val
+
+ # Internal: "sanitize" a string for printing
+ def sanitize(self, s):
+ if s[:5] in {'pass ', 'PASS '}:
+ i = len(s.rstrip('\r\n'))
+ s = s[:5] + '*'*(i-5) + s[i:]
+ return repr(s)
+
+ # Internal: send one line to the server, appending CRLF
+ def putline(self, line):
+ if '\r' in line or '\n' in line:
+ raise ValueError('an illegal newline character should not be contained')
+ sys.audit("ftplib.sendcmd", self, line)
+ line = line + CRLF
+ if self.debugging > 1:
+ print('*put*', self.sanitize(line))
+ self.sock.sendall(line.encode(self.encoding))
+
+ # Internal: send one command to the server (through putline())
+ def putcmd(self, line):
+ if self.debugging: print('*cmd*', self.sanitize(line))
+ self.putline(line)
+
+ # Internal: return one line from the server, stripping CRLF.
+ # Raise EOFError if the connection is closed
+ def getline(self):
+ line = self.file.readline(self.maxline + 1)
+ if len(line) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
+ if self.debugging > 1:
+ print('*get*', self.sanitize(line))
+ if not line:
+ raise EOFError
+ if line[-2:] == CRLF:
+ line = line[:-2]
+ elif line[-1:] in CRLF:
+ line = line[:-1]
+ return line
+
+ # Internal: get a response from the server, which may possibly
+ # consist of multiple lines. Return a single string with no
+ # trailing CRLF. If the response consists of multiple lines,
+ # these are separated by '\n' characters in the string
+ def getmultiline(self):
+ line = self.getline()
+ if line[3:4] == '-':
+ code = line[:3]
+ while 1:
+ nextline = self.getline()
+ line = line + ('\n' + nextline)
+ if nextline[:3] == code and \
+ nextline[3:4] != '-':
+ break
+ return line
+
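+    # A minimal sketch of the multi-line reply format handled above
+    # (reply text illustrative): the first line carries the code
+    # followed by '-', and the reply ends at a line repeating the code
+    # followed by anything but '-', e.g.
+    #   211-Features:
+    #    MDTM
+    #    SIZE
+    #   211 End
+    # getmultiline() returns these lines joined by '\n'.
+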
+ # Internal: get a response from the server.
+ # Raise various errors if the response indicates an error
+ def getresp(self):
+ resp = self.getmultiline()
+ if self.debugging:
+ print('*resp*', self.sanitize(resp))
+ self.lastresp = resp[:3]
+ c = resp[:1]
+ if c in {'1', '2', '3'}:
+ return resp
+ if c == '4':
+ raise error_temp(resp)
+ if c == '5':
+ raise error_perm(resp)
+ raise error_proto(resp)
+
+ def voidresp(self):
+ """Expect a response beginning with '2'."""
+ resp = self.getresp()
+ if resp[:1] != '2':
+ raise error_reply(resp)
+ return resp
+
+ def abort(self):
+ '''Abort a file transfer. Uses out-of-band data.
+ This does not follow the procedure from the RFC to send Telnet
+ IP and Synch; that doesn't seem to work with the servers I've
+ tried. Instead, just send the ABOR command as OOB data.'''
+ line = b'ABOR' + B_CRLF
+ if self.debugging > 1:
+ print('*put urgent*', self.sanitize(line))
+ self.sock.sendall(line, MSG_OOB)
+ resp = self.getmultiline()
+ if resp[:3] not in {'426', '225', '226'}:
+ raise error_proto(resp)
+ return resp
+
+ def sendcmd(self, cmd):
+ '''Send a command and return the response.'''
+ self.putcmd(cmd)
+ return self.getresp()
+
+ def voidcmd(self, cmd):
+ """Send a command and expect a response beginning with '2'."""
+ self.putcmd(cmd)
+ return self.voidresp()
+
+ def sendport(self, host, port):
+ '''Send a PORT command with the current host and the given
+ port number.
+ '''
+ hbytes = host.split('.')
+ pbytes = [repr(port//256), repr(port%256)]
+ bytes = hbytes + pbytes
+ cmd = 'PORT ' + ','.join(bytes)
+ return self.voidcmd(cmd)
+
+ def sendeprt(self, host, port):
+ '''Send an EPRT command with the current host and the given port number.'''
+ af = 0
+ if self.af == socket.AF_INET:
+ af = 1
+ if self.af == socket.AF_INET6:
+ af = 2
+ if af == 0:
+ raise error_proto('unsupported address family')
+ fields = ['', repr(af), host, repr(port), '']
+ cmd = 'EPRT ' + '|'.join(fields)
+ return self.voidcmd(cmd)
+
+ def makeport(self):
+ '''Create a new socket and send a PORT command for it.'''
+ sock = socket.create_server(("", 0), family=self.af, backlog=1)
+ port = sock.getsockname()[1] # Get proper port
+ host = self.sock.getsockname()[0] # Get proper host
+ if self.af == socket.AF_INET:
+ resp = self.sendport(host, port)
+ else:
+ resp = self.sendeprt(host, port)
+ if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(self.timeout)
+ return sock
+
+ def makepasv(self):
+ """Internal: Does the PASV or EPSV handshake -> (address, port)"""
+ if self.af == socket.AF_INET:
+ untrusted_host, port = parse227(self.sendcmd('PASV'))
+ if self.trust_server_pasv_ipv4_address:
+ host = untrusted_host
+ else:
+ host = self.sock.getpeername()[0]
+ else:
+ host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
+ return host, port
+
+ def ntransfercmd(self, cmd, rest=None):
+ """Initiate a transfer over the data connection.
+
+ If the transfer is active, send a port command and the
+ transfer command, and accept the connection. If the server is
+ passive, send a pasv command, connect to it, and start the
+ transfer command. Either way, return the socket for the
+ connection and the expected size of the transfer. The
+ expected size may be None if it could not be determined.
+
+ Optional `rest' argument can be a string that is sent as the
+ argument to a REST command. This is essentially a server
+ marker used to tell the server to skip over any data up to the
+ given marker.
+ """
+ size = None
+ if self.passiveserver:
+ host, port = self.makepasv()
+ conn = socket.create_connection((host, port), self.timeout,
+ source_address=self.source_address)
+ try:
+ if rest is not None:
+ self.sendcmd("REST %s" % rest)
+ resp = self.sendcmd(cmd)
+ # Some servers apparently send a 200 reply to
+ # a LIST or STOR command, before the 150 reply
+ # (and way before the 226 reply). This seems to
+ # be in violation of the protocol (which only allows
+ # 1xx or error messages for LIST), so we just discard
+ # this response.
+ if resp[0] == '2':
+ resp = self.getresp()
+ if resp[0] != '1':
+ raise error_reply(resp)
+ except:
+ conn.close()
+ raise
+ else:
+ with self.makeport() as sock:
+ if rest is not None:
+ self.sendcmd("REST %s" % rest)
+ resp = self.sendcmd(cmd)
+ # See above.
+ if resp[0] == '2':
+ resp = self.getresp()
+ if resp[0] != '1':
+ raise error_reply(resp)
+ conn, sockaddr = sock.accept()
+ if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
+ conn.settimeout(self.timeout)
+ if resp[:3] == '150':
+ # this is conditional in case we received a 125
+ size = parse150(resp)
+ return conn, size
+
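+    # A minimal usage sketch (command and sizes illustrative): callers
+    # read the data connection themselves and then collect the final
+    # reply, e.g.
+    #   conn, size = ftp.ntransfercmd('RETR example.bin')
+    #   with conn:
+    #       while chunk := conn.recv(8192):
+    #           ...
+    #   ftp.voidresp()
+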
+ def transfercmd(self, cmd, rest=None):
+ """Like ntransfercmd() but returns only the socket."""
+ return self.ntransfercmd(cmd, rest)[0]
+
+ def login(self, user = '', passwd = '', acct = ''):
+ '''Login, default anonymous.'''
+ if not user:
+ user = 'anonymous'
+ if not passwd:
+ passwd = ''
+ if not acct:
+ acct = ''
+ if user == 'anonymous' and passwd in {'', '-'}:
+ # If there is no anonymous ftp password specified
+ # then we'll just use anonymous@
+ # We don't send any other thing because:
+ # - We want to remain anonymous
+ # - We want to stop SPAM
+            # - We don't want to let ftp sites discriminate by user,
+            #   host or country.
+ passwd = passwd + 'anonymous@'
+ resp = self.sendcmd('USER ' + user)
+ if resp[0] == '3':
+ resp = self.sendcmd('PASS ' + passwd)
+ if resp[0] == '3':
+ resp = self.sendcmd('ACCT ' + acct)
+ if resp[0] != '2':
+ raise error_reply(resp)
+ return resp
+
+ def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
+ """Retrieve data in binary mode. A new port is created for you.
+
+ Args:
+ cmd: A RETR command.
+ callback: A single parameter callable to be called on each
+ block of data read.
+ blocksize: The maximum number of bytes to read from the
+ socket at one time. [default: 8192]
+ rest: Passed to transfercmd(). [default: None]
+
+ Returns:
+ The response code.
+ """
+ self.voidcmd('TYPE I')
+ with self.transfercmd(cmd, rest) as conn:
+ while 1:
+ data = conn.recv(blocksize)
+ if not data:
+ break
+ callback(data)
+ # shutdown ssl layer
+ if _SSLSocket is not None and isinstance(conn, _SSLSocket):
+ conn.unwrap()
+ return self.voidresp()
+
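+    # A minimal usage sketch (filenames illustrative):
+    #   with open('local.bin', 'wb') as f:
+    #       ftp.retrbinary('RETR remote.bin', f.write)
+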
+ def retrlines(self, cmd, callback = None):
+ """Retrieve data in line mode. A new port is created for you.
+
+ Args:
+ cmd: A RETR, LIST, or NLST command.
+ callback: An optional single parameter callable that is called
+ for each line with the trailing CRLF stripped.
+ [default: print_line()]
+
+ Returns:
+ The response code.
+ """
+ if callback is None:
+ callback = print_line
+ resp = self.sendcmd('TYPE A')
+ with self.transfercmd(cmd) as conn, \
+ conn.makefile('r', encoding=self.encoding) as fp:
+ while 1:
+ line = fp.readline(self.maxline + 1)
+ if len(line) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
+ if self.debugging > 2:
+ print('*retr*', repr(line))
+ if not line:
+ break
+ if line[-2:] == CRLF:
+ line = line[:-2]
+ elif line[-1:] == '\n':
+ line = line[:-1]
+ callback(line)
+ # shutdown ssl layer
+ if _SSLSocket is not None and isinstance(conn, _SSLSocket):
+ conn.unwrap()
+ return self.voidresp()
+
+ def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
+ """Store a file in binary mode. A new port is created for you.
+
+ Args:
+ cmd: A STOR command.
+ fp: A file-like object with a read(num_bytes) method.
+ blocksize: The maximum data size to read from fp and send over
+ the connection at once. [default: 8192]
+ callback: An optional single parameter callable that is called on
+ each block of data after it is sent. [default: None]
+ rest: Passed to transfercmd(). [default: None]
+
+ Returns:
+ The response code.
+ """
+ self.voidcmd('TYPE I')
+ with self.transfercmd(cmd, rest) as conn:
+ while 1:
+ buf = fp.read(blocksize)
+ if not buf:
+ break
+ conn.sendall(buf)
+ if callback:
+ callback(buf)
+ # shutdown ssl layer
+ if _SSLSocket is not None and isinstance(conn, _SSLSocket):
+ conn.unwrap()
+ return self.voidresp()
+
+ def storlines(self, cmd, fp, callback=None):
+ """Store a file in line mode. A new port is created for you.
+
+ Args:
+ cmd: A STOR command.
+ fp: A file-like object with a readline() method.
+ callback: An optional single parameter callable that is called on
+ each line after it is sent. [default: None]
+
+ Returns:
+ The response code.
+ """
+ self.voidcmd('TYPE A')
+ with self.transfercmd(cmd) as conn:
+ while 1:
+ buf = fp.readline(self.maxline + 1)
+ if len(buf) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
+ if not buf:
+ break
+ if buf[-2:] != B_CRLF:
+ if buf[-1] in B_CRLF: buf = buf[:-1]
+ buf = buf + B_CRLF
+ conn.sendall(buf)
+ if callback:
+ callback(buf)
+ # shutdown ssl layer
+ if _SSLSocket is not None and isinstance(conn, _SSLSocket):
+ conn.unwrap()
+ return self.voidresp()
+
+ def acct(self, password):
+ '''Send new account name.'''
+ cmd = 'ACCT ' + password
+ return self.voidcmd(cmd)
+
+ def nlst(self, *args):
+ '''Return a list of files in a given directory (default the current).'''
+ cmd = 'NLST'
+ for arg in args:
+ cmd = cmd + (' ' + arg)
+ files = []
+ self.retrlines(cmd, files.append)
+ return files
+
+ def dir(self, *args):
+ '''List a directory in long form.
+ By default list current directory to stdout.
+ Optional last argument is callback function; all
+ non-empty arguments before it are concatenated to the
+ LIST command. (This *should* only be used for a pathname.)'''
+ cmd = 'LIST'
+ func = None
+ if args[-1:] and type(args[-1]) != type(''):
+ args, func = args[:-1], args[-1]
+ for arg in args:
+ if arg:
+ cmd = cmd + (' ' + arg)
+ self.retrlines(cmd, func)
+
+ def mlsd(self, path="", facts=[]):
+ '''List a directory in a standardized format by using MLSD
+ command (RFC-3659). If path is omitted the current directory
+ is assumed. "facts" is a list of strings representing the type
+ of information desired (e.g. ["type", "size", "perm"]).
+
+ Return a generator object yielding a tuple of two elements
+ for every file found in path.
+ First element is the file name, the second one is a dictionary
+ including a variable number of "facts" depending on the server
+ and whether "facts" argument has been provided.
+ '''
+ if facts:
+ self.sendcmd("OPTS MLST " + ";".join(facts) + ";")
+ if path:
+ cmd = "MLSD %s" % path
+ else:
+ cmd = "MLSD"
+ lines = []
+ self.retrlines(cmd, lines.append)
+ for line in lines:
+ facts_found, _, name = line.rstrip(CRLF).partition(' ')
+ entry = {}
+ for fact in facts_found[:-1].split(";"):
+ key, _, value = fact.partition("=")
+ entry[key.lower()] = value
+ yield (name, entry)
+
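+    # Sketch of the parsing above with an illustrative MLSD reply line:
+    #   'type=file;size=512;modify=20201231235959; report.txt'
+    # yields ('report.txt',
+    #         {'type': 'file', 'size': '512', 'modify': '20201231235959'}).
+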
+ def rename(self, fromname, toname):
+ '''Rename a file.'''
+ resp = self.sendcmd('RNFR ' + fromname)
+ if resp[0] != '3':
+ raise error_reply(resp)
+ return self.voidcmd('RNTO ' + toname)
+
+ def delete(self, filename):
+ '''Delete a file.'''
+ resp = self.sendcmd('DELE ' + filename)
+ if resp[:3] in {'250', '200'}:
+ return resp
+ else:
+ raise error_reply(resp)
+
+ def cwd(self, dirname):
+ '''Change to a directory.'''
+ if dirname == '..':
+ try:
+ return self.voidcmd('CDUP')
+ except error_perm as msg:
+ if msg.args[0][:3] != '500':
+ raise
+ elif dirname == '':
+ dirname = '.' # does nothing, but could return error
+ cmd = 'CWD ' + dirname
+ return self.voidcmd(cmd)
+
+ def size(self, filename):
+ '''Retrieve the size of a file.'''
+ # The SIZE command is defined in RFC-3659
+ resp = self.sendcmd('SIZE ' + filename)
+ if resp[:3] == '213':
+ s = resp[3:].strip()
+ return int(s)
+
+ def mkd(self, dirname):
+ '''Make a directory, return its full pathname.'''
+ resp = self.voidcmd('MKD ' + dirname)
+ # fix around non-compliant implementations such as IIS shipped
+ # with Windows server 2003
+ if not resp.startswith('257'):
+ return ''
+ return parse257(resp)
+
+ def rmd(self, dirname):
+ '''Remove a directory.'''
+ return self.voidcmd('RMD ' + dirname)
+
+ def pwd(self):
+ '''Return current working directory.'''
+ resp = self.voidcmd('PWD')
+ # fix around non-compliant implementations such as IIS shipped
+ # with Windows server 2003
+ if not resp.startswith('257'):
+ return ''
+ return parse257(resp)
+
+ def quit(self):
+ '''Quit, and close the connection.'''
+ resp = self.voidcmd('QUIT')
+ self.close()
+ return resp
+
+ def close(self):
+ '''Close the connection without assuming anything about it.'''
+ try:
+ file = self.file
+ self.file = None
+ if file is not None:
+ file.close()
+ finally:
+ sock = self.sock
+ self.sock = None
+ if sock is not None:
+ sock.close()
+
+try:
+ import ssl
+except ImportError:
+ _SSLSocket = None
+else:
+ _SSLSocket = ssl.SSLSocket
+
+ class FTP_TLS(FTP):
+ '''A FTP subclass which adds TLS support to FTP as described
+ in RFC-4217.
+
+ Connect as usual to port 21 implicitly securing the FTP control
+ connection before authenticating.
+
+ Securing the data connection requires user to explicitly ask
+ for it by calling prot_p() method.
+
+ Usage example:
+ >>> from ftplib import FTP_TLS
+ >>> ftps = FTP_TLS('ftp.python.org')
+ >>> ftps.login() # login anonymously previously securing control channel
+ '230 Guest login ok, access restrictions apply.'
+ >>> ftps.prot_p() # switch to secure data connection
+ '200 Protection level set to P'
+ >>> ftps.retrlines('LIST') # list directory content securely
+ total 9
+ drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
+ drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
+ drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
+ drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
+ d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
+ drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
+ drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
+ drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
+ -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
+ '226 Transfer complete.'
+ >>> ftps.quit()
+ '221 Goodbye.'
+ >>>
+ '''
+ ssl_version = ssl.PROTOCOL_TLS_CLIENT
+
+ def __init__(self, host='', user='', passwd='', acct='',
+ keyfile=None, certfile=None, context=None,
+ timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *,
+ encoding='utf-8'):
+ if context is not None and keyfile is not None:
+ raise ValueError("context and keyfile arguments are mutually "
+ "exclusive")
+ if context is not None and certfile is not None:
+ raise ValueError("context and certfile arguments are mutually "
+ "exclusive")
+ if keyfile is not None or certfile is not None:
+ import warnings
+ warnings.warn("keyfile and certfile are deprecated, use a "
+ "custom context instead", DeprecationWarning, 2)
+ self.keyfile = keyfile
+ self.certfile = certfile
+ if context is None:
+ context = ssl._create_stdlib_context(self.ssl_version,
+ certfile=certfile,
+ keyfile=keyfile)
+ self.context = context
+ self._prot_p = False
+ super().__init__(host, user, passwd, acct,
+ timeout, source_address, encoding=encoding)
+
+ def login(self, user='', passwd='', acct='', secure=True):
+ if secure and not isinstance(self.sock, ssl.SSLSocket):
+ self.auth()
+ return super().login(user, passwd, acct)
+
+ def auth(self):
+ '''Set up secure control connection by using TLS/SSL.'''
+ if isinstance(self.sock, ssl.SSLSocket):
+ raise ValueError("Already using TLS")
+ if self.ssl_version >= ssl.PROTOCOL_TLS:
+ resp = self.voidcmd('AUTH TLS')
+ else:
+ resp = self.voidcmd('AUTH SSL')
+ self.sock = self.context.wrap_socket(self.sock, server_hostname=self.host)
+ self.file = self.sock.makefile(mode='r', encoding=self.encoding)
+ return resp
+
+ def ccc(self):
+ '''Switch back to a clear-text control connection.'''
+ if not isinstance(self.sock, ssl.SSLSocket):
+ raise ValueError("not using TLS")
+ resp = self.voidcmd('CCC')
+ self.sock = self.sock.unwrap()
+ return resp
+
+ def prot_p(self):
+ '''Set up secure data connection.'''
+ # PROT defines whether or not the data channel is to be protected.
+ # Though RFC-2228 defines four possible protection levels,
+ # RFC-4217 only recommends two, Clear and Private.
+ # Clear (PROT C) means that no security is to be used on the
+ # data-channel, Private (PROT P) means that the data-channel
+ # should be protected by TLS.
+ # PBSZ command MUST still be issued, but must have a parameter of
+ # '0' to indicate that no buffering is taking place and the data
+ # connection should not be encapsulated.
+ self.voidcmd('PBSZ 0')
+ resp = self.voidcmd('PROT P')
+ self._prot_p = True
+ return resp
+
+ def prot_c(self):
+ '''Set up clear text data connection.'''
+ resp = self.voidcmd('PROT C')
+ self._prot_p = False
+ return resp
+
+ # --- Overridden FTP methods
+
+ def ntransfercmd(self, cmd, rest=None):
+ conn, size = super().ntransfercmd(cmd, rest)
+ if self._prot_p:
+ conn = self.context.wrap_socket(conn,
+ server_hostname=self.host)
+ return conn, size
+
+ def abort(self):
+ # overridden as we can't pass MSG_OOB flag to sendall()
+ line = b'ABOR' + B_CRLF
+ self.sock.sendall(line)
+ resp = self.getmultiline()
+ if resp[:3] not in {'426', '225', '226'}:
+ raise error_proto(resp)
+ return resp
+
+ __all__.append('FTP_TLS')
+ all_errors = (Error, OSError, EOFError, ssl.SSLError)
+
+
+_150_re = None
+
+def parse150(resp):
+ '''Parse the '150' response for a RETR request.
+ Returns the expected transfer size or None; size is not guaranteed to
+ be present in the 150 message.
+ '''
+ if resp[:3] != '150':
+ raise error_reply(resp)
+ global _150_re
+ if _150_re is None:
+ import re
+ _150_re = re.compile(
+ r"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII)
+ m = _150_re.match(resp)
+ if not m:
+ return None
+ return int(m.group(1))
+
+
+_227_re = None
+
+def parse227(resp):
+ '''Parse the '227' response for a PASV request.
+ Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
+ Return ('host.addr.as.numbers', port#) tuple.'''
+ if resp[:3] != '227':
+ raise error_reply(resp)
+ global _227_re
+ if _227_re is None:
+ import re
+ _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)', re.ASCII)
+ m = _227_re.search(resp)
+ if not m:
+ raise error_proto(resp)
+ numbers = m.groups()
+ host = '.'.join(numbers[:4])
+ port = (int(numbers[4]) << 8) + int(numbers[5])
+ return host, port
+
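+# A minimal sketch (reply text illustrative): the six digit groups are
+# the four IPv4 octets plus the port split into high and low bytes, so
+#   parse227('227 Entering Passive Mode (192,168,1,2,19,137)')
+# returns ('192.168.1.2', (19 << 8) + 137), i.e. ('192.168.1.2', 5001).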
+
+def parse229(resp, peer):
+ '''Parse the '229' response for an EPSV request.
+ Raises error_proto if it does not contain '(|||port|)'
+ Return ('host.addr.as.numbers', port#) tuple.'''
+ if resp[:3] != '229':
+ raise error_reply(resp)
+ left = resp.find('(')
+ if left < 0: raise error_proto(resp)
+ right = resp.find(')', left + 1)
+ if right < 0:
+ raise error_proto(resp) # should contain '(|||port|)'
+ if resp[left + 1] != resp[right - 1]:
+ raise error_proto(resp)
+ parts = resp[left + 1:right].split(resp[left+1])
+ if len(parts) != 5:
+ raise error_proto(resp)
+ host = peer[0]
+ port = int(parts[3])
+ return host, port
+
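+# A minimal sketch (reply and peer illustrative): only the port is taken
+# from the reply; the host is reused from the control connection, so
+#   parse229('229 Entering Extended Passive Mode (|||6446|)',
+#            ('10.0.0.5', 21))
+# returns ('10.0.0.5', 6446).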
+
+def parse257(resp):
+ '''Parse the '257' response for a MKD or PWD request.
+ This is a response to a MKD or PWD request: a directory name.
+ Returns the directoryname in the 257 reply.'''
+ if resp[:3] != '257':
+ raise error_reply(resp)
+ if resp[3:5] != ' "':
+ return '' # Not compliant to RFC 959, but UNIX ftpd does this
+ dirname = ''
+ i = 5
+ n = len(resp)
+ while i < n:
+ c = resp[i]
+ i = i+1
+ if c == '"':
+ if i >= n or resp[i] != '"':
+ break
+ i = i+1
+ dirname = dirname + c
+ return dirname
+
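+# A minimal sketch (reply text illustrative): quotes inside the name are
+# doubled per RFC 959, so
+#   parse257('257 "/tmp/my ""dir""" created')
+# returns '/tmp/my "dir"'.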
+
+def print_line(line):
+ '''Default retrlines callback to print a line.'''
+ print(line)
+
+
+def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
+ '''Copy file from one FTP-instance to another.'''
+ if not targetname:
+ targetname = sourcename
+ type = 'TYPE ' + type
+ source.voidcmd(type)
+ target.voidcmd(type)
+ sourcehost, sourceport = parse227(source.sendcmd('PASV'))
+ target.sendport(sourcehost, sourceport)
+ # RFC 959: the user must "listen" [...] BEFORE sending the
+ # transfer request.
+ # So: STOR before RETR, because here the target is a "user".
+ treply = target.sendcmd('STOR ' + targetname)
+ if treply[:3] not in {'125', '150'}:
+ raise error_proto # RFC 959
+ sreply = source.sendcmd('RETR ' + sourcename)
+ if sreply[:3] not in {'125', '150'}:
+ raise error_proto # RFC 959
+ source.voidresp()
+ target.voidresp()
+
+
+def test():
+ '''Test program.
+ Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
+
+ -d dir
+ -l list
+ -p password
+ '''
+
+ if len(sys.argv) < 2:
+ print(test.__doc__)
+ sys.exit(0)
+
+ import netrc
+
+ debugging = 0
+ rcfile = None
+ while sys.argv[1] == '-d':
+ debugging = debugging+1
+ del sys.argv[1]
+ if sys.argv[1][:2] == '-r':
+ # get name of alternate ~/.netrc file:
+ rcfile = sys.argv[1][2:]
+ del sys.argv[1]
+ host = sys.argv[1]
+ ftp = FTP(host)
+ ftp.set_debuglevel(debugging)
+ userid = passwd = acct = ''
+ try:
+ netrcobj = netrc.netrc(rcfile)
+ except OSError:
+ if rcfile is not None:
+ sys.stderr.write("Could not open account file"
+ " -- using anonymous login.")
+ else:
+ try:
+ userid, acct, passwd = netrcobj.authenticators(host)
+ except KeyError:
+ # no account for host
+ sys.stderr.write(
+ "No account -- using anonymous login.")
+ ftp.login(userid, passwd, acct)
+ for file in sys.argv[2:]:
+ if file[:2] == '-l':
+ ftp.dir(file[2:])
+ elif file[:2] == '-d':
+ cmd = 'CWD'
+ if file[2:]: cmd = cmd + ' ' + file[2:]
+ resp = ftp.sendcmd(cmd)
+ elif file == '-p':
+ ftp.set_pasv(not ftp.passiveserver)
+ else:
+ ftp.retrbinary('RETR ' + file, \
+ sys.stdout.write, 1024)
+ ftp.quit()
+
+
+if __name__ == '__main__':
+ test()
diff --git a/infer_4_37_2/lib/python3.10/getopt.py b/infer_4_37_2/lib/python3.10/getopt.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d4cab1bac360dda9d49dd8eef258f23dc252a2b
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/getopt.py
@@ -0,0 +1,215 @@
+"""Parser for command line options.
+
+This module helps scripts to parse the command line arguments in
+sys.argv. It supports the same conventions as the Unix getopt()
+function (including the special meanings of arguments of the form `-'
+and `--'). Long options similar to those supported by GNU software
+may be used as well via an optional third argument. This module
+provides two functions and an exception:
+
+getopt() -- Parse command line options
+gnu_getopt() -- Like getopt(), but allow option and non-option arguments
+to be intermixed.
+GetoptError -- exception (class) raised with 'opt' attribute, which is the
+option involved with the exception.
+"""
+
+# Long option support added by Lars Wirzenius <liw@iki.fi>.
+#
+# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
+# to class-based exceptions.
+#
+# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
+#
+# TODO for gnu_getopt():
+#
+# - GNU getopt_long_only mechanism
+# - allow the caller to specify ordering
+# - RETURN_IN_ORDER option
+# - GNU extension with '-' as first character of option string
+# - optional arguments, specified by double colons
+# - an option string with a W followed by semicolon should
+# treat "-W foo" as "--foo"
+
+__all__ = ["GetoptError","error","getopt","gnu_getopt"]
+
+import os
+try:
+ from gettext import gettext as _
+except ImportError:
+ # Bootstrapping Python: gettext's dependencies not built yet
+ def _(s): return s
+
+class GetoptError(Exception):
+ opt = ''
+ msg = ''
+ def __init__(self, msg, opt=''):
+ self.msg = msg
+ self.opt = opt
+ Exception.__init__(self, msg, opt)
+
+ def __str__(self):
+ return self.msg
+
+error = GetoptError # backward compatibility
+
+def getopt(args, shortopts, longopts = []):
+ """getopt(args, options[, long_options]) -> opts, args
+
+ Parses command line options and parameter list. args is the
+ argument list to be parsed, without the leading reference to the
+ running program. Typically, this means "sys.argv[1:]". shortopts
+ is the string of option letters that the script wants to
+ recognize, with options that require an argument followed by a
+ colon (i.e., the same format that Unix getopt() uses). If
+ specified, longopts is a list of strings with the names of the
+ long options which should be supported. The leading '--'
+ characters should not be included in the option name. Options
+ which require an argument should be followed by an equal sign
+ ('=').
+
+ The return value consists of two elements: the first is a list of
+ (option, value) pairs; the second is the list of program arguments
+ left after the option list was stripped (this is a trailing slice
+ of the first argument). Each option-and-value pair returned has
+ the option as its first element, prefixed with a hyphen (e.g.,
+ '-x'), and the option argument as its second element, or an empty
+ string if the option has no argument. The options occur in the
+ list in the same order in which they were found, thus allowing
+ multiple occurrences. Long and short options may be mixed.
+
+ """
+
+ opts = []
+ if type(longopts) == type(""):
+ longopts = [longopts]
+ else:
+ longopts = list(longopts)
+ while args and args[0].startswith('-') and args[0] != '-':
+ if args[0] == '--':
+ args = args[1:]
+ break
+ if args[0].startswith('--'):
+ opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
+ else:
+ opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
+
+ return opts, args
+
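+# A minimal usage sketch (options illustrative):
+#   getopt(['-a', 'val', '--beta', 'rest'], 'a:b', ['beta'])
+# returns ([('-a', 'val'), ('--beta', '')], ['rest']).
+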
+def gnu_getopt(args, shortopts, longopts = []):
+ """getopt(args, options[, long_options]) -> opts, args
+
+ This function works like getopt(), except that GNU style scanning
+ mode is used by default. This means that option and non-option
+ arguments may be intermixed. The getopt() function stops
+ processing options as soon as a non-option argument is
+ encountered.
+
+ If the first character of the option string is `+', or if the
+ environment variable POSIXLY_CORRECT is set, then option
+ processing stops as soon as a non-option argument is encountered.
+
+ """
+
+ opts = []
+ prog_args = []
+ if isinstance(longopts, str):
+ longopts = [longopts]
+ else:
+ longopts = list(longopts)
+
+ # Allow options after non-option arguments?
+ if shortopts.startswith('+'):
+ shortopts = shortopts[1:]
+ all_options_first = True
+ elif os.environ.get("POSIXLY_CORRECT"):
+ all_options_first = True
+ else:
+ all_options_first = False
+
+ while args:
+ if args[0] == '--':
+ prog_args += args[1:]
+ break
+
+ if args[0][:2] == '--':
+ opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
+ elif args[0][:1] == '-' and args[0] != '-':
+ opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
+ else:
+ if all_options_first:
+ prog_args += args
+ break
+ else:
+ prog_args.append(args[0])
+ args = args[1:]
+
+ return opts, prog_args
+
+def do_longs(opts, opt, longopts, args):
+ try:
+ i = opt.index('=')
+ except ValueError:
+ optarg = None
+ else:
+ opt, optarg = opt[:i], opt[i+1:]
+
+ has_arg, opt = long_has_args(opt, longopts)
+ if has_arg:
+ if optarg is None:
+ if not args:
+ raise GetoptError(_('option --%s requires argument') % opt, opt)
+ optarg, args = args[0], args[1:]
+ elif optarg is not None:
+ raise GetoptError(_('option --%s must not have an argument') % opt, opt)
+ opts.append(('--' + opt, optarg or ''))
+ return opts, args
+
+# Return:
+# has_arg?
+# full option name
+def long_has_args(opt, longopts):
+ possibilities = [o for o in longopts if o.startswith(opt)]
+ if not possibilities:
+ raise GetoptError(_('option --%s not recognized') % opt, opt)
+ # Is there an exact match?
+ if opt in possibilities:
+ return False, opt
+ elif opt + '=' in possibilities:
+ return True, opt
+ # No exact match, so better be unique.
+ if len(possibilities) > 1:
+ # XXX since possibilities contains all valid continuations, might be
+ # nice to work them into the error msg
+ raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
+ assert len(possibilities) == 1
+ unique_match = possibilities[0]
+ has_arg = unique_match.endswith('=')
+ if has_arg:
+ unique_match = unique_match[:-1]
+ return has_arg, unique_match
+
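+# Prefix-matching sketch (option names illustrative): with
+# longopts=['alpha=', 'beta'] the abbreviation 'al' is unique, so
+# long_has_args('al', ['alpha=', 'beta']) returns (True, 'alpha'),
+# while an ambiguous prefix like 'b' against ['beta', 'brave'] raises
+# GetoptError.
+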
+def do_shorts(opts, optstring, shortopts, args):
+ while optstring != '':
+ opt, optstring = optstring[0], optstring[1:]
+ if short_has_arg(opt, shortopts):
+ if optstring == '':
+ if not args:
+ raise GetoptError(_('option -%s requires argument') % opt,
+ opt)
+ optstring, args = args[0], args[1:]
+ optarg, optstring = optstring, ''
+ else:
+ optarg = ''
+ opts.append(('-' + opt, optarg))
+ return opts, args
+
+def short_has_arg(opt, shortopts):
+ for i in range(len(shortopts)):
+ if opt == shortopts[i] != ':':
+ return shortopts.startswith(':', i+1)
+ raise GetoptError(_('option -%s not recognized') % opt, opt)
+
+if __name__ == '__main__':
+ import sys
+ print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
diff --git a/infer_4_37_2/lib/python3.10/getpass.py b/infer_4_37_2/lib/python3.10/getpass.py
new file mode 100644
index 0000000000000000000000000000000000000000..6970d8adfbab3673de681ef05059732dffa8f64c
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/getpass.py
@@ -0,0 +1,185 @@
+"""Utilities to get a password and/or the current user name.
+
+getpass(prompt[, stream]) - Prompt for a password, with echo turned off.
+getuser() - Get the user name from the environment or password database.
+
+GetPassWarning - This UserWarning is issued when getpass() cannot prevent
+ echoing of the password contents while reading.
+
+On Windows, the msvcrt module will be used.
+
+"""
+
+# Authors: Piers Lauder (original)
+# Guido van Rossum (Windows support and cleanup)
+# Gregory P. Smith (tty support & GetPassWarning)
+
+import contextlib
+import io
+import os
+import sys
+import warnings
+
+__all__ = ["getpass","getuser","GetPassWarning"]
+
+
+class GetPassWarning(UserWarning): pass
+
+
+def unix_getpass(prompt='Password: ', stream=None):
+ """Prompt for a password, with echo turned off.
+
+ Args:
+ prompt: Written on stream to ask for the input. Default: 'Password: '
+ stream: A writable file object to display the prompt. Defaults to
+ the tty. If no tty is available defaults to sys.stderr.
+ Returns:
+ The seKr3t input.
+ Raises:
+ EOFError: If our input tty or stdin was closed.
+ GetPassWarning: When we were unable to turn echo off on the input.
+
+ Always restores terminal settings before returning.
+ """
+ passwd = None
+ with contextlib.ExitStack() as stack:
+ try:
+ # Always try reading and writing directly on the tty first.
+ fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY)
+ tty = io.FileIO(fd, 'w+')
+ stack.enter_context(tty)
+ input = io.TextIOWrapper(tty)
+ stack.enter_context(input)
+ if not stream:
+ stream = input
+ except OSError:
+ # If that fails, see if stdin can be controlled.
+ stack.close()
+ try:
+ fd = sys.stdin.fileno()
+ except (AttributeError, ValueError):
+ fd = None
+ passwd = fallback_getpass(prompt, stream)
+ input = sys.stdin
+ if not stream:
+ stream = sys.stderr
+
+ if fd is not None:
+ try:
+ old = termios.tcgetattr(fd) # a copy to save
+ new = old[:]
+ new[3] &= ~termios.ECHO # 3 == 'lflags'
+ tcsetattr_flags = termios.TCSAFLUSH
+ if hasattr(termios, 'TCSASOFT'):
+ tcsetattr_flags |= termios.TCSASOFT
+ try:
+ termios.tcsetattr(fd, tcsetattr_flags, new)
+ passwd = _raw_input(prompt, stream, input=input)
+ finally:
+ termios.tcsetattr(fd, tcsetattr_flags, old)
+ stream.flush() # issue7208
+ except termios.error:
+ if passwd is not None:
+ # _raw_input succeeded. The final tcsetattr failed. Reraise
+ # instead of leaving the terminal in an unknown state.
+ raise
+ # We can't control the tty or stdin. Give up and use normal IO.
+ # fallback_getpass() raises an appropriate warning.
+ if stream is not input:
+ # clean up unused file objects before blocking
+ stack.close()
+ passwd = fallback_getpass(prompt, stream)
+
+ stream.write('\n')
+ return passwd
+
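+# A minimal usage sketch (prompt illustrative): on Unix the module-level
+# name `getpass` is bound to this function (see the bottom of this
+# file), so callers typically write:
+#   import getpass
+#   pin = getpass.getpass('PIN: ')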
+
+def win_getpass(prompt='Password: ', stream=None):
+ """Prompt for password with echo off, using Windows getwch()."""
+ if sys.stdin is not sys.__stdin__:
+ return fallback_getpass(prompt, stream)
+
+ for c in prompt:
+ msvcrt.putwch(c)
+ pw = ""
+ while 1:
+ c = msvcrt.getwch()
+ if c == '\r' or c == '\n':
+ break
+ if c == '\003':
+ raise KeyboardInterrupt
+ if c == '\b':
+ pw = pw[:-1]
+ else:
+ pw = pw + c
+ msvcrt.putwch('\r')
+ msvcrt.putwch('\n')
+ return pw
+
+
+def fallback_getpass(prompt='Password: ', stream=None):
+ warnings.warn("Can not control echo on the terminal.", GetPassWarning,
+ stacklevel=2)
+ if not stream:
+ stream = sys.stderr
+ print("Warning: Password input may be echoed.", file=stream)
+ return _raw_input(prompt, stream)
+
+
+def _raw_input(prompt="", stream=None, input=None):
+ # This doesn't save the string in the GNU readline history.
+ if not stream:
+ stream = sys.stderr
+ if not input:
+ input = sys.stdin
+ prompt = str(prompt)
+ if prompt:
+ try:
+ stream.write(prompt)
+ except UnicodeEncodeError:
+ # Use replace error handler to get as much as possible printed.
+ prompt = prompt.encode(stream.encoding, 'replace')
+ prompt = prompt.decode(stream.encoding)
+ stream.write(prompt)
+ stream.flush()
+ # NOTE: The Python C API calls flockfile() (and unlock) during readline.
+ line = input.readline()
+ if not line:
+ raise EOFError
+ if line[-1] == '\n':
+ line = line[:-1]
+ return line
+
+
+def getuser():
+ """Get the username from the environment or password database.
+
+ First try various environment variables, then the password
+ database. This works on Windows as long as USERNAME is set.
+
+ """
+
+ for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
+ user = os.environ.get(name)
+ if user:
+ return user
+
+ # If this fails, the exception will "explain" why
+ import pwd
+ return pwd.getpwuid(os.getuid())[0]
+
+# Bind the name getpass to the appropriate function
+try:
+ import termios
+ # it's possible there is an incompatible termios from the
+ # McMillan Installer, make sure we have a UNIX-compatible termios
+ termios.tcgetattr, termios.tcsetattr
+except (ImportError, AttributeError):
+ try:
+ import msvcrt
+ except ImportError:
+ getpass = fallback_getpass
+ else:
+ getpass = win_getpass
+else:
+ getpass = unix_getpass
diff --git a/infer_4_37_2/lib/python3.10/gettext.py b/infer_4_37_2/lib/python3.10/gettext.py
new file mode 100644
index 0000000000000000000000000000000000000000..77b67aef4204c909931d6eb1f75854324ef796a9
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/gettext.py
@@ -0,0 +1,788 @@
+"""Internationalization and localization support.
+
+This module provides internationalization (I18N) and localization (L10N)
+support for your Python programs by providing an interface to the GNU gettext
+message catalog library.
+
+I18N refers to the operation by which a program is made aware of multiple
+languages. L10N refers to the adaptation of your program, once
+internationalized, to the local language and cultural habits.
+
+"""
+
+# This module represents the integration of work, contributions, feedback, and
+# suggestions from the following people:
+#
+# Martin von Loewis, who wrote the initial implementation of the underlying
+# C-based libintlmodule (later renamed _gettext), along with a skeletal
+# gettext.py implementation.
+#
+# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
+# which also included a pure-Python implementation to read .mo files if
+# intlmodule wasn't available.
+#
+# James Henstridge, who also wrote a gettext.py module, which has some
+# interesting, but currently unsupported experimental features: the notion of
+# a Catalog class and instances, and the ability to add to a catalog file via
+# a Python API.
+#
+# Barry Warsaw integrated these modules, wrote the .install() API and code,
+# and conformed all C and Python code to Python's coding standards.
+#
+# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
+# module.
+#
+# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs.
+#
+# TODO:
+# - Lazy loading of .mo files. Currently the entire catalog is loaded into
+# memory, but that's probably bad for large translated programs. Instead,
+# the lexical sort of original strings in GNU .mo files should be exploited
+# to do binary searches and lazy initializations. Or you might want to use
+# the undocumented double-hash algorithm for .mo files with hash tables, but
+# you'll need to study the GNU gettext code to do this.
+#
+# - Support Solaris .mo file formats. Unfortunately, we've been unable to
+# find this format documented anywhere.
+
+
+import os
+import re
+import sys
+
+
+__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
+ 'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
+ 'bind_textdomain_codeset',
+ 'dgettext', 'dngettext', 'gettext', 'lgettext', 'ldgettext',
+ 'ldngettext', 'lngettext', 'ngettext',
+ 'pgettext', 'dpgettext', 'npgettext', 'dnpgettext',
+ ]
+
+_default_localedir = os.path.join(sys.base_prefix, 'share', 'locale')
+
+# Expression parsing for plural form selection.
+#
+# The gettext library supports a small subset of C syntax. The only
+# incompatible difference is that integer literals starting with zero are
+# decimal.
+#
+# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms
+# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y
+
+_token_pattern = re.compile(r"""
+        (?P<WHITESPACES>[ \t]+)                    | # spaces and horizontal tabs
+        (?P<NUMBER>[0-9]+\b)                       | # decimal integer
+        (?P<NAME>n\b)                              | # only n is allowed
+        (?P<PARENTHESIS>[()])                      |
+        (?P<OPERATOR>[-*/%+?:]|[><!]=?|==|&&|\|\|) | # !, *, /, %, +, -, <, >,
+                                                     # <=, >=, ==, !=, &&, ||,
+                                                     # ? :
+                                                     # unary and bitwise ops
+                                                     # not allowed
+        (?P<INVALID>\w+|.)                         # invalid token
+    """, re.VERBOSE|re.DOTALL)
+
+def _tokenize(plural):
+ for mo in re.finditer(_token_pattern, plural):
+ kind = mo.lastgroup
+ if kind == 'WHITESPACES':
+ continue
+ value = mo.group(kind)
+ if kind == 'INVALID':
+ raise ValueError('invalid token in plural form: %s' % value)
+ yield value
+ yield ''
+
+def _error(value):
+ if value:
+ return ValueError('unexpected token in plural form: %s' % value)
+ else:
+ return ValueError('unexpected end of plural form')
+
+_binary_ops = (
+ ('||',),
+ ('&&',),
+ ('==', '!='),
+ ('<', '>', '<=', '>='),
+ ('+', '-'),
+ ('*', '/', '%'),
+)
+_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops}
+_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'}
+
+def _parse(tokens, priority=-1):
+ result = ''
+ nexttok = next(tokens)
+ while nexttok == '!':
+ result += 'not '
+ nexttok = next(tokens)
+
+ if nexttok == '(':
+ sub, nexttok = _parse(tokens)
+ result = '%s(%s)' % (result, sub)
+ if nexttok != ')':
+ raise ValueError('unbalanced parenthesis in plural form')
+ elif nexttok == 'n':
+ result = '%s%s' % (result, nexttok)
+ else:
+ try:
+ value = int(nexttok, 10)
+ except ValueError:
+ raise _error(nexttok) from None
+ result = '%s%d' % (result, value)
+ nexttok = next(tokens)
+
+ j = 100
+ while nexttok in _binary_ops:
+ i = _binary_ops[nexttok]
+ if i < priority:
+ break
+ # Break chained comparisons
+ if i in (3, 4) and j in (3, 4): # '==', '!=', '<', '>', '<=', '>='
+ result = '(%s)' % result
+ # Replace some C operators by their Python equivalents
+ op = _c2py_ops.get(nexttok, nexttok)
+ right, nexttok = _parse(tokens, i + 1)
+ result = '%s %s %s' % (result, op, right)
+ j = i
+ if j == priority == 4: # '<', '>', '<=', '>='
+ result = '(%s)' % result
+
+ if nexttok == '?' and priority <= 0:
+ if_true, nexttok = _parse(tokens, 0)
+ if nexttok != ':':
+ raise _error(nexttok)
+ if_false, nexttok = _parse(tokens)
+ result = '%s if %s else %s' % (if_true, result, if_false)
+ if priority == 0:
+ result = '(%s)' % result
+
+ return result, nexttok
+
+def _as_int(n):
+ try:
+ i = round(n)
+ except TypeError:
+ raise TypeError('Plural value must be an integer, got %s' %
+ (n.__class__.__name__,)) from None
+ import warnings
+ warnings.warn('Plural value must be an integer, got %s' %
+ (n.__class__.__name__,),
+ DeprecationWarning, 4)
+ return n
+
+def c2py(plural):
+ """Gets a C expression as used in PO files for plural forms and returns a
+ Python function that implements an equivalent expression.
+ """
+
+ if len(plural) > 1000:
+ raise ValueError('plural form expression is too long')
+ try:
+ result, nexttok = _parse(_tokenize(plural))
+ if nexttok:
+ raise _error(nexttok)
+
+ depth = 0
+ for c in result:
+ if c == '(':
+ depth += 1
+ if depth > 20:
+ # Python compiler limit is about 90.
+ # The most complex example has 2.
+ raise ValueError('plural form expression is too complex')
+ elif c == ')':
+ depth -= 1
+
+ ns = {'_as_int': _as_int}
+ exec('''if True:
+ def func(n):
+ if not isinstance(n, int):
+ n = _as_int(n)
+ return int(%s)
+ ''' % result, ns)
+ return ns['func']
+ except RecursionError:
+ # Recursion error can be raised in _parse() or exec().
+ raise ValueError('plural form expression is too complex')
+
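+# A minimal sketch (expressions illustrative): the germanic rule
+#   f = c2py('n != 1')
+# gives f(1) == 0 and f(2) == 1, while a three-form rule such as
+#   c2py('n==1 ? 0 : n>=2 && n<=4 ? 1 : 2')
+# maps 1 -> 0, 3 -> 1 and 5 -> 2.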
+
+def _expand_lang(loc):
+ import locale
+ loc = locale.normalize(loc)
+ COMPONENT_CODESET = 1 << 0
+ COMPONENT_TERRITORY = 1 << 1
+ COMPONENT_MODIFIER = 1 << 2
+ # split up the locale into its base components
+ mask = 0
+ pos = loc.find('@')
+ if pos >= 0:
+ modifier = loc[pos:]
+ loc = loc[:pos]
+ mask |= COMPONENT_MODIFIER
+ else:
+ modifier = ''
+ pos = loc.find('.')
+ if pos >= 0:
+ codeset = loc[pos:]
+ loc = loc[:pos]
+ mask |= COMPONENT_CODESET
+ else:
+ codeset = ''
+ pos = loc.find('_')
+ if pos >= 0:
+ territory = loc[pos:]
+ loc = loc[:pos]
+ mask |= COMPONENT_TERRITORY
+ else:
+ territory = ''
+ language = loc
+ ret = []
+ for i in range(mask+1):
+ if not (i & ~mask): # if all components for this combo exist ...
+ val = language
+ if i & COMPONENT_TERRITORY: val += territory
+ if i & COMPONENT_CODESET: val += codeset
+ if i & COMPONENT_MODIFIER: val += modifier
+ ret.append(val)
+ ret.reverse()
+ return ret
+
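+# A minimal sketch (locale string illustrative): assuming
+# locale.normalize() leaves the tag unchanged,
+#   _expand_lang('de_DE.UTF-8')
+# returns ['de_DE.UTF-8', 'de_DE', 'de.UTF-8', 'de'], ordered from most
+# to least specific.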
+
+
+class NullTranslations:
+ def __init__(self, fp=None):
+ self._info = {}
+ self._charset = None
+ self._output_charset = None
+ self._fallback = None
+ if fp is not None:
+ self._parse(fp)
+
+ def _parse(self, fp):
+ pass
+
+ def add_fallback(self, fallback):
+ if self._fallback:
+ self._fallback.add_fallback(fallback)
+ else:
+ self._fallback = fallback
+
+ def gettext(self, message):
+ if self._fallback:
+ return self._fallback.gettext(message)
+ return message
+
+ def lgettext(self, message):
+ import warnings
+ warnings.warn('lgettext() is deprecated, use gettext() instead',
+ DeprecationWarning, 2)
+ import locale
+ if self._fallback:
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', r'.*\blgettext\b.*',
+ DeprecationWarning)
+ return self._fallback.lgettext(message)
+ if self._output_charset:
+ return message.encode(self._output_charset)
+ return message.encode(locale.getpreferredencoding())
+
+ def ngettext(self, msgid1, msgid2, n):
+ if self._fallback:
+ return self._fallback.ngettext(msgid1, msgid2, n)
+ if n == 1:
+ return msgid1
+ else:
+ return msgid2
+
+ def lngettext(self, msgid1, msgid2, n):
+ import warnings
+ warnings.warn('lngettext() is deprecated, use ngettext() instead',
+ DeprecationWarning, 2)
+ import locale
+ if self._fallback:
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', r'.*\blngettext\b.*',
+ DeprecationWarning)
+ return self._fallback.lngettext(msgid1, msgid2, n)
+ if n == 1:
+ tmsg = msgid1
+ else:
+ tmsg = msgid2
+ if self._output_charset:
+ return tmsg.encode(self._output_charset)
+ return tmsg.encode(locale.getpreferredencoding())
+
+ def pgettext(self, context, message):
+ if self._fallback:
+ return self._fallback.pgettext(context, message)
+ return message
+
+ def npgettext(self, context, msgid1, msgid2, n):
+ if self._fallback:
+ return self._fallback.npgettext(context, msgid1, msgid2, n)
+ if n == 1:
+ return msgid1
+ else:
+ return msgid2
+
+ def info(self):
+ return self._info
+
+ def charset(self):
+ return self._charset
+
+ def output_charset(self):
+ import warnings
+ warnings.warn('output_charset() is deprecated',
+ DeprecationWarning, 2)
+ return self._output_charset
+
+ def set_output_charset(self, charset):
+ import warnings
+ warnings.warn('set_output_charset() is deprecated',
+ DeprecationWarning, 2)
+ self._output_charset = charset
+
+ def install(self, names=None):
+ import builtins
+ builtins.__dict__['_'] = self.gettext
+ if names is not None:
+ allowed = {'gettext', 'lgettext', 'lngettext',
+ 'ngettext', 'npgettext', 'pgettext'}
+ for name in allowed & set(names):
+ builtins.__dict__[name] = getattr(self, name)
+
+
+class GNUTranslations(NullTranslations):
+ # Magic number of .mo files
+ LE_MAGIC = 0x950412de
+ BE_MAGIC = 0xde120495
+
+ # The encoding of a msgctxt and a msgid in a .mo file is
+ # msgctxt + "\x04" + msgid (gettext version >= 0.15)
+ CONTEXT = "%s\x04%s"
+
+ # Acceptable .mo versions
+ VERSIONS = (0, 1)
+
+ def _get_versions(self, version):
+ """Returns a tuple of major version, minor version"""
+ return (version >> 16, version & 0xffff)
+
+ def _parse(self, fp):
+ """Override this method to support alternative .mo formats."""
+ # Delay struct import for speeding up gettext import when .mo files
+ # are not used.
+ from struct import unpack
+ filename = getattr(fp, 'name', '')
+ # Parse the .mo file header, which consists of 5 little endian 32
+ # bit words.
+ self._catalog = catalog = {}
+ self.plural = lambda n: int(n != 1) # germanic plural by default
+ buf = fp.read()
+ buflen = len(buf)
+ # Are we big endian or little endian?
+        magic = unpack('<I', buf[:4])[0]
+        if magic == self.LE_MAGIC:
+            version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
+            ii = '<II'
+        elif magic == self.BE_MAGIC:
+            version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
+            ii = '>II'
+        else:
+            raise OSError(0, 'Bad magic number', filename)
+
+ major_version, minor_version = self._get_versions(version)
+
+ if major_version not in self.VERSIONS:
+ raise OSError(0, 'Bad version number ' + str(major_version), filename)
+
+ # Now put all messages from the .mo file buffer into the catalog
+ # dictionary.
+ for i in range(0, msgcount):
+ mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
+ mend = moff + mlen
+ tlen, toff = unpack(ii, buf[transidx:transidx+8])
+ tend = toff + tlen
+ if mend < buflen and tend < buflen:
+ msg = buf[moff:mend]
+ tmsg = buf[toff:tend]
+ else:
+ raise OSError(0, 'File is corrupt', filename)
+ # See if we're looking at GNU .mo conventions for metadata
+ if mlen == 0:
+ # Catalog description
+ lastk = None
+ for b_item in tmsg.split(b'\n'):
+ item = b_item.decode().strip()
+ if not item:
+ continue
+ # Skip over comment lines:
+ if item.startswith('#-#-#-#-#') and item.endswith('#-#-#-#-#'):
+ continue
+ k = v = None
+ if ':' in item:
+ k, v = item.split(':', 1)
+ k = k.strip().lower()
+ v = v.strip()
+ self._info[k] = v
+ lastk = k
+ elif lastk:
+ self._info[lastk] += '\n' + item
+ if k == 'content-type':
+ self._charset = v.split('charset=')[1]
+ elif k == 'plural-forms':
+ v = v.split(';')
+ plural = v[1].split('plural=')[1]
+ self.plural = c2py(plural)
+ # Note: we unconditionally convert both msgids and msgstrs to
+ # Unicode using the character encoding specified in the charset
+ # parameter of the Content-Type header. The gettext documentation
+ # strongly encourages msgids to be us-ascii, but some applications
+ # require alternative encodings (e.g. Zope's ZCML and ZPT). For
+ # traditional gettext applications, the msgid conversion will
+ # cause no problems since us-ascii should always be a subset of
+ # the charset encoding. We may want to fall back to 8-bit msgids
+ # if the Unicode conversion fails.
+ charset = self._charset or 'ascii'
+ if b'\x00' in msg:
+ # Plural forms
+ msgid1, msgid2 = msg.split(b'\x00')
+ tmsg = tmsg.split(b'\x00')
+ msgid1 = str(msgid1, charset)
+ for i, x in enumerate(tmsg):
+ catalog[(msgid1, i)] = str(x, charset)
+ else:
+ catalog[str(msg, charset)] = str(tmsg, charset)
+ # advance to next entry in the seek tables
+ masteridx += 8
+ transidx += 8
+
+ def lgettext(self, message):
+ import warnings
+ warnings.warn('lgettext() is deprecated, use gettext() instead',
+ DeprecationWarning, 2)
+ import locale
+ missing = object()
+ tmsg = self._catalog.get(message, missing)
+ if tmsg is missing:
+ if self._fallback:
+ return self._fallback.lgettext(message)
+ tmsg = message
+ if self._output_charset:
+ return tmsg.encode(self._output_charset)
+ return tmsg.encode(locale.getpreferredencoding())
+
+ def lngettext(self, msgid1, msgid2, n):
+ import warnings
+ warnings.warn('lngettext() is deprecated, use ngettext() instead',
+ DeprecationWarning, 2)
+ import locale
+ try:
+ tmsg = self._catalog[(msgid1, self.plural(n))]
+ except KeyError:
+ if self._fallback:
+ return self._fallback.lngettext(msgid1, msgid2, n)
+ if n == 1:
+ tmsg = msgid1
+ else:
+ tmsg = msgid2
+ if self._output_charset:
+ return tmsg.encode(self._output_charset)
+ return tmsg.encode(locale.getpreferredencoding())
+
+ def gettext(self, message):
+ missing = object()
+ tmsg = self._catalog.get(message, missing)
+ if tmsg is missing:
+ if self._fallback:
+ return self._fallback.gettext(message)
+ return message
+ return tmsg
+
+ def ngettext(self, msgid1, msgid2, n):
+ try:
+ tmsg = self._catalog[(msgid1, self.plural(n))]
+ except KeyError:
+ if self._fallback:
+ return self._fallback.ngettext(msgid1, msgid2, n)
+ if n == 1:
+ tmsg = msgid1
+ else:
+ tmsg = msgid2
+ return tmsg
+
+ def pgettext(self, context, message):
+ ctxt_msg_id = self.CONTEXT % (context, message)
+ missing = object()
+ tmsg = self._catalog.get(ctxt_msg_id, missing)
+ if tmsg is missing:
+ if self._fallback:
+ return self._fallback.pgettext(context, message)
+ return message
+ return tmsg
+
+ def npgettext(self, context, msgid1, msgid2, n):
+ ctxt_msg_id = self.CONTEXT % (context, msgid1)
+ try:
+ tmsg = self._catalog[ctxt_msg_id, self.plural(n)]
+ except KeyError:
+ if self._fallback:
+ return self._fallback.npgettext(context, msgid1, msgid2, n)
+ if n == 1:
+ tmsg = msgid1
+ else:
+ tmsg = msgid2
+ return tmsg
+
+
+# Locate a .mo file using the gettext strategy
+def find(domain, localedir=None, languages=None, all=False):
+ # Get some reasonable defaults for arguments that were not supplied
+ if localedir is None:
+ localedir = _default_localedir
+ if languages is None:
+ languages = []
+ for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
+ val = os.environ.get(envar)
+ if val:
+ languages = val.split(':')
+ break
+ if 'C' not in languages:
+ languages.append('C')
+ # now normalize and expand the languages
+ nelangs = []
+ for lang in languages:
+ for nelang in _expand_lang(lang):
+ if nelang not in nelangs:
+ nelangs.append(nelang)
+ # select a language
+ if all:
+ result = []
+ else:
+ result = None
+ for lang in nelangs:
+ if lang == 'C':
+ break
+ mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
+ if os.path.exists(mofile):
+ if all:
+ result.append(mofile)
+ else:
+ return mofile
+ return result
+
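+# A minimal sketch (domain and paths hypothetical):
+#   find('myapp', localedir='/opt/locale', languages=['de_DE.UTF-8'])
+# probes /opt/locale/<lang>/LC_MESSAGES/myapp.mo for each expansion of
+# 'de_DE.UTF-8' (stopping at the implicit 'C' entry) and returns the
+# first existing path, or None.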
+
+
+# a mapping between absolute .mo file path and Translation object
+_translations = {}
+_unspecified = ['unspecified']
+
+def translation(domain, localedir=None, languages=None,
+ class_=None, fallback=False, codeset=_unspecified):
+ if class_ is None:
+ class_ = GNUTranslations
+ mofiles = find(domain, localedir, languages, all=True)
+ if not mofiles:
+ if fallback:
+ return NullTranslations()
+ from errno import ENOENT
+ raise FileNotFoundError(ENOENT,
+ 'No translation file found for domain', domain)
+ # Avoid opening, reading, and parsing the .mo file after it's been done
+ # once.
+ result = None
+ for mofile in mofiles:
+ key = (class_, os.path.abspath(mofile))
+ t = _translations.get(key)
+ if t is None:
+ with open(mofile, 'rb') as fp:
+ t = _translations.setdefault(key, class_(fp))
+ # Copy the translation object to allow setting fallbacks and
+ # output charset. All other instance data is shared with the
+ # cached object.
+ # Delay copy import for speeding up gettext import when .mo files
+ # are not used.
+ import copy
+ t = copy.copy(t)
+ if codeset is not _unspecified:
+ import warnings
+ warnings.warn('parameter codeset is deprecated',
+ DeprecationWarning, 2)
+ if codeset:
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', r'.*\bset_output_charset\b.*',
+ DeprecationWarning)
+ t.set_output_charset(codeset)
+ if result is None:
+ result = t
+ else:
+ result.add_fallback(t)
+ return result
+
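+# A minimal usage sketch (domain hypothetical):
+#   t = translation('myapp', fallback=True)
+#   _ = t.gettext
+# With fallback=True a NullTranslations instance is returned when no .mo
+# file is found, so _('text') degrades to the original string.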
+
+def install(domain, localedir=None, codeset=_unspecified, names=None):
+ t = translation(domain, localedir, fallback=True, codeset=codeset)
+ t.install(names)
+
+
+
+# a mapping b/w domains and locale directories
+_localedirs = {}
+# a mapping b/w domains and codesets
+_localecodesets = {}
+# current global domain, `messages' used for compatibility w/ GNU gettext
+_current_domain = 'messages'
+
+
+def textdomain(domain=None):
+ global _current_domain
+ if domain is not None:
+ _current_domain = domain
+ return _current_domain
+
+
+def bindtextdomain(domain, localedir=None):
+ global _localedirs
+ if localedir is not None:
+ _localedirs[domain] = localedir
+ return _localedirs.get(domain, _default_localedir)
+
+
+def bind_textdomain_codeset(domain, codeset=None):
+ import warnings
+ warnings.warn('bind_textdomain_codeset() is deprecated',
+ DeprecationWarning, 2)
+ global _localecodesets
+ if codeset is not None:
+ _localecodesets[domain] = codeset
+ return _localecodesets.get(domain)
+
+
+def dgettext(domain, message):
+ try:
+ t = translation(domain, _localedirs.get(domain, None))
+ except OSError:
+ return message
+ return t.gettext(message)
+
+def ldgettext(domain, message):
+ import warnings
+ warnings.warn('ldgettext() is deprecated, use dgettext() instead',
+ DeprecationWarning, 2)
+ import locale
+ codeset = _localecodesets.get(domain)
+ try:
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', r'.*\bparameter codeset\b.*',
+ DeprecationWarning)
+ t = translation(domain, _localedirs.get(domain, None), codeset=codeset)
+ except OSError:
+ return message.encode(codeset or locale.getpreferredencoding())
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', r'.*\blgettext\b.*',
+ DeprecationWarning)
+ return t.lgettext(message)
+
+def dngettext(domain, msgid1, msgid2, n):
+ try:
+ t = translation(domain, _localedirs.get(domain, None))
+ except OSError:
+ if n == 1:
+ return msgid1
+ else:
+ return msgid2
+ return t.ngettext(msgid1, msgid2, n)
+
+def ldngettext(domain, msgid1, msgid2, n):
+ import warnings
+ warnings.warn('ldngettext() is deprecated, use dngettext() instead',
+ DeprecationWarning, 2)
+ import locale
+ codeset = _localecodesets.get(domain)
+ try:
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', r'.*\bparameter codeset\b.*',
+ DeprecationWarning)
+ t = translation(domain, _localedirs.get(domain, None), codeset=codeset)
+ except OSError:
+ if n == 1:
+ tmsg = msgid1
+ else:
+ tmsg = msgid2
+ return tmsg.encode(codeset or locale.getpreferredencoding())
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', r'.*\blngettext\b.*',
+ DeprecationWarning)
+ return t.lngettext(msgid1, msgid2, n)
+
+
+def dpgettext(domain, context, message):
+ try:
+ t = translation(domain, _localedirs.get(domain, None))
+ except OSError:
+ return message
+ return t.pgettext(context, message)
+
+
+def dnpgettext(domain, context, msgid1, msgid2, n):
+ try:
+ t = translation(domain, _localedirs.get(domain, None))
+ except OSError:
+ if n == 1:
+ return msgid1
+ else:
+ return msgid2
+ return t.npgettext(context, msgid1, msgid2, n)
+
+
+def gettext(message):
+ return dgettext(_current_domain, message)
+
+def lgettext(message):
+ import warnings
+ warnings.warn('lgettext() is deprecated, use gettext() instead',
+ DeprecationWarning, 2)
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', r'.*\bldgettext\b.*',
+ DeprecationWarning)
+ return ldgettext(_current_domain, message)
+
+def ngettext(msgid1, msgid2, n):
+ return dngettext(_current_domain, msgid1, msgid2, n)
+
+def lngettext(msgid1, msgid2, n):
+ import warnings
+ warnings.warn('lngettext() is deprecated, use ngettext() instead',
+ DeprecationWarning, 2)
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', r'.*\bldngettext\b.*',
+ DeprecationWarning)
+ return ldngettext(_current_domain, msgid1, msgid2, n)
+
+
+def pgettext(context, message):
+ return dpgettext(_current_domain, context, message)
+
+
+def npgettext(context, msgid1, msgid2, n):
+ return dnpgettext(_current_domain, context, msgid1, msgid2, n)
+
+
+# dcgettext() has been deemed unnecessary and is not implemented.
+
+# James Henstridge's Catalog constructor from GNOME gettext. Documented usage
+# was:
+#
+# import gettext
+# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
+# _ = cat.gettext
+# print _('Hello World')
+
+# The resulting catalog object currently doesn't support access through a
+# dictionary API, which was supported (but apparently unused) in GNOME
+# gettext.
+
+Catalog = translation
diff --git a/infer_4_37_2/lib/python3.10/gzip.py b/infer_4_37_2/lib/python3.10/gzip.py
new file mode 100644
index 0000000000000000000000000000000000000000..475ec326c0c982bf2b31603d64d788ba6d2d35ca
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/gzip.py
@@ -0,0 +1,609 @@
+"""Functions that read and write gzipped files.
+
+The user of the file doesn't have to worry about the compression,
+but random access is not allowed."""
+
+# based on Andrew Kuchling's minigzip.py distributed with the zlib module
+
+import struct, sys, time, os
+import zlib
+import builtins
+import io
+import _compression
+
+__all__ = ["BadGzipFile", "GzipFile", "open", "compress", "decompress"]
+
+FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
+
+READ, WRITE = 1, 2
+
+_COMPRESS_LEVEL_FAST = 1
+_COMPRESS_LEVEL_TRADEOFF = 6
+_COMPRESS_LEVEL_BEST = 9
+
+
+def open(filename, mode="rb", compresslevel=_COMPRESS_LEVEL_BEST,
+ encoding=None, errors=None, newline=None):
+ """Open a gzip-compressed file in binary or text mode.
+
+ The filename argument can be an actual filename (a str or bytes object), or
+ an existing file object to read from or write to.
+
+ The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or "ab" for
+ binary mode, or "rt", "wt", "xt" or "at" for text mode. The default mode is
+ "rb", and the default compresslevel is 9.
+
+ For binary mode, this function is equivalent to the GzipFile constructor:
+ GzipFile(filename, mode, compresslevel). In this case, the encoding, errors
+ and newline arguments must not be provided.
+
+ For text mode, a GzipFile object is created, and wrapped in an
+ io.TextIOWrapper instance with the specified encoding, error handling
+ behavior, and line ending(s).
+
+ """
+ if "t" in mode:
+ if "b" in mode:
+ raise ValueError("Invalid mode: %r" % (mode,))
+ else:
+ if encoding is not None:
+ raise ValueError("Argument 'encoding' not supported in binary mode")
+ if errors is not None:
+ raise ValueError("Argument 'errors' not supported in binary mode")
+ if newline is not None:
+ raise ValueError("Argument 'newline' not supported in binary mode")
+
+ gz_mode = mode.replace("t", "")
+ if isinstance(filename, (str, bytes, os.PathLike)):
+ binary_file = GzipFile(filename, gz_mode, compresslevel)
+ elif hasattr(filename, "read") or hasattr(filename, "write"):
+ binary_file = GzipFile(None, gz_mode, compresslevel, filename)
+ else:
+ raise TypeError("filename must be a str or bytes object, or a file")
+
+ if "t" in mode:
+ encoding = io.text_encoding(encoding)
+ return io.TextIOWrapper(binary_file, encoding, errors, newline)
+ else:
+ return binary_file
+
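+# A short round-trip sketch of text mode (the path is hypothetical):
+#
+#   import gzip
+#   with gzip.open('/tmp/example.txt.gz', 'wt', encoding='utf-8') as f:
+#       f.write('hello\n')
+#   with gzip.open('/tmp/example.txt.gz', 'rt', encoding='utf-8') as f:
+#       assert f.read() == 'hello\n'
+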
+def write32u(output, value):
+ # The L format writes the bit pattern correctly whether signed
+ # or unsigned.
+    output.write(struct.pack("<L", value))
+
+
+class _PaddedFile:
+    """Minimal read-only file object that prepends a string to the contents
+    of an actual file. Shouldn't be used outside of gzip.py, as it lacks
+    essential functionality."""
+
+    def __init__(self, f, prepend=b''):
+        self._buffer = prepend
+        self._length = len(prepend)
+        self.file = f
+        self._read = 0
+
+    def read(self, size):
+        if self._read is None:
+            return self.file.read(size)
+        if self._read + size <= self._length:
+            read = self._read
+            self._read += size
+            return self._buffer[read:self._read]
+        else:
+            read = self._read
+            self._read = None
+            return self._buffer[read:] + \
+                   self.file.read(size-self._length+read)
+
+    def prepend(self, prepend=b''):
+        if self._read is None:
+            self._read = 0
+        else:  # Assume data was read since the last prepend() call
+            self._read -= len(prepend)
+            return
+        self._buffer = prepend
+        self._length = len(prepend)
+
+    def seek(self, off):
+        self._read = None
+        self._buffer = None
+        return self.file.seek(off)
+
+    def seekable(self):
+        return True  # Allows fast-forwarding even in unseekable streams
+
+
+class BadGzipFile(OSError):
+    """Exception raised in some cases for invalid gzip files."""
+
+
+class GzipFile(_compression.BaseStream):
+    """The GzipFile class simulates most of the methods of a file object with
+    the exception of the truncate() method.
+
+    This class only supports opening files in binary mode. If you need to open a
+    compressed file in text mode, use the gzip.open() function.
+
+    """
+
+    # Overridden with internal file object to be closed, if only a filename
+    # is passed in
+    myfileobj = None
+
+    def __init__(self, filename=None, mode=None,
+                 compresslevel=_COMPRESS_LEVEL_BEST, fileobj=None, mtime=None):
+        """Constructor for the GzipFile class.
+
+        At least one of fileobj and filename must be given a
+        non-trivial value.
+
+        The new class instance is based on fileobj, which can be a regular
+        file, an io.BytesIO object, or any other object which simulates a file.
+        It defaults to None, in which case filename is opened to provide
+        a file object.
+
+        When fileobj is not None, the filename argument is only used to be
+        included in the gzip file header, which may include the original
+        filename of the uncompressed file. It defaults to the filename of
+        fileobj, if discernible; otherwise, it defaults to the empty string,
+        and in this case the original filename is not included in the header.
+
+        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or
+        'xb' depending on whether the file will be read or written. The default
+        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
+        A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
+        'wb', 'a' and 'ab', and 'x' and 'xb'.
+
+        The compresslevel argument is an integer from 0 to 9 controlling the
+        level of compression; 1 is fastest and produces the least compression,
+        and 9 is slowest and produces the most compression. 0 is no compression
+        at all. The default is 9.
+
+        The mtime argument is an optional numeric timestamp to be written
+        to the last modification time field in the stream when compressing.
+        If omitted or None, the current time is used.
+
+        """
+
+        if mode and ('t' in mode or 'U' in mode):
+            raise ValueError("Invalid mode: {!r}".format(mode))
+        if mode and 'b' not in mode:
+            mode += 'b'
+        if fileobj is None:
+            fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
+        if filename is None:
+            filename = getattr(fileobj, 'name', '')
+            if not isinstance(filename, (str, bytes)):
+                filename = ''
+        else:
+            filename = os.fspath(filename)
+        origmode = mode
+        if mode is None:
+            mode = getattr(fileobj, 'mode', 'rb')
+
+        if mode.startswith('r'):
+            self.mode = READ
+            raw = _GzipReader(fileobj)
+            self._buffer = io.BufferedReader(raw)
+            self.name = filename
+
+        elif mode.startswith(('w', 'a', 'x')):
+            if origmode is None:
+                import warnings
+                warnings.warn(
+                    "GzipFile was opened for writing, but this will "
+                    "change in future Python releases.  "
+                    "Specify the mode argument for opening it for writing.",
+                    FutureWarning, 2)
+            self.mode = WRITE
+            self._init_write(filename)
+            self.compress = zlib.compressobj(compresslevel,
+                                             zlib.DEFLATED,
+                                             -zlib.MAX_WBITS,
+                                             zlib.DEF_MEM_LEVEL,
+                                             0)
+            self._write_mtime = mtime
+        else:
+            raise ValueError("Invalid mode: {!r}".format(mode))
+
+        self.fileobj = fileobj
+
+        if self.mode == WRITE:
+            self._write_gzip_header(compresslevel)
+
+    @property
+    def filename(self):
+        import warnings
+        warnings.warn("use the name attribute", DeprecationWarning, 2)
+        if self.mode == WRITE and self.name[-3:] != ".gz":
+            return self.name + ".gz"
+        return self.name
+
+    @property
+    def mtime(self):
+        """Last modification time read from stream, or None"""
+        return self._buffer.raw._last_mtime
+
+    def __repr__(self):
+        s = repr(self.fileobj)
+        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
+
+ def _init_write(self, filename):
+ self.name = filename
+ self.crc = zlib.crc32(b"")
+ self.size = 0
+ self.writebuf = []
+ self.bufsize = 0
+ self.offset = 0 # Current file offset for seek(), tell(), etc
+
+ def _write_gzip_header(self, compresslevel):
+ self.fileobj.write(b'\037\213') # magic header
+ self.fileobj.write(b'\010') # compression method
+ try:
+ # RFC 1952 requires the FNAME field to be Latin-1. Do not
+ # include filenames that cannot be represented that way.
+ fname = os.path.basename(self.name)
+ if not isinstance(fname, bytes):
+ fname = fname.encode('latin-1')
+ if fname.endswith(b'.gz'):
+ fname = fname[:-3]
+ except UnicodeEncodeError:
+ fname = b''
+ flags = 0
+ if fname:
+ flags = FNAME
+ self.fileobj.write(chr(flags).encode('latin-1'))
+ mtime = self._write_mtime
+ if mtime is None:
+ mtime = time.time()
+ write32u(self.fileobj, int(mtime))
+ if compresslevel == _COMPRESS_LEVEL_BEST:
+ xfl = b'\002'
+ elif compresslevel == _COMPRESS_LEVEL_FAST:
+ xfl = b'\004'
+ else:
+ xfl = b'\000'
+ self.fileobj.write(xfl)
+ self.fileobj.write(b'\377')
+ if fname:
+ self.fileobj.write(fname + b'\000')
+
+ def write(self,data):
+ self._check_not_closed()
+ if self.mode != WRITE:
+ import errno
+ raise OSError(errno.EBADF, "write() on read-only GzipFile object")
+
+ if self.fileobj is None:
+ raise ValueError("write() on closed GzipFile object")
+
+ if isinstance(data, (bytes, bytearray)):
+ length = len(data)
+ else:
+ # accept any data that supports the buffer protocol
+ data = memoryview(data)
+ length = data.nbytes
+
+ if length > 0:
+ self.fileobj.write(self.compress.compress(data))
+ self.size += length
+ self.crc = zlib.crc32(data, self.crc)
+ self.offset += length
+
+ return length
+
+ def read(self, size=-1):
+ self._check_not_closed()
+ if self.mode != READ:
+ import errno
+ raise OSError(errno.EBADF, "read() on write-only GzipFile object")
+ return self._buffer.read(size)
+
+ def read1(self, size=-1):
+ """Implements BufferedIOBase.read1()
+
+ Reads up to a buffer's worth of data if size is negative."""
+ self._check_not_closed()
+ if self.mode != READ:
+ import errno
+ raise OSError(errno.EBADF, "read1() on write-only GzipFile object")
+
+ if size < 0:
+ size = io.DEFAULT_BUFFER_SIZE
+ return self._buffer.read1(size)
+
+ def peek(self, n):
+ self._check_not_closed()
+ if self.mode != READ:
+ import errno
+ raise OSError(errno.EBADF, "peek() on write-only GzipFile object")
+ return self._buffer.peek(n)
+
+ @property
+ def closed(self):
+ return self.fileobj is None
+
+ def close(self):
+ fileobj = self.fileobj
+ if fileobj is None:
+ return
+ self.fileobj = None
+ try:
+ if self.mode == WRITE:
+ fileobj.write(self.compress.flush())
+ write32u(fileobj, self.crc)
+ # self.size may exceed 2 GiB, or even 4 GiB
+ write32u(fileobj, self.size & 0xffffffff)
+ elif self.mode == READ:
+ self._buffer.close()
+ finally:
+ myfileobj = self.myfileobj
+ if myfileobj:
+ self.myfileobj = None
+ myfileobj.close()
+
+ def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
+ self._check_not_closed()
+ if self.mode == WRITE:
+ # Ensure the compressor's buffer is flushed
+ self.fileobj.write(self.compress.flush(zlib_mode))
+ self.fileobj.flush()
+
+ def fileno(self):
+ """Invoke the underlying file object's fileno() method.
+
+ This will raise AttributeError if the underlying file object
+ doesn't support fileno().
+ """
+ return self.fileobj.fileno()
+
+ def rewind(self):
+ '''Return the uncompressed stream file position indicator to the
+ beginning of the file'''
+ if self.mode != READ:
+ raise OSError("Can't rewind in write mode")
+ self._buffer.seek(0)
+
+ def readable(self):
+ return self.mode == READ
+
+ def writable(self):
+ return self.mode == WRITE
+
+ def seekable(self):
+ return True
+
+ def seek(self, offset, whence=io.SEEK_SET):
+ if self.mode == WRITE:
+ if whence != io.SEEK_SET:
+ if whence == io.SEEK_CUR:
+ offset = self.offset + offset
+ else:
+ raise ValueError('Seek from end not supported')
+ if offset < self.offset:
+ raise OSError('Negative seek in write mode')
+ count = offset - self.offset
+ chunk = b'\0' * 1024
+ for i in range(count // 1024):
+ self.write(chunk)
+ self.write(b'\0' * (count % 1024))
+ elif self.mode == READ:
+ self._check_not_closed()
+ return self._buffer.seek(offset, whence)
+
+ return self.offset
+
+ def readline(self, size=-1):
+ self._check_not_closed()
+ return self._buffer.readline(size)
+
+
+class _GzipReader(_compression.DecompressReader):
+ def __init__(self, fp):
+ super().__init__(_PaddedFile(fp), zlib.decompressobj,
+ wbits=-zlib.MAX_WBITS)
+ # Set flag indicating start of a new member
+ self._new_member = True
+ self._last_mtime = None
+
+ def _init_read(self):
+ self._crc = zlib.crc32(b"")
+ self._stream_size = 0 # Decompressed size of unconcatenated stream
+
+ def _read_exact(self, n):
+ '''Read exactly *n* bytes from `self._fp`
+
+ This method is required because self._fp may be unbuffered,
+ i.e. return short reads.
+ '''
+
+ data = self._fp.read(n)
+ while len(data) < n:
+ b = self._fp.read(n - len(data))
+ if not b:
+ raise EOFError("Compressed file ended before the "
+ "end-of-stream marker was reached")
+ data += b
+ return data
+
+ def _read_gzip_header(self):
+ magic = self._fp.read(2)
+ if magic == b'':
+ return False
+
+ if magic != b'\037\213':
+ raise BadGzipFile('Not a gzipped file (%r)' % magic)
+
+ (method, flag,
+         self._last_mtime) = struct.unpack("<BBIxx", self._read_exact(8))
diff --git a/infer_4_37_2/lib/python3.10/imaplib.py b/infer_4_37_2/lib/python3.10/imaplib.py
new file mode 100644
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/imaplib.py
+"""IMAP4 client.
+
+Based on RFC 2060.
+
+Public class:           IMAP4
+Public variable:        Debug
+Public functions:       Internaldate2tuple
+                        Int2AP
+                        ParseFlags
+                        Time2Internaldate
+"""
+
+# Author: Piers Lauder <piers@cs.su.oz.au> December 1997.
+#
+# Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998.
+# String method conversion by ESR, February 2001.
+# GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001.
+# IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002.
+# GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002.
+# PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002.
+# GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005.
+
+__version__ = "2.58"
+
+import binascii, errno, random, re, socket, subprocess, sys, time, calendar
+from datetime import datetime, timezone, timedelta
+from io import DEFAULT_BUFFER_SIZE
+
+try:
+ import ssl
+ HAVE_SSL = True
+except ImportError:
+ HAVE_SSL = False
+
+__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple",
+ "Int2AP", "ParseFlags", "Time2Internaldate"]
+
+# Globals
+
+CRLF = b'\r\n'
+Debug = 0
+IMAP4_PORT = 143
+IMAP4_SSL_PORT = 993
+AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first
+
+# Maximal line length when calling readline(). This is to prevent
+# reading arbitrary length lines. RFC 3501 and 2060 (IMAP 4rev1)
+# don't specify a line length. RFC 2683 suggests limiting client
+# command lines to 1000 octets and that servers should be prepared
+# to accept command lines up to 8000 octets, so we used to use 10K here.
+# In the modern world (eg: gmail) the response to, for example, a
+# search command can be quite large, so we now use 1M.
+_MAXLINE = 1000000
+
+
+# Commands
+
+Commands = {
+ # name valid states
+ 'APPEND': ('AUTH', 'SELECTED'),
+ 'AUTHENTICATE': ('NONAUTH',),
+ 'CAPABILITY': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
+ 'CHECK': ('SELECTED',),
+ 'CLOSE': ('SELECTED',),
+ 'COPY': ('SELECTED',),
+ 'CREATE': ('AUTH', 'SELECTED'),
+ 'DELETE': ('AUTH', 'SELECTED'),
+ 'DELETEACL': ('AUTH', 'SELECTED'),
+ 'ENABLE': ('AUTH', ),
+ 'EXAMINE': ('AUTH', 'SELECTED'),
+ 'EXPUNGE': ('SELECTED',),
+ 'FETCH': ('SELECTED',),
+ 'GETACL': ('AUTH', 'SELECTED'),
+ 'GETANNOTATION':('AUTH', 'SELECTED'),
+ 'GETQUOTA': ('AUTH', 'SELECTED'),
+ 'GETQUOTAROOT': ('AUTH', 'SELECTED'),
+ 'MYRIGHTS': ('AUTH', 'SELECTED'),
+ 'LIST': ('AUTH', 'SELECTED'),
+ 'LOGIN': ('NONAUTH',),
+ 'LOGOUT': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
+ 'LSUB': ('AUTH', 'SELECTED'),
+ 'MOVE': ('SELECTED',),
+ 'NAMESPACE': ('AUTH', 'SELECTED'),
+ 'NOOP': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
+ 'PARTIAL': ('SELECTED',), # NB: obsolete
+ 'PROXYAUTH': ('AUTH',),
+ 'RENAME': ('AUTH', 'SELECTED'),
+ 'SEARCH': ('SELECTED',),
+ 'SELECT': ('AUTH', 'SELECTED'),
+ 'SETACL': ('AUTH', 'SELECTED'),
+ 'SETANNOTATION':('AUTH', 'SELECTED'),
+ 'SETQUOTA': ('AUTH', 'SELECTED'),
+ 'SORT': ('SELECTED',),
+ 'STARTTLS': ('NONAUTH',),
+ 'STATUS': ('AUTH', 'SELECTED'),
+ 'STORE': ('SELECTED',),
+ 'SUBSCRIBE': ('AUTH', 'SELECTED'),
+ 'THREAD': ('SELECTED',),
+ 'UID': ('SELECTED',),
+ 'UNSUBSCRIBE': ('AUTH', 'SELECTED'),
+ 'UNSELECT': ('SELECTED',),
+ }
+
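+# For example, _command() consults this table before sending anything; a
+# sketch of the gating logic:
+#
+#   Commands['FETCH']              # -> ('SELECTED',): FETCH needs a mailbox
+#   'AUTH' in Commands['APPEND']   # -> True: APPEND is legal once authenticated
+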
+# Patterns to match server responses
+
+Continuation = re.compile(br'\+( (?P<data>.*))?')
+Flags = re.compile(br'.*FLAGS \((?P<flags>[^\)]*)\)')
+InternalDate = re.compile(br'.*INTERNALDATE "'
+        br'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
+        br' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
+        br' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
+        br'"')
+# Literal is no longer used; kept for backward compatibility.
+Literal = re.compile(br'.*{(?P<size>\d+)}$', re.ASCII)
+MapCRLF = re.compile(br'\r\n|\r|\n')
+# We no longer exclude the ']' character from the data portion of the response
+# code, even though it violates the RFC. Popular IMAP servers such as Gmail
+# allow flags with ']', and there are programs (including imaplib!) that can
+# produce them. The problem with this is if the 'text' portion of the response
+# includes a ']' we'll parse the response wrong (which is the point of the RFC
+# restriction). However, that seems less likely to be a problem in practice
+# than being unable to correctly parse flags that include ']' chars, which
+# was reported as a real-world problem in issue #21815.
+Response_code = re.compile(br'\[(?P<type>[A-Z-]+)( (?P<data>.*))?\]')
+Untagged_response = re.compile(br'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
+# Untagged_status is no longer used; kept for backward compatibility
+Untagged_status = re.compile(
+    br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?', re.ASCII)
+# We compile these in _mode_xxx.
+_Literal = br'.*{(?P<size>\d+)}$'
+_Untagged_status = br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?'
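+
+# A quick sketch of these patterns against raw protocol lines:
+#
+#   mo = Untagged_response.match(b'* CAPABILITY IMAP4REV1 STARTTLS')
+#   (mo.group('type'), mo.group('data'))
+#   # -> (b'CAPABILITY', b'IMAP4REV1 STARTTLS')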
+
+
+
+class IMAP4:
+
+ r"""IMAP4 client class.
+
+ Instantiate with: IMAP4([host[, port[, timeout=None]]])
+
+ host - host's name (default: localhost);
+ port - port number (default: standard IMAP4 port).
+ timeout - socket timeout (default: None)
+ If timeout is not given or is None,
+ the global default socket timeout is used
+
+ All IMAP4rev1 commands are supported by methods of the same
+ name (in lower-case).
+
+ All arguments to commands are converted to strings, except for
+ AUTHENTICATE, and the last argument to APPEND which is passed as
+ an IMAP4 literal. If necessary (the string contains any
+ non-printing characters or white-space and isn't enclosed with
+ either parentheses or double quotes) each string is quoted.
+ However, the 'password' argument to the LOGIN command is always
+ quoted. If you want to avoid having an argument string quoted
+ (eg: the 'flags' argument to STORE) then enclose the string in
+ parentheses (eg: "(\Deleted)").
+
+ Each command returns a tuple: (type, [data, ...]) where 'type'
+ is usually 'OK' or 'NO', and 'data' is either the text from the
+ tagged response, or untagged results from command. Each 'data'
+ is either a string, or a tuple. If a tuple, then the first part
+ is the header of the response, and the second part contains
+ the data (ie: 'literal' value).
+
+    Errors raise the exception class <instance>.error("<reason>").
+    IMAP4 server errors raise <instance>.abort("<reason>"),
+    which is a sub-class of 'error'. Mailbox status changes
+    from READ-WRITE to READ-ONLY raise the exception class
+    <instance>.readonly("<response>"), which is a sub-class of 'abort'.
+
+ "error" exceptions imply a program error.
+ "abort" exceptions imply the connection should be reset, and
+ the command re-tried.
+ "readonly" exceptions imply the command should be re-tried.
+
+ Note: to use this module, you must read the RFCs pertaining to the
+ IMAP4 protocol, as the semantics of the arguments to each IMAP4
+ command are left to the invoker, not to mention the results. Also,
+ most IMAP servers implement a sub-set of the commands available here.
+ """
+
+ class error(Exception): pass # Logical errors - debug required
+ class abort(error): pass # Service errors - close and retry
+ class readonly(abort): pass # Mailbox status changed to READ-ONLY
+
+ def __init__(self, host='', port=IMAP4_PORT, timeout=None):
+ self.debug = Debug
+ self.state = 'LOGOUT'
+ self.literal = None # A literal argument to a command
+ self.tagged_commands = {} # Tagged commands awaiting response
+ self.untagged_responses = {} # {typ: [data, ...], ...}
+ self.continuation_response = '' # Last continuation response
+ self.is_readonly = False # READ-ONLY desired state
+ self.tagnum = 0
+ self._tls_established = False
+ self._mode_ascii()
+
+ # Open socket to server.
+
+ self.open(host, port, timeout)
+
+ try:
+ self._connect()
+ except Exception:
+ try:
+ self.shutdown()
+ except OSError:
+ pass
+ raise
+
+ def _mode_ascii(self):
+ self.utf8_enabled = False
+ self._encoding = 'ascii'
+ self.Literal = re.compile(_Literal, re.ASCII)
+ self.Untagged_status = re.compile(_Untagged_status, re.ASCII)
+
+
+ def _mode_utf8(self):
+ self.utf8_enabled = True
+ self._encoding = 'utf-8'
+ self.Literal = re.compile(_Literal)
+ self.Untagged_status = re.compile(_Untagged_status)
+
+
+ def _connect(self):
+ # Create unique tag for this session,
+ # and compile tagged response matcher.
+
+ self.tagpre = Int2AP(random.randint(4096, 65535))
+        self.tagre = re.compile(br'(?P<tag>'
+                        + self.tagpre
+                        + br'\d+) (?P<type>[A-Z]+) (?P<data>.*)', re.ASCII)
+
+ # Get server welcome message,
+ # request and store CAPABILITY response.
+
+ if __debug__:
+ self._cmd_log_len = 10
+ self._cmd_log_idx = 0
+ self._cmd_log = {} # Last `_cmd_log_len' interactions
+ if self.debug >= 1:
+ self._mesg('imaplib version %s' % __version__)
+ self._mesg('new IMAP4 connection, tag=%s' % self.tagpre)
+
+ self.welcome = self._get_response()
+ if 'PREAUTH' in self.untagged_responses:
+ self.state = 'AUTH'
+ elif 'OK' in self.untagged_responses:
+ self.state = 'NONAUTH'
+ else:
+ raise self.error(self.welcome)
+
+ self._get_capabilities()
+ if __debug__:
+ if self.debug >= 3:
+ self._mesg('CAPABILITIES: %r' % (self.capabilities,))
+
+ for version in AllowedVersions:
+ if not version in self.capabilities:
+ continue
+ self.PROTOCOL_VERSION = version
+ return
+
+ raise self.error('server not IMAP4 compliant')
+
+
+ def __getattr__(self, attr):
+ # Allow UPPERCASE variants of IMAP4 command methods.
+ if attr in Commands:
+ return getattr(self, attr.lower())
+ raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ if self.state == "LOGOUT":
+ return
+
+ try:
+ self.logout()
+ except OSError:
+ pass
+
+
+ # Overridable methods
+
+
+ def _create_socket(self, timeout):
+ # Default value of IMAP4.host is '', but socket.getaddrinfo()
+ # (which is used by socket.create_connection()) expects None
+ # as a default value for host.
+ if timeout is not None and not timeout:
+ raise ValueError('Non-blocking socket (timeout=0) is not supported')
+ host = None if not self.host else self.host
+ sys.audit("imaplib.open", self, self.host, self.port)
+ address = (host, self.port)
+ if timeout is not None:
+ return socket.create_connection(address, timeout)
+ return socket.create_connection(address)
+
+ def open(self, host='', port=IMAP4_PORT, timeout=None):
+ """Setup connection to remote server on "host:port"
+ (default: localhost:standard IMAP4 port).
+ This connection will be used by the routines:
+ read, readline, send, shutdown.
+ """
+ self.host = host
+ self.port = port
+ self.sock = self._create_socket(timeout)
+ self.file = self.sock.makefile('rb')
+
+
+ def read(self, size):
+ """Read 'size' bytes from remote."""
+ return self.file.read(size)
+
+
+ def readline(self):
+ """Read line from remote."""
+ line = self.file.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise self.error("got more than %d bytes" % _MAXLINE)
+ return line
+
+
+ def send(self, data):
+ """Send data to remote."""
+ sys.audit("imaplib.send", self, data)
+ self.sock.sendall(data)
+
+
+ def shutdown(self):
+ """Close I/O established in "open"."""
+ self.file.close()
+ try:
+ self.sock.shutdown(socket.SHUT_RDWR)
+ except OSError as exc:
+ # The server might already have closed the connection.
+ # On Windows, this may result in WSAEINVAL (error 10022):
+ # An invalid operation was attempted.
+ if (exc.errno != errno.ENOTCONN
+ and getattr(exc, 'winerror', 0) != 10022):
+ raise
+ finally:
+ self.sock.close()
+
+
+ def socket(self):
+ """Return socket instance used to connect to IMAP4 server.
+
+        socket = <instance>.socket()
+ """
+ return self.sock
+
+
+
+ # Utility methods
+
+
+ def recent(self):
+ """Return most recent 'RECENT' responses if any exist,
+ else prompt server for an update using the 'NOOP' command.
+
+        (typ, [data]) = <instance>.recent()
+
+ 'data' is None if no new messages,
+ else list of RECENT responses, most recent last.
+ """
+ name = 'RECENT'
+ typ, dat = self._untagged_response('OK', [None], name)
+ if dat[-1]:
+ return typ, dat
+ typ, dat = self.noop() # Prod server for response
+ return self._untagged_response(typ, dat, name)
+
+
+ def response(self, code):
+ """Return data for response 'code' if received, or None.
+
+ Old value for response 'code' is cleared.
+
+        (code, [data]) = <instance>.response(code)
+ """
+ return self._untagged_response(code, [None], code.upper())
+
+
+
+ # IMAP4 commands
+
+
+ def append(self, mailbox, flags, date_time, message):
+ """Append message to named mailbox.
+
+        (typ, [data]) = <instance>.append(mailbox, flags, date_time, message)
+
+ All args except `message' can be None.
+ """
+ name = 'APPEND'
+ if not mailbox:
+ mailbox = 'INBOX'
+ if flags:
+ if (flags[0],flags[-1]) != ('(',')'):
+ flags = '(%s)' % flags
+ else:
+ flags = None
+ if date_time:
+ date_time = Time2Internaldate(date_time)
+ else:
+ date_time = None
+ literal = MapCRLF.sub(CRLF, message)
+ if self.utf8_enabled:
+ literal = b'UTF8 (' + literal + b')'
+ self.literal = literal
+ return self._simple_command(name, mailbox, flags, date_time)
+
+
+ def authenticate(self, mechanism, authobject):
+ """Authenticate command - requires response processing.
+
+ 'mechanism' specifies which authentication mechanism is to
+        be used - it must appear in <instance>.capabilities in the
+        form AUTH=<mechanism>.
+
+ 'authobject' must be a callable object:
+
+ data = authobject(response)
+
+ It will be called to process server continuation responses; the
+ response argument it is passed will be a bytes. It should return bytes
+ data that will be base64 encoded and sent to the server. It should
+ return None if the client abort response '*' should be sent instead.
+ """
+ mech = mechanism.upper()
+ # XXX: shouldn't this code be removed, not commented out?
+ #cap = 'AUTH=%s' % mech
+ #if not cap in self.capabilities: # Let the server decide!
+ # raise self.error("Server doesn't allow %s authentication." % mech)
+ self.literal = _Authenticator(authobject).process
+ typ, dat = self._simple_command('AUTHENTICATE', mech)
+ if typ != 'OK':
+ raise self.error(dat[-1].decode('utf-8', 'replace'))
+ self.state = 'AUTH'
+ return typ, dat
+
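+    # A sketch of authenticating with a PLAIN authobject (host and
+    # credentials hypothetical; the bytes the callable returns are
+    # base64-encoded and sent for each server continuation):
+    #
+    #   with imaplib.IMAP4_SSL('imap.example.org') as M:
+    #       M.authenticate('PLAIN',
+    #                      lambda resp: b'\0user@example.org\0secret')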
+
+ def capability(self):
+        """(typ, [data]) = <instance>.capability()
+ Fetch capabilities list from server."""
+
+ name = 'CAPABILITY'
+ typ, dat = self._simple_command(name)
+ return self._untagged_response(typ, dat, name)
+
+
+ def check(self):
+ """Checkpoint mailbox on server.
+
+        (typ, [data]) = <instance>.check()
+ """
+ return self._simple_command('CHECK')
+
+
+ def close(self):
+ """Close currently selected mailbox.
+
+ Deleted messages are removed from writable mailbox.
+ This is the recommended command before 'LOGOUT'.
+
+        (typ, [data]) = <instance>.close()
+ """
+ try:
+ typ, dat = self._simple_command('CLOSE')
+ finally:
+ self.state = 'AUTH'
+ return typ, dat
+
+
+ def copy(self, message_set, new_mailbox):
+ """Copy 'message_set' messages onto end of 'new_mailbox'.
+
+        (typ, [data]) = <instance>.copy(message_set, new_mailbox)
+ """
+ return self._simple_command('COPY', message_set, new_mailbox)
+
+
+ def create(self, mailbox):
+ """Create new mailbox.
+
+        (typ, [data]) = <instance>.create(mailbox)
+ """
+ return self._simple_command('CREATE', mailbox)
+
+
+ def delete(self, mailbox):
+ """Delete old mailbox.
+
+        (typ, [data]) = <instance>.delete(mailbox)
+ """
+ return self._simple_command('DELETE', mailbox)
+
+ def deleteacl(self, mailbox, who):
+ """Delete the ACLs (remove any rights) set for who on mailbox.
+
+        (typ, [data]) = <instance>.deleteacl(mailbox, who)
+ """
+ return self._simple_command('DELETEACL', mailbox, who)
+
+ def enable(self, capability):
+ """Send an RFC5161 enable string to the server.
+
+        (typ, [data]) = <instance>.enable(capability)
+ """
+ if 'ENABLE' not in self.capabilities:
+ raise IMAP4.error("Server does not support ENABLE")
+ typ, data = self._simple_command('ENABLE', capability)
+ if typ == 'OK' and 'UTF8=ACCEPT' in capability.upper():
+ self._mode_utf8()
+ return typ, data
+
+ def expunge(self):
+ """Permanently remove deleted items from selected mailbox.
+
+ Generates 'EXPUNGE' response for each deleted message.
+
+        (typ, [data]) = <instance>.expunge()
+
+ 'data' is list of 'EXPUNGE'd message numbers in order received.
+ """
+ name = 'EXPUNGE'
+ typ, dat = self._simple_command(name)
+ return self._untagged_response(typ, dat, name)
+
+
+ def fetch(self, message_set, message_parts):
+ """Fetch (parts of) messages.
+
+        (typ, [data, ...]) = <instance>.fetch(message_set, message_parts)
+
+ 'message_parts' should be a string of selected parts
+ enclosed in parentheses, eg: "(UID BODY[TEXT])".
+
+ 'data' are tuples of message part envelope and data.
+ """
+ name = 'FETCH'
+ typ, dat = self._simple_command(name, message_set, message_parts)
+ return self._untagged_response(typ, dat, name)
+
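+    # Typical use after select() (a sketch; message 1 assumed to exist):
+    #
+    #   typ, data = M.fetch('1', '(RFC822)')
+    #   raw = data[0][1]    # message bytes, per the (envelope, data) tuples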
+
+ def getacl(self, mailbox):
+ """Get the ACLs for a mailbox.
+
+        (typ, [data]) = <instance>.getacl(mailbox)
+ """
+ typ, dat = self._simple_command('GETACL', mailbox)
+ return self._untagged_response(typ, dat, 'ACL')
+
+
+ def getannotation(self, mailbox, entry, attribute):
+        """(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute)
+ Retrieve ANNOTATIONs."""
+
+ typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute)
+ return self._untagged_response(typ, dat, 'ANNOTATION')
+
+
+ def getquota(self, root):
+ """Get the quota root's resource usage and limits.
+
+ Part of the IMAP4 QUOTA extension defined in rfc2087.
+
+        (typ, [data]) = <instance>.getquota(root)
+ """
+ typ, dat = self._simple_command('GETQUOTA', root)
+ return self._untagged_response(typ, dat, 'QUOTA')
+
+
+ def getquotaroot(self, mailbox):
+ """Get the list of quota roots for the named mailbox.
+
+        (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = <instance>.getquotaroot(mailbox)
+ """
+ typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
+ typ, quota = self._untagged_response(typ, dat, 'QUOTA')
+ typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
+ return typ, [quotaroot, quota]
+
+
+ def list(self, directory='""', pattern='*'):
+ """List mailbox names in directory matching pattern.
+
+        (typ, [data]) = <instance>.list(directory='""', pattern='*')
+
+ 'data' is list of LIST responses.
+ """
+ name = 'LIST'
+ typ, dat = self._simple_command(name, directory, pattern)
+ return self._untagged_response(typ, dat, name)
+
+
+ def login(self, user, password):
+ """Identify client using plaintext password.
+
+        (typ, [data]) = <instance>.login(user, password)
+
+ NB: 'password' will be quoted.
+ """
+ typ, dat = self._simple_command('LOGIN', user, self._quote(password))
+ if typ != 'OK':
+ raise self.error(dat[-1])
+ self.state = 'AUTH'
+ return typ, dat
+
+
+ def login_cram_md5(self, user, password):
+ """ Force use of CRAM-MD5 authentication.
+
+        (typ, [data]) = <instance>.login_cram_md5(user, password)
+ """
+ self.user, self.password = user, password
+ return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH)
+
+
+ def _CRAM_MD5_AUTH(self, challenge):
+ """ Authobject to use with CRAM-MD5 authentication. """
+ import hmac
+ pwd = (self.password.encode('utf-8') if isinstance(self.password, str)
+ else self.password)
+ return self.user + " " + hmac.HMAC(pwd, challenge, 'md5').hexdigest()
+
+
+ def logout(self):
+ """Shutdown connection to server.
+
+        (typ, [data]) = <instance>.logout()
+
+ Returns server 'BYE' response.
+ """
+ self.state = 'LOGOUT'
+ typ, dat = self._simple_command('LOGOUT')
+ self.shutdown()
+ return typ, dat
+
+
+ def lsub(self, directory='""', pattern='*'):
+ """List 'subscribed' mailbox names in directory matching pattern.
+
+        (typ, [data, ...]) = <instance>.lsub(directory='""', pattern='*')
+
+ 'data' are tuples of message part envelope and data.
+ """
+ name = 'LSUB'
+ typ, dat = self._simple_command(name, directory, pattern)
+ return self._untagged_response(typ, dat, name)
+
+ def myrights(self, mailbox):
+ """Show my ACLs for a mailbox (i.e. the rights that I have on mailbox).
+
+        (typ, [data]) = <instance>.myrights(mailbox)
+ """
+ typ,dat = self._simple_command('MYRIGHTS', mailbox)
+ return self._untagged_response(typ, dat, 'MYRIGHTS')
+
+ def namespace(self):
+ """ Returns IMAP namespaces ala rfc2342
+
+        (typ, [data, ...]) = <instance>.namespace()
+ """
+ name = 'NAMESPACE'
+ typ, dat = self._simple_command(name)
+ return self._untagged_response(typ, dat, name)
+
+
+ def noop(self):
+ """Send NOOP command.
+
+        (typ, [data]) = <instance>.noop()
+ """
+ if __debug__:
+ if self.debug >= 3:
+ self._dump_ur(self.untagged_responses)
+ return self._simple_command('NOOP')
+
+
+ def partial(self, message_num, message_part, start, length):
+ """Fetch truncated part of a message.
+
+        (typ, [data, ...]) = <instance>.partial(message_num, message_part, start, length)
+
+ 'data' is tuple of message part envelope and data.
+ """
+ name = 'PARTIAL'
+ typ, dat = self._simple_command(name, message_num, message_part, start, length)
+ return self._untagged_response(typ, dat, 'FETCH')
+
+
+ def proxyauth(self, user):
+ """Assume authentication as "user".
+
+ Allows an authorised administrator to proxy into any user's
+ mailbox.
+
+        (typ, [data]) = <instance>.proxyauth(user)
+ """
+
+ name = 'PROXYAUTH'
+ return self._simple_command('PROXYAUTH', user)
+
+
+ def rename(self, oldmailbox, newmailbox):
+ """Rename old mailbox name to new.
+
+        (typ, [data]) = <instance>.rename(oldmailbox, newmailbox)
+ """
+ return self._simple_command('RENAME', oldmailbox, newmailbox)
+
+
+ def search(self, charset, *criteria):
+ """Search mailbox for matching messages.
+
+        (typ, [data]) = <instance>.search(charset, criterion, ...)
+
+ 'data' is space separated list of matching message numbers.
+ If UTF8 is enabled, charset MUST be None.
+ """
+ name = 'SEARCH'
+ if charset:
+ if self.utf8_enabled:
+ raise IMAP4.error("Non-None charset not valid in UTF8 mode")
+ typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria)
+ else:
+ typ, dat = self._simple_command(name, *criteria)
+ return self._untagged_response(typ, dat, name)
+
+
+ def select(self, mailbox='INBOX', readonly=False):
+ """Select a mailbox.
+
+ Flush all untagged responses.
+
+        (typ, [data]) = <instance>.select(mailbox='INBOX', readonly=False)
+
+ 'data' is count of messages in mailbox ('EXISTS' response).
+
+ Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
+ other responses should be obtained via .response('FLAGS') etc.
+ """
+ self.untagged_responses = {} # Flush old responses.
+ self.is_readonly = readonly
+ if readonly:
+ name = 'EXAMINE'
+ else:
+ name = 'SELECT'
+ typ, dat = self._simple_command(name, mailbox)
+ if typ != 'OK':
+ self.state = 'AUTH' # Might have been 'SELECTED'
+ return typ, dat
+ self.state = 'SELECTED'
+ if 'READ-ONLY' in self.untagged_responses \
+ and not readonly:
+ if __debug__:
+ if self.debug >= 1:
+ self._dump_ur(self.untagged_responses)
+ raise self.readonly('%s is not writable' % mailbox)
+ return typ, self.untagged_responses.get('EXISTS', [None])
+
+
+ def setacl(self, mailbox, who, what):
+ """Set a mailbox acl.
+
+        (typ, [data]) = <instance>.setacl(mailbox, who, what)
+ """
+ return self._simple_command('SETACL', mailbox, who, what)
+
+
+ def setannotation(self, *args):
+        """(typ, [data]) = <instance>.setannotation(mailbox[, entry, attribute]+)
+ Set ANNOTATIONs."""
+
+ typ, dat = self._simple_command('SETANNOTATION', *args)
+ return self._untagged_response(typ, dat, 'ANNOTATION')
+
+
+ def setquota(self, root, limits):
+ """Set the quota root's resource limits.
+
+        (typ, [data]) = <instance>.setquota(root, limits)
+ """
+ typ, dat = self._simple_command('SETQUOTA', root, limits)
+ return self._untagged_response(typ, dat, 'QUOTA')
+
+
+ def sort(self, sort_criteria, charset, *search_criteria):
+ """IMAP4rev1 extension SORT command.
+
+        (typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...)
+ """
+ name = 'SORT'
+ #if not name in self.capabilities: # Let the server decide!
+ # raise self.error('unimplemented extension command: %s' % name)
+ if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
+ sort_criteria = '(%s)' % sort_criteria
+ typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria)
+ return self._untagged_response(typ, dat, name)
+
+
+ def starttls(self, ssl_context=None):
+ name = 'STARTTLS'
+ if not HAVE_SSL:
+ raise self.error('SSL support missing')
+ if self._tls_established:
+ raise self.abort('TLS session already established')
+ if name not in self.capabilities:
+ raise self.abort('TLS not supported by server')
+ # Generate a default SSL context if none was passed.
+ if ssl_context is None:
+ ssl_context = ssl._create_stdlib_context()
+ typ, dat = self._simple_command(name)
+ if typ == 'OK':
+ self.sock = ssl_context.wrap_socket(self.sock,
+ server_hostname=self.host)
+ self.file = self.sock.makefile('rb')
+ self._tls_established = True
+ self._get_capabilities()
+ else:
+ raise self.error("Couldn't establish TLS session")
+ return self._untagged_response(typ, dat, name)
+
+
+ def status(self, mailbox, names):
+ """Request named status conditions for mailbox.
+
+        (typ, [data]) = <instance>.status(mailbox, names)
+ """
+ name = 'STATUS'
+ #if self.PROTOCOL_VERSION == 'IMAP4': # Let the server decide!
+ # raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name)
+ typ, dat = self._simple_command(name, mailbox, names)
+ return self._untagged_response(typ, dat, name)
+
+
+ def store(self, message_set, command, flags):
+ """Alters flag dispositions for messages in mailbox.
+
+        (typ, [data]) = <instance>.store(message_set, command, flags)
+ """
+ if (flags[0],flags[-1]) != ('(',')'):
+ flags = '(%s)' % flags # Avoid quoting the flags
+ typ, dat = self._simple_command('STORE', message_set, command, flags)
+ return self._untagged_response(typ, dat, 'FETCH')
+
+
+ def subscribe(self, mailbox):
+ """Subscribe to new mailbox.
+
+        (typ, [data]) = <instance>.subscribe(mailbox)
+ """
+ return self._simple_command('SUBSCRIBE', mailbox)
+
+
+ def thread(self, threading_algorithm, charset, *search_criteria):
+        """IMAP4rev1 extension THREAD command.
+
+        (typ, [data]) = <instance>.thread(threading_algorithm, charset, search_criteria, ...)
+ """
+ name = 'THREAD'
+ typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria)
+ return self._untagged_response(typ, dat, name)
+
+
+ def uid(self, command, *args):
+ """Execute "command arg ..." with messages identified by UID,
+ rather than message number.
+
+        (typ, [data]) = <instance>.uid(command, arg1, arg2, ...)
+
+ Returns response appropriate to 'command'.
+ """
+ command = command.upper()
+ if not command in Commands:
+ raise self.error("Unknown IMAP4 UID command: %s" % command)
+ if self.state not in Commands[command]:
+ raise self.error("command %s illegal in state %s, "
+ "only allowed in states %s" %
+ (command, self.state,
+ ', '.join(Commands[command])))
+ name = 'UID'
+ typ, dat = self._simple_command(name, command, *args)
+ if command in ('SEARCH', 'SORT', 'THREAD'):
+ name = command
+ else:
+ name = 'FETCH'
+ return self._untagged_response(typ, dat, name)
+
+
+ def unsubscribe(self, mailbox):
+ """Unsubscribe from old mailbox.
+
+        (typ, [data]) = <instance>.unsubscribe(mailbox)
+ """
+ return self._simple_command('UNSUBSCRIBE', mailbox)
+
+
+ def unselect(self):
+ """Free server's resources associated with the selected mailbox
+        and return the server to the authenticated state.
+ This command performs the same actions as CLOSE, except
+ that no messages are permanently removed from the currently
+ selected mailbox.
+
+        (typ, [data]) = <instance>.unselect()
+ """
+ try:
+ typ, data = self._simple_command('UNSELECT')
+ finally:
+ self.state = 'AUTH'
+ return typ, data
+
+
+ def xatom(self, name, *args):
+ """Allow simple extension commands
+ notified by server in CAPABILITY response.
+
+ Assumes command is legal in current state.
+
+        (typ, [data]) = <instance>.xatom(name, arg, ...)
+
+ Returns response appropriate to extension command `name'.
+ """
+ name = name.upper()
+ #if not name in self.capabilities: # Let the server decide!
+ # raise self.error('unknown extension command: %s' % name)
+ if not name in Commands:
+ Commands[name] = (self.state,)
+ return self._simple_command(name, *args)
+
+
+
+ # Private methods
+
+
+ def _append_untagged(self, typ, dat):
+ if dat is None:
+ dat = b''
+ ur = self.untagged_responses
+ if __debug__:
+ if self.debug >= 5:
+ self._mesg('untagged_responses[%s] %s += ["%r"]' %
+ (typ, len(ur.get(typ,'')), dat))
+ if typ in ur:
+ ur[typ].append(dat)
+ else:
+ ur[typ] = [dat]
+
+
+ def _check_bye(self):
+ bye = self.untagged_responses.get('BYE')
+ if bye:
+ raise self.abort(bye[-1].decode(self._encoding, 'replace'))
+
+
+ def _command(self, name, *args):
+
+ if self.state not in Commands[name]:
+ self.literal = None
+ raise self.error("command %s illegal in state %s, "
+ "only allowed in states %s" %
+ (name, self.state,
+ ', '.join(Commands[name])))
+
+ for typ in ('OK', 'NO', 'BAD'):
+ if typ in self.untagged_responses:
+ del self.untagged_responses[typ]
+
+ if 'READ-ONLY' in self.untagged_responses \
+ and not self.is_readonly:
+ raise self.readonly('mailbox status changed to READ-ONLY')
+
+ tag = self._new_tag()
+ name = bytes(name, self._encoding)
+ data = tag + b' ' + name
+ for arg in args:
+ if arg is None: continue
+ if isinstance(arg, str):
+ arg = bytes(arg, self._encoding)
+ data = data + b' ' + arg
+
+ literal = self.literal
+ if literal is not None:
+ self.literal = None
+ if type(literal) is type(self._command):
+ literator = literal
+ else:
+ literator = None
+ data = data + bytes(' {%s}' % len(literal), self._encoding)
+
+ if __debug__:
+ if self.debug >= 4:
+ self._mesg('> %r' % data)
+ else:
+ self._log('> %r' % data)
+
+ try:
+ self.send(data + CRLF)
+ except OSError as val:
+ raise self.abort('socket error: %s' % val)
+
+ if literal is None:
+ return tag
+
+ while 1:
+ # Wait for continuation response
+
+ while self._get_response():
+ if self.tagged_commands[tag]: # BAD/NO?
+ return tag
+
+ # Send literal
+
+ if literator:
+ literal = literator(self.continuation_response)
+
+ if __debug__:
+ if self.debug >= 4:
+ self._mesg('write literal size %s' % len(literal))
+
+ try:
+ self.send(literal)
+ self.send(CRLF)
+ except OSError as val:
+ raise self.abort('socket error: %s' % val)
+
+ if not literator:
+ break
+
+ return tag
+
+
+ def _command_complete(self, name, tag):
+ logout = (name == 'LOGOUT')
+ # BYE is expected after LOGOUT
+ if not logout:
+ self._check_bye()
+ try:
+ typ, data = self._get_tagged_response(tag, expect_bye=logout)
+ except self.abort as val:
+ raise self.abort('command: %s => %s' % (name, val))
+ except self.error as val:
+ raise self.error('command: %s => %s' % (name, val))
+ if not logout:
+ self._check_bye()
+ if typ == 'BAD':
+ raise self.error('%s command error: %s %s' % (name, typ, data))
+ return typ, data
+
+
+ def _get_capabilities(self):
+ typ, dat = self.capability()
+ if dat == [None]:
+ raise self.error('no CAPABILITY response from server')
+ dat = str(dat[-1], self._encoding)
+ dat = dat.upper()
+ self.capabilities = tuple(dat.split())
+
+
+ def _get_response(self):
+
+ # Read response and store.
+ #
+ # Returns None for continuation responses,
+ # otherwise first response line received.
+
+ resp = self._get_line()
+
+ # Command completion response?
+
+ if self._match(self.tagre, resp):
+ tag = self.mo.group('tag')
+ if not tag in self.tagged_commands:
+ raise self.abort('unexpected tagged response: %r' % resp)
+
+ typ = self.mo.group('type')
+ typ = str(typ, self._encoding)
+ dat = self.mo.group('data')
+ self.tagged_commands[tag] = (typ, [dat])
+ else:
+ dat2 = None
+
+ # '*' (untagged) responses?
+
+ if not self._match(Untagged_response, resp):
+ if self._match(self.Untagged_status, resp):
+ dat2 = self.mo.group('data2')
+
+ if self.mo is None:
+ # Only other possibility is '+' (continuation) response...
+
+ if self._match(Continuation, resp):
+ self.continuation_response = self.mo.group('data')
+ return None # NB: indicates continuation
+
+ raise self.abort("unexpected response: %r" % resp)
+
+ typ = self.mo.group('type')
+ typ = str(typ, self._encoding)
+ dat = self.mo.group('data')
+ if dat is None: dat = b'' # Null untagged response
+ if dat2: dat = dat + b' ' + dat2
+
+ # Is there a literal to come?
+
+ while self._match(self.Literal, dat):
+
+ # Read literal direct from connection.
+
+ size = int(self.mo.group('size'))
+ if __debug__:
+ if self.debug >= 4:
+ self._mesg('read literal size %s' % size)
+ data = self.read(size)
+
+ # Store response with literal as tuple
+
+ self._append_untagged(typ, (dat, data))
+
+ # Read trailer - possibly containing another literal
+
+ dat = self._get_line()
+
+ self._append_untagged(typ, dat)
+
+ # Bracketed response information?
+
+ if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat):
+ typ = self.mo.group('type')
+ typ = str(typ, self._encoding)
+ self._append_untagged(typ, self.mo.group('data'))
+
+ if __debug__:
+ if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'):
+ self._mesg('%s response: %r' % (typ, dat))
+
+ return resp
+
+
+ def _get_tagged_response(self, tag, expect_bye=False):
+
+ while 1:
+ result = self.tagged_commands[tag]
+ if result is not None:
+ del self.tagged_commands[tag]
+ return result
+
+ if expect_bye:
+ typ = 'BYE'
+ bye = self.untagged_responses.pop(typ, None)
+ if bye is not None:
+ # Server replies to the "LOGOUT" command with "BYE"
+ return (typ, bye)
+
+ # If we've seen a BYE at this point, the socket will be
+ # closed, so report the BYE now.
+ self._check_bye()
+
+ # Some have reported "unexpected response" exceptions.
+ # Note that ignoring them here causes loops.
+ # Instead, send me details of the unexpected response and
+ # I'll update the code in `_get_response()'.
+
+ try:
+ self._get_response()
+ except self.abort as val:
+ if __debug__:
+ if self.debug >= 1:
+ self.print_log()
+ raise
+
+
+ def _get_line(self):
+
+ line = self.readline()
+ if not line:
+ raise self.abort('socket error: EOF')
+
+ # Protocol mandates all lines terminated by CRLF
+ if not line.endswith(b'\r\n'):
+ raise self.abort('socket error: unterminated line: %r' % line)
+
+ line = line[:-2]
+ if __debug__:
+ if self.debug >= 4:
+ self._mesg('< %r' % line)
+ else:
+ self._log('< %r' % line)
+ return line
+
+
+ def _match(self, cre, s):
+
+ # Run compiled regular expression match method on 's'.
+ # Save result, return success.
+
+ self.mo = cre.match(s)
+ if __debug__:
+ if self.mo is not None and self.debug >= 5:
+ self._mesg("\tmatched %r => %r" % (cre.pattern, self.mo.groups()))
+ return self.mo is not None
+
+
+ def _new_tag(self):
+
+ tag = self.tagpre + bytes(str(self.tagnum), self._encoding)
+ self.tagnum = self.tagnum + 1
+ self.tagged_commands[tag] = None
+ return tag
+
+
+ def _quote(self, arg):
+
+ arg = arg.replace('\\', '\\\\')
+ arg = arg.replace('"', '\\"')
+
+ return '"' + arg + '"'
+
+
+ def _simple_command(self, name, *args):
+
+ return self._command_complete(name, self._command(name, *args))
+
+
+ def _untagged_response(self, typ, dat, name):
+ if typ == 'NO':
+ return typ, dat
+ if not name in self.untagged_responses:
+ return typ, [None]
+ data = self.untagged_responses.pop(name)
+ if __debug__:
+ if self.debug >= 5:
+ self._mesg('untagged_responses[%s] => %s' % (name, data))
+ return typ, data
+
+
+ if __debug__:
+
+ def _mesg(self, s, secs=None):
+ if secs is None:
+ secs = time.time()
+ tm = time.strftime('%M:%S', time.localtime(secs))
+ sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s))
+ sys.stderr.flush()
+
+ def _dump_ur(self, untagged_resp_dict):
+ if not untagged_resp_dict:
+ return
+ items = (f'{key}: {value!r}'
+ for key, value in untagged_resp_dict.items())
+ self._mesg('untagged responses dump:' + '\n\t\t'.join(items))
+
+ def _log(self, line):
+ # Keep log of last `_cmd_log_len' interactions for debugging.
+ self._cmd_log[self._cmd_log_idx] = (line, time.time())
+ self._cmd_log_idx += 1
+ if self._cmd_log_idx >= self._cmd_log_len:
+ self._cmd_log_idx = 0
+
+ def print_log(self):
+ self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log))
+ i, n = self._cmd_log_idx, self._cmd_log_len
+ while n:
+ try:
+ self._mesg(*self._cmd_log[i])
+ except:
+ pass
+ i += 1
+ if i >= self._cmd_log_len:
+ i = 0
+ n -= 1
+
+
+if HAVE_SSL:
+
+ class IMAP4_SSL(IMAP4):
+
+ """IMAP4 client class over SSL connection
+
+ Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile[, ssl_context[, timeout=None]]]]]])
+
+ host - host's name (default: localhost);
+ port - port number (default: standard IMAP4 SSL port);
+ keyfile - PEM formatted file that contains your private key (default: None);
+ certfile - PEM formatted certificate chain file (default: None);
+ ssl_context - a SSLContext object that contains your certificate chain
+ and private key (default: None)
+ Note: if ssl_context is provided, then parameters keyfile or
+ certfile should not be set otherwise ValueError is raised.
+ timeout - socket timeout (default: None) If timeout is not given or is None,
+ the global default socket timeout is used
+
+ for more documentation see the docstring of the parent class IMAP4.
+ """
+
+
+ def __init__(self, host='', port=IMAP4_SSL_PORT, keyfile=None,
+ certfile=None, ssl_context=None, timeout=None):
+ if ssl_context is not None and keyfile is not None:
+ raise ValueError("ssl_context and keyfile arguments are mutually "
+ "exclusive")
+ if ssl_context is not None and certfile is not None:
+ raise ValueError("ssl_context and certfile arguments are mutually "
+ "exclusive")
+ if keyfile is not None or certfile is not None:
+ import warnings
+ warnings.warn("keyfile and certfile are deprecated, use a "
+ "custom ssl_context instead", DeprecationWarning, 2)
+ self.keyfile = keyfile
+ self.certfile = certfile
+ if ssl_context is None:
+ ssl_context = ssl._create_stdlib_context(certfile=certfile,
+ keyfile=keyfile)
+ self.ssl_context = ssl_context
+ IMAP4.__init__(self, host, port, timeout)
+
+ def _create_socket(self, timeout):
+ sock = IMAP4._create_socket(self, timeout)
+ return self.ssl_context.wrap_socket(sock,
+ server_hostname=self.host)
+
+ def open(self, host='', port=IMAP4_SSL_PORT, timeout=None):
+ """Setup connection to remote server on "host:port".
+ (default: localhost:standard IMAP4 SSL port).
+ This connection will be used by the routines:
+ read, readline, send, shutdown.
+ """
+ IMAP4.open(self, host, port, timeout)
+
+ __all__.append("IMAP4_SSL")
+
+
+class IMAP4_stream(IMAP4):
+
+ """IMAP4 client class over a stream
+
+ Instantiate with: IMAP4_stream(command)
+
+ "command" - a string that can be passed to subprocess.Popen()
+
+ for more documentation see the docstring of the parent class IMAP4.
+ """
+
+
+ def __init__(self, command):
+ self.command = command
+ IMAP4.__init__(self)
+
+
+ def open(self, host=None, port=None, timeout=None):
+ """Setup a stream connection.
+ This connection will be used by the routines:
+ read, readline, send, shutdown.
+ """
+ self.host = None # For compatibility with parent class
+ self.port = None
+ self.sock = None
+ self.file = None
+ self.process = subprocess.Popen(self.command,
+ bufsize=DEFAULT_BUFFER_SIZE,
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ shell=True, close_fds=True)
+ self.writefile = self.process.stdin
+ self.readfile = self.process.stdout
+
+ def read(self, size):
+ """Read 'size' bytes from remote."""
+ return self.readfile.read(size)
+
+
+ def readline(self):
+ """Read line from remote."""
+ return self.readfile.readline()
+
+
+ def send(self, data):
+ """Send data to remote."""
+ self.writefile.write(data)
+ self.writefile.flush()
+
+
+ def shutdown(self):
+ """Close I/O established in "open"."""
+ self.readfile.close()
+ self.writefile.close()
+ self.process.wait()
+
+
+
+class _Authenticator:
+
+ """Private class to provide en/decoding
+ for base64-based authentication conversation.
+ """
+
+ def __init__(self, mechinst):
+ self.mech = mechinst # Callable object to provide/process data
+
+ def process(self, data):
+ ret = self.mech(self.decode(data))
+ if ret is None:
+ return b'*' # Abort conversation
+ return self.encode(ret)
+
+ def encode(self, inp):
+ #
+ # Invoke binascii.b2a_base64 iteratively with
+ # short even length buffers, strip the trailing
+ # line feed from the result and append. "Even"
+ # means a number that factors to both 6 and 8,
+ # so when it gets to the end of the 8-bit input
+ # there's no partial 6-bit output.
+ #
+ oup = b''
+ if isinstance(inp, str):
+ inp = inp.encode('utf-8')
+ while inp:
+ if len(inp) > 48:
+ t = inp[:48]
+ inp = inp[48:]
+ else:
+ t = inp
+ inp = b''
+ e = binascii.b2a_base64(t)
+ if e:
+ oup = oup + e[:-1]
+ return oup
+
+ def decode(self, inp):
+ if not inp:
+ return b''
+ return binascii.a2b_base64(inp)
+
+Months = ' Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split(' ')
+Mon2num = {s.encode():n+1 for n, s in enumerate(Months[1:])}
+
+def Internaldate2tuple(resp):
+ """Parse an IMAP4 INTERNALDATE string.
+
+ Return corresponding local time. The return value is a
+ time.struct_time tuple or None if the string has wrong format.
+ """
+
+ mo = InternalDate.match(resp)
+ if not mo:
+ return None
+
+ mon = Mon2num[mo.group('mon')]
+ zonen = mo.group('zonen')
+
+ day = int(mo.group('day'))
+ year = int(mo.group('year'))
+ hour = int(mo.group('hour'))
+ min = int(mo.group('min'))
+ sec = int(mo.group('sec'))
+ zoneh = int(mo.group('zoneh'))
+ zonem = int(mo.group('zonem'))
+
+ # INTERNALDATE timezone must be subtracted to get UT
+
+ zone = (zoneh*60 + zonem)*60
+ if zonen == b'-':
+ zone = -zone
+
+ tt = (year, mon, day, hour, min, sec, -1, -1, -1)
+ utc = calendar.timegm(tt) - zone
+
+ return time.localtime(utc)
+
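+# For example (a sketch; the result is local time, so it varies by timezone):
+#
+#   Internaldate2tuple(b'INTERNALDATE "18-May-2033 05:33:20 +0200"')
+#   # -> time.struct_time for 2033-05-18 03:33:20 UTC, rendered in local time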
+
+
+def Int2AP(num):
+
+ """Convert integer to A-P string representation."""
+
+ val = b''; AP = b'ABCDEFGHIJKLMNOP'
+ num = int(abs(num))
+ while num:
+ num, mod = divmod(num, 16)
+ val = AP[mod:mod+1] + val
+ return val
+
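+# For example, Int2AP(4096) == b'BAAA' (each base-16 digit maps onto 'A'-'P');
+# _connect() uses this to build a unique command-tag prefix per session.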
+
+
+def ParseFlags(resp):
+
+ """Convert IMAP4 flags response to python tuple."""
+
+ mo = Flags.match(resp)
+ if not mo:
+ return ()
+
+ return tuple(mo.group('flags').split())
+
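+# For example (a sketch):
+#
+#   ParseFlags(b'* OK FLAGS (\\Seen \\Deleted)')
+#   # -> (b'\\Seen', b'\\Deleted')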
+
+def Time2Internaldate(date_time):
+
+ """Convert date_time to IMAP4 INTERNALDATE representation.
+
+ Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The
+ date_time argument can be a number (int or float) representing
+ seconds since epoch (as returned by time.time()), a 9-tuple
+ representing local time, an instance of time.struct_time (as
+ returned by time.localtime()), an aware datetime instance or a
+ double-quoted string. In the last case, it is assumed to already
+ be in the correct format.
+ """
+ if isinstance(date_time, (int, float)):
+ dt = datetime.fromtimestamp(date_time,
+ timezone.utc).astimezone()
+ elif isinstance(date_time, tuple):
+ try:
+ gmtoff = date_time.tm_gmtoff
+ except AttributeError:
+ if time.daylight:
+ dst = date_time[8]
+ if dst == -1:
+ dst = time.localtime(time.mktime(date_time))[8]
+ gmtoff = -(time.timezone, time.altzone)[dst]
+ else:
+ gmtoff = -time.timezone
+ delta = timedelta(seconds=gmtoff)
+ dt = datetime(*date_time[:6], tzinfo=timezone(delta))
+ elif isinstance(date_time, datetime):
+ if date_time.tzinfo is None:
+ raise ValueError("date_time must be aware")
+ dt = date_time
+ elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
+ return date_time # Assume in correct format
+ else:
+ raise ValueError("date_time not of a known type")
+ fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month])
+ return dt.strftime(fmt)
+
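+# For example (a sketch; the zone rendered depends on the local timezone):
+#
+#   import time
+#   Time2Internaldate(time.time())
+#   # -> e.g. '"05-May-2024 12:34:56 +0200"'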
+
+
+if __name__ == '__main__':
+
+ # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]'
+ # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"'
+ # to test the IMAP4_stream class
+
+ import getopt, getpass
+
+ try:
+ optlist, args = getopt.getopt(sys.argv[1:], 'd:s:')
+ except getopt.error as val:
+ optlist, args = (), ()
+
+ stream_command = None
+ for opt,val in optlist:
+ if opt == '-d':
+ Debug = int(val)
+ elif opt == '-s':
+ stream_command = val
+ if not args: args = (stream_command,)
+
+ if not args: args = ('',)
+
+ host = args[0]
+
+ USER = getpass.getuser()
+ PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost"))
+
+ test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'}
+ test_seq1 = (
+ ('login', (USER, PASSWD)),
+ ('create', ('/tmp/xxx 1',)),
+ ('rename', ('/tmp/xxx 1', '/tmp/yyy')),
+ ('CREATE', ('/tmp/yyz 2',)),
+ ('append', ('/tmp/yyz 2', None, None, test_mesg)),
+ ('list', ('/tmp', 'yy*')),
+ ('select', ('/tmp/yyz 2',)),
+ ('search', (None, 'SUBJECT', 'test')),
+ ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')),
+ ('store', ('1', 'FLAGS', r'(\Deleted)')),
+ ('namespace', ()),
+ ('expunge', ()),
+ ('recent', ()),
+ ('close', ()),
+ )
+
+ test_seq2 = (
+ ('select', ()),
+ ('response',('UIDVALIDITY',)),
+ ('uid', ('SEARCH', 'ALL')),
+ ('response', ('EXISTS',)),
+ ('append', (None, None, None, test_mesg)),
+ ('recent', ()),
+ ('logout', ()),
+ )
+
+ def run(cmd, args):
+ M._mesg('%s %s' % (cmd, args))
+ typ, dat = getattr(M, cmd)(*args)
+ M._mesg('%s => %s %s' % (cmd, typ, dat))
+ if typ == 'NO': raise dat[0]
+ return dat
+
+ try:
+ if stream_command:
+ M = IMAP4_stream(stream_command)
+ else:
+ M = IMAP4(host)
+ if M.state == 'AUTH':
+ test_seq1 = test_seq1[1:] # Login not needed
+ M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION)
+ M._mesg('CAPABILITIES = %r' % (M.capabilities,))
+
+ for cmd,args in test_seq1:
+ run(cmd, args)
+
+ for ml in run('list', ('/tmp/', 'yy%')):
+ mo = re.match(r'.*"([^"]+)"$', ml)
+ if mo: path = mo.group(1)
+ else: path = ml.split()[-1]
+ run('delete', (path,))
+
+ for cmd,args in test_seq2:
+ dat = run(cmd, args)
+
+ if (cmd,args) != ('uid', ('SEARCH', 'ALL')):
+ continue
+
+ uid = dat[-1].split()
+ if not uid: continue
+ run('uid', ('FETCH', '%s' % uid[-1],
+ '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)'))
+
+ print('\nAll tests OK.')
+
+ except:
+ print('\nTests failed.')
+
+ if not Debug:
+ print('''
+If you would like to see debugging output,
+try: %s -d5
+''' % sys.argv[0])
+
+ raise
diff --git a/infer_4_37_2/lib/python3.10/ipaddress.py b/infer_4_37_2/lib/python3.10/ipaddress.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac284ca125bead84ef1dff9af7f913eb1fee9181
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/ipaddress.py
@@ -0,0 +1,2361 @@
+# Copyright 2007 Google Inc.
+# Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+__version__ = '1.0'
+
+
+import functools
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+ """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+ """A Value Error related to the netmask."""
+
+
+def ip_address(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Address or IPv6Address object.
+
+ Raises:
+ ValueError: if the *address* passed isn't either a v4 or a v6
+ address
+
+ """
+ try:
+ return IPv4Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 address')
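+
+# Examples (illustrative, not part of the upstream module):
+#
+# >>> ip_address('192.0.2.1')
+# IPv4Address('192.0.2.1')
+# >>> ip_address('2001:db8::1')
+# IPv6Address('2001:db8::1')
+# >>> ip_address(3221225985)  # integers < 2**32 are treated as IPv4
+# IPv4Address('192.0.2.1')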
+
+
+def ip_network(address, strict=True):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP network. Either IPv4 or
+ IPv6 networks may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Network or IPv6Network object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address. Or if the network has host bits set.
+
+ """
+ try:
+ return IPv4Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 network')
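+
+# Examples (illustrative, not part of the upstream module; strict=True
+# rejects host bits set below the mask):
+#
+# >>> ip_network('192.0.2.0/24')
+# IPv4Network('192.0.2.0/24')
+# >>> ip_network('192.0.2.1/24', strict=False)
+# IPv4Network('192.0.2.0/24')
+# >>> ip_network('192.0.2.1/24')
+# Traceback (most recent call last):
+#   ...
+# ValueError: 192.0.2.1/24 has host bits set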
+
+
+def ip_interface(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Interface or IPv6Interface object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address.
+
+ Notes:
+ The IPv?Interface classes describe an Address on a particular
+ Network, so they're basically a combination of both the Address
+ and Network classes.
+
+ """
+ try:
+ return IPv4Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 interface')
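+
+# Example (illustrative, not part of the upstream module; an interface pairs
+# a host address with its network):
+#
+# >>> iface = ip_interface('192.0.2.1/24')
+# >>> iface.ip, iface.network
+# (IPv4Address('192.0.2.1'), IPv4Network('192.0.2.0/24'))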
+
+
+def v4_int_to_packed(address):
+ """Represent an address as 4 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv4 IP address.
+
+ Returns:
+ The integer address packed as 4 bytes in network (big-endian) order.
+
+ Raises:
+ ValueError: If the integer is negative or too large to be an
+ IPv4 IP address.
+
+ """
+ try:
+ return address.to_bytes(4, 'big')
+ except OverflowError:
+ raise ValueError("Address negative or too large for IPv4")
+
+
+def v6_int_to_packed(address):
+ """Represent an address as 16 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv6 IP address.
+
+ Returns:
+ The integer address packed as 16 bytes in network (big-endian) order.
+
+ """
+ try:
+ return address.to_bytes(16, 'big')
+ except OverflowError:
+ raise ValueError("Address negative or too large for IPv6")
+
+
+def _split_optional_netmask(address):
+ """Helper to split the netmask and raise AddressValueError if needed"""
+ addr = str(address).split('/')
+ if len(addr) > 2:
+ raise AddressValueError(f"Only one '/' permitted in {address!r}")
+ return addr
+
+
+def _find_address_range(addresses):
+ """Find a sequence of sorted deduplicated IPv#Address.
+
+ Args:
+ addresses: a list of IPv#Address objects.
+
+ Yields:
+ A tuple containing the first and last IP addresses in the sequence.
+
+ """
+ it = iter(addresses)
+ first = last = next(it)
+ for ip in it:
+ if ip._ip != last._ip + 1:
+ yield first, last
+ first = ip
+ last = ip
+ yield first, last
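+
+# Worked example (illustrative, not part of the upstream module): given the
+# sorted, deduplicated input
+# [IPv4Address('192.0.2.0'), IPv4Address('192.0.2.1'),
+#  IPv4Address('192.0.2.2'), IPv4Address('192.0.2.5')]
+# this yields (192.0.2.0, 192.0.2.2) and then (192.0.2.5, 192.0.2.5).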
+
+
+def _count_righthand_zero_bits(number, bits):
+ """Count the number of zero bits on the right hand side.
+
+ Args:
+ number: an integer.
+ bits: maximum number of bits to count.
+
+ Returns:
+ The number of zero bits on the right hand side of the number.
+
+ """
+ if number == 0:
+ return bits
+ return min(bits, (~number & (number-1)).bit_length())
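+
+# Worked example (illustrative, not part of the upstream module): for
+# number=0b101000, (~number & (number-1)) isolates the trailing zero bits, so:
+#
+# >>> _count_righthand_zero_bits(0b101000, 32)
+# 3
+# >>> _count_righthand_zero_bits(0, 32)
+# 32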
+
+
+def summarize_address_range(first, last):
+ """Summarize a network range given the first and last IP addresses.
+
+ Example:
+ >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
+ ... IPv4Address('192.0.2.130')))
+ ... #doctest: +NORMALIZE_WHITESPACE
+ [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
+ IPv4Network('192.0.2.130/32')]
+
+ Args:
+ first: the first IPv4Address or IPv6Address in the range.
+ last: the last IPv4Address or IPv6Address in the range.
+
+ Returns:
+ An iterator of the summarized IPv(4|6) network objects.
+
+ Raise:
+ TypeError:
+ If the first and last objects are not IP addresses.
+ If the first and last objects are not the same version.
+ ValueError:
+ If the last object is not greater than the first.
+ If the version of the first address is not 4 or 6.
+
+ """
+ if (not (isinstance(first, _BaseAddress) and
+ isinstance(last, _BaseAddress))):
+ raise TypeError('first and last must be IP addresses, not networks')
+ if first.version != last.version:
+ raise TypeError("%s and %s are not of the same version" % (
+ first, last))
+ if first > last:
+ raise ValueError('last IP address must be greater than first')
+
+ if first.version == 4:
+ ip = IPv4Network
+ elif first.version == 6:
+ ip = IPv6Network
+ else:
+ raise ValueError('unknown IP version')
+
+ ip_bits = first._max_prefixlen
+ first_int = first._ip
+ last_int = last._ip
+ while first_int <= last_int:
+ nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
+ (last_int - first_int + 1).bit_length() - 1)
+ net = ip((first_int, ip_bits - nbits))
+ yield net
+ first_int += 1 << nbits
+ if first_int - 1 == ip._ALL_ONES:
+ break
+
+
+def _collapse_addresses_internal(addresses):
+ """Loops through the addresses, collapsing concurrent netblocks.
+
+ Example:
+
+ ip1 = IPv4Network('192.0.2.0/26')
+ ip2 = IPv4Network('192.0.2.64/26')
+ ip3 = IPv4Network('192.0.2.128/26')
+ ip4 = IPv4Network('192.0.2.192/26')
+
+ _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ This shouldn't be called directly; it is called via
+ collapse_addresses([]).
+
+ Args:
+ addresses: A list of IPv4Network's or IPv6Network's
+
+ Returns:
+ An iterator of IPv4Network or IPv6Network objects, matching the
+ version of the networks passed in.
+
+ """
+ # First merge
+ to_merge = list(addresses)
+ subnets = {}
+ while to_merge:
+ net = to_merge.pop()
+ supernet = net.supernet()
+ existing = subnets.get(supernet)
+ if existing is None:
+ subnets[supernet] = net
+ elif existing != net:
+ # Merge consecutive subnets
+ del subnets[supernet]
+ to_merge.append(supernet)
+ # Then iterate over resulting networks, skipping subsumed subnets
+ last = None
+ for net in sorted(subnets.values()):
+ if last is not None:
+ # Since they are sorted, last.network_address <= net.network_address
+ # is a given.
+ if last.broadcast_address >= net.broadcast_address:
+ continue
+ yield net
+ last = net
+
+
+def collapse_addresses(addresses):
+ """Collapse a list of IP objects.
+
+ Example:
+ collapse_addresses([IPv4Network('192.0.2.0/25'),
+ IPv4Network('192.0.2.128/25')]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ Args:
+ addresses: An iterable of IPv4Network or IPv6Network objects.
+
+ Returns:
+ An iterator of the collapsed IPv(4|6)Network objects.
+
+ Raises:
+ TypeError: If passed a list of mixed version objects.
+
+ """
+ addrs = []
+ ips = []
+ nets = []
+
+ # split IP addresses and networks
+ for ip in addresses:
+ if isinstance(ip, _BaseAddress):
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, ips[-1]))
+ ips.append(ip)
+ elif ip._prefixlen == ip._max_prefixlen:
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, ips[-1]))
+ try:
+ ips.append(ip.ip)
+ except AttributeError:
+ ips.append(ip.network_address)
+ else:
+ if nets and nets[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, nets[-1]))
+ nets.append(ip)
+
+ # sort and dedup
+ ips = sorted(set(ips))
+
+ # find consecutive address ranges in the sorted sequence and summarize them
+ if ips:
+ for first, last in _find_address_range(ips):
+ addrs.extend(summarize_address_range(first, last))
+
+ return _collapse_addresses_internal(addrs + nets)
+
+
+def get_mixed_type_key(obj):
+ """Return a key suitable for sorting between networks and addresses.
+
+ Address and Network objects are not sortable by default; they're
+ fundamentally different so the expression
+
+ IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
+
+ doesn't make any sense. Sometimes, however, you may wish to have
+ ipaddress sort them anyway; in that case, use this function as the
+ key= argument to sorted().
+
+ Args:
+ obj: either a Network or Address object.
+ Returns:
+ appropriate key.
+
+ """
+ if isinstance(obj, _BaseNetwork):
+ return obj._get_networks_key()
+ elif isinstance(obj, _BaseAddress):
+ return obj._get_address_key()
+ return NotImplemented
+
+
+class _IPAddressBase:
+
+ """The mother class."""
+
+ __slots__ = ()
+
+ @property
+ def exploded(self):
+ """Return the longhand version of the IP address as a string."""
+ return self._explode_shorthand_ip_string()
+
+ @property
+ def compressed(self):
+ """Return the shorthand version of the IP address as a string."""
+ return str(self)
+
+ @property
+ def reverse_pointer(self):
+ """The name of the reverse DNS pointer for the IP address, e.g.:
+ >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+ '1.0.0.127.in-addr.arpa'
+ >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+ '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+ """
+ return self._reverse_pointer()
+
+ @property
+ def version(self):
+ msg = '%200s has no version specified' % (type(self),)
+ raise NotImplementedError(msg)
+
+ def _check_int_address(self, address):
+ if address < 0:
+ msg = "%d (< 0) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._version))
+ if address > self._ALL_ONES:
+ msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._max_prefixlen,
+ self._version))
+
+ def _check_packed_address(self, address, expected_len):
+ address_len = len(address)
+ if address_len != expected_len:
+ msg = "%r (len %d != %d) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, address_len,
+ expected_len, self._version))
+
+ @classmethod
+ def _ip_int_from_prefix(cls, prefixlen):
+ """Turn the prefix length into a bitwise netmask
+
+ Args:
+ prefixlen: An integer, the prefix length.
+
+ Returns:
+ An integer.
+
+ """
+ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
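+
+ # Worked example (illustrative, not part of the upstream module): for
+ # IPv4 (32-bit), prefixlen 24 gives
+ # 0xFFFFFFFF ^ (0xFFFFFFFF >> 24) == 0xFFFFFF00, i.e. 255.255.255.0;
+ # _prefix_from_ip_int() below performs the inverse mapping.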
+
+ @classmethod
+ def _prefix_from_ip_int(cls, ip_int):
+ """Return prefix length from the bitwise netmask.
+
+ Args:
+ ip_int: An integer, the netmask in expanded bitwise format
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ ValueError: If the input intermingles zeroes & ones
+ """
+ trailing_zeroes = _count_righthand_zero_bits(ip_int,
+ cls._max_prefixlen)
+ prefixlen = cls._max_prefixlen - trailing_zeroes
+ leading_ones = ip_int >> trailing_zeroes
+ all_ones = (1 << prefixlen) - 1
+ if leading_ones != all_ones:
+ byteslen = cls._max_prefixlen // 8
+ details = ip_int.to_bytes(byteslen, 'big')
+ msg = 'Netmask pattern %r mixes zeroes & ones'
+ raise ValueError(msg % details)
+ return prefixlen
+
+ @classmethod
+ def _report_invalid_netmask(cls, netmask_str):
+ msg = '%r is not a valid netmask' % netmask_str
+ raise NetmaskValueError(msg) from None
+
+ @classmethod
+ def _prefix_from_prefix_string(cls, prefixlen_str):
+ """Return prefix length from a numeric string
+
+ Args:
+ prefixlen_str: The string to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask
+ """
+ # int allows a leading +/- as well as surrounding whitespace,
+ # so we ensure that isn't the case
+ if not (prefixlen_str.isascii() and prefixlen_str.isdigit()):
+ cls._report_invalid_netmask(prefixlen_str)
+ try:
+ prefixlen = int(prefixlen_str)
+ except ValueError:
+ cls._report_invalid_netmask(prefixlen_str)
+ if not (0 <= prefixlen <= cls._max_prefixlen):
+ cls._report_invalid_netmask(prefixlen_str)
+ return prefixlen
+
+ @classmethod
+ def _prefix_from_ip_string(cls, ip_str):
+ """Turn a netmask/hostmask string into a prefix length
+
+ Args:
+ ip_str: The netmask/hostmask to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask/hostmask
+ """
+ # Parse the netmask/hostmask like an IP address.
+ try:
+ ip_int = cls._ip_int_from_string(ip_str)
+ except AddressValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
+ # Note that the two ambiguous cases (all-ones and all-zeroes) are
+ # treated as netmasks.
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ pass
+
+ # Invert the bits, and try matching a /0+1+/ hostmask instead.
+ ip_int ^= cls._ALL_ONES
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ @classmethod
+ def _split_addr_prefix(cls, address):
+ """Helper function to parse address of Network/Interface.
+
+ Arg:
+ address: Argument of Network/Interface.
+
+ Returns:
+ (addr, prefix) tuple.
+ """
+ # a packed address or integer
+ if isinstance(address, (bytes, int)):
+ return address, cls._max_prefixlen
+
+ if not isinstance(address, tuple):
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ address = _split_optional_netmask(address)
+
+ # Constructing from a tuple (addr, [mask])
+ if len(address) > 1:
+ return address
+ return address[0], cls._max_prefixlen
+
+ def __reduce__(self):
+ return self.__class__, (str(self),)
+
+
+_address_fmt_re = None
+
+@functools.total_ordering
+class _BaseAddress(_IPAddressBase):
+
+ """A generic IP object.
+
+ This IP class contains the version independent methods which are
+ used by single IP addresses.
+ """
+
+ __slots__ = ()
+
+ def __int__(self):
+ return self._ip
+
+ def __eq__(self, other):
+ try:
+ return (self._ip == other._ip
+ and self._version == other._version)
+ except AttributeError:
+ return NotImplemented
+
+ def __lt__(self, other):
+ if not isinstance(other, _BaseAddress):
+ return NotImplemented
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ self, other))
+ if self._ip != other._ip:
+ return self._ip < other._ip
+ return False
+
+ # Shorthand for integer addition and subtraction. This is not
+ # meant to ever support addition/subtraction of addresses.
+ def __add__(self, other):
+ if not isinstance(other, int):
+ return NotImplemented
+ return self.__class__(int(self) + other)
+
+ def __sub__(self, other):
+ if not isinstance(other, int):
+ return NotImplemented
+ return self.__class__(int(self) - other)
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, str(self))
+
+ def __str__(self):
+ return str(self._string_from_ip_int(self._ip))
+
+ def __hash__(self):
+ return hash(hex(int(self._ip)))
+
+ def _get_address_key(self):
+ return (self._version, self)
+
+ def __reduce__(self):
+ return self.__class__, (self._ip,)
+
+ def __format__(self, fmt):
+ """Returns an IP address as a formatted string.
+
+ Supported presentation types are:
+ 's': returns the IP address as a string (default)
+ 'b': converts to binary and returns a zero-padded string
+ 'X' or 'x': converts to upper- or lower-case hex and returns a zero-padded string
+ 'n': the same as 'b' for IPv4 and 'x' for IPv6
+
+ For binary and hex presentation types, the alternate form specifier
+ '#' and the grouping option '_' are supported.
+ """
+
+ # Support string formatting
+ if not fmt or fmt[-1] == 's':
+ return format(str(self), fmt)
+
+ # From here on down, support for 'bnXx'
+ global _address_fmt_re
+ if _address_fmt_re is None:
+ import re
+ _address_fmt_re = re.compile('(#?)(_?)([xbnX])')
+
+ m = _address_fmt_re.fullmatch(fmt)
+ if not m:
+ return super().__format__(fmt)
+
+ alternate, grouping, fmt_base = m.groups()
+
+ # Set some defaults
+ if fmt_base == 'n':
+ if self._version == 4:
+ fmt_base = 'b' # Binary is default for ipv4
+ else:
+ fmt_base = 'x' # Hex is default for ipv6
+
+ if fmt_base == 'b':
+ padlen = self._max_prefixlen
+ else:
+ padlen = self._max_prefixlen // 4
+
+ if grouping:
+ padlen += padlen // 4 - 1
+
+ if alternate:
+ padlen += 2 # 0b or 0x
+
+ return format(int(self), f'{alternate}0{padlen}{grouping}{fmt_base}')
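+
+ # Examples (illustrative, not part of the upstream module):
+ #
+ # >>> format(IPv4Address('192.0.2.1'), 'x')
+ # 'c0000201'
+ # >>> format(IPv4Address('192.0.2.1'), '#b')
+ # '0b11000000000000000000001000000001'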
+
+
+@functools.total_ordering
+class _BaseNetwork(_IPAddressBase):
+ """A generic IP network object.
+
+ This IP class contains the version independent methods which are
+ used by networks.
+ """
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, str(self))
+
+ def __str__(self):
+ return '%s/%d' % (self.network_address, self.prefixlen)
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the network
+ or broadcast addresses.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in range(network + 1, broadcast):
+ yield self._address_class(x)
+
+ def __iter__(self):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in range(network, broadcast + 1):
+ yield self._address_class(x)
+
+ def __getitem__(self, n):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ if n >= 0:
+ if network + n > broadcast:
+ raise IndexError('address out of range')
+ return self._address_class(network + n)
+ else:
+ n += 1
+ if broadcast + n < network:
+ raise IndexError('address out of range')
+ return self._address_class(broadcast + n)
+
+ def __lt__(self, other):
+ if not isinstance(other, _BaseNetwork):
+ return NotImplemented
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ self, other))
+ if self.network_address != other.network_address:
+ return self.network_address < other.network_address
+ if self.netmask != other.netmask:
+ return self.netmask < other.netmask
+ return False
+
+ def __eq__(self, other):
+ try:
+ return (self._version == other._version and
+ self.network_address == other.network_address and
+ int(self.netmask) == int(other.netmask))
+ except AttributeError:
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(int(self.network_address) ^ int(self.netmask))
+
+ def __contains__(self, other):
+ # always false if one is v4 and the other is v6.
+ if self._version != other._version:
+ return False
+ # dealing with another network.
+ if isinstance(other, _BaseNetwork):
+ return False
+ # dealing with another address
+ else:
+ # address
+ return other._ip & self.netmask._ip == self.network_address._ip
+
+ def overlaps(self, other):
+ """Tell if self is partly contained in other."""
+ return self.network_address in other or (
+ self.broadcast_address in other or (
+ other.network_address in self or (
+ other.broadcast_address in self)))
+
+ @functools.cached_property
+ def broadcast_address(self):
+ return self._address_class(int(self.network_address) |
+ int(self.hostmask))
+
+ @functools.cached_property
+ def hostmask(self):
+ return self._address_class(int(self.netmask) ^ self._ALL_ONES)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%d' % (self.network_address, self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self.network_address, self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self.network_address, self.hostmask)
+
+ @property
+ def num_addresses(self):
+ """Number of hosts in the current subnet."""
+ return int(self.broadcast_address) - int(self.network_address) + 1
+
+ @property
+ def _address_class(self):
+ # Returning bare address objects (rather than interfaces) allows for
+ # more consistent behaviour across the network address, broadcast
+ # address and individual host addresses.
+ msg = '%200s has no associated address class' % (type(self),)
+ raise NotImplementedError(msg)
+
+ @property
+ def prefixlen(self):
+ return self._prefixlen
+
+ def address_exclude(self, other):
+ """Remove an address from a larger block.
+
+ For example:
+
+ addr1 = ip_network('192.0.2.0/28')
+ addr2 = ip_network('192.0.2.1/32')
+ list(addr1.address_exclude(addr2)) =
+ [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
+ IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
+
+ or IPv6:
+
+ addr1 = ip_network('2001:db8::1/32')
+ addr2 = ip_network('2001:db8::1/128')
+ list(addr1.address_exclude(addr2)) =
+ [ip_network('2001:db8::1/128'),
+ ip_network('2001:db8::2/127'),
+ ip_network('2001:db8::4/126'),
+ ip_network('2001:db8::8/125'),
+ ...
+ ip_network('2001:db8:8000::/33')]
+
+ Args:
+ other: An IPv4Network or IPv6Network object of the same type.
+
+ Returns:
+ An iterator of the IPv(4|6)Network objects which is self
+ minus other.
+
+ Raises:
+ TypeError: If self and other are of differing address
+ versions, or if other is not a network object.
+ ValueError: If other is not completely contained by self.
+
+ """
+ if not self._version == other._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ self, other))
+
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError("%s is not a network object" % other)
+
+ if not other.subnet_of(self):
+ raise ValueError('%s not contained in %s' % (other, self))
+ if other == self:
+ return
+
+ # Make sure we're comparing the network of other.
+ other = other.__class__('%s/%s' % (other.network_address,
+ other.prefixlen))
+
+ s1, s2 = self.subnets()
+ while s1 != other and s2 != other:
+ if other.subnet_of(s1):
+ yield s2
+ s1, s2 = s1.subnets()
+ elif other.subnet_of(s2):
+ yield s1
+ s1, s2 = s2.subnets()
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (s1, s2, other))
+ if s1 == other:
+ yield s2
+ elif s2 == other:
+ yield s1
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (s1, s2, other))
+
+ def compare_networks(self, other):
+ """Compare two IP objects.
+
+ This is only concerned about the comparison of the integer
+ representation of the network addresses. This means that the
+ host bits aren't considered at all in this method. If you want
+ to compare host bits, you can easily enough do a
+ 'HostA._ip < HostB._ip'
+
+ Args:
+ other: An IP object.
+
+ Returns:
+ If the IP versions of self and other are the same, returns:
+
+ -1 if self < other:
+ eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
+ IPv6Network('2001:db8::1000/124') <
+ IPv6Network('2001:db8::2000/124')
+ 0 if self == other
+ eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
+ IPv6Network('2001:db8::1000/124') ==
+ IPv6Network('2001:db8::1000/124')
+ 1 if self > other
+ eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
+ IPv6Network('2001:db8::2000/124') >
+ IPv6Network('2001:db8::1000/124')
+
+ Raises:
+ TypeError if the IP versions are different.
+
+ """
+ # does this need to raise a ValueError?
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ # self._version == other._version below here:
+ if self.network_address < other.network_address:
+ return -1
+ if self.network_address > other.network_address:
+ return 1
+ # self.network_address == other.network_address below here:
+ if self.netmask < other.netmask:
+ return -1
+ if self.netmask > other.netmask:
+ return 1
+ return 0
+
+ def _get_networks_key(self):
+ """Network-only key function.
+
+ Returns an object that identifies this address' network and
+ netmask. This function is a suitable "key" argument for sorted()
+ and list.sort().
+
+ """
+ return (self._version, self.network_address, self.netmask)
+
+ def subnets(self, prefixlen_diff=1, new_prefix=None):
+ """The subnets which join to make the current subnet.
+
+ If self contains only one IP address (self._prefixlen == 32 for
+ IPv4, or self._prefixlen == 128 for IPv6), the iterator yields
+ just this network itself.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length
+ should be increased by. This should not be set if
+ new_prefix is also set.
+ new_prefix: The desired new prefix length. This must be a
+ larger number (i.e. a longer prefix, hence smaller subnets)
+ than the existing prefix. This should not be set if
+ prefixlen_diff is also set.
+
+ Returns:
+ An iterator of IPv(4|6) objects.
+
+ Raises:
+ ValueError: The prefixlen_diff is too small or too large.
+ OR
+ prefixlen_diff and new_prefix are both set or new_prefix
+ is a smaller number than the current prefix (smaller
+ number means a larger network)
+
+ """
+ if self._prefixlen == self._max_prefixlen:
+ yield self
+ return
+
+ if new_prefix is not None:
+ if new_prefix < self._prefixlen:
+ raise ValueError('new prefix must be longer')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = new_prefix - self._prefixlen
+
+ if prefixlen_diff < 0:
+ raise ValueError('prefix length diff must be > 0')
+ new_prefixlen = self._prefixlen + prefixlen_diff
+
+ if new_prefixlen > self._max_prefixlen:
+ raise ValueError(
+ 'prefix length diff %d is invalid for netblock %s' % (
+ new_prefixlen, self))
+
+ start = int(self.network_address)
+ end = int(self.broadcast_address) + 1
+ step = (int(self.hostmask) + 1) >> prefixlen_diff
+ for new_addr in range(start, end, step):
+ current = self.__class__((new_addr, new_prefixlen))
+ yield current
+
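+ # Example (illustrative, not part of the upstream module):
+ #
+ # >>> list(ip_network('192.0.2.0/24').subnets(prefixlen_diff=2))
+ # [IPv4Network('192.0.2.0/26'), IPv4Network('192.0.2.64/26'),
+ #  IPv4Network('192.0.2.128/26'), IPv4Network('192.0.2.192/26')]
+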
+ def supernet(self, prefixlen_diff=1, new_prefix=None):
+ """The supernet containing the current network.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length of
+ the network should be decreased by. For example, given a
+ /24 network and a prefixlen_diff of 3, a supernet with a
+ /21 netmask is returned.
+
+ Returns:
+ An IPv4 network object.
+
+ Raises:
+ ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
+ a negative prefix length.
+ OR
+ If prefixlen_diff and new_prefix are both set or new_prefix is a
+ larger number than the current prefix (larger number means a
+ smaller network)
+
+ """
+ if self._prefixlen == 0:
+ return self
+
+ if new_prefix is not None:
+ if new_prefix > self._prefixlen:
+ raise ValueError('new prefix must be shorter')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = self._prefixlen - new_prefix
+
+ new_prefixlen = self.prefixlen - prefixlen_diff
+ if new_prefixlen < 0:
+ raise ValueError(
+ 'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
+ (self.prefixlen, prefixlen_diff))
+ return self.__class__((
+ int(self.network_address) & (int(self.netmask) << prefixlen_diff),
+ new_prefixlen
+ ))
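+
+ # Example (illustrative, not part of the upstream module):
+ #
+ # >>> ip_network('192.0.2.0/24').supernet(new_prefix=20)
+ # IPv4Network('192.0.0.0/20')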
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return (self.network_address.is_multicast and
+ self.broadcast_address.is_multicast)
+
+ @staticmethod
+ def _is_subnet_of(a, b):
+ try:
+ # Always false if one is v4 and the other is v6.
+ if a._version != b._version:
+ raise TypeError(f"{a} and {b} are not of the same version")
+ return (b.network_address <= a.network_address and
+ b.broadcast_address >= a.broadcast_address)
+ except AttributeError:
+ raise TypeError(f"Unable to test subnet containment "
+ f"between {a} and {b}")
+
+ def subnet_of(self, other):
+ """Return True if this network is a subnet of other."""
+ return self._is_subnet_of(self, other)
+
+ def supernet_of(self, other):
+ """Return True if this network is a supernet of other."""
+ return self._is_subnet_of(other, self)
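+
+ # Examples (illustrative, not part of the upstream module):
+ #
+ # >>> ip_network('192.0.2.0/28').subnet_of(ip_network('192.0.2.0/24'))
+ # True
+ # >>> ip_network('192.0.2.0/24').supernet_of(ip_network('192.0.2.0/28'))
+ # True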
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return (self.network_address.is_reserved and
+ self.broadcast_address.is_reserved)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return (self.network_address.is_link_local and
+ self.broadcast_address.is_link_local)
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return (self.network_address.is_private and
+ self.broadcast_address.is_private)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return (self.network_address.is_unspecified and
+ self.broadcast_address.is_unspecified)
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return (self.network_address.is_loopback and
+ self.broadcast_address.is_loopback)
+
+class _BaseV4:
+
+ """Base IPv4 object.
+
+ The following methods are used by IPv4 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 4
+ # Equivalent to 255.255.255.255 or 32 bits of 1's.
+ _ALL_ONES = (2**IPV4LENGTH) - 1
+
+ _max_prefixlen = IPV4LENGTH
+ # There are only a handful of valid v4 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ def _explode_shorthand_ip_string(self):
+ return str(self)
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ - a string representing the prefix netmask (e.g. "255.255.255.0")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, int):
+ prefixlen = arg
+ if not (0 <= prefixlen <= cls._max_prefixlen):
+ cls._report_invalid_netmask(prefixlen)
+ else:
+ try:
+ # Check for a netmask in prefix length form
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ except NetmaskValueError:
+ # Check for a netmask or hostmask in dotted-quad form.
+ # This may raise NetmaskValueError.
+ prefixlen = cls._prefix_from_ip_string(arg)
+ netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn the given IP string into an integer for comparison.
+
+ Args:
+ ip_str: A string, the IP address.
+
+ Returns:
+ The IP address as an integer.
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError('Address cannot be empty')
+
+ octets = ip_str.split('.')
+ if len(octets) != 4:
+ raise AddressValueError("Expected 4 octets in %r" % ip_str)
+
+ try:
+ return int.from_bytes(map(cls._parse_octet, octets), 'big')
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str)) from None
+
+ @classmethod
+ def _parse_octet(cls, octet_str):
+ """Convert a decimal octet into an integer.
+
+ Args:
+ octet_str: A string, the number to parse.
+
+ Returns:
+ The octet as an integer.
+
+ Raises:
+ ValueError: if the octet isn't strictly a decimal from [0..255].
+
+ """
+ if not octet_str:
+ raise ValueError("Empty octet not permitted")
+ # Reject non-ASCII digits.
+ if not (octet_str.isascii() and octet_str.isdigit()):
+ msg = "Only decimal digits permitted in %r"
+ raise ValueError(msg % octet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(octet_str) > 3:
+ msg = "At most 3 characters permitted in %r"
+ raise ValueError(msg % octet_str)
+ # Handle leading zeros as strict as glibc's inet_pton()
+ # See security bug bpo-36384
+ if octet_str != '0' and octet_str[0] == '0':
+ msg = "Leading zeros are not permitted in %r"
+ raise ValueError(msg % octet_str)
+ # Convert to integer (we know digits are legal)
+ octet_int = int(octet_str, 10)
+ if octet_int > 255:
+ raise ValueError("Octet %d (> 255) not permitted" % octet_int)
+ return octet_int
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int):
+ """Turns a 32-bit integer into dotted decimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ The IP address as a string in dotted decimal notation.
+
+ """
+ return '.'.join(map(str, ip_int.to_bytes(4, 'big')))
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv4 address.
+
+ This implements the method described in RFC1035 3.5.
+
+ """
+ reverse_octets = str(self).split('.')[::-1]
+ return '.'.join(reverse_octets) + '.in-addr.arpa'
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv4Address(_BaseV4, _BaseAddress):
+
+ """Represent and manipulate single IPv4 Addresses."""
+
+ __slots__ = ('_ip', '__weakref__')
+
+ def __init__(self, address):
+
+ """
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv4Address('192.0.2.1') == IPv4Address(3221225985).
+ or, more generally
+ IPv4Address(int(IPv4Address('192.0.2.1'))) ==
+ IPv4Address('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, int):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 4)
+ self._ip = int.from_bytes(address, 'big')
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = str(address)
+ if '/' in addr_str:
+ raise AddressValueError(f"Unexpected '/' in {address!r}")
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v4_int_to_packed(self._ip)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within the
+ reserved IPv4 Network range.
+
+ """
+ return self in self._constants._reserved_network
+
+ @property
+ @functools.lru_cache()
+ def is_private(self):
+ """``True`` if the address is defined as not globally reachable by
+ iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
+ (for IPv6) with the following exceptions:
+
+ * ``is_private`` is ``False`` for ``100.64.0.0/10``
+ * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+
+ address.is_private == address.ipv4_mapped.is_private
+
+ ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10``
+ IPv4 range where they are both ``False``.
+ """
+ return (
+ any(self in net for net in self._constants._private_networks)
+ and all(self not in net for net in self._constants._private_networks_exceptions)
+ )
+
+ @property
+ @functools.lru_cache()
+ def is_global(self):
+ """``True`` if the address is defined as globally reachable by
+ iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
+ (for IPv6) with the following exception:
+
+ For IPv4-mapped IPv6-addresses the ``is_global`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+
+ address.is_global == address.ipv4_mapped.is_global
+
+ ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10``
+ IPv4 range where they are both ``False``.
+ """
+ return self not in self._constants._public_network and not self.is_private
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is multicast.
+ See RFC 3171 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 5735 3.
+
+ """
+ return self == self._constants._unspecified_address
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback per RFC 3330.
+
+ """
+ return self in self._constants._loopback_network
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is link-local per RFC 3927.
+
+ """
+ return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+
+ def __init__(self, address):
+ addr, mask = self._split_addr_prefix(address)
+
+ IPv4Address.__init__(self, addr)
+ self.network = IPv4Network((addr, mask), strict=False)
+ self.netmask = self.network.netmask
+ self._prefixlen = self.network._prefixlen
+
+ @functools.cached_property
+ def hostmask(self):
+ return self.network.hostmask
+
+ def __str__(self):
+ return '%s/%d' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ def __eq__(self, other):
+ address_equal = IPv4Address.__eq__(self, other)
+ if address_equal is NotImplemented or not address_equal:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv4Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (self.network < other.network or
+ self.network == other.network and address_less)
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return hash((self._ip, self._prefixlen, int(self.network.network_address)))
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv4Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+ """This class represents and manipulates 32-bit IPv4 network + addresses..
+
+ Attributes: [examples for IPv4Network('192.0.2.0/27')]
+ .network_address: IPv4Address('192.0.2.0')
+ .hostmask: IPv4Address('0.0.0.31')
+ .broadcast_address: IPv4Address('192.0.2.31')
+ .netmask: IPv4Address('255.255.255.224')
+ .prefixlen: 27
+
+ """
+ # Class to use when creating address objects
+ _address_class = IPv4Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv4 network object.
+
+ Args:
+ address: A string or integer representing the IP [& network].
+ '192.0.2.0/24'
+ '192.0.2.0/255.255.255.0'
+ '192.0.2.0/0.0.0.255'
+ are all functionally the same in IPv4. Similarly,
+ '192.0.2.1'
+ '192.0.2.1/255.255.255.255'
+ '192.0.2.1/32'
+ are also functionally equivalent. That is to say, failing to
+ provide a subnetmask will create an object with a mask of /32.
+
+ If the mask (portion after the / in the argument) is given in
+ dotted quad form, it is treated as a netmask if it starts with a
+ non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+ starts with a zero field (e.g. 0.255.255.255 == /8), with the
+ single exception of an all-zero mask which is treated as a
+ netmask == /0. If no mask is given, a default of /32 is used.
+
+ Additionally, an integer can be passed, so
+ IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+ or, more generally
+ IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+ IPv4Interface('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv4 address.
+ ValueError: If strict is True and a network address is not
+ supplied.
+ """
+ addr, mask = self._split_addr_prefix(address)
+
+ self.network_address = IPv4Address(addr)
+ self.netmask, self._prefixlen = self._make_netmask(mask)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError('%s has host bits set' % self)
+ else:
+ self.network_address = IPv4Address(packed &
+ int(self.netmask))
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+ elif self._prefixlen == (self._max_prefixlen):
+ self.hosts = lambda: [IPv4Address(addr)]
+
+ @property
+ @functools.lru_cache()
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return (not (self.network_address in IPv4Network('100.64.0.0/10') and
+ self.broadcast_address in IPv4Network('100.64.0.0/10')) and
+ not self.is_private)
+
+
+class _IPv4Constants:
+ _linklocal_network = IPv4Network('169.254.0.0/16')
+
+ _loopback_network = IPv4Network('127.0.0.0/8')
+
+ _multicast_network = IPv4Network('224.0.0.0/4')
+
+ _public_network = IPv4Network('100.64.0.0/10')
+
+ # Not globally reachable address blocks listed on
+ # https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
+ _private_networks = [
+ IPv4Network('0.0.0.0/8'),
+ IPv4Network('10.0.0.0/8'),
+ IPv4Network('127.0.0.0/8'),
+ IPv4Network('169.254.0.0/16'),
+ IPv4Network('172.16.0.0/12'),
+ IPv4Network('192.0.0.0/24'),
+ IPv4Network('192.0.0.170/31'),
+ IPv4Network('192.0.2.0/24'),
+ IPv4Network('192.168.0.0/16'),
+ IPv4Network('198.18.0.0/15'),
+ IPv4Network('198.51.100.0/24'),
+ IPv4Network('203.0.113.0/24'),
+ IPv4Network('240.0.0.0/4'),
+ IPv4Network('255.255.255.255/32'),
+ ]
+
+ _private_networks_exceptions = [
+ IPv4Network('192.0.0.9/32'),
+ IPv4Network('192.0.0.10/32'),
+ ]
+
+ _reserved_network = IPv4Network('240.0.0.0/4')
+
+ _unspecified_address = IPv4Address('0.0.0.0')
+
+
+IPv4Address._constants = _IPv4Constants
+
+
+class _BaseV6:
+
+ """Base IPv6 object.
+
+ The following methods are used by IPv6 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 6
+ _ALL_ONES = (2**IPV6LENGTH) - 1
+ _HEXTET_COUNT = 8
+ _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+ _max_prefixlen = IPV6LENGTH
+
+ # There are only a limited number of valid v6 netmasks, so we cache
+ # them all when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "64")
+ (unlike the IPv4 variant, netmask strings are not accepted here)
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, int):
+ prefixlen = arg
+ if not (0 <= prefixlen <= cls._max_prefixlen):
+ cls._report_invalid_netmask(prefixlen)
+ else:
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn an IPv6 ip_str into an integer.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ An int, the IPv6 address
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv6 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError('Address cannot be empty')
+
+ parts = ip_str.split(':')
+
+ # An IPv6 address needs at least 2 colons (3 parts).
+ _min_parts = 3
+ if len(parts) < _min_parts:
+ msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
+ raise AddressValueError(msg)
+
+ # If the address has an IPv4-style suffix, convert it to hexadecimal.
+ if '.' in parts[-1]:
+ try:
+ ipv4_int = IPv4Address(parts.pop())._ip
+ except AddressValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str)) from None
+ parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
+ parts.append('%x' % (ipv4_int & 0xFFFF))
+
+ # An IPv6 address can't have more than 8 colons (9 parts).
+ # The extra colon comes from using the "::" notation for a single
+ # leading or trailing zero part.
+ _max_parts = cls._HEXTET_COUNT + 1
+ if len(parts) > _max_parts:
+ msg = "At most %d colons permitted in %r" % (_max_parts-1, ip_str)
+ raise AddressValueError(msg)
+
+ # Disregarding the endpoints, find '::' with nothing in between.
+ # This indicates that a run of zeroes has been skipped.
+ skip_index = None
+ for i in range(1, len(parts) - 1):
+ if not parts[i]:
+ if skip_index is not None:
+ # Can't have more than one '::'
+ msg = "At most one '::' permitted in %r" % ip_str
+ raise AddressValueError(msg)
+ skip_index = i
+
+ # parts_hi is the number of parts to copy from above/before the '::'
+ # parts_lo is the number of parts to copy from below/after the '::'
+ if skip_index is not None:
+ # If we found a '::', then check if it also covers the endpoints.
+ parts_hi = skip_index
+ parts_lo = len(parts) - skip_index - 1
+ if not parts[0]:
+ parts_hi -= 1
+ if parts_hi:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ parts_lo -= 1
+ if parts_lo:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
+ if parts_skipped < 1:
+ msg = "Expected at most %d other parts with '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT-1, ip_str))
+ else:
+ # Otherwise, allocate the entire address to parts_hi. The
+ # endpoints could still be empty, but _parse_hextet() will check
+ # for that.
+ if len(parts) != cls._HEXTET_COUNT:
+ msg = "Exactly %d parts expected without '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
+ if not parts[0]:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_hi = len(parts)
+ parts_lo = 0
+ parts_skipped = 0
+
+ try:
+ # Now, parse the hextets into a 128-bit integer.
+ ip_int = 0
+ for i in range(parts_hi):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ ip_int <<= 16 * parts_skipped
+ for i in range(-parts_lo, 0):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ return ip_int
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str)) from None
+
+ @classmethod
+ def _parse_hextet(cls, hextet_str):
+ """Convert an IPv6 hextet string into an integer.
+
+ Args:
+ hextet_str: A string, the number to parse.
+
+ Returns:
+ The hextet as an integer.
+
+ Raises:
+ ValueError: if the input isn't strictly a hex number from
+ [0..FFFF].
+
+ """
+ # Reject non-ASCII digits.
+ if not cls._HEX_DIGITS.issuperset(hextet_str):
+ raise ValueError("Only hex digits permitted in %r" % hextet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(hextet_str) > 4:
+ msg = "At most 4 characters permitted in %r"
+ raise ValueError(msg % hextet_str)
+ # Length check means we can skip checking the integer value
+ return int(hextet_str, 16)
+
+ @classmethod
+ def _compress_hextets(cls, hextets):
+ """Compresses a list of hextets.
+
+ Compresses a list of strings, replacing the longest continuous
+ sequence of "0" in the list with "" and adding empty strings at
+ the beginning or at the end of the string such that subsequently
+ calling ":".join(hextets) will produce the compressed version of
+ the IPv6 address.
+
+ Args:
+ hextets: A list of strings, the hextets to compress.
+
+ Returns:
+ A list of strings.
+
+ """
+ best_doublecolon_start = -1
+ best_doublecolon_len = 0
+ doublecolon_start = -1
+ doublecolon_len = 0
+ for index, hextet in enumerate(hextets):
+ if hextet == '0':
+ doublecolon_len += 1
+ if doublecolon_start == -1:
+ # Start of a sequence of zeros.
+ doublecolon_start = index
+ if doublecolon_len > best_doublecolon_len:
+ # This is the longest sequence of zeros so far.
+ best_doublecolon_len = doublecolon_len
+ best_doublecolon_start = doublecolon_start
+ else:
+ doublecolon_len = 0
+ doublecolon_start = -1
+
+ if best_doublecolon_len > 1:
+ best_doublecolon_end = (best_doublecolon_start +
+ best_doublecolon_len)
+ # For zeros at the end of the address.
+ if best_doublecolon_end == len(hextets):
+ hextets += ['']
+ hextets[best_doublecolon_start:best_doublecolon_end] = ['']
+ # For zeros at the beginning of the address.
+ if best_doublecolon_start == 0:
+ hextets = [''] + hextets
+
+ return hextets
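+
+ # Worked example (illustrative, not part of the upstream module): the
+ # longest run of '0' hextets is replaced by an empty string so that
+ # ':'.join() produces the '::' shorthand:
+ #
+ # ['2001', 'db8', '0', '0', '0', '0', '0', '1']
+ # -> ['2001', 'db8', '', '1']  # ':'.join() -> '2001:db8::1'
+ # ['0', '0', '0', '0', '0', '0', '0', '0']
+ # -> ['', '', '']  # ':'.join() -> '::'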
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int=None):
+ """Turns a 128-bit integer into hexadecimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ A string, the hexadecimal representation of the address.
+
+ Raises:
+ ValueError: The address is bigger than 128 bits of all ones.
+
+ """
+ if ip_int is None:
+ ip_int = int(cls._ip)
+
+ if ip_int > cls._ALL_ONES:
+ raise ValueError('IPv6 address is too large')
+
+ hex_str = '%032x' % ip_int
+ hextets = ['%x' % int(hex_str[x:x+4], 16) for x in range(0, 32, 4)]
+
+ hextets = cls._compress_hextets(hextets)
+ return ':'.join(hextets)
+
+ def _explode_shorthand_ip_string(self):
+ """Expand a shortened IPv6 address.
+
+ Returns:
+ A string, the expanded IPv6 address.
+
+ """
+ if isinstance(self, IPv6Network):
+ ip_str = str(self.network_address)
+ elif isinstance(self, IPv6Interface):
+ ip_str = str(self.ip)
+ else:
+ ip_str = str(self)
+
+ ip_int = self._ip_int_from_string(ip_str)
+ hex_str = '%032x' % ip_int
+ parts = [hex_str[x:x+4] for x in range(0, 32, 4)]
+ if isinstance(self, (_BaseNetwork, IPv6Interface)):
+ return '%s/%d' % (':'.join(parts), self._prefixlen)
+ return ':'.join(parts)
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv6 address.
+
+ This implements the method described in RFC3596 2.5.
+
+ """
+ reverse_chars = self.exploded[::-1].replace(':', '')
+ return '.'.join(reverse_chars) + '.ip6.arpa'
+
+ @staticmethod
+ def _split_scope_id(ip_str):
+ """Helper function to parse IPv6 string address with scope id.
+
+ See RFC 4007 for details.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ (addr, scope_id) tuple.
+
+ """
+ addr, sep, scope_id = ip_str.partition('%')
+ if not sep:
+ scope_id = None
+ elif not scope_id or '%' in scope_id:
+ raise AddressValueError('Invalid IPv6 address: "%r"' % ip_str)
+ return addr, scope_id
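+
+ # Examples (illustrative, not part of the upstream module):
+ #
+ # >>> _BaseV6._split_scope_id('fe80::1%eth0')
+ # ('fe80::1', 'eth0')
+ # >>> _BaseV6._split_scope_id('fe80::1')
+ # ('fe80::1', None)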
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv6Address(_BaseV6, _BaseAddress):
+
+ """Represent and manipulate single IPv6 Addresses."""
+
+ __slots__ = ('_ip', '_scope_id', '__weakref__')
+
+ def __init__(self, address):
+ """Instantiate a new IPv6 address object.
+
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv6Address('2001:db8::') ==
+ IPv6Address(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Address(int(IPv6Address('2001:db8::'))) ==
+ IPv6Address('2001:db8::')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, int):
+ self._check_int_address(address)
+ self._ip = address
+ self._scope_id = None
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 16)
+ self._ip = int.from_bytes(address, 'big')
+ self._scope_id = None
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = str(address)
+ if '/' in addr_str:
+ raise AddressValueError(f"Unexpected '/' in {address!r}")
+ addr_str, self._scope_id = self._split_scope_id(addr_str)
+
+ self._ip = self._ip_int_from_string(addr_str)
+
+ def __str__(self):
+ ip_str = super().__str__()
+ return ip_str + '%' + self._scope_id if self._scope_id else ip_str
+
+ def __hash__(self):
+ return hash((self._ip, self._scope_id))
+
+ def __eq__(self, other):
+ address_equal = super().__eq__(other)
+ if address_equal is NotImplemented:
+ return NotImplemented
+ if not address_equal:
+ return False
+ return self._scope_id == getattr(other, '_scope_id', None)
+
+ @property
+ def scope_id(self):
+ """Identifier of a particular zone of the address's scope.
+
+ See RFC 4007 for details.
+
+ Returns:
+ A string identifying the zone of the address if specified, else None.
+
+ """
+ return self._scope_id
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v6_int_to_packed(self._ip)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ ipv4_mapped = self.ipv4_mapped
+ if ipv4_mapped is not None:
+ return ipv4_mapped.is_multicast
+ return self in self._constants._multicast_network
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ ipv4_mapped = self.ipv4_mapped
+ if ipv4_mapped is not None:
+ return ipv4_mapped.is_reserved
+ return any(self in x for x in self._constants._reserved_networks)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ ipv4_mapped = self.ipv4_mapped
+ if ipv4_mapped is not None:
+ return ipv4_mapped.is_link_local
+ return self in self._constants._linklocal_network
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return self in self._constants._sitelocal_network
+
+ @property
+ @functools.lru_cache()
+ def is_private(self):
+ """``True`` if the address is defined as not globally reachable by
+ iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
+ (for IPv6) with the following exceptions:
+
+ * ``is_private`` is ``False`` for ``100.64.0.0/10``
+    * For IPv4-mapped IPv6 addresses, the ``is_private`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+
+ address.is_private == address.ipv4_mapped.is_private
+
+ ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10``
+ IPv4 range where they are both ``False``.
+ """
+ ipv4_mapped = self.ipv4_mapped
+ if ipv4_mapped is not None:
+ return ipv4_mapped.is_private
+ return (
+ any(self in net for net in self._constants._private_networks)
+ and all(self not in net for net in self._constants._private_networks_exceptions)
+ )
+
+ @property
+ def is_global(self):
+ """``True`` if the address is defined as globally reachable by
+ iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
+ (for IPv6) with the following exception:
+
+    For IPv4-mapped IPv6 addresses, the ``is_global`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+
+ address.is_global == address.ipv4_mapped.is_global
+
+ ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10``
+ IPv4 range where they are both ``False``.
+ """
+ ipv4_mapped = self.ipv4_mapped
+ if ipv4_mapped is not None:
+ return ipv4_mapped.is_global
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ ipv4_mapped = self.ipv4_mapped
+ if ipv4_mapped is not None:
+ return ipv4_mapped.is_unspecified
+ return self._ip == 0
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ ipv4_mapped = self.ipv4_mapped
+ if ipv4_mapped is not None:
+ return ipv4_mapped.is_loopback
+ return self._ip == 1
+
+ @property
+ def ipv4_mapped(self):
+ """Return the IPv4 mapped address.
+
+ Returns:
+ If the IPv6 address is a v4 mapped address, return the
+ IPv4 mapped address. Return None otherwise.
+
+ """
+ if (self._ip >> 32) != 0xFFFF:
+ return None
+ return IPv4Address(self._ip & 0xFFFFFFFF)
+
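+    # Illustrative sketch: the shift test above recognizes the ::ffff:0:0/96
+    # prefix; anything else yields None.
+    #
+    #   >>> IPv6Address('::ffff:192.0.2.1').ipv4_mapped
+    #   IPv4Address('192.0.2.1')
+    #   >>> IPv6Address('2001:db8::1').ipv4_mapped is None
+    #   True
+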
+ @property
+ def teredo(self):
+ """Tuple of embedded teredo IPs.
+
+ Returns:
+ Tuple of the (server, client) IPs or None if the address
+ doesn't appear to be a teredo address (doesn't start with
+ 2001::/32)
+
+ """
+ if (self._ip >> 96) != 0x20010000:
+ return None
+ return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+ IPv4Address(~self._ip & 0xFFFFFFFF))
+
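+    # Illustrative sketch, using the well-known Teredo example address from
+    # RFC 4380: the server IPv4 sits in bits 64..95 and the client IPv4 is
+    # stored bit-inverted in the low 32 bits (hence the ~ above).
+    #
+    #   >>> IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
+    #   (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))
+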
+ @property
+ def sixtofour(self):
+ """Return the IPv4 6to4 embedded address.
+
+ Returns:
+ The IPv4 6to4-embedded address if present or None if the
+ address doesn't appear to contain a 6to4 embedded address.
+
+ """
+ if (self._ip >> 112) != 0x2002:
+ return None
+ return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
+
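+    # Illustrative sketch: a 6to4 address embeds the IPv4 address directly
+    # after the 2002::/16 prefix, i.e. in bits 80..111, matching the shift
+    # and mask above.
+    #
+    #   >>> IPv6Address('2002:c000:204::').sixtofour
+    #   IPv4Address('192.0.2.4')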
+
+class IPv6Interface(IPv6Address):
+
+ def __init__(self, address):
+ addr, mask = self._split_addr_prefix(address)
+
+ IPv6Address.__init__(self, addr)
+ self.network = IPv6Network((addr, mask), strict=False)
+ self.netmask = self.network.netmask
+ self._prefixlen = self.network._prefixlen
+
+ @functools.cached_property
+ def hostmask(self):
+ return self.network.hostmask
+
+ def __str__(self):
+ return '%s/%d' % (super().__str__(),
+ self._prefixlen)
+
+ def __eq__(self, other):
+ address_equal = IPv6Address.__eq__(self, other)
+ if address_equal is NotImplemented or not address_equal:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv6Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return address_less
+ try:
+ return (self.network < other.network or
+ self.network == other.network and address_less)
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return hash((self._ip, self._prefixlen, int(self.network.network_address)))
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv6Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.hostmask)
+
+ @property
+ def is_unspecified(self):
+ return self._ip == 0 and self.network.is_unspecified
+
+ @property
+ def is_loopback(self):
+ return super().is_loopback and self.network.is_loopback
+
+
+class IPv6Network(_BaseV6, _BaseNetwork):
+
+ """This class represents and manipulates 128-bit IPv6 networks.
+
+ Attributes: [examples for IPv6('2001:db8::1000/124')]
+ .network_address: IPv6Address('2001:db8::1000')
+ .hostmask: IPv6Address('::f')
+ .broadcast_address: IPv6Address('2001:db8::100f')
+ .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
+ .prefixlen: 124
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv6Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv6 Network object.
+
+ Args:
+ address: A string or integer representing the IPv6 network or the
+ IP and prefix/netmask.
+ '2001:db8::/128'
+ '2001:db8:0000:0000:0000:0000:0000:0000/128'
+ '2001:db8::'
+ are all functionally the same in IPv6. That is to say,
+ failing to provide a subnetmask will create an object with
+ a mask of /128.
+
+ Additionally, an integer can be passed, so
+ IPv6Network('2001:db8::') ==
+ IPv6Network(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Network(int(IPv6Network('2001:db8::'))) ==
+ IPv6Network('2001:db8::')
+
+            strict: A boolean. If true, ensure that we have been passed
+              a true network address, e.g. 2001:db8::1000/124, and not an
+              IP address on a network, e.g. 2001:db8::1/124.
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv6 address.
+ ValueError: If strict was True and a network address was not
+ supplied.
+ """
+ addr, mask = self._split_addr_prefix(address)
+
+ self.network_address = IPv6Address(addr)
+ self.netmask, self._prefixlen = self._make_netmask(mask)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError('%s has host bits set' % self)
+ else:
+ self.network_address = IPv6Address(packed &
+ int(self.netmask))
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+ elif self._prefixlen == self._max_prefixlen:
+ self.hosts = lambda: [IPv6Address(addr)]
+
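+    # A minimal sketch of the strict flag handled above: host bits raise
+    # under strict=True (the default) and are masked away otherwise.
+    #
+    #   >>> IPv6Network('2001:db8::1/124', strict=False).network_address
+    #   IPv6Address('2001:db8::')
+    #   >>> IPv6Network('2001:db8::1/124')
+    #   Traceback (most recent call last):
+    #     ...
+    #   ValueError: 2001:db8::1/124 has host bits set
+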
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the
+ Subnet-Router anycast address.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in range(network + 1, broadcast + 1):
+ yield self._address_class(x)
+
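+    # Illustrative sketch: hosts() starts one past the network address (the
+    # Subnet-Router anycast address) but, unlike IPv4, includes the highest
+    # address, since IPv6 has no broadcast.
+    #
+    #   >>> list(IPv6Network('2001:db8::/126').hosts())
+    #   [IPv6Address('2001:db8::1'), IPv6Address('2001:db8::2'), IPv6Address('2001:db8::3')]
+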
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return (self.network_address.is_site_local and
+ self.broadcast_address.is_site_local)
+
+
+class _IPv6Constants:
+
+ _linklocal_network = IPv6Network('fe80::/10')
+
+ _multicast_network = IPv6Network('ff00::/8')
+
+ # Not globally reachable address blocks listed on
+ # https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
+ _private_networks = [
+ IPv6Network('::1/128'),
+ IPv6Network('::/128'),
+ IPv6Network('::ffff:0:0/96'),
+ IPv6Network('64:ff9b:1::/48'),
+ IPv6Network('100::/64'),
+ IPv6Network('2001::/23'),
+ IPv6Network('2001:db8::/32'),
+ # IANA says N/A, let's consider it not globally reachable to be safe
+ IPv6Network('2002::/16'),
+ # RFC 9637: https://www.rfc-editor.org/rfc/rfc9637.html#section-6-2.2
+ IPv6Network('3fff::/20'),
+ IPv6Network('fc00::/7'),
+ IPv6Network('fe80::/10'),
+ ]
+
+ _private_networks_exceptions = [
+ IPv6Network('2001:1::1/128'),
+ IPv6Network('2001:1::2/128'),
+ IPv6Network('2001:3::/32'),
+ IPv6Network('2001:4:112::/48'),
+ IPv6Network('2001:20::/28'),
+ IPv6Network('2001:30::/28'),
+ ]
+
+ _reserved_networks = [
+ IPv6Network('::/8'), IPv6Network('100::/8'),
+ IPv6Network('200::/7'), IPv6Network('400::/6'),
+ IPv6Network('800::/5'), IPv6Network('1000::/4'),
+ IPv6Network('4000::/3'), IPv6Network('6000::/3'),
+ IPv6Network('8000::/3'), IPv6Network('A000::/3'),
+ IPv6Network('C000::/3'), IPv6Network('E000::/4'),
+ IPv6Network('F000::/5'), IPv6Network('F800::/6'),
+ IPv6Network('FE00::/9'),
+ ]
+
+ _sitelocal_network = IPv6Network('fec0::/10')
+
+
+IPv6Address._constants = _IPv6Constants
diff --git a/infer_4_37_2/lib/python3.10/linecache.py b/infer_4_37_2/lib/python3.10/linecache.py
new file mode 100644
index 0000000000000000000000000000000000000000..97644a8e3794e17151a415bb92af22cda8d595bb
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/linecache.py
@@ -0,0 +1,182 @@
+"""Cache lines from Python source files.
+
+This is intended to read lines from imported modules -- hence if a filename
+is not found, it will look down the module search path for a file by
+that name.
+"""
+
+import functools
+import sys
+import os
+import tokenize
+
+__all__ = ["getline", "clearcache", "checkcache", "lazycache"]
+
+
+# The cache. Maps filenames to either a thunk which will provide source code,
+# or a tuple (size, mtime, lines, fullname) once loaded.
+cache = {}
+
+
+def clearcache():
+ """Clear the cache entirely."""
+ cache.clear()
+
+
+def getline(filename, lineno, module_globals=None):
+ """Get a line for a Python source file from the cache.
+ Update the cache if it doesn't contain an entry for this file already."""
+
+ lines = getlines(filename, module_globals)
+ if 1 <= lineno <= len(lines):
+ return lines[lineno - 1]
+ return ''
+
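+# A minimal usage sketch (illustrative): lines are fetched by 1-based
+# number, and out-of-range requests come back as the empty string.
+#
+#   >>> import linecache
+#   >>> linecache.getline(linecache.__file__, 1)
+#   '"""Cache lines from Python source files.\n'
+#   >>> linecache.getline(linecache.__file__, 10**9)
+#   ''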
+
+def getlines(filename, module_globals=None):
+ """Get the lines for a Python source file from the cache.
+ Update the cache if it doesn't contain an entry for this file already."""
+
+ if filename in cache:
+ entry = cache[filename]
+ if len(entry) != 1:
+ return cache[filename][2]
+
+ try:
+ return updatecache(filename, module_globals)
+ except MemoryError:
+ clearcache()
+ return []
+
+
+def checkcache(filename=None):
+ """Discard cache entries that are out of date.
+ (This is not checked upon each call!)"""
+
+ if filename is None:
+ filenames = list(cache.keys())
+ elif filename in cache:
+ filenames = [filename]
+ else:
+ return
+
+ for filename in filenames:
+ entry = cache[filename]
+ if len(entry) == 1:
+ # lazy cache entry, leave it lazy.
+ continue
+ size, mtime, lines, fullname = entry
+ if mtime is None:
+ continue # no-op for files loaded via a __loader__
+ try:
+ stat = os.stat(fullname)
+ except OSError:
+ cache.pop(filename, None)
+ continue
+ if size != stat.st_size or mtime != stat.st_mtime:
+ cache.pop(filename, None)
+
+
+def updatecache(filename, module_globals=None):
+ """Update a cache entry and return its list of lines.
+ If something's wrong, print a message, discard the cache entry,
+ and return an empty list."""
+
+ if filename in cache:
+ if len(cache[filename]) != 1:
+ cache.pop(filename, None)
+ if not filename or (filename.startswith('<') and filename.endswith('>')):
+ return []
+
+ fullname = filename
+ try:
+ stat = os.stat(fullname)
+ except OSError:
+ basename = filename
+
+        # Resolve a lazy-loader-based lookup if one is registered;
+        # otherwise try the lookup on disk right now.
+ if lazycache(filename, module_globals):
+ try:
+ data = cache[filename][0]()
+ except (ImportError, OSError):
+ pass
+ else:
+ if data is None:
+ # No luck, the PEP302 loader cannot find the source
+ # for this module.
+ return []
+ cache[filename] = (
+ len(data),
+ None,
+ [line + '\n' for line in data.splitlines()],
+ fullname
+ )
+ return cache[filename][2]
+
+ # Try looking through the module search path, which is only useful
+ # when handling a relative filename.
+ if os.path.isabs(filename):
+ return []
+
+ for dirname in sys.path:
+ try:
+ fullname = os.path.join(dirname, basename)
+ except (TypeError, AttributeError):
+ # Not sufficiently string-like to do anything useful with.
+ continue
+ try:
+ stat = os.stat(fullname)
+ break
+ except OSError:
+ pass
+ else:
+ return []
+ try:
+ with tokenize.open(fullname) as fp:
+ lines = fp.readlines()
+ except (OSError, UnicodeDecodeError, SyntaxError):
+ return []
+ if lines and not lines[-1].endswith('\n'):
+ lines[-1] += '\n'
+ size, mtime = stat.st_size, stat.st_mtime
+ cache[filename] = size, mtime, lines, fullname
+ return lines
+
+
+def lazycache(filename, module_globals):
+ """Seed the cache for filename with module_globals.
+
+ The module loader will be asked for the source only when getlines is
+ called, not immediately.
+
+ If there is an entry in the cache already, it is not altered.
+
+ :return: True if a lazy load is registered in the cache,
+ otherwise False. To register such a load a module loader with a
+ get_source method must be found, the filename must be a cacheable
+ filename, and the filename must not be already cached.
+ """
+ if filename in cache:
+ if len(cache[filename]) == 1:
+ return True
+ else:
+ return False
+ if not filename or (filename.startswith('<') and filename.endswith('>')):
+ return False
+ # Try for a __loader__, if available
+ if module_globals and '__name__' in module_globals:
+ name = module_globals['__name__']
+ if (loader := module_globals.get('__loader__')) is None:
+ if spec := module_globals.get('__spec__'):
+ try:
+ loader = spec.loader
+ except AttributeError:
+ pass
+ get_source = getattr(loader, 'get_source', None)
+
+ if name and get_source:
+ get_lines = functools.partial(get_source, name)
+ cache[filename] = (get_lines,)
+ return True
+ return False
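+
+# Illustrative sketch (the filename below is an assumed demo name):
+# lazycache() only stores a thunk; the loader's get_source() is not
+# invoked until getlines() actually needs the text.
+#
+#   >>> import linecache, os
+#   >>> linecache.lazycache('fake_name.py', vars(os))
+#   True
+#   >>> len(linecache.cache['fake_name.py'])    # still the one-element thunk
+#   1
+#   >>> lines = linecache.getlines('fake_name.py', vars(os))  # loads now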
diff --git a/infer_4_37_2/lib/python3.10/locale.py b/infer_4_37_2/lib/python3.10/locale.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d4f51929923f6f865dd774ddc5c7d601f540816
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/locale.py
@@ -0,0 +1,1761 @@
+"""Locale support module.
+
+The module provides low-level access to the C lib's locale APIs and adds high
+level number formatting APIs as well as a locale aliasing engine to complement
+these.
+
+The aliasing engine includes support for many commonly used locale names and
+maps them to values suitable for passing to the C lib's setlocale() function. It
+also includes default encodings for all supported locale names.
+
+"""
+
+import sys
+import encodings
+import encodings.aliases
+import re
+import _collections_abc
+from builtins import str as _builtin_str
+import functools
+
+# Try importing the _locale module.
+#
+# If this fails, fall back on a basic 'C' locale emulation.
+
+# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
+# trying the import. So __all__ is also fiddled at the end of the file.
+__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
+ "setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm",
+ "str", "atof", "atoi", "format", "format_string", "currency",
+ "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
+ "LC_NUMERIC", "LC_ALL", "CHAR_MAX"]
+
+def _strcoll(a,b):
+ """ strcoll(string,string) -> int.
+ Compares two strings according to the locale.
+ """
+ return (a > b) - (a < b)
+
+def _strxfrm(s):
+ """ strxfrm(string) -> string.
+        Returns a transformed string so that comparing transformed strings gives locale-aware results.
+ """
+ return s
+
+try:
+
+ from _locale import *
+
+except ImportError:
+
+ # Locale emulation
+
+ CHAR_MAX = 127
+ LC_ALL = 6
+ LC_COLLATE = 3
+ LC_CTYPE = 0
+ LC_MESSAGES = 5
+ LC_MONETARY = 4
+ LC_NUMERIC = 1
+ LC_TIME = 2
+ Error = ValueError
+
+ def localeconv():
+ """ localeconv() -> dict.
+ Returns numeric and monetary locale-specific parameters.
+ """
+ # 'C' locale default values
+ return {'grouping': [127],
+ 'currency_symbol': '',
+ 'n_sign_posn': 127,
+ 'p_cs_precedes': 127,
+ 'n_cs_precedes': 127,
+ 'mon_grouping': [],
+ 'n_sep_by_space': 127,
+ 'decimal_point': '.',
+ 'negative_sign': '',
+ 'positive_sign': '',
+ 'p_sep_by_space': 127,
+ 'int_curr_symbol': '',
+ 'p_sign_posn': 127,
+ 'thousands_sep': '',
+ 'mon_thousands_sep': '',
+ 'frac_digits': 127,
+ 'mon_decimal_point': '',
+ 'int_frac_digits': 127}
+
+ def setlocale(category, value=None):
+ """ setlocale(integer,string=None) -> string.
+ Activates/queries locale processing.
+ """
+ if value not in (None, '', 'C'):
+ raise Error('_locale emulation only supports "C" locale')
+ return 'C'
+
+# These may or may not exist in _locale, so be sure to set them.
+if 'strxfrm' not in globals():
+ strxfrm = _strxfrm
+if 'strcoll' not in globals():
+ strcoll = _strcoll
+
+
+_localeconv = localeconv
+
+# With this dict, you can override some items of localeconv's return value.
+# This is useful for testing purposes.
+_override_localeconv = {}
+
+@functools.wraps(_localeconv)
+def localeconv():
+ d = _localeconv()
+ if _override_localeconv:
+ d.update(_override_localeconv)
+ return d
+
+
+### Number formatting APIs
+
+# Author: Martin von Loewis
+# improved by Georg Brandl
+
+# Iterate over grouping intervals
+def _grouping_intervals(grouping):
+ last_interval = None
+ for interval in grouping:
+        # CHAR_MAX signals the end of the grouping sequence; we are done
+ if interval == CHAR_MAX:
+ return
+ # 0: re-use last group ad infinitum
+ if interval == 0:
+ if last_interval is None:
+ raise ValueError("invalid grouping")
+ while True:
+ yield last_interval
+ yield interval
+ last_interval = interval
+
+#perform the grouping from right to left
+def _group(s, monetary=False):
+ conv = localeconv()
+ thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
+ grouping = conv[monetary and 'mon_grouping' or 'grouping']
+ if not grouping:
+ return (s, 0)
+ if s[-1] == ' ':
+ stripped = s.rstrip()
+ right_spaces = s[len(stripped):]
+ s = stripped
+ else:
+ right_spaces = ''
+ left_spaces = ''
+ groups = []
+ for interval in _grouping_intervals(grouping):
+ if not s or s[-1] not in "0123456789":
+ # only non-digit characters remain (sign, spaces)
+ left_spaces = s
+ s = ''
+ break
+ groups.append(s[-interval:])
+ s = s[:-interval]
+ if s:
+ groups.append(s)
+ groups.reverse()
+ return (
+ left_spaces + thousands_sep.join(groups) + right_spaces,
+ len(thousands_sep) * (len(groups) - 1)
+ )
+
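+# Illustrative sketch of _group (assuming a de_DE locale is installed):
+# digits are grouped right to left, and the second tuple item counts the
+# separator characters that were added.
+#
+#   >>> import locale
+#   >>> locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
+#   'de_DE.UTF-8'
+#   >>> locale._group('123456789')
+#   ('123.456.789', 2)
+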
+# Strip a given amount of excess padding from the given string
+def _strip_padding(s, amount):
+ lpos = 0
+ while amount and s[lpos] == ' ':
+ lpos += 1
+ amount -= 1
+ rpos = len(s) - 1
+ while amount and s[rpos] == ' ':
+ rpos -= 1
+ amount -= 1
+ return s[lpos:rpos+1]
+
+_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
+                         r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
+
+def _format(percent, value, grouping=False, monetary=False, *additional):
+ if additional:
+ formatted = percent % ((value,) + additional)
+ else:
+ formatted = percent % value
+ if percent[-1] in 'eEfFgGdiu':
+ formatted = _localize(formatted, grouping, monetary)
+ return formatted
+
+# Transform formatted as locale number according to the locale settings
+def _localize(formatted, grouping=False, monetary=False):
+ # floats and decimal ints need special action!
+ if '.' in formatted:
+ seps = 0
+ parts = formatted.split('.')
+ if grouping:
+ parts[0], seps = _group(parts[0], monetary=monetary)
+ decimal_point = localeconv()[monetary and 'mon_decimal_point'
+ or 'decimal_point']
+ formatted = decimal_point.join(parts)
+ if seps:
+ formatted = _strip_padding(formatted, seps)
+ else:
+ seps = 0
+ if grouping:
+ formatted, seps = _group(formatted, monetary=monetary)
+ if seps:
+ formatted = _strip_padding(formatted, seps)
+ return formatted
+
+def format_string(f, val, grouping=False, monetary=False):
+ """Formats a string in the same way that the % formatting would use,
+ but takes the current locale into account.
+
+ Grouping is applied if the third parameter is true.
+ Conversion uses monetary thousands separator and grouping strings if
+ forth parameter monetary is true."""
+ percents = list(_percent_re.finditer(f))
+ new_f = _percent_re.sub('%s', f)
+
+ if isinstance(val, _collections_abc.Mapping):
+ new_val = []
+ for perc in percents:
+ if perc.group()[-1]=='%':
+ new_val.append('%')
+ else:
+ new_val.append(_format(perc.group(), val, grouping, monetary))
+ else:
+ if not isinstance(val, tuple):
+ val = (val,)
+ new_val = []
+ i = 0
+ for perc in percents:
+ if perc.group()[-1]=='%':
+ new_val.append('%')
+ else:
+ starcount = perc.group('modifiers').count('*')
+ new_val.append(_format(perc.group(),
+ val[i],
+ grouping,
+ monetary,
+ *val[i+1:i+1+starcount]))
+ i += (1 + starcount)
+ val = tuple(new_val)
+
+ return new_f % val
+
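+# A minimal usage sketch (assuming an en_US locale is installed): with
+# grouping enabled, integer and float conversions are routed through
+# _localize() as shown above.
+#
+#   >>> import locale
+#   >>> locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+#   'en_US.UTF-8'
+#   >>> locale.format_string('%d bytes', 1234567, grouping=True)
+#   '1,234,567 bytes'
+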
+def format(percent, value, grouping=False, monetary=False, *additional):
+ """Deprecated, use format_string instead."""
+ import warnings
+ warnings.warn(
+ "This method will be removed in a future version of Python. "
+ "Use 'locale.format_string()' instead.",
+ DeprecationWarning, stacklevel=2
+ )
+
+ match = _percent_re.match(percent)
+    if not match or len(match.group()) != len(percent):
+ raise ValueError(("format() must be given exactly one %%char "
+ "format specifier, %s not valid") % repr(percent))
+ return _format(percent, value, grouping, monetary, *additional)
+
+def currency(val, symbol=True, grouping=False, international=False):
+ """Formats val according to the currency settings
+ in the current locale."""
+ conv = localeconv()
+
+ # check for illegal values
+ digits = conv[international and 'int_frac_digits' or 'frac_digits']
+ if digits == 127:
+ raise ValueError("Currency formatting is not possible using "
+ "the 'C' locale.")
+
+ s = _localize(f'{abs(val):.{digits}f}', grouping, monetary=True)
+ # '<' and '>' are markers if the sign must be inserted between symbol and value
+ s = '<' + s + '>'
+
+ if symbol:
+ smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
+ precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
+ separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
+
+ if precedes:
+ s = smb + (separated and ' ' or '') + s
+ else:
+ if international and smb[-1] == ' ':
+ smb = smb[:-1]
+ s = s + (separated and ' ' or '') + smb
+
+ sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
+ sign = conv[val<0 and 'negative_sign' or 'positive_sign']
+
+ if sign_pos == 0:
+ s = '(' + s + ')'
+ elif sign_pos == 1:
+ s = sign + s
+ elif sign_pos == 2:
+ s = s + sign
+ elif sign_pos == 3:
+ s = s.replace('<', sign)
+ elif sign_pos == 4:
+ s = s.replace('>', sign)
+ else:
+ # the default if nothing specified;
+ # this should be the most fitting sign position
+ s = sign + s
+
+ return s.replace('<', '').replace('>', '')
+
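+# Illustrative sketch (assuming an en_US locale, whose conv values place a
+# preceding '$' and a leading '-'): currency() combines digits, symbol
+# placement, and sign position from localeconv().
+#
+#   >>> import locale
+#   >>> locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+#   'en_US.UTF-8'
+#   >>> locale.currency(-1234.5, grouping=True)
+#   '-$1,234.50'
+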
+def str(val):
+ """Convert float to string, taking the locale into account."""
+ return _format("%.12g", val)
+
+def delocalize(string):
+ "Parses a string as a normalized number according to the locale settings."
+
+ conv = localeconv()
+
+ #First, get rid of the grouping
+ ts = conv['thousands_sep']
+ if ts:
+ string = string.replace(ts, '')
+
+ #next, replace the decimal point with a dot
+ dd = conv['decimal_point']
+ if dd:
+ string = string.replace(dd, '.')
+ return string
+
+def localize(string, grouping=False, monetary=False):
+ """Parses a string as locale number according to the locale settings."""
+ return _localize(string, grouping, monetary)
+
+def atof(string, func=float):
+ "Parses a string as a float according to the locale settings."
+ return func(delocalize(string))
+
+def atoi(string):
+ "Converts a string to an integer according to the locale settings."
+ return int(delocalize(string))
+
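+# A minimal round-trip sketch (assuming a de_DE locale is installed):
+# delocalize() strips grouping and swaps the decimal mark so the builtin
+# float() can parse the result.
+#
+#   >>> import locale
+#   >>> locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
+#   'de_DE.UTF-8'
+#   >>> locale.delocalize('1.234.567,89')
+#   '1234567.89'
+#   >>> locale.atof('1.234.567,89')
+#   1234567.89
+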
+def _test():
+ setlocale(LC_ALL, "")
+ #do grouping
+    s1 = format_string("%d", 123456789, 1)
+ print(s1, "is", atoi(s1))
+ #standard formatting
+ s1 = str(3.14)
+ print(s1, "is", atof(s1))
+
+### Locale name aliasing engine
+
+# Author: Marc-Andre Lemburg, mal@lemburg.com
+# Various tweaks by Fredrik Lundh
+
+# store away the low-level version of setlocale (it's
+# overridden below)
+_setlocale = setlocale
+
+def _replace_encoding(code, encoding):
+ if '.' in code:
+ langname = code[:code.index('.')]
+ else:
+ langname = code
+ # Convert the encoding to a C lib compatible encoding string
+ norm_encoding = encodings.normalize_encoding(encoding)
+ #print('norm encoding: %r' % norm_encoding)
+ norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(),
+ norm_encoding)
+ #print('aliased encoding: %r' % norm_encoding)
+ encoding = norm_encoding
+ norm_encoding = norm_encoding.lower()
+ if norm_encoding in locale_encoding_alias:
+ encoding = locale_encoding_alias[norm_encoding]
+ else:
+ norm_encoding = norm_encoding.replace('_', '')
+ norm_encoding = norm_encoding.replace('-', '')
+ if norm_encoding in locale_encoding_alias:
+ encoding = locale_encoding_alias[norm_encoding]
+ #print('found encoding %r' % encoding)
+ return langname + '.' + encoding
+
+def _append_modifier(code, modifier):
+ if modifier == 'euro':
+ if '.' not in code:
+ return code + '.ISO8859-15'
+ _, _, encoding = code.partition('.')
+ if encoding in ('ISO8859-15', 'UTF-8'):
+ return code
+ if encoding == 'ISO8859-1':
+ return _replace_encoding(code, 'ISO8859-15')
+ return code + '@' + modifier
+
+def normalize(localename):
+
+ """ Returns a normalized locale code for the given locale
+ name.
+
+ The returned locale code is formatted for use with
+ setlocale().
+
+ If normalization fails, the original name is returned
+ unchanged.
+
+ If the given encoding is not known, the function defaults to
+ the default encoding for the locale code just like setlocale()
+ does.
+
+ """
+ # Normalize the locale name and extract the encoding and modifier
+ code = localename.lower()
+ if ':' in code:
+ # ':' is sometimes used as encoding delimiter.
+ code = code.replace(':', '.')
+ if '@' in code:
+ code, modifier = code.split('@', 1)
+ else:
+ modifier = ''
+ if '.' in code:
+ langname, encoding = code.split('.')[:2]
+ else:
+ langname = code
+ encoding = ''
+
+ # First lookup: fullname (possibly with encoding and modifier)
+ lang_enc = langname
+ if encoding:
+ norm_encoding = encoding.replace('-', '')
+ norm_encoding = norm_encoding.replace('_', '')
+ lang_enc += '.' + norm_encoding
+ lookup_name = lang_enc
+ if modifier:
+ lookup_name += '@' + modifier
+ code = locale_alias.get(lookup_name, None)
+ if code is not None:
+ return code
+ #print('first lookup failed')
+
+ if modifier:
+ # Second try: fullname without modifier (possibly with encoding)
+ code = locale_alias.get(lang_enc, None)
+ if code is not None:
+ #print('lookup without modifier succeeded')
+ if '@' not in code:
+ return _append_modifier(code, modifier)
+ if code.split('@', 1)[1].lower() == modifier:
+ return code
+ #print('second lookup failed')
+
+ if encoding:
+ # Third try: langname (without encoding, possibly with modifier)
+ lookup_name = langname
+ if modifier:
+ lookup_name += '@' + modifier
+ code = locale_alias.get(lookup_name, None)
+ if code is not None:
+ #print('lookup without encoding succeeded')
+ if '@' not in code:
+ return _replace_encoding(code, encoding)
+ code, modifier = code.split('@', 1)
+ return _replace_encoding(code, encoding) + '@' + modifier
+
+ if modifier:
+ # Fourth try: langname (without encoding and modifier)
+ code = locale_alias.get(langname, None)
+ if code is not None:
+ #print('lookup without modifier and encoding succeeded')
+ if '@' not in code:
+ code = _replace_encoding(code, encoding)
+ return _append_modifier(code, modifier)
+ code, defmod = code.split('@', 1)
+ if defmod.lower() == modifier:
+ return _replace_encoding(code, encoding) + '@' + defmod
+
+ return localename
+
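+# Illustrative sketch of the lookup cascade above: plain alias resolution,
+# an explicit encoding override, and the @euro fallback to Latin-9.
+#
+#   >>> import locale
+#   >>> locale.normalize('de')
+#   'de_DE.ISO8859-1'
+#   >>> locale.normalize('de_DE.utf8')
+#   'de_DE.UTF-8'
+#   >>> locale.normalize('de_DE@euro')
+#   'de_DE.ISO8859-15'
+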
+def _parse_localename(localename):
+
+ """ Parses the locale code for localename and returns the
+ result as tuple (language code, encoding).
+
+ The localename is normalized and passed through the locale
+ alias engine. A ValueError is raised in case the locale name
+ cannot be parsed.
+
+ The language code corresponds to RFC 1766. code and encoding
+ can be None in case the values cannot be determined or are
+ unknown to this implementation.
+
+ """
+ code = normalize(localename)
+ if '@' in code:
+ # Deal with locale modifiers
+ code, modifier = code.split('@', 1)
+ if modifier == 'euro' and '.' not in code:
+ # Assume Latin-9 for @euro locales. This is bogus,
+ # since some systems may use other encodings for these
+ # locales. Also, we ignore other modifiers.
+ return code, 'iso-8859-15'
+
+ if '.' in code:
+ return tuple(code.split('.')[:2])
+ elif code == 'C':
+ return None, None
+ elif code == 'UTF-8':
+ # On macOS "LC_CTYPE=UTF-8" is a valid locale setting
+ # for getting UTF-8 handling for text.
+ return None, 'UTF-8'
+ raise ValueError('unknown locale: %s' % localename)
+
+def _build_localename(localetuple):
+
+ """ Builds a locale code from the given tuple (language code,
+ encoding).
+
+ No aliasing or normalizing takes place.
+
+ """
+ try:
+ language, encoding = localetuple
+
+ if language is None:
+ language = 'C'
+ if encoding is None:
+ return language
+ else:
+ return language + '.' + encoding
+ except (TypeError, ValueError):
+ raise TypeError('Locale must be None, a string, or an iterable of '
+ 'two strings -- language code, encoding.') from None
+
+def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
+
+ """ Tries to determine the default locale settings and returns
+ them as tuple (language code, encoding).
+
+ According to POSIX, a program which has not called
+ setlocale(LC_ALL, "") runs using the portable 'C' locale.
+ Calling setlocale(LC_ALL, "") lets it use the default locale as
+    defined by the LANG variable. Since we don't want to interfere
+    with the current locale setting, we emulate this behavior by
+    reading the relevant environment variables directly instead.
+
+ To maintain compatibility with other platforms, not only the
+ LANG variable is tested, but a list of variables given as
+ envvars parameter. The first found to be defined will be
+ used. envvars defaults to the search path used in GNU gettext;
+ it must always contain the variable name 'LANG'.
+
+ Except for the code 'C', the language code corresponds to RFC
+ 1766. code and encoding can be None in case the values cannot
+ be determined.
+
+ """
+
+ try:
+ # check if it's supported by the _locale module
+ import _locale
+ code, encoding = _locale._getdefaultlocale()
+ except (ImportError, AttributeError):
+ pass
+ else:
+ # make sure the code/encoding values are valid
+ if sys.platform == "win32" and code and code[:2] == "0x":
+ # map windows language identifier to language name
+ code = windows_locale.get(int(code, 0))
+ # ...add other platform-specific processing here, if
+ # necessary...
+ return code, encoding
+
+ # fall back on POSIX behaviour
+ import os
+ lookup = os.environ.get
+ for variable in envvars:
+ localename = lookup(variable,None)
+ if localename:
+ if variable == 'LANGUAGE':
+ localename = localename.split(':')[0]
+ break
+ else:
+ localename = 'C'
+ return _parse_localename(localename)
+
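+# Illustrative sketch: the POSIX fallback above scans LC_ALL, LC_CTYPE,
+# LANG and LANGUAGE in that order, so with an assumed environment of
+# LANG=en_US.UTF-8 (and nothing else set):
+#
+#   >>> import locale
+#   >>> locale.getdefaultlocale()
+#   ('en_US', 'UTF-8')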
+
+def getlocale(category=LC_CTYPE):
+
+ """ Returns the current setting for the given locale category as
+ tuple (language code, encoding).
+
+        category may be one of the LC_* values except LC_ALL. It
+ defaults to LC_CTYPE.
+
+ Except for the code 'C', the language code corresponds to RFC
+ 1766. code and encoding can be None in case the values cannot
+ be determined.
+
+ """
+ localename = _setlocale(category)
+ if category == LC_ALL and ';' in localename:
+ raise TypeError('category LC_ALL is not supported')
+ return _parse_localename(localename)
+
+def setlocale(category, locale=None):
+
+ """ Set the locale for the given category. The locale can be
+ a string, an iterable of two strings (language code and encoding),
+ or None.
+
+ Iterables are converted to strings using the locale aliasing
+ engine. Locale strings are passed directly to the C lib.
+
+ category may be given as one of the LC_* values.
+
+ """
+ if locale and not isinstance(locale, _builtin_str):
+ # convert to string
+ locale = normalize(_build_localename(locale))
+ return _setlocale(category, locale)
+
+def resetlocale(category=LC_ALL):
+
+ """ Sets the locale for category to the default setting.
+
+ The default setting is determined by calling
+ getdefaultlocale(). category defaults to LC_ALL.
+
+ """
+ _setlocale(category, _build_localename(getdefaultlocale()))
+
+
+try:
+ from _locale import _get_locale_encoding
+except ImportError:
+ def _get_locale_encoding():
+ if hasattr(sys, 'getandroidapilevel'):
+ # On Android langinfo.h and CODESET are missing, and UTF-8 is
+ # always used in mbstowcs() and wcstombs().
+ return 'UTF-8'
+ if sys.flags.utf8_mode:
+ return 'UTF-8'
+ encoding = getdefaultlocale()[1]
+ if encoding is None:
+ # LANG not set, default conservatively to ASCII
+ encoding = 'ascii'
+ return encoding
+
+try:
+ CODESET
+except NameError:
+ def getpreferredencoding(do_setlocale=True):
+ """Return the charset that the user is likely using."""
+ return _get_locale_encoding()
+else:
+ # On Unix, if CODESET is available, use that.
+ def getpreferredencoding(do_setlocale=True):
+ """Return the charset that the user is likely using,
+ according to the system configuration."""
+ if sys.flags.utf8_mode:
+ return 'UTF-8'
+
+ if not do_setlocale:
+ return _get_locale_encoding()
+
+ old_loc = setlocale(LC_CTYPE)
+ try:
+ try:
+ setlocale(LC_CTYPE, "")
+ except Error:
+ pass
+ return _get_locale_encoding()
+ finally:
+ setlocale(LC_CTYPE, old_loc)
+
+
+### Database
+#
+# The following data was extracted from the locale.alias file which
+# comes with X11 and then hand edited removing the explicit encoding
+# definitions and adding some more aliases. The file is usually
+# available as /usr/lib/X11/locale/locale.alias.
+#
+
+#
+# The local_encoding_alias table maps lowercase encoding alias names
+# to C locale encoding names (case-sensitive). Note that normalize()
+# first looks up the encoding in the encodings.aliases dictionary and
+# then applies this mapping to find the correct C lib name for the
+# encoding.
+#
+locale_encoding_alias = {
+
+ # Mappings for non-standard encoding names used in locale names
+ '437': 'C',
+ 'c': 'C',
+ 'en': 'ISO8859-1',
+ 'jis': 'JIS7',
+ 'jis7': 'JIS7',
+ 'ajec': 'eucJP',
+ 'koi8c': 'KOI8-C',
+ 'microsoftcp1251': 'CP1251',
+ 'microsoftcp1255': 'CP1255',
+ 'microsoftcp1256': 'CP1256',
+ '88591': 'ISO8859-1',
+ '88592': 'ISO8859-2',
+ '88595': 'ISO8859-5',
+ '885915': 'ISO8859-15',
+
+ # Mappings from Python codec names to C lib encoding names
+ 'ascii': 'ISO8859-1',
+ 'latin_1': 'ISO8859-1',
+ 'iso8859_1': 'ISO8859-1',
+ 'iso8859_10': 'ISO8859-10',
+ 'iso8859_11': 'ISO8859-11',
+ 'iso8859_13': 'ISO8859-13',
+ 'iso8859_14': 'ISO8859-14',
+ 'iso8859_15': 'ISO8859-15',
+ 'iso8859_16': 'ISO8859-16',
+ 'iso8859_2': 'ISO8859-2',
+ 'iso8859_3': 'ISO8859-3',
+ 'iso8859_4': 'ISO8859-4',
+ 'iso8859_5': 'ISO8859-5',
+ 'iso8859_6': 'ISO8859-6',
+ 'iso8859_7': 'ISO8859-7',
+ 'iso8859_8': 'ISO8859-8',
+ 'iso8859_9': 'ISO8859-9',
+ 'iso2022_jp': 'JIS7',
+ 'shift_jis': 'SJIS',
+ 'tactis': 'TACTIS',
+ 'euc_jp': 'eucJP',
+ 'euc_kr': 'eucKR',
+ 'utf_8': 'UTF-8',
+ 'koi8_r': 'KOI8-R',
+ 'koi8_t': 'KOI8-T',
+ 'koi8_u': 'KOI8-U',
+ 'kz1048': 'RK1048',
+ 'cp1251': 'CP1251',
+ 'cp1255': 'CP1255',
+ 'cp1256': 'CP1256',
+
+ # XXX This list is still incomplete. If you know more
+ # mappings, please file a bug report. Thanks.
+}
+
+for k, v in sorted(locale_encoding_alias.items()):
+ k = k.replace('_', '')
+ locale_encoding_alias.setdefault(k, v)
+
+#
+# The locale_alias table maps lowercase alias names to C locale names
+# (case-sensitive). Encodings are always separated from the locale
+# name using a dot ('.'); they should only be given in case the
+# language name is needed to interpret the given encoding alias
+# correctly (CJK codes often have this need).
+#
+# Note that the normalize() function which uses this tables
+# removes '_' and '-' characters from the encoding part of the
+# locale name before doing the lookup. This saves a lot of
+# space in the table.
+#
+# MAL 2004-12-10:
+# Updated alias mapping to most recent locale.alias file
+# from X.org distribution using makelocalealias.py.
+#
+# These are the differences compared to the old mapping (Python 2.4
+# and older):
+#
+# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
+# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
+# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
+# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
+# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
+# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
+# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
+# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
+# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
+# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
+# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
+# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
+# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
+# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
+# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
+# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
+# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
+# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
+# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
+# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
+# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
+# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
+#
+# MAL 2008-05-30:
+# Updated alias mapping to most recent locale.alias file
+# from X.org distribution using makelocalealias.py.
+#
+# These are the differences compared to the old mapping (Python 2.5
+# and older):
+#
+# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2'
+# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
+# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
+# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2'
+# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
+# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
+# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
+# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
+# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
+# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
+# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2'
+# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
+# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
+# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
+# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
+# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
+# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
+# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8'
+# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
+#
+# AP 2010-04-12:
+# Updated alias mapping to most recent locale.alias file
+# from X.org distribution using makelocalealias.py.
+#
+# These are the differences compared to the old mapping (Python 2.6.5
+# and older):
+#
+# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
+# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
+# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
+# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
+# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
+# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
+# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
+# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
+# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin'
+# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
+# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin'
+# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8'
+# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
+#
+# SS 2013-12-20:
+# Updated alias mapping to most recent locale.alias file
+# from X.org distribution using makelocalealias.py.
+#
+# These are the differences compared to the old mapping (Python 3.3.3
+# and older):
+#
+# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
+# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
+# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
+# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
+# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
+# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
+# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8'
+# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
+# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8'
+# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
+# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
+#
+# SS 2014-10-01:
+# Updated alias mapping with glibc 2.19 supported locales.
+#
+# SS 2018-05-05:
+# Updated alias mapping with glibc 2.27 supported locales.
+#
+# These are the differences compared to the old mapping (Python 3.6.5
+# and older):
+#
+# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia'
+# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154'
+# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R'
+
+locale_alias = {
+ 'a3': 'az_AZ.KOI8-C',
+ 'a3_az': 'az_AZ.KOI8-C',
+ 'a3_az.koic': 'az_AZ.KOI8-C',
+ 'aa_dj': 'aa_DJ.ISO8859-1',
+ 'aa_er': 'aa_ER.UTF-8',
+ 'aa_et': 'aa_ET.UTF-8',
+ 'af': 'af_ZA.ISO8859-1',
+ 'af_za': 'af_ZA.ISO8859-1',
+ 'agr_pe': 'agr_PE.UTF-8',
+ 'ak_gh': 'ak_GH.UTF-8',
+ 'am': 'am_ET.UTF-8',
+ 'am_et': 'am_ET.UTF-8',
+ 'american': 'en_US.ISO8859-1',
+ 'an_es': 'an_ES.ISO8859-15',
+ 'anp_in': 'anp_IN.UTF-8',
+ 'ar': 'ar_AA.ISO8859-6',
+ 'ar_aa': 'ar_AA.ISO8859-6',
+ 'ar_ae': 'ar_AE.ISO8859-6',
+ 'ar_bh': 'ar_BH.ISO8859-6',
+ 'ar_dz': 'ar_DZ.ISO8859-6',
+ 'ar_eg': 'ar_EG.ISO8859-6',
+ 'ar_in': 'ar_IN.UTF-8',
+ 'ar_iq': 'ar_IQ.ISO8859-6',
+ 'ar_jo': 'ar_JO.ISO8859-6',
+ 'ar_kw': 'ar_KW.ISO8859-6',
+ 'ar_lb': 'ar_LB.ISO8859-6',
+ 'ar_ly': 'ar_LY.ISO8859-6',
+ 'ar_ma': 'ar_MA.ISO8859-6',
+ 'ar_om': 'ar_OM.ISO8859-6',
+ 'ar_qa': 'ar_QA.ISO8859-6',
+ 'ar_sa': 'ar_SA.ISO8859-6',
+ 'ar_sd': 'ar_SD.ISO8859-6',
+ 'ar_ss': 'ar_SS.UTF-8',
+ 'ar_sy': 'ar_SY.ISO8859-6',
+ 'ar_tn': 'ar_TN.ISO8859-6',
+ 'ar_ye': 'ar_YE.ISO8859-6',
+ 'arabic': 'ar_AA.ISO8859-6',
+ 'as': 'as_IN.UTF-8',
+ 'as_in': 'as_IN.UTF-8',
+ 'ast_es': 'ast_ES.ISO8859-15',
+ 'ayc_pe': 'ayc_PE.UTF-8',
+ 'az': 'az_AZ.ISO8859-9E',
+ 'az_az': 'az_AZ.ISO8859-9E',
+ 'az_az.iso88599e': 'az_AZ.ISO8859-9E',
+ 'az_ir': 'az_IR.UTF-8',
+ 'be': 'be_BY.CP1251',
+ 'be@latin': 'be_BY.UTF-8@latin',
+ 'be_bg.utf8': 'bg_BG.UTF-8',
+ 'be_by': 'be_BY.CP1251',
+ 'be_by@latin': 'be_BY.UTF-8@latin',
+ 'bem_zm': 'bem_ZM.UTF-8',
+ 'ber_dz': 'ber_DZ.UTF-8',
+ 'ber_ma': 'ber_MA.UTF-8',
+ 'bg': 'bg_BG.CP1251',
+ 'bg_bg': 'bg_BG.CP1251',
+ 'bhb_in.utf8': 'bhb_IN.UTF-8',
+ 'bho_in': 'bho_IN.UTF-8',
+ 'bho_np': 'bho_NP.UTF-8',
+ 'bi_vu': 'bi_VU.UTF-8',
+ 'bn_bd': 'bn_BD.UTF-8',
+ 'bn_in': 'bn_IN.UTF-8',
+ 'bo_cn': 'bo_CN.UTF-8',
+ 'bo_in': 'bo_IN.UTF-8',
+ 'bokmal': 'nb_NO.ISO8859-1',
+ 'bokm\xe5l': 'nb_NO.ISO8859-1',
+ 'br': 'br_FR.ISO8859-1',
+ 'br_fr': 'br_FR.ISO8859-1',
+ 'brx_in': 'brx_IN.UTF-8',
+ 'bs': 'bs_BA.ISO8859-2',
+ 'bs_ba': 'bs_BA.ISO8859-2',
+ 'bulgarian': 'bg_BG.CP1251',
+ 'byn_er': 'byn_ER.UTF-8',
+ 'c': 'C',
+ 'c-french': 'fr_CA.ISO8859-1',
+ 'c.ascii': 'C',
+ 'c.en': 'C',
+ 'c.iso88591': 'en_US.ISO8859-1',
+ 'c.utf8': 'en_US.UTF-8',
+ 'c_c': 'C',
+ 'c_c.c': 'C',
+ 'ca': 'ca_ES.ISO8859-1',
+ 'ca_ad': 'ca_AD.ISO8859-1',
+ 'ca_es': 'ca_ES.ISO8859-1',
+ 'ca_es@valencia': 'ca_ES.UTF-8@valencia',
+ 'ca_fr': 'ca_FR.ISO8859-1',
+ 'ca_it': 'ca_IT.ISO8859-1',
+ 'catalan': 'ca_ES.ISO8859-1',
+ 'ce_ru': 'ce_RU.UTF-8',
+ 'cextend': 'en_US.ISO8859-1',
+ 'chinese-s': 'zh_CN.eucCN',
+ 'chinese-t': 'zh_TW.eucTW',
+ 'chr_us': 'chr_US.UTF-8',
+ 'ckb_iq': 'ckb_IQ.UTF-8',
+ 'cmn_tw': 'cmn_TW.UTF-8',
+ 'crh_ua': 'crh_UA.UTF-8',
+ 'croatian': 'hr_HR.ISO8859-2',
+ 'cs': 'cs_CZ.ISO8859-2',
+ 'cs_cs': 'cs_CZ.ISO8859-2',
+ 'cs_cz': 'cs_CZ.ISO8859-2',
+ 'csb_pl': 'csb_PL.UTF-8',
+ 'cv_ru': 'cv_RU.UTF-8',
+ 'cy': 'cy_GB.ISO8859-1',
+ 'cy_gb': 'cy_GB.ISO8859-1',
+ 'cz': 'cs_CZ.ISO8859-2',
+ 'cz_cz': 'cs_CZ.ISO8859-2',
+ 'czech': 'cs_CZ.ISO8859-2',
+ 'da': 'da_DK.ISO8859-1',
+ 'da_dk': 'da_DK.ISO8859-1',
+ 'danish': 'da_DK.ISO8859-1',
+ 'dansk': 'da_DK.ISO8859-1',
+ 'de': 'de_DE.ISO8859-1',
+ 'de_at': 'de_AT.ISO8859-1',
+ 'de_be': 'de_BE.ISO8859-1',
+ 'de_ch': 'de_CH.ISO8859-1',
+ 'de_de': 'de_DE.ISO8859-1',
+ 'de_it': 'de_IT.ISO8859-1',
+ 'de_li.utf8': 'de_LI.UTF-8',
+ 'de_lu': 'de_LU.ISO8859-1',
+ 'deutsch': 'de_DE.ISO8859-1',
+ 'doi_in': 'doi_IN.UTF-8',
+ 'dutch': 'nl_NL.ISO8859-1',
+ 'dutch.iso88591': 'nl_BE.ISO8859-1',
+ 'dv_mv': 'dv_MV.UTF-8',
+ 'dz_bt': 'dz_BT.UTF-8',
+ 'ee': 'ee_EE.ISO8859-4',
+ 'ee_ee': 'ee_EE.ISO8859-4',
+ 'eesti': 'et_EE.ISO8859-1',
+ 'el': 'el_GR.ISO8859-7',
+ 'el_cy': 'el_CY.ISO8859-7',
+ 'el_gr': 'el_GR.ISO8859-7',
+ 'el_gr@euro': 'el_GR.ISO8859-15',
+ 'en': 'en_US.ISO8859-1',
+ 'en_ag': 'en_AG.UTF-8',
+ 'en_au': 'en_AU.ISO8859-1',
+ 'en_be': 'en_BE.ISO8859-1',
+ 'en_bw': 'en_BW.ISO8859-1',
+ 'en_ca': 'en_CA.ISO8859-1',
+ 'en_dk': 'en_DK.ISO8859-1',
+ 'en_dl.utf8': 'en_DL.UTF-8',
+ 'en_gb': 'en_GB.ISO8859-1',
+ 'en_hk': 'en_HK.ISO8859-1',
+ 'en_ie': 'en_IE.ISO8859-1',
+ 'en_il': 'en_IL.UTF-8',
+ 'en_in': 'en_IN.ISO8859-1',
+ 'en_ng': 'en_NG.UTF-8',
+ 'en_nz': 'en_NZ.ISO8859-1',
+ 'en_ph': 'en_PH.ISO8859-1',
+ 'en_sc.utf8': 'en_SC.UTF-8',
+ 'en_sg': 'en_SG.ISO8859-1',
+ 'en_uk': 'en_GB.ISO8859-1',
+ 'en_us': 'en_US.ISO8859-1',
+ 'en_us@euro@euro': 'en_US.ISO8859-15',
+ 'en_za': 'en_ZA.ISO8859-1',
+ 'en_zm': 'en_ZM.UTF-8',
+ 'en_zw': 'en_ZW.ISO8859-1',
+ 'en_zw.utf8': 'en_ZS.UTF-8',
+ 'eng_gb': 'en_GB.ISO8859-1',
+ 'english': 'en_EN.ISO8859-1',
+ 'english.iso88591': 'en_US.ISO8859-1',
+ 'english_uk': 'en_GB.ISO8859-1',
+ 'english_united-states': 'en_US.ISO8859-1',
+ 'english_united-states.437': 'C',
+ 'english_us': 'en_US.ISO8859-1',
+ 'eo': 'eo_XX.ISO8859-3',
+ 'eo.utf8': 'eo.UTF-8',
+ 'eo_eo': 'eo_EO.ISO8859-3',
+ 'eo_us.utf8': 'eo_US.UTF-8',
+ 'eo_xx': 'eo_XX.ISO8859-3',
+ 'es': 'es_ES.ISO8859-1',
+ 'es_ar': 'es_AR.ISO8859-1',
+ 'es_bo': 'es_BO.ISO8859-1',
+ 'es_cl': 'es_CL.ISO8859-1',
+ 'es_co': 'es_CO.ISO8859-1',
+ 'es_cr': 'es_CR.ISO8859-1',
+ 'es_cu': 'es_CU.UTF-8',
+ 'es_do': 'es_DO.ISO8859-1',
+ 'es_ec': 'es_EC.ISO8859-1',
+ 'es_es': 'es_ES.ISO8859-1',
+ 'es_gt': 'es_GT.ISO8859-1',
+ 'es_hn': 'es_HN.ISO8859-1',
+ 'es_mx': 'es_MX.ISO8859-1',
+ 'es_ni': 'es_NI.ISO8859-1',
+ 'es_pa': 'es_PA.ISO8859-1',
+ 'es_pe': 'es_PE.ISO8859-1',
+ 'es_pr': 'es_PR.ISO8859-1',
+ 'es_py': 'es_PY.ISO8859-1',
+ 'es_sv': 'es_SV.ISO8859-1',
+ 'es_us': 'es_US.ISO8859-1',
+ 'es_uy': 'es_UY.ISO8859-1',
+ 'es_ve': 'es_VE.ISO8859-1',
+ 'estonian': 'et_EE.ISO8859-1',
+ 'et': 'et_EE.ISO8859-15',
+ 'et_ee': 'et_EE.ISO8859-15',
+ 'eu': 'eu_ES.ISO8859-1',
+ 'eu_es': 'eu_ES.ISO8859-1',
+ 'eu_fr': 'eu_FR.ISO8859-1',
+ 'fa': 'fa_IR.UTF-8',
+ 'fa_ir': 'fa_IR.UTF-8',
+ 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
+ 'ff_sn': 'ff_SN.UTF-8',
+ 'fi': 'fi_FI.ISO8859-15',
+ 'fi_fi': 'fi_FI.ISO8859-15',
+ 'fil_ph': 'fil_PH.UTF-8',
+ 'finnish': 'fi_FI.ISO8859-1',
+ 'fo': 'fo_FO.ISO8859-1',
+ 'fo_fo': 'fo_FO.ISO8859-1',
+ 'fr': 'fr_FR.ISO8859-1',
+ 'fr_be': 'fr_BE.ISO8859-1',
+ 'fr_ca': 'fr_CA.ISO8859-1',
+ 'fr_ch': 'fr_CH.ISO8859-1',
+ 'fr_fr': 'fr_FR.ISO8859-1',
+ 'fr_lu': 'fr_LU.ISO8859-1',
+ 'fran\xe7ais': 'fr_FR.ISO8859-1',
+ 'fre_fr': 'fr_FR.ISO8859-1',
+ 'french': 'fr_FR.ISO8859-1',
+ 'french.iso88591': 'fr_CH.ISO8859-1',
+ 'french_france': 'fr_FR.ISO8859-1',
+ 'fur_it': 'fur_IT.UTF-8',
+ 'fy_de': 'fy_DE.UTF-8',
+ 'fy_nl': 'fy_NL.UTF-8',
+ 'ga': 'ga_IE.ISO8859-1',
+ 'ga_ie': 'ga_IE.ISO8859-1',
+ 'galego': 'gl_ES.ISO8859-1',
+ 'galician': 'gl_ES.ISO8859-1',
+ 'gd': 'gd_GB.ISO8859-1',
+ 'gd_gb': 'gd_GB.ISO8859-1',
+ 'ger_de': 'de_DE.ISO8859-1',
+ 'german': 'de_DE.ISO8859-1',
+ 'german.iso88591': 'de_CH.ISO8859-1',
+ 'german_germany': 'de_DE.ISO8859-1',
+ 'gez_er': 'gez_ER.UTF-8',
+ 'gez_et': 'gez_ET.UTF-8',
+ 'gl': 'gl_ES.ISO8859-1',
+ 'gl_es': 'gl_ES.ISO8859-1',
+ 'greek': 'el_GR.ISO8859-7',
+ 'gu_in': 'gu_IN.UTF-8',
+ 'gv': 'gv_GB.ISO8859-1',
+ 'gv_gb': 'gv_GB.ISO8859-1',
+ 'ha_ng': 'ha_NG.UTF-8',
+ 'hak_tw': 'hak_TW.UTF-8',
+ 'he': 'he_IL.ISO8859-8',
+ 'he_il': 'he_IL.ISO8859-8',
+ 'hebrew': 'he_IL.ISO8859-8',
+ 'hi': 'hi_IN.ISCII-DEV',
+ 'hi_in': 'hi_IN.ISCII-DEV',
+ 'hi_in.isciidev': 'hi_IN.ISCII-DEV',
+ 'hif_fj': 'hif_FJ.UTF-8',
+ 'hne': 'hne_IN.UTF-8',
+ 'hne_in': 'hne_IN.UTF-8',
+ 'hr': 'hr_HR.ISO8859-2',
+ 'hr_hr': 'hr_HR.ISO8859-2',
+ 'hrvatski': 'hr_HR.ISO8859-2',
+ 'hsb_de': 'hsb_DE.ISO8859-2',
+ 'ht_ht': 'ht_HT.UTF-8',
+ 'hu': 'hu_HU.ISO8859-2',
+ 'hu_hu': 'hu_HU.ISO8859-2',
+ 'hungarian': 'hu_HU.ISO8859-2',
+ 'hy_am': 'hy_AM.UTF-8',
+ 'hy_am.armscii8': 'hy_AM.ARMSCII_8',
+ 'ia': 'ia.UTF-8',
+ 'ia_fr': 'ia_FR.UTF-8',
+ 'icelandic': 'is_IS.ISO8859-1',
+ 'id': 'id_ID.ISO8859-1',
+ 'id_id': 'id_ID.ISO8859-1',
+ 'ig_ng': 'ig_NG.UTF-8',
+ 'ik_ca': 'ik_CA.UTF-8',
+ 'in': 'id_ID.ISO8859-1',
+ 'in_id': 'id_ID.ISO8859-1',
+ 'is': 'is_IS.ISO8859-1',
+ 'is_is': 'is_IS.ISO8859-1',
+ 'iso-8859-1': 'en_US.ISO8859-1',
+ 'iso-8859-15': 'en_US.ISO8859-15',
+ 'iso8859-1': 'en_US.ISO8859-1',
+ 'iso8859-15': 'en_US.ISO8859-15',
+ 'iso_8859_1': 'en_US.ISO8859-1',
+ 'iso_8859_15': 'en_US.ISO8859-15',
+ 'it': 'it_IT.ISO8859-1',
+ 'it_ch': 'it_CH.ISO8859-1',
+ 'it_it': 'it_IT.ISO8859-1',
+ 'italian': 'it_IT.ISO8859-1',
+ 'iu': 'iu_CA.NUNACOM-8',
+ 'iu_ca': 'iu_CA.NUNACOM-8',
+ 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
+ 'iw': 'he_IL.ISO8859-8',
+ 'iw_il': 'he_IL.ISO8859-8',
+ 'iw_il.utf8': 'iw_IL.UTF-8',
+ 'ja': 'ja_JP.eucJP',
+ 'ja_jp': 'ja_JP.eucJP',
+ 'ja_jp.euc': 'ja_JP.eucJP',
+ 'ja_jp.mscode': 'ja_JP.SJIS',
+ 'ja_jp.pck': 'ja_JP.SJIS',
+ 'japan': 'ja_JP.eucJP',
+ 'japanese': 'ja_JP.eucJP',
+ 'japanese-euc': 'ja_JP.eucJP',
+ 'japanese.euc': 'ja_JP.eucJP',
+ 'jp_jp': 'ja_JP.eucJP',
+ 'ka': 'ka_GE.GEORGIAN-ACADEMY',
+ 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
+ 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
+ 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
+ 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
+ 'kab_dz': 'kab_DZ.UTF-8',
+ 'kk_kz': 'kk_KZ.ptcp154',
+ 'kl': 'kl_GL.ISO8859-1',
+ 'kl_gl': 'kl_GL.ISO8859-1',
+ 'km_kh': 'km_KH.UTF-8',
+ 'kn': 'kn_IN.UTF-8',
+ 'kn_in': 'kn_IN.UTF-8',
+ 'ko': 'ko_KR.eucKR',
+ 'ko_kr': 'ko_KR.eucKR',
+ 'ko_kr.euc': 'ko_KR.eucKR',
+ 'kok_in': 'kok_IN.UTF-8',
+ 'korean': 'ko_KR.eucKR',
+ 'korean.euc': 'ko_KR.eucKR',
+ 'ks': 'ks_IN.UTF-8',
+ 'ks_in': 'ks_IN.UTF-8',
+ 'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari',
+ 'ku_tr': 'ku_TR.ISO8859-9',
+ 'kw': 'kw_GB.ISO8859-1',
+ 'kw_gb': 'kw_GB.ISO8859-1',
+ 'ky': 'ky_KG.UTF-8',
+ 'ky_kg': 'ky_KG.UTF-8',
+ 'lb_lu': 'lb_LU.UTF-8',
+ 'lg_ug': 'lg_UG.ISO8859-10',
+ 'li_be': 'li_BE.UTF-8',
+ 'li_nl': 'li_NL.UTF-8',
+ 'lij_it': 'lij_IT.UTF-8',
+ 'lithuanian': 'lt_LT.ISO8859-13',
+ 'ln_cd': 'ln_CD.UTF-8',
+ 'lo': 'lo_LA.MULELAO-1',
+ 'lo_la': 'lo_LA.MULELAO-1',
+ 'lo_la.cp1133': 'lo_LA.IBM-CP1133',
+ 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
+ 'lo_la.mulelao1': 'lo_LA.MULELAO-1',
+ 'lt': 'lt_LT.ISO8859-13',
+ 'lt_lt': 'lt_LT.ISO8859-13',
+ 'lv': 'lv_LV.ISO8859-13',
+ 'lv_lv': 'lv_LV.ISO8859-13',
+ 'lzh_tw': 'lzh_TW.UTF-8',
+ 'mag_in': 'mag_IN.UTF-8',
+ 'mai': 'mai_IN.UTF-8',
+ 'mai_in': 'mai_IN.UTF-8',
+ 'mai_np': 'mai_NP.UTF-8',
+ 'mfe_mu': 'mfe_MU.UTF-8',
+ 'mg_mg': 'mg_MG.ISO8859-15',
+ 'mhr_ru': 'mhr_RU.UTF-8',
+ 'mi': 'mi_NZ.ISO8859-1',
+ 'mi_nz': 'mi_NZ.ISO8859-1',
+ 'miq_ni': 'miq_NI.UTF-8',
+ 'mjw_in': 'mjw_IN.UTF-8',
+ 'mk': 'mk_MK.ISO8859-5',
+ 'mk_mk': 'mk_MK.ISO8859-5',
+ 'ml': 'ml_IN.UTF-8',
+ 'ml_in': 'ml_IN.UTF-8',
+ 'mn_mn': 'mn_MN.UTF-8',
+ 'mni_in': 'mni_IN.UTF-8',
+ 'mr': 'mr_IN.UTF-8',
+ 'mr_in': 'mr_IN.UTF-8',
+ 'ms': 'ms_MY.ISO8859-1',
+ 'ms_my': 'ms_MY.ISO8859-1',
+ 'mt': 'mt_MT.ISO8859-3',
+ 'mt_mt': 'mt_MT.ISO8859-3',
+ 'my_mm': 'my_MM.UTF-8',
+ 'nan_tw': 'nan_TW.UTF-8',
+ 'nb': 'nb_NO.ISO8859-1',
+ 'nb_no': 'nb_NO.ISO8859-1',
+ 'nds_de': 'nds_DE.UTF-8',
+ 'nds_nl': 'nds_NL.UTF-8',
+ 'ne_np': 'ne_NP.UTF-8',
+ 'nhn_mx': 'nhn_MX.UTF-8',
+ 'niu_nu': 'niu_NU.UTF-8',
+ 'niu_nz': 'niu_NZ.UTF-8',
+ 'nl': 'nl_NL.ISO8859-1',
+ 'nl_aw': 'nl_AW.UTF-8',
+ 'nl_be': 'nl_BE.ISO8859-1',
+ 'nl_nl': 'nl_NL.ISO8859-1',
+ 'nn': 'nn_NO.ISO8859-1',
+ 'nn_no': 'nn_NO.ISO8859-1',
+ 'no': 'no_NO.ISO8859-1',
+ 'no@nynorsk': 'ny_NO.ISO8859-1',
+ 'no_no': 'no_NO.ISO8859-1',
+ 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1',
+ 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1',
+ 'norwegian': 'no_NO.ISO8859-1',
+ 'nr': 'nr_ZA.ISO8859-1',
+ 'nr_za': 'nr_ZA.ISO8859-1',
+ 'nso': 'nso_ZA.ISO8859-15',
+ 'nso_za': 'nso_ZA.ISO8859-15',
+ 'ny': 'ny_NO.ISO8859-1',
+ 'ny_no': 'ny_NO.ISO8859-1',
+ 'nynorsk': 'nn_NO.ISO8859-1',
+ 'oc': 'oc_FR.ISO8859-1',
+ 'oc_fr': 'oc_FR.ISO8859-1',
+ 'om_et': 'om_ET.UTF-8',
+ 'om_ke': 'om_KE.ISO8859-1',
+ 'or': 'or_IN.UTF-8',
+ 'or_in': 'or_IN.UTF-8',
+ 'os_ru': 'os_RU.UTF-8',
+ 'pa': 'pa_IN.UTF-8',
+ 'pa_in': 'pa_IN.UTF-8',
+ 'pa_pk': 'pa_PK.UTF-8',
+ 'pap_an': 'pap_AN.UTF-8',
+ 'pap_aw': 'pap_AW.UTF-8',
+ 'pap_cw': 'pap_CW.UTF-8',
+ 'pd': 'pd_US.ISO8859-1',
+ 'pd_de': 'pd_DE.ISO8859-1',
+ 'pd_us': 'pd_US.ISO8859-1',
+ 'ph': 'ph_PH.ISO8859-1',
+ 'ph_ph': 'ph_PH.ISO8859-1',
+ 'pl': 'pl_PL.ISO8859-2',
+ 'pl_pl': 'pl_PL.ISO8859-2',
+ 'polish': 'pl_PL.ISO8859-2',
+ 'portuguese': 'pt_PT.ISO8859-1',
+ 'portuguese_brazil': 'pt_BR.ISO8859-1',
+ 'posix': 'C',
+ 'posix-utf2': 'C',
+ 'pp': 'pp_AN.ISO8859-1',
+ 'pp_an': 'pp_AN.ISO8859-1',
+ 'ps_af': 'ps_AF.UTF-8',
+ 'pt': 'pt_PT.ISO8859-1',
+ 'pt_br': 'pt_BR.ISO8859-1',
+ 'pt_pt': 'pt_PT.ISO8859-1',
+ 'quz_pe': 'quz_PE.UTF-8',
+ 'raj_in': 'raj_IN.UTF-8',
+ 'ro': 'ro_RO.ISO8859-2',
+ 'ro_ro': 'ro_RO.ISO8859-2',
+ 'romanian': 'ro_RO.ISO8859-2',
+ 'ru': 'ru_RU.UTF-8',
+ 'ru_ru': 'ru_RU.UTF-8',
+ 'ru_ua': 'ru_UA.KOI8-U',
+ 'rumanian': 'ro_RO.ISO8859-2',
+ 'russian': 'ru_RU.KOI8-R',
+ 'rw': 'rw_RW.ISO8859-1',
+ 'rw_rw': 'rw_RW.ISO8859-1',
+ 'sa_in': 'sa_IN.UTF-8',
+ 'sat_in': 'sat_IN.UTF-8',
+ 'sc_it': 'sc_IT.UTF-8',
+ 'sd': 'sd_IN.UTF-8',
+ 'sd_in': 'sd_IN.UTF-8',
+ 'sd_in@devanagari.utf8': 'sd_IN.UTF-8@devanagari',
+ 'sd_pk': 'sd_PK.UTF-8',
+ 'se_no': 'se_NO.UTF-8',
+ 'serbocroatian': 'sr_RS.UTF-8@latin',
+ 'sgs_lt': 'sgs_LT.UTF-8',
+ 'sh': 'sr_RS.UTF-8@latin',
+ 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
+ 'sh_hr': 'sh_HR.ISO8859-2',
+ 'sh_hr.iso88592': 'hr_HR.ISO8859-2',
+ 'sh_sp': 'sr_CS.ISO8859-2',
+ 'sh_yu': 'sr_RS.UTF-8@latin',
+ 'shn_mm': 'shn_MM.UTF-8',
+ 'shs_ca': 'shs_CA.UTF-8',
+ 'si': 'si_LK.UTF-8',
+ 'si_lk': 'si_LK.UTF-8',
+ 'sid_et': 'sid_ET.UTF-8',
+ 'sinhala': 'si_LK.UTF-8',
+ 'sk': 'sk_SK.ISO8859-2',
+ 'sk_sk': 'sk_SK.ISO8859-2',
+ 'sl': 'sl_SI.ISO8859-2',
+ 'sl_cs': 'sl_CS.ISO8859-2',
+ 'sl_si': 'sl_SI.ISO8859-2',
+ 'slovak': 'sk_SK.ISO8859-2',
+ 'slovene': 'sl_SI.ISO8859-2',
+ 'slovenian': 'sl_SI.ISO8859-2',
+ 'sm_ws': 'sm_WS.UTF-8',
+ 'so_dj': 'so_DJ.ISO8859-1',
+ 'so_et': 'so_ET.UTF-8',
+ 'so_ke': 'so_KE.ISO8859-1',
+ 'so_so': 'so_SO.ISO8859-1',
+ 'sp': 'sr_CS.ISO8859-5',
+ 'sp_yu': 'sr_CS.ISO8859-5',
+ 'spanish': 'es_ES.ISO8859-1',
+ 'spanish_spain': 'es_ES.ISO8859-1',
+ 'sq': 'sq_AL.ISO8859-2',
+ 'sq_al': 'sq_AL.ISO8859-2',
+ 'sq_mk': 'sq_MK.UTF-8',
+ 'sr': 'sr_RS.UTF-8',
+ 'sr@cyrillic': 'sr_RS.UTF-8',
+ 'sr@latn': 'sr_CS.UTF-8@latin',
+ 'sr_cs': 'sr_CS.UTF-8',
+ 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2',
+ 'sr_cs@latn': 'sr_CS.UTF-8@latin',
+ 'sr_me': 'sr_ME.UTF-8',
+ 'sr_rs': 'sr_RS.UTF-8',
+ 'sr_rs@latn': 'sr_RS.UTF-8@latin',
+ 'sr_sp': 'sr_CS.ISO8859-2',
+ 'sr_yu': 'sr_RS.UTF-8@latin',
+ 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251',
+ 'sr_yu.iso88592': 'sr_CS.ISO8859-2',
+ 'sr_yu.iso88595': 'sr_CS.ISO8859-5',
+ 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5',
+ 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251',
+ 'sr_yu.utf8': 'sr_RS.UTF-8',
+ 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8',
+ 'sr_yu@cyrillic': 'sr_RS.UTF-8',
+ 'ss': 'ss_ZA.ISO8859-1',
+ 'ss_za': 'ss_ZA.ISO8859-1',
+ 'st': 'st_ZA.ISO8859-1',
+ 'st_za': 'st_ZA.ISO8859-1',
+ 'sv': 'sv_SE.ISO8859-1',
+ 'sv_fi': 'sv_FI.ISO8859-1',
+ 'sv_se': 'sv_SE.ISO8859-1',
+ 'sw_ke': 'sw_KE.UTF-8',
+ 'sw_tz': 'sw_TZ.UTF-8',
+ 'swedish': 'sv_SE.ISO8859-1',
+ 'szl_pl': 'szl_PL.UTF-8',
+ 'ta': 'ta_IN.TSCII-0',
+ 'ta_in': 'ta_IN.TSCII-0',
+ 'ta_in.tscii': 'ta_IN.TSCII-0',
+ 'ta_in.tscii0': 'ta_IN.TSCII-0',
+ 'ta_lk': 'ta_LK.UTF-8',
+ 'tcy_in.utf8': 'tcy_IN.UTF-8',
+ 'te': 'te_IN.UTF-8',
+ 'te_in': 'te_IN.UTF-8',
+ 'tg': 'tg_TJ.KOI8-C',
+ 'tg_tj': 'tg_TJ.KOI8-C',
+ 'th': 'th_TH.ISO8859-11',
+ 'th_th': 'th_TH.ISO8859-11',
+ 'th_th.tactis': 'th_TH.TIS620',
+ 'th_th.tis620': 'th_TH.TIS620',
+ 'thai': 'th_TH.ISO8859-11',
+ 'the_np': 'the_NP.UTF-8',
+ 'ti_er': 'ti_ER.UTF-8',
+ 'ti_et': 'ti_ET.UTF-8',
+ 'tig_er': 'tig_ER.UTF-8',
+ 'tk_tm': 'tk_TM.UTF-8',
+ 'tl': 'tl_PH.ISO8859-1',
+ 'tl_ph': 'tl_PH.ISO8859-1',
+ 'tn': 'tn_ZA.ISO8859-15',
+ 'tn_za': 'tn_ZA.ISO8859-15',
+ 'to_to': 'to_TO.UTF-8',
+ 'tpi_pg': 'tpi_PG.UTF-8',
+ 'tr': 'tr_TR.ISO8859-9',
+ 'tr_cy': 'tr_CY.ISO8859-9',
+ 'tr_tr': 'tr_TR.ISO8859-9',
+ 'ts': 'ts_ZA.ISO8859-1',
+ 'ts_za': 'ts_ZA.ISO8859-1',
+ 'tt': 'tt_RU.TATAR-CYR',
+ 'tt_ru': 'tt_RU.TATAR-CYR',
+ 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
+ 'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif',
+ 'turkish': 'tr_TR.ISO8859-9',
+ 'ug_cn': 'ug_CN.UTF-8',
+ 'uk': 'uk_UA.KOI8-U',
+ 'uk_ua': 'uk_UA.KOI8-U',
+ 'univ': 'en_US.utf',
+ 'universal': 'en_US.utf',
+ 'universal.utf8@ucs4': 'en_US.UTF-8',
+ 'unm_us': 'unm_US.UTF-8',
+ 'ur': 'ur_PK.CP1256',
+ 'ur_in': 'ur_IN.UTF-8',
+ 'ur_pk': 'ur_PK.CP1256',
+ 'uz': 'uz_UZ.UTF-8',
+ 'uz_uz': 'uz_UZ.UTF-8',
+ 'uz_uz@cyrillic': 'uz_UZ.UTF-8',
+ 've': 've_ZA.UTF-8',
+ 've_za': 've_ZA.UTF-8',
+ 'vi': 'vi_VN.TCVN',
+ 'vi_vn': 'vi_VN.TCVN',
+ 'vi_vn.tcvn': 'vi_VN.TCVN',
+ 'vi_vn.tcvn5712': 'vi_VN.TCVN',
+ 'vi_vn.viscii': 'vi_VN.VISCII',
+ 'vi_vn.viscii111': 'vi_VN.VISCII',
+ 'wa': 'wa_BE.ISO8859-1',
+ 'wa_be': 'wa_BE.ISO8859-1',
+ 'wae_ch': 'wae_CH.UTF-8',
+ 'wal_et': 'wal_ET.UTF-8',
+ 'wo_sn': 'wo_SN.UTF-8',
+ 'xh': 'xh_ZA.ISO8859-1',
+ 'xh_za': 'xh_ZA.ISO8859-1',
+ 'yi': 'yi_US.CP1255',
+ 'yi_us': 'yi_US.CP1255',
+ 'yo_ng': 'yo_NG.UTF-8',
+ 'yue_hk': 'yue_HK.UTF-8',
+ 'yuw_pg': 'yuw_PG.UTF-8',
+ 'zh': 'zh_CN.eucCN',
+ 'zh_cn': 'zh_CN.gb2312',
+ 'zh_cn.big5': 'zh_TW.big5',
+ 'zh_cn.euc': 'zh_CN.eucCN',
+ 'zh_hk': 'zh_HK.big5hkscs',
+ 'zh_hk.big5hk': 'zh_HK.big5hkscs',
+ 'zh_sg': 'zh_SG.GB2312',
+ 'zh_sg.gbk': 'zh_SG.GBK',
+ 'zh_tw': 'zh_TW.big5',
+ 'zh_tw.euc': 'zh_TW.eucTW',
+ 'zh_tw.euctw': 'zh_TW.eucTW',
+ 'zu': 'zu_ZA.ISO8859-1',
+ 'zu_za': 'zu_ZA.ISO8859-1',
+}
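+
+# Illustrative sketch (doctest-style): normalize() resolves entries from the
+# locale_alias table above, so short or legacy names expand to full locale
+# strings:
+#
+#     >>> import locale
+#     >>> locale.normalize('pt_br')
+#     'pt_BR.ISO8859-1'
+#     >>> locale.normalize('swedish')
+#     'sv_SE.ISO8859-1'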
+
+#
+# This maps Windows language identifiers to locale strings.
+#
+# This list has been updated from
+# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
+# to include every locale up to Windows Vista.
+#
+# NOTE: this mapping is incomplete. If your language is missing, please
+# submit a bug report to the Python bug tracker at http://bugs.python.org/
+# Make sure you include the missing language identifier and the suggested
+# locale code.
+#
+
+windows_locale = {
+ 0x0436: "af_ZA", # Afrikaans
+ 0x041c: "sq_AL", # Albanian
+ 0x0484: "gsw_FR",# Alsatian - France
+ 0x045e: "am_ET", # Amharic - Ethiopia
+ 0x0401: "ar_SA", # Arabic - Saudi Arabia
+ 0x0801: "ar_IQ", # Arabic - Iraq
+ 0x0c01: "ar_EG", # Arabic - Egypt
+ 0x1001: "ar_LY", # Arabic - Libya
+ 0x1401: "ar_DZ", # Arabic - Algeria
+ 0x1801: "ar_MA", # Arabic - Morocco
+ 0x1c01: "ar_TN", # Arabic - Tunisia
+ 0x2001: "ar_OM", # Arabic - Oman
+ 0x2401: "ar_YE", # Arabic - Yemen
+ 0x2801: "ar_SY", # Arabic - Syria
+ 0x2c01: "ar_JO", # Arabic - Jordan
+ 0x3001: "ar_LB", # Arabic - Lebanon
+ 0x3401: "ar_KW", # Arabic - Kuwait
+ 0x3801: "ar_AE", # Arabic - United Arab Emirates
+ 0x3c01: "ar_BH", # Arabic - Bahrain
+ 0x4001: "ar_QA", # Arabic - Qatar
+ 0x042b: "hy_AM", # Armenian
+ 0x044d: "as_IN", # Assamese - India
+ 0x042c: "az_AZ", # Azeri - Latin
+ 0x082c: "az_AZ", # Azeri - Cyrillic
+ 0x046d: "ba_RU", # Bashkir
+    0x042d: "eu_ES", # Basque - Spain
+ 0x0423: "be_BY", # Belarusian
+    0x0445: "bn_IN", # Bengali
+ 0x201a: "bs_BA", # Bosnian - Cyrillic
+ 0x141a: "bs_BA", # Bosnian - Latin
+ 0x047e: "br_FR", # Breton - France
+ 0x0402: "bg_BG", # Bulgarian
+# 0x0455: "my_MM", # Burmese - Not supported
+ 0x0403: "ca_ES", # Catalan
+ 0x0004: "zh_CHS",# Chinese - Simplified
+ 0x0404: "zh_TW", # Chinese - Taiwan
+ 0x0804: "zh_CN", # Chinese - PRC
+ 0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
+ 0x1004: "zh_SG", # Chinese - Singapore
+ 0x1404: "zh_MO", # Chinese - Macao S.A.R.
+ 0x7c04: "zh_CHT",# Chinese - Traditional
+ 0x0483: "co_FR", # Corsican - France
+ 0x041a: "hr_HR", # Croatian
+ 0x101a: "hr_BA", # Croatian - Bosnia
+ 0x0405: "cs_CZ", # Czech
+ 0x0406: "da_DK", # Danish
+ 0x048c: "gbz_AF",# Dari - Afghanistan
+ 0x0465: "div_MV",# Divehi - Maldives
+ 0x0413: "nl_NL", # Dutch - The Netherlands
+ 0x0813: "nl_BE", # Dutch - Belgium
+ 0x0409: "en_US", # English - United States
+ 0x0809: "en_GB", # English - United Kingdom
+ 0x0c09: "en_AU", # English - Australia
+ 0x1009: "en_CA", # English - Canada
+ 0x1409: "en_NZ", # English - New Zealand
+ 0x1809: "en_IE", # English - Ireland
+ 0x1c09: "en_ZA", # English - South Africa
+    0x2009: "en_JM", # English - Jamaica
+ 0x2409: "en_CB", # English - Caribbean
+ 0x2809: "en_BZ", # English - Belize
+ 0x2c09: "en_TT", # English - Trinidad
+ 0x3009: "en_ZW", # English - Zimbabwe
+ 0x3409: "en_PH", # English - Philippines
+ 0x4009: "en_IN", # English - India
+ 0x4409: "en_MY", # English - Malaysia
+    0x4809: "en_SG", # English - Singapore
+ 0x0425: "et_EE", # Estonian
+ 0x0438: "fo_FO", # Faroese
+ 0x0464: "fil_PH",# Filipino
+ 0x040b: "fi_FI", # Finnish
+ 0x040c: "fr_FR", # French - France
+ 0x080c: "fr_BE", # French - Belgium
+ 0x0c0c: "fr_CA", # French - Canada
+ 0x100c: "fr_CH", # French - Switzerland
+ 0x140c: "fr_LU", # French - Luxembourg
+ 0x180c: "fr_MC", # French - Monaco
+ 0x0462: "fy_NL", # Frisian - Netherlands
+ 0x0456: "gl_ES", # Galician
+ 0x0437: "ka_GE", # Georgian
+ 0x0407: "de_DE", # German - Germany
+ 0x0807: "de_CH", # German - Switzerland
+ 0x0c07: "de_AT", # German - Austria
+ 0x1007: "de_LU", # German - Luxembourg
+ 0x1407: "de_LI", # German - Liechtenstein
+ 0x0408: "el_GR", # Greek
+ 0x046f: "kl_GL", # Greenlandic - Greenland
+ 0x0447: "gu_IN", # Gujarati
+ 0x0468: "ha_NG", # Hausa - Latin
+ 0x040d: "he_IL", # Hebrew
+ 0x0439: "hi_IN", # Hindi
+ 0x040e: "hu_HU", # Hungarian
+ 0x040f: "is_IS", # Icelandic
+ 0x0421: "id_ID", # Indonesian
+ 0x045d: "iu_CA", # Inuktitut - Syllabics
+ 0x085d: "iu_CA", # Inuktitut - Latin
+ 0x083c: "ga_IE", # Irish - Ireland
+ 0x0410: "it_IT", # Italian - Italy
+ 0x0810: "it_CH", # Italian - Switzerland
+ 0x0411: "ja_JP", # Japanese
+ 0x044b: "kn_IN", # Kannada - India
+ 0x043f: "kk_KZ", # Kazakh
+    0x0453: "km_KH", # Khmer - Cambodia
+ 0x0486: "qut_GT",# K'iche - Guatemala
+ 0x0487: "rw_RW", # Kinyarwanda - Rwanda
+ 0x0457: "kok_IN",# Konkani
+ 0x0412: "ko_KR", # Korean
+ 0x0440: "ky_KG", # Kyrgyz
+ 0x0454: "lo_LA", # Lao - Lao PDR
+ 0x0426: "lv_LV", # Latvian
+ 0x0427: "lt_LT", # Lithuanian
+ 0x082e: "dsb_DE",# Lower Sorbian - Germany
+ 0x046e: "lb_LU", # Luxembourgish
+ 0x042f: "mk_MK", # FYROM Macedonian
+ 0x043e: "ms_MY", # Malay - Malaysia
+ 0x083e: "ms_BN", # Malay - Brunei Darussalam
+ 0x044c: "ml_IN", # Malayalam - India
+ 0x043a: "mt_MT", # Maltese
+ 0x0481: "mi_NZ", # Maori
+ 0x047a: "arn_CL",# Mapudungun
+ 0x044e: "mr_IN", # Marathi
+ 0x047c: "moh_CA",# Mohawk - Canada
+ 0x0450: "mn_MN", # Mongolian - Cyrillic
+ 0x0850: "mn_CN", # Mongolian - PRC
+ 0x0461: "ne_NP", # Nepali
+ 0x0414: "nb_NO", # Norwegian - Bokmal
+ 0x0814: "nn_NO", # Norwegian - Nynorsk
+ 0x0482: "oc_FR", # Occitan - France
+ 0x0448: "or_IN", # Oriya - India
+ 0x0463: "ps_AF", # Pashto - Afghanistan
+ 0x0429: "fa_IR", # Persian
+ 0x0415: "pl_PL", # Polish
+ 0x0416: "pt_BR", # Portuguese - Brazil
+ 0x0816: "pt_PT", # Portuguese - Portugal
+ 0x0446: "pa_IN", # Punjabi
+ 0x046b: "quz_BO",# Quechua (Bolivia)
+ 0x086b: "quz_EC",# Quechua (Ecuador)
+ 0x0c6b: "quz_PE",# Quechua (Peru)
+ 0x0418: "ro_RO", # Romanian - Romania
+ 0x0417: "rm_CH", # Romansh
+ 0x0419: "ru_RU", # Russian
+ 0x243b: "smn_FI",# Sami Finland
+ 0x103b: "smj_NO",# Sami Norway
+ 0x143b: "smj_SE",# Sami Sweden
+ 0x043b: "se_NO", # Sami Northern Norway
+ 0x083b: "se_SE", # Sami Northern Sweden
+ 0x0c3b: "se_FI", # Sami Northern Finland
+ 0x203b: "sms_FI",# Sami Skolt
+ 0x183b: "sma_NO",# Sami Southern Norway
+ 0x1c3b: "sma_SE",# Sami Southern Sweden
+ 0x044f: "sa_IN", # Sanskrit
+ 0x0c1a: "sr_SP", # Serbian - Cyrillic
+ 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
+ 0x081a: "sr_SP", # Serbian - Latin
+ 0x181a: "sr_BA", # Serbian - Bosnia Latin
+ 0x045b: "si_LK", # Sinhala - Sri Lanka
+ 0x046c: "ns_ZA", # Northern Sotho
+ 0x0432: "tn_ZA", # Setswana - Southern Africa
+ 0x041b: "sk_SK", # Slovak
+ 0x0424: "sl_SI", # Slovenian
+ 0x040a: "es_ES", # Spanish - Spain
+ 0x080a: "es_MX", # Spanish - Mexico
+ 0x0c0a: "es_ES", # Spanish - Spain (Modern)
+ 0x100a: "es_GT", # Spanish - Guatemala
+ 0x140a: "es_CR", # Spanish - Costa Rica
+ 0x180a: "es_PA", # Spanish - Panama
+ 0x1c0a: "es_DO", # Spanish - Dominican Republic
+ 0x200a: "es_VE", # Spanish - Venezuela
+ 0x240a: "es_CO", # Spanish - Colombia
+ 0x280a: "es_PE", # Spanish - Peru
+ 0x2c0a: "es_AR", # Spanish - Argentina
+ 0x300a: "es_EC", # Spanish - Ecuador
+ 0x340a: "es_CL", # Spanish - Chile
+    0x380a: "es_UY", # Spanish - Uruguay
+ 0x3c0a: "es_PY", # Spanish - Paraguay
+ 0x400a: "es_BO", # Spanish - Bolivia
+ 0x440a: "es_SV", # Spanish - El Salvador
+ 0x480a: "es_HN", # Spanish - Honduras
+ 0x4c0a: "es_NI", # Spanish - Nicaragua
+ 0x500a: "es_PR", # Spanish - Puerto Rico
+ 0x540a: "es_US", # Spanish - United States
+# 0x0430: "", # Sutu - Not supported
+ 0x0441: "sw_KE", # Swahili
+ 0x041d: "sv_SE", # Swedish - Sweden
+ 0x081d: "sv_FI", # Swedish - Finland
+ 0x045a: "syr_SY",# Syriac
+ 0x0428: "tg_TJ", # Tajik - Cyrillic
+ 0x085f: "tmz_DZ",# Tamazight - Latin
+ 0x0449: "ta_IN", # Tamil
+ 0x0444: "tt_RU", # Tatar
+ 0x044a: "te_IN", # Telugu
+ 0x041e: "th_TH", # Thai
+ 0x0851: "bo_BT", # Tibetan - Bhutan
+ 0x0451: "bo_CN", # Tibetan - PRC
+ 0x041f: "tr_TR", # Turkish
+ 0x0442: "tk_TM", # Turkmen - Cyrillic
+ 0x0480: "ug_CN", # Uighur - Arabic
+ 0x0422: "uk_UA", # Ukrainian
+ 0x042e: "wen_DE",# Upper Sorbian - Germany
+ 0x0420: "ur_PK", # Urdu
+ 0x0820: "ur_IN", # Urdu - India
+ 0x0443: "uz_UZ", # Uzbek - Latin
+ 0x0843: "uz_UZ", # Uzbek - Cyrillic
+ 0x042a: "vi_VN", # Vietnamese
+ 0x0452: "cy_GB", # Welsh
+ 0x0488: "wo_SN", # Wolof - Senegal
+ 0x0434: "xh_ZA", # Xhosa - South Africa
+ 0x0485: "sah_RU",# Yakut - Cyrillic
+ 0x0478: "ii_CN", # Yi - PRC
+ 0x046a: "yo_NG", # Yoruba - Nigeria
+ 0x0435: "zu_ZA", # Zulu
+}
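+
+# Illustrative sketch: on Windows, getdefaultlocale() maps the active LCID
+# through the windows_locale table above; a direct lookup looks like:
+#
+#     >>> windows_locale[0x0409]
+#     'en_US'
+#     >>> windows_locale.get(0x0816)
+#     'pt_PT'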
+
+def _print_locale():
+
+ """ Test function.
+ """
+ categories = {}
+ def _init_categories(categories=categories):
+ for k,v in globals().items():
+ if k[:3] == 'LC_':
+ categories[k] = v
+ _init_categories()
+ del categories['LC_ALL']
+
+ print('Locale defaults as determined by getdefaultlocale():')
+ print('-'*72)
+ lang, enc = getdefaultlocale()
+ print('Language: ', lang or '(undefined)')
+ print('Encoding: ', enc or '(undefined)')
+ print()
+
+ print('Locale settings on startup:')
+ print('-'*72)
+ for name,category in categories.items():
+ print(name, '...')
+ lang, enc = getlocale(category)
+ print(' Language: ', lang or '(undefined)')
+ print(' Encoding: ', enc or '(undefined)')
+ print()
+
+ print()
+ print('Locale settings after calling resetlocale():')
+ print('-'*72)
+ resetlocale()
+ for name,category in categories.items():
+ print(name, '...')
+ lang, enc = getlocale(category)
+ print(' Language: ', lang or '(undefined)')
+ print(' Encoding: ', enc or '(undefined)')
+ print()
+
+ try:
+ setlocale(LC_ALL, "")
+    except Error:
+ print('NOTE:')
+ print('setlocale(LC_ALL, "") does not support the default locale')
+ print('given in the OS environment variables.')
+ else:
+ print()
+ print('Locale settings after calling setlocale(LC_ALL, ""):')
+ print('-'*72)
+ for name,category in categories.items():
+ print(name, '...')
+ lang, enc = getlocale(category)
+ print(' Language: ', lang or '(undefined)')
+ print(' Encoding: ', enc or '(undefined)')
+ print()
+
+###
+
+try:
+ LC_MESSAGES
+except NameError:
+ pass
+else:
+ __all__.append("LC_MESSAGES")
+
+if __name__=='__main__':
+ print('Locale aliasing:')
+ print()
+ _print_locale()
+ print()
+ print('Number formatting:')
+ print()
+ _test()
diff --git a/infer_4_37_2/lib/python3.10/mailbox.py b/infer_4_37_2/lib/python3.10/mailbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..70da07ed2e9e8bc18dfc26ac1762f4102bcf4426
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/mailbox.py
@@ -0,0 +1,2151 @@
+"""Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
+
+# Notes for authors of new mailbox subclasses:
+#
+# Remember to fsync() changes to disk before closing a modified file
+# or returning from a flush() method. See functions _sync_flush() and
+# _sync_close().
+
+import os
+import time
+import calendar
+import socket
+import errno
+import copy
+import warnings
+import email
+import email.message
+import email.generator
+import io
+import contextlib
+from types import GenericAlias
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+
+__all__ = ['Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
+ 'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
+ 'BabylMessage', 'MMDFMessage', 'Error', 'NoSuchMailboxError',
+ 'NotEmptyError', 'ExternalClashError', 'FormatError']
+
+linesep = os.linesep.encode('ascii')
+
+class Mailbox:
+ """A group of messages in a particular place."""
+
+ def __init__(self, path, factory=None, create=True):
+ """Initialize a Mailbox instance."""
+ self._path = os.path.abspath(os.path.expanduser(path))
+ self._factory = factory
+
+ def add(self, message):
+ """Add message and return assigned key."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def remove(self, key):
+ """Remove the keyed message; raise KeyError if it doesn't exist."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def __delitem__(self, key):
+ self.remove(key)
+
+ def discard(self, key):
+ """If the keyed message exists, remove it."""
+ try:
+ self.remove(key)
+ except KeyError:
+ pass
+
+ def __setitem__(self, key, message):
+ """Replace the keyed message; raise KeyError if it doesn't exist."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def get(self, key, default=None):
+ """Return the keyed message, or default if it doesn't exist."""
+ try:
+ return self.__getitem__(key)
+ except KeyError:
+ return default
+
+ def __getitem__(self, key):
+ """Return the keyed message; raise KeyError if it doesn't exist."""
+ if not self._factory:
+ return self.get_message(key)
+ else:
+ with contextlib.closing(self.get_file(key)) as file:
+ return self._factory(file)
+
+ def get_message(self, key):
+ """Return a Message representation or raise a KeyError."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def get_string(self, key):
+ """Return a string representation or raise a KeyError.
+
+ Uses email.message.Message to create a 7bit clean string
+ representation of the message."""
+ return email.message_from_bytes(self.get_bytes(key)).as_string()
+
+ def get_bytes(self, key):
+ """Return a byte string representation or raise a KeyError."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def get_file(self, key):
+ """Return a file-like representation or raise a KeyError."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def iterkeys(self):
+ """Return an iterator over keys."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def keys(self):
+ """Return a list of keys."""
+ return list(self.iterkeys())
+
+ def itervalues(self):
+ """Return an iterator over all messages."""
+ for key in self.iterkeys():
+ try:
+ value = self[key]
+ except KeyError:
+ continue
+ yield value
+
+ def __iter__(self):
+ return self.itervalues()
+
+ def values(self):
+ """Return a list of messages. Memory intensive."""
+ return list(self.itervalues())
+
+ def iteritems(self):
+ """Return an iterator over (key, message) tuples."""
+ for key in self.iterkeys():
+ try:
+ value = self[key]
+ except KeyError:
+ continue
+ yield (key, value)
+
+ def items(self):
+ """Return a list of (key, message) tuples. Memory intensive."""
+ return list(self.iteritems())
+
+ def __contains__(self, key):
+ """Return True if the keyed message exists, False otherwise."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def __len__(self):
+ """Return a count of messages in the mailbox."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def clear(self):
+ """Delete all messages."""
+ for key in self.keys():
+ self.discard(key)
+
+ def pop(self, key, default=None):
+ """Delete the keyed message and return it, or default."""
+ try:
+ result = self[key]
+ except KeyError:
+ return default
+ self.discard(key)
+ return result
+
+ def popitem(self):
+ """Delete an arbitrary (key, message) pair and return it."""
+ for key in self.iterkeys():
+ return (key, self.pop(key)) # This is only run once.
+ else:
+ raise KeyError('No messages in mailbox')
+
+ def update(self, arg=None):
+ """Change the messages that correspond to certain keys."""
+ if hasattr(arg, 'iteritems'):
+ source = arg.iteritems()
+ elif hasattr(arg, 'items'):
+ source = arg.items()
+ else:
+ source = arg
+ bad_key = False
+ for key, message in source:
+ try:
+ self[key] = message
+ except KeyError:
+ bad_key = True
+ if bad_key:
+ raise KeyError('No message with key(s)')
+
+ def flush(self):
+ """Write any pending changes to the disk."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def lock(self):
+ """Lock the mailbox."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def unlock(self):
+ """Unlock the mailbox if it is locked."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def close(self):
+ """Flush and close the mailbox."""
+ raise NotImplementedError('Method must be implemented by subclass')
+
+ def _string_to_bytes(self, message):
+ # If a message is not 7bit clean, we refuse to handle it since it
+ # likely came from reading invalid messages in text mode, and that way
+ # lies mojibake.
+ try:
+ return message.encode('ascii')
+ except UnicodeError:
+ raise ValueError("String input must be ASCII-only; "
+ "use bytes or a Message instead")
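+
+    # Illustrative sketch (hypothetical path): passing non-ASCII text to a
+    # mailbox raises, since str input must be 7-bit clean:
+    #
+    #     >>> mbox('/tmp/example.mbox').add('Subject: caf\xe9\n\nhi\n')
+    #     Traceback (most recent call last):
+    #       ...
+    #     ValueError: String input must be ASCII-only; use bytes or a Message instead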
+
+ # Whether each message must end in a newline
+ _append_newline = False
+
+ def _dump_message(self, message, target, mangle_from_=False):
+ # This assumes the target file is open in binary mode.
+ """Dump message contents to target file."""
+ if isinstance(message, email.message.Message):
+ buffer = io.BytesIO()
+ gen = email.generator.BytesGenerator(buffer, mangle_from_, 0)
+ gen.flatten(message)
+ buffer.seek(0)
+ data = buffer.read()
+ data = data.replace(b'\n', linesep)
+ target.write(data)
+ if self._append_newline and not data.endswith(linesep):
+ # Make sure the message ends with a newline
+ target.write(linesep)
+ elif isinstance(message, (str, bytes, io.StringIO)):
+ if isinstance(message, io.StringIO):
+ warnings.warn("Use of StringIO input is deprecated, "
+ "use BytesIO instead", DeprecationWarning, 3)
+ message = message.getvalue()
+ if isinstance(message, str):
+ message = self._string_to_bytes(message)
+ if mangle_from_:
+ message = message.replace(b'\nFrom ', b'\n>From ')
+ message = message.replace(b'\n', linesep)
+ target.write(message)
+ if self._append_newline and not message.endswith(linesep):
+ # Make sure the message ends with a newline
+ target.write(linesep)
+ elif hasattr(message, 'read'):
+ if hasattr(message, 'buffer'):
+ warnings.warn("Use of text mode files is deprecated, "
+ "use a binary mode file instead", DeprecationWarning, 3)
+ message = message.buffer
+ lastline = None
+ while True:
+ line = message.readline()
+ # Universal newline support.
+ if line.endswith(b'\r\n'):
+ line = line[:-2] + b'\n'
+ elif line.endswith(b'\r'):
+ line = line[:-1] + b'\n'
+ if not line:
+ break
+ if mangle_from_ and line.startswith(b'From '):
+ line = b'>From ' + line[5:]
+ line = line.replace(b'\n', linesep)
+ target.write(line)
+ lastline = line
+ if self._append_newline and lastline and not lastline.endswith(linesep):
+ # Make sure the message ends with a newline
+ target.write(linesep)
+ else:
+ raise TypeError('Invalid message type: %s' % type(message))
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+
+class Maildir(Mailbox):
+ """A qmail-style Maildir mailbox."""
+
+ colon = ':'
+
+ def __init__(self, dirname, factory=None, create=True):
+ """Initialize a Maildir instance."""
+ Mailbox.__init__(self, dirname, factory, create)
+ self._paths = {
+ 'tmp': os.path.join(self._path, 'tmp'),
+ 'new': os.path.join(self._path, 'new'),
+ 'cur': os.path.join(self._path, 'cur'),
+ }
+ if not os.path.exists(self._path):
+ if create:
+ os.mkdir(self._path, 0o700)
+ for path in self._paths.values():
+ os.mkdir(path, 0o700)
+ else:
+ raise NoSuchMailboxError(self._path)
+ self._toc = {}
+ self._toc_mtimes = {'cur': 0, 'new': 0}
+ self._last_read = 0 # Records last time we read cur/new
+ self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing
+
+ def add(self, message):
+ """Add message and return assigned key."""
+ tmp_file = self._create_tmp()
+ try:
+ self._dump_message(message, tmp_file)
+ except BaseException:
+ tmp_file.close()
+ os.remove(tmp_file.name)
+ raise
+ _sync_close(tmp_file)
+ if isinstance(message, MaildirMessage):
+ subdir = message.get_subdir()
+ suffix = self.colon + message.get_info()
+ if suffix == self.colon:
+ suffix = ''
+ else:
+ subdir = 'new'
+ suffix = ''
+ uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
+ dest = os.path.join(self._path, subdir, uniq + suffix)
+ if isinstance(message, MaildirMessage):
+ os.utime(tmp_file.name,
+ (os.path.getatime(tmp_file.name), message.get_date()))
+ # No file modification should be done after the file is moved to its
+ # final position in order to prevent race conditions with changes
+ # from other programs
+ try:
+ try:
+ os.link(tmp_file.name, dest)
+ except (AttributeError, PermissionError):
+ os.rename(tmp_file.name, dest)
+ else:
+ os.remove(tmp_file.name)
+ except OSError as e:
+ os.remove(tmp_file.name)
+ if e.errno == errno.EEXIST:
+ raise ExternalClashError('Name clash with existing message: %s'
+ % dest)
+ else:
+ raise
+ return uniq
+
+ def remove(self, key):
+ """Remove the keyed message; raise KeyError if it doesn't exist."""
+ os.remove(os.path.join(self._path, self._lookup(key)))
+
+ def discard(self, key):
+ """If the keyed message exists, remove it."""
+ # This overrides an inapplicable implementation in the superclass.
+ try:
+ self.remove(key)
+ except (KeyError, FileNotFoundError):
+ pass
+
+ def __setitem__(self, key, message):
+ """Replace the keyed message; raise KeyError if it doesn't exist."""
+ old_subpath = self._lookup(key)
+ temp_key = self.add(message)
+ temp_subpath = self._lookup(temp_key)
+ if isinstance(message, MaildirMessage):
+ # temp's subdir and suffix were specified by message.
+ dominant_subpath = temp_subpath
+ else:
+ # temp's subdir and suffix were defaults from add().
+ dominant_subpath = old_subpath
+ subdir = os.path.dirname(dominant_subpath)
+ if self.colon in dominant_subpath:
+ suffix = self.colon + dominant_subpath.split(self.colon)[-1]
+ else:
+ suffix = ''
+ self.discard(key)
+ tmp_path = os.path.join(self._path, temp_subpath)
+ new_path = os.path.join(self._path, subdir, key + suffix)
+ if isinstance(message, MaildirMessage):
+ os.utime(tmp_path,
+ (os.path.getatime(tmp_path), message.get_date()))
+ # No file modification should be done after the file is moved to its
+ # final position in order to prevent race conditions with changes
+ # from other programs
+ os.rename(tmp_path, new_path)
+
+ def get_message(self, key):
+ """Return a Message representation or raise a KeyError."""
+ subpath = self._lookup(key)
+ with open(os.path.join(self._path, subpath), 'rb') as f:
+ if self._factory:
+ msg = self._factory(f)
+ else:
+ msg = MaildirMessage(f)
+ subdir, name = os.path.split(subpath)
+ msg.set_subdir(subdir)
+ if self.colon in name:
+ msg.set_info(name.split(self.colon)[-1])
+ msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
+ return msg
+
+ def get_bytes(self, key):
+ """Return a bytes representation or raise a KeyError."""
+ with open(os.path.join(self._path, self._lookup(key)), 'rb') as f:
+ return f.read().replace(linesep, b'\n')
+
+ def get_file(self, key):
+ """Return a file-like representation or raise a KeyError."""
+ f = open(os.path.join(self._path, self._lookup(key)), 'rb')
+ return _ProxyFile(f)
+
+ def iterkeys(self):
+ """Return an iterator over keys."""
+ self._refresh()
+ for key in self._toc:
+ try:
+ self._lookup(key)
+ except KeyError:
+ continue
+ yield key
+
+ def __contains__(self, key):
+ """Return True if the keyed message exists, False otherwise."""
+ self._refresh()
+ return key in self._toc
+
+ def __len__(self):
+ """Return a count of messages in the mailbox."""
+ self._refresh()
+ return len(self._toc)
+
+ def flush(self):
+ """Write any pending changes to disk."""
+ # Maildir changes are always written immediately, so there's nothing
+ # to do.
+ pass
+
+ def lock(self):
+ """Lock the mailbox."""
+ return
+
+ def unlock(self):
+ """Unlock the mailbox if it is locked."""
+ return
+
+ def close(self):
+ """Flush and close the mailbox."""
+ return
+
+ def list_folders(self):
+ """Return a list of folder names."""
+ result = []
+ for entry in os.listdir(self._path):
+ if len(entry) > 1 and entry[0] == '.' and \
+ os.path.isdir(os.path.join(self._path, entry)):
+ result.append(entry[1:])
+ return result
+
+ def get_folder(self, folder):
+ """Return a Maildir instance for the named folder."""
+ return Maildir(os.path.join(self._path, '.' + folder),
+ factory=self._factory,
+ create=False)
+
+ def add_folder(self, folder):
+ """Create a folder and return a Maildir instance representing it."""
+ path = os.path.join(self._path, '.' + folder)
+ result = Maildir(path, factory=self._factory)
+ maildirfolder_path = os.path.join(path, 'maildirfolder')
+ if not os.path.exists(maildirfolder_path):
+ os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY,
+ 0o666))
+ return result
+
+ def remove_folder(self, folder):
+ """Delete the named folder, which must be empty."""
+ path = os.path.join(self._path, '.' + folder)
+ for entry in os.listdir(os.path.join(path, 'new')) + \
+ os.listdir(os.path.join(path, 'cur')):
+ if len(entry) < 1 or entry[0] != '.':
+ raise NotEmptyError('Folder contains message(s): %s' % folder)
+ for entry in os.listdir(path):
+ if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
+ os.path.isdir(os.path.join(path, entry)):
+ raise NotEmptyError("Folder contains subdirectory '%s': %s" %
+ (folder, entry))
+ for root, dirs, files in os.walk(path, topdown=False):
+ for entry in files:
+ os.remove(os.path.join(root, entry))
+ for entry in dirs:
+ os.rmdir(os.path.join(root, entry))
+ os.rmdir(path)
+
+ def clean(self):
+ """Delete old files in "tmp"."""
+ now = time.time()
+ for entry in os.listdir(os.path.join(self._path, 'tmp')):
+ path = os.path.join(self._path, 'tmp', entry)
+ if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
+ os.remove(path)
+
+ _count = 1 # This is used to generate unique file names.
+
+ def _create_tmp(self):
+ """Create a file in the tmp subdirectory and open and return it."""
+ now = time.time()
+ hostname = socket.gethostname()
+ if '/' in hostname:
+ hostname = hostname.replace('/', r'\057')
+ if ':' in hostname:
+ hostname = hostname.replace(':', r'\072')
+ uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
+ Maildir._count, hostname)
+ path = os.path.join(self._path, 'tmp', uniq)
+ try:
+ os.stat(path)
+ except FileNotFoundError:
+ Maildir._count += 1
+ try:
+ return _create_carefully(path)
+ except FileExistsError:
+ pass
+
+ # Fall through to here if stat succeeded or open raised EEXIST.
+ raise ExternalClashError('Name clash prevented file creation: %s' %
+ path)
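+
+    # Sketch of the unique names generated above (hostname illustrative):
+    # '<epoch>.M<microseconds>P<pid>Q<counter>.<hostname>', e.g.
+    # '1700000000.M123456P4242Q1.mailhost'; '/' and ':' in the hostname are
+    # escaped as \057 and \072 respectively.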
+
+ def _refresh(self):
+ """Update table of contents mapping."""
+ # If it has been less than two seconds since the last _refresh() call,
+ # we have to unconditionally re-read the mailbox just in case it has
+        # been modified, because os.path.getmtime() has a 2 sec resolution in the
+ # most common worst case (FAT) and a 1 sec resolution typically. This
+ # results in a few unnecessary re-reads when _refresh() is called
+ # multiple times in that interval, but once the clock ticks over, we
+ # will only re-read as needed. Because the filesystem might be being
+ # served by an independent system with its own clock, we record and
+ # compare with the mtimes from the filesystem. Because the other
+ # system's clock might be skewing relative to our clock, we add an
+ # extra delta to our wait. The default is one tenth second, but is an
+ # instance variable and so can be adjusted if dealing with a
+ # particularly skewed or irregular system.
+ if time.time() - self._last_read > 2 + self._skewfactor:
+ refresh = False
+ for subdir in self._toc_mtimes:
+ mtime = os.path.getmtime(self._paths[subdir])
+ if mtime > self._toc_mtimes[subdir]:
+ refresh = True
+ self._toc_mtimes[subdir] = mtime
+ if not refresh:
+ return
+ # Refresh toc
+ self._toc = {}
+ for subdir in self._toc_mtimes:
+ path = self._paths[subdir]
+ for entry in os.listdir(path):
+ p = os.path.join(path, entry)
+ if os.path.isdir(p):
+ continue
+ uniq = entry.split(self.colon)[0]
+ self._toc[uniq] = os.path.join(subdir, entry)
+ self._last_read = time.time()
+
+ def _lookup(self, key):
+ """Use TOC to return subpath for given key, or raise a KeyError."""
+ try:
+ if os.path.exists(os.path.join(self._path, self._toc[key])):
+ return self._toc[key]
+ except KeyError:
+ pass
+ self._refresh()
+ try:
+ return self._toc[key]
+ except KeyError:
+ raise KeyError('No message with key: %s' % key) from None
+
+ # This method is for backward compatibility only.
+ def next(self):
+ """Return the next message in a one-time iteration."""
+ if not hasattr(self, '_onetime_keys'):
+ self._onetime_keys = self.iterkeys()
+ while True:
+ try:
+ return self[next(self._onetime_keys)]
+ except StopIteration:
+ return None
+ except KeyError:
+ continue
+
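+
+# Illustrative sketch (hypothetical directory): a minimal Maildir round trip.
+#
+#     >>> md = Maildir('/tmp/example_maildir')    # creates tmp/, new/, cur/
+#     >>> key = md.add(b'Subject: hello\n\nbody\n')
+#     >>> md[key].get_subdir()
+#     'new'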
+
+class _singlefileMailbox(Mailbox):
+ """A single-file mailbox."""
+
+ def __init__(self, path, factory=None, create=True):
+ """Initialize a single-file mailbox."""
+ Mailbox.__init__(self, path, factory, create)
+ try:
+ f = open(self._path, 'rb+')
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ if create:
+ f = open(self._path, 'wb+')
+ else:
+ raise NoSuchMailboxError(self._path)
+ elif e.errno in (errno.EACCES, errno.EROFS):
+ f = open(self._path, 'rb')
+ else:
+ raise
+ self._file = f
+ self._toc = None
+ self._next_key = 0
+ self._pending = False # No changes require rewriting the file.
+ self._pending_sync = False # No need to sync the file
+ self._locked = False
+ self._file_length = None # Used to record mailbox size
+
+ def add(self, message):
+ """Add message and return assigned key."""
+ self._lookup()
+ self._toc[self._next_key] = self._append_message(message)
+ self._next_key += 1
+ # _append_message appends the message to the mailbox file. We
+ # don't need a full rewrite + rename, sync is enough.
+ self._pending_sync = True
+ return self._next_key - 1
+
+ def remove(self, key):
+ """Remove the keyed message; raise KeyError if it doesn't exist."""
+ self._lookup(key)
+ del self._toc[key]
+ self._pending = True
+
+ def __setitem__(self, key, message):
+ """Replace the keyed message; raise KeyError if it doesn't exist."""
+ self._lookup(key)
+ self._toc[key] = self._append_message(message)
+ self._pending = True
+
+ def iterkeys(self):
+ """Return an iterator over keys."""
+ self._lookup()
+ yield from self._toc.keys()
+
+ def __contains__(self, key):
+ """Return True if the keyed message exists, False otherwise."""
+ self._lookup()
+ return key in self._toc
+
+ def __len__(self):
+ """Return a count of messages in the mailbox."""
+ self._lookup()
+ return len(self._toc)
+
+ def lock(self):
+ """Lock the mailbox."""
+ if not self._locked:
+ _lock_file(self._file)
+ self._locked = True
+
+ def unlock(self):
+ """Unlock the mailbox if it is locked."""
+ if self._locked:
+ _unlock_file(self._file)
+ self._locked = False
+
+ def flush(self):
+ """Write any pending changes to disk."""
+ if not self._pending:
+ if self._pending_sync:
+ # Messages have only been added, so syncing the file
+ # is enough.
+ _sync_flush(self._file)
+ self._pending_sync = False
+ return
+
+ # In order to be writing anything out at all, self._toc must
+ # already have been generated (and presumably has been modified
+ # by adding or deleting an item).
+ assert self._toc is not None
+
+ # Check length of self._file; if it's changed, some other process
+ # has modified the mailbox since we scanned it.
+ self._file.seek(0, 2)
+ cur_len = self._file.tell()
+ if cur_len != self._file_length:
+ raise ExternalClashError('Size of mailbox file changed '
+ '(expected %i, found %i)' %
+ (self._file_length, cur_len))
+
+ new_file = _create_temporary(self._path)
+ try:
+ new_toc = {}
+ self._pre_mailbox_hook(new_file)
+ for key in sorted(self._toc.keys()):
+ start, stop = self._toc[key]
+ self._file.seek(start)
+ self._pre_message_hook(new_file)
+ new_start = new_file.tell()
+ while True:
+ buffer = self._file.read(min(4096,
+ stop - self._file.tell()))
+ if not buffer:
+ break
+ new_file.write(buffer)
+ new_toc[key] = (new_start, new_file.tell())
+ self._post_message_hook(new_file)
+ self._file_length = new_file.tell()
+ except:
+ new_file.close()
+ os.remove(new_file.name)
+ raise
+ _sync_close(new_file)
+ # self._file is about to get replaced, so no need to sync.
+ self._file.close()
+ # Make sure the new file's mode is the same as the old file's
+ mode = os.stat(self._path).st_mode
+ os.chmod(new_file.name, mode)
+ try:
+ os.rename(new_file.name, self._path)
+ except FileExistsError:
+ os.remove(self._path)
+ os.rename(new_file.name, self._path)
+ self._file = open(self._path, 'rb+')
+ self._toc = new_toc
+ self._pending = False
+ self._pending_sync = False
+ if self._locked:
+ _lock_file(self._file, dotlock=False)
+
+ def _pre_mailbox_hook(self, f):
+ """Called before writing the mailbox to file f."""
+ return
+
+ def _pre_message_hook(self, f):
+ """Called before writing each message to file f."""
+ return
+
+ def _post_message_hook(self, f):
+ """Called after writing each message to file f."""
+ return
+
+ def close(self):
+ """Flush and close the mailbox."""
+ try:
+ self.flush()
+ finally:
+ try:
+ if self._locked:
+ self.unlock()
+ finally:
+ self._file.close() # Sync has been done by self.flush() above.
+
+ def _lookup(self, key=None):
+ """Return (start, stop) or raise KeyError."""
+ if self._toc is None:
+ self._generate_toc()
+ if key is not None:
+ try:
+ return self._toc[key]
+ except KeyError:
+ raise KeyError('No message with key: %s' % key) from None
+
+ def _append_message(self, message):
+ """Append message to mailbox and return (start, stop) offsets."""
+ self._file.seek(0, 2)
+ before = self._file.tell()
+ if len(self._toc) == 0 and not self._pending:
+ # This is the first message, and the _pre_mailbox_hook
+ # hasn't yet been called. If self._pending is True,
+ # messages have been removed, so _pre_mailbox_hook must
+ # have been called already.
+ self._pre_mailbox_hook(self._file)
+ try:
+ self._pre_message_hook(self._file)
+ offsets = self._install_message(message)
+ self._post_message_hook(self._file)
+ except BaseException:
+ self._file.truncate(before)
+ raise
+ self._file.flush()
+ self._file_length = self._file.tell() # Record current length of mailbox
+ return offsets
+
+
+
+class _mboxMMDF(_singlefileMailbox):
+ """An mbox or MMDF mailbox."""
+
+ _mangle_from_ = True
+
+ def get_message(self, key):
+ """Return a Message representation or raise a KeyError."""
+ start, stop = self._lookup(key)
+ self._file.seek(start)
+ from_line = self._file.readline().replace(linesep, b'')
+ string = self._file.read(stop - self._file.tell())
+ msg = self._message_factory(string.replace(linesep, b'\n'))
+ msg.set_from(from_line[5:].decode('ascii'))
+ return msg
+
+ def get_string(self, key, from_=False):
+ """Return a string representation or raise a KeyError."""
+ return email.message_from_bytes(
+ self.get_bytes(key, from_)).as_string(unixfrom=from_)
+
+ def get_bytes(self, key, from_=False):
+        """Return a bytes representation or raise a KeyError."""
+ start, stop = self._lookup(key)
+ self._file.seek(start)
+ if not from_:
+ self._file.readline()
+ string = self._file.read(stop - self._file.tell())
+ return string.replace(linesep, b'\n')
+
+ def get_file(self, key, from_=False):
+ """Return a file-like representation or raise a KeyError."""
+ start, stop = self._lookup(key)
+ self._file.seek(start)
+ if not from_:
+ self._file.readline()
+ return _PartialFile(self._file, self._file.tell(), stop)
+
+ def _install_message(self, message):
+ """Format a message and blindly write to self._file."""
+ from_line = None
+ if isinstance(message, str):
+ message = self._string_to_bytes(message)
+ if isinstance(message, bytes) and message.startswith(b'From '):
+ newline = message.find(b'\n')
+ if newline != -1:
+ from_line = message[:newline]
+ message = message[newline + 1:]
+ else:
+ from_line = message
+ message = b''
+ elif isinstance(message, _mboxMMDFMessage):
+ author = message.get_from().encode('ascii')
+ from_line = b'From ' + author
+ elif isinstance(message, email.message.Message):
+ from_line = message.get_unixfrom() # May be None.
+ if from_line is not None:
+ from_line = from_line.encode('ascii')
+ if from_line is None:
+ from_line = b'From MAILER-DAEMON ' + time.asctime(time.gmtime()).encode()
+ start = self._file.tell()
+ self._file.write(from_line + linesep)
+ self._dump_message(message, self._file, self._mangle_from_)
+ stop = self._file.tell()
+ return (start, stop)
+
+
+class mbox(_mboxMMDF):
+ """A classic mbox mailbox."""
+
+ _mangle_from_ = True
+
+ # All messages must end in a newline character, and
+    # _post_message_hook outputs an empty line between messages.
+ _append_newline = True
+
+ def __init__(self, path, factory=None, create=True):
+ """Initialize an mbox mailbox."""
+ self._message_factory = mboxMessage
+ _mboxMMDF.__init__(self, path, factory, create)
+
+ def _post_message_hook(self, f):
+ """Called after writing each message to file f."""
+ f.write(linesep)
+
+ def _generate_toc(self):
+ """Generate key-to-(start, stop) table of contents."""
+ starts, stops = [], []
+ last_was_empty = False
+ self._file.seek(0)
+ while True:
+ line_pos = self._file.tell()
+ line = self._file.readline()
+ if line.startswith(b'From '):
+ if len(stops) < len(starts):
+ if last_was_empty:
+ stops.append(line_pos - len(linesep))
+ else:
+ # The last line before the "From " line wasn't
+ # blank, but we consider it a start of a
+ # message anyway.
+ stops.append(line_pos)
+ starts.append(line_pos)
+ last_was_empty = False
+ elif not line:
+ if last_was_empty:
+ stops.append(line_pos - len(linesep))
+ else:
+ stops.append(line_pos)
+ break
+ elif line == linesep:
+ last_was_empty = True
+ else:
+ last_was_empty = False
+ self._toc = dict(enumerate(zip(starts, stops)))
+ self._next_key = len(self._toc)
+ self._file_length = self._file.tell()
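+
+    # Sketch of the on-disk layout the scanner above expects: every message
+    # starts at a line beginning with b'From ', and _post_message_hook writes
+    # a blank separator line after each message, e.g. (addresses illustrative)
+    #
+    #     From alice@example.com Thu Jan  1 00:00:00 1970
+    #     Subject: first
+    #
+    #     first body
+    #
+    #     From bob@example.com ...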
+
+
+class MMDF(_mboxMMDF):
+ """An MMDF mailbox."""
+
+ def __init__(self, path, factory=None, create=True):
+ """Initialize an MMDF mailbox."""
+ self._message_factory = MMDFMessage
+ _mboxMMDF.__init__(self, path, factory, create)
+
+ def _pre_message_hook(self, f):
+ """Called before writing each message to file f."""
+ f.write(b'\001\001\001\001' + linesep)
+
+ def _post_message_hook(self, f):
+ """Called after writing each message to file f."""
+ f.write(linesep + b'\001\001\001\001' + linesep)
+
+ def _generate_toc(self):
+ """Generate key-to-(start, stop) table of contents."""
+ starts, stops = [], []
+ self._file.seek(0)
+ next_pos = 0
+ while True:
+ line_pos = next_pos
+ line = self._file.readline()
+ next_pos = self._file.tell()
+ if line.startswith(b'\001\001\001\001' + linesep):
+ starts.append(next_pos)
+ while True:
+ line_pos = next_pos
+ line = self._file.readline()
+ next_pos = self._file.tell()
+ if line == b'\001\001\001\001' + linesep:
+ stops.append(line_pos - len(linesep))
+ break
+ elif not line:
+ stops.append(line_pos)
+ break
+ elif not line:
+ break
+ self._toc = dict(enumerate(zip(starts, stops)))
+ self._next_key = len(self._toc)
+ self._file.seek(0, 2)
+ self._file_length = self._file.tell()
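+
+    # Sketch of the MMDF framing parsed above: each message is bracketed by
+    # delimiter lines of four Control-A bytes (b'\001\001\001\001'), e.g.
+    #
+    #     \001\001\001\001
+    #     Subject: first
+    #
+    #     first body
+    #     \001\001\001\001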
+
+
+class MH(Mailbox):
+ """An MH mailbox."""
+
+ def __init__(self, path, factory=None, create=True):
+ """Initialize an MH instance."""
+ Mailbox.__init__(self, path, factory, create)
+ if not os.path.exists(self._path):
+ if create:
+ os.mkdir(self._path, 0o700)
+ os.close(os.open(os.path.join(self._path, '.mh_sequences'),
+ os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600))
+ else:
+ raise NoSuchMailboxError(self._path)
+ self._locked = False
+
+ def add(self, message):
+ """Add message and return assigned key."""
+ keys = self.keys()
+ if len(keys) == 0:
+ new_key = 1
+ else:
+ new_key = max(keys) + 1
+ new_path = os.path.join(self._path, str(new_key))
+ f = _create_carefully(new_path)
+ closed = False
+ try:
+ if self._locked:
+ _lock_file(f)
+ try:
+ try:
+ self._dump_message(message, f)
+ except BaseException:
+ # Unlock and close so it can be deleted on Windows
+ if self._locked:
+ _unlock_file(f)
+ _sync_close(f)
+ closed = True
+ os.remove(new_path)
+ raise
+ if isinstance(message, MHMessage):
+ self._dump_sequences(message, new_key)
+ finally:
+ if self._locked:
+ _unlock_file(f)
+ finally:
+ if not closed:
+ _sync_close(f)
+ return new_key
+
+ def remove(self, key):
+ """Remove the keyed message; raise KeyError if it doesn't exist."""
+ path = os.path.join(self._path, str(key))
+ try:
+ f = open(path, 'rb+')
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise KeyError('No message with key: %s' % key)
+ else:
+ raise
+ else:
+ f.close()
+ os.remove(path)
+
+ def __setitem__(self, key, message):
+ """Replace the keyed message; raise KeyError if it doesn't exist."""
+ path = os.path.join(self._path, str(key))
+ try:
+ f = open(path, 'rb+')
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise KeyError('No message with key: %s' % key)
+ else:
+ raise
+ try:
+ if self._locked:
+ _lock_file(f)
+ try:
+ os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
+ self._dump_message(message, f)
+ if isinstance(message, MHMessage):
+ self._dump_sequences(message, key)
+ finally:
+ if self._locked:
+ _unlock_file(f)
+ finally:
+ _sync_close(f)
+
+ def get_message(self, key):
+ """Return a Message representation or raise a KeyError."""
+ try:
+ if self._locked:
+ f = open(os.path.join(self._path, str(key)), 'rb+')
+ else:
+ f = open(os.path.join(self._path, str(key)), 'rb')
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise KeyError('No message with key: %s' % key)
+ else:
+ raise
+ with f:
+ if self._locked:
+ _lock_file(f)
+ try:
+ msg = MHMessage(f)
+ finally:
+ if self._locked:
+ _unlock_file(f)
+ for name, key_list in self.get_sequences().items():
+ if key in key_list:
+ msg.add_sequence(name)
+ return msg
+
+ def get_bytes(self, key):
+ """Return a bytes representation or raise a KeyError."""
+ try:
+ if self._locked:
+ f = open(os.path.join(self._path, str(key)), 'rb+')
+ else:
+ f = open(os.path.join(self._path, str(key)), 'rb')
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise KeyError('No message with key: %s' % key)
+ else:
+ raise
+ with f:
+ if self._locked:
+ _lock_file(f)
+ try:
+ return f.read().replace(linesep, b'\n')
+ finally:
+ if self._locked:
+ _unlock_file(f)
+
+ def get_file(self, key):
+ """Return a file-like representation or raise a KeyError."""
+ try:
+ f = open(os.path.join(self._path, str(key)), 'rb')
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise KeyError('No message with key: %s' % key)
+ else:
+ raise
+ return _ProxyFile(f)
+
+ def iterkeys(self):
+ """Return an iterator over keys."""
+ return iter(sorted(int(entry) for entry in os.listdir(self._path)
+ if entry.isdigit()))
+
+ def __contains__(self, key):
+ """Return True if the keyed message exists, False otherwise."""
+ return os.path.exists(os.path.join(self._path, str(key)))
+
+ def __len__(self):
+ """Return a count of messages in the mailbox."""
+ return len(list(self.iterkeys()))
+
+ def lock(self):
+ """Lock the mailbox."""
+ if not self._locked:
+ self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
+ _lock_file(self._file)
+ self._locked = True
+
+ def unlock(self):
+ """Unlock the mailbox if it is locked."""
+ if self._locked:
+ _unlock_file(self._file)
+ _sync_close(self._file)
+ del self._file
+ self._locked = False
+
+ def flush(self):
+ """Write any pending changes to the disk."""
+ return
+
+ def close(self):
+ """Flush and close the mailbox."""
+ if self._locked:
+ self.unlock()
+
+ def list_folders(self):
+ """Return a list of folder names."""
+ result = []
+ for entry in os.listdir(self._path):
+ if os.path.isdir(os.path.join(self._path, entry)):
+ result.append(entry)
+ return result
+
+ def get_folder(self, folder):
+ """Return an MH instance for the named folder."""
+ return MH(os.path.join(self._path, folder),
+ factory=self._factory, create=False)
+
+ def add_folder(self, folder):
+ """Create a folder and return an MH instance representing it."""
+ return MH(os.path.join(self._path, folder),
+ factory=self._factory)
+
+ def remove_folder(self, folder):
+ """Delete the named folder, which must be empty."""
+ path = os.path.join(self._path, folder)
+ entries = os.listdir(path)
+ if entries == ['.mh_sequences']:
+ os.remove(os.path.join(path, '.mh_sequences'))
+ elif entries == []:
+ pass
+ else:
+            raise NotEmptyError('Folder not empty: %s' % path)
+ os.rmdir(path)
+
+ def get_sequences(self):
+ """Return a name-to-key-list dictionary to define each sequence."""
+ results = {}
+ with open(os.path.join(self._path, '.mh_sequences'), 'r', encoding='ASCII') as f:
+ all_keys = set(self.keys())
+ for line in f:
+ try:
+ name, contents = line.split(':')
+ keys = set()
+ for spec in contents.split():
+ if spec.isdigit():
+ keys.add(int(spec))
+ else:
+ start, stop = (int(x) for x in spec.split('-'))
+ keys.update(range(start, stop + 1))
+ results[name] = [key for key in sorted(keys) \
+ if key in all_keys]
+ if len(results[name]) == 0:
+ del results[name]
+ except ValueError:
+ raise FormatError('Invalid sequence specification: %s' %
+ line.rstrip())
+ return results
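+
+    # Sketch of the .mh_sequences syntax parsed above: one 'name: keys' line
+    # per sequence, where each key spec is a single number or a 'start-stop'
+    # range, e.g. the line
+    #
+    #     flagged: 2 5-8 12
+    #
+    # yields {'flagged': [2, 5, 6, 7, 8, 12]}, filtered to keys that actually
+    # exist in the folder.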
+
+ def set_sequences(self, sequences):
+ """Set sequences using the given name-to-key-list dictionary."""
+ f = open(os.path.join(self._path, '.mh_sequences'), 'r+', encoding='ASCII')
+ try:
+ os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
+ for name, keys in sequences.items():
+ if len(keys) == 0:
+ continue
+ f.write(name + ':')
+ prev = None
+ completing = False
+ for key in sorted(set(keys)):
+ if key - 1 == prev:
+ if not completing:
+ completing = True
+ f.write('-')
+ elif completing:
+ completing = False
+ f.write('%s %s' % (prev, key))
+ else:
+ f.write(' %s' % key)
+ prev = key
+ if completing:
+ f.write(str(prev) + '\n')
+ else:
+ f.write('\n')
+ finally:
+ _sync_close(f)
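+
+    # Sketch: the writer above run-length encodes consecutive keys, so
+    # set_sequences({'flagged': [1, 2, 3, 7]}) stores the line 'flagged: 1-3 7'.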
+
+ def pack(self):
+ """Re-name messages to eliminate numbering gaps. Invalidates keys."""
+ sequences = self.get_sequences()
+ prev = 0
+ changes = []
+ for key in self.iterkeys():
+ if key - 1 != prev:
+ changes.append((key, prev + 1))
+ try:
+ os.link(os.path.join(self._path, str(key)),
+ os.path.join(self._path, str(prev + 1)))
+ except (AttributeError, PermissionError):
+ os.rename(os.path.join(self._path, str(key)),
+ os.path.join(self._path, str(prev + 1)))
+ else:
+ os.unlink(os.path.join(self._path, str(key)))
+ prev += 1
+ self._next_key = prev + 1
+ if len(changes) == 0:
+ return
+ for name, key_list in sequences.items():
+ for old, new in changes:
+ if old in key_list:
+ key_list[key_list.index(old)] = new
+ self.set_sequences(sequences)
+
+ def _dump_sequences(self, message, key):
+ """Inspect a new MHMessage and update sequences appropriately."""
+ pending_sequences = message.get_sequences()
+ all_sequences = self.get_sequences()
+ for name, key_list in all_sequences.items():
+ if name in pending_sequences:
+ key_list.append(key)
+ elif key in key_list:
+ del key_list[key_list.index(key)]
+ for sequence in pending_sequences:
+ if sequence not in all_sequences:
+ all_sequences[sequence] = [key]
+ self.set_sequences(all_sequences)
+
+
+class Babyl(_singlefileMailbox):
+ """An Rmail-style Babyl mailbox."""
+
+ _special_labels = frozenset({'unseen', 'deleted', 'filed', 'answered',
+ 'forwarded', 'edited', 'resent'})
+
+ def __init__(self, path, factory=None, create=True):
+ """Initialize a Babyl mailbox."""
+ _singlefileMailbox.__init__(self, path, factory, create)
+ self._labels = {}
+
+ def add(self, message):
+ """Add message and return assigned key."""
+ key = _singlefileMailbox.add(self, message)
+ if isinstance(message, BabylMessage):
+ self._labels[key] = message.get_labels()
+ return key
+
+ def remove(self, key):
+ """Remove the keyed message; raise KeyError if it doesn't exist."""
+ _singlefileMailbox.remove(self, key)
+ if key in self._labels:
+ del self._labels[key]
+
+ def __setitem__(self, key, message):
+ """Replace the keyed message; raise KeyError if it doesn't exist."""
+ _singlefileMailbox.__setitem__(self, key, message)
+ if isinstance(message, BabylMessage):
+ self._labels[key] = message.get_labels()
+
+ def get_message(self, key):
+ """Return a Message representation or raise a KeyError."""
+ start, stop = self._lookup(key)
+ self._file.seek(start)
+ self._file.readline() # Skip b'1,' line specifying labels.
+ original_headers = io.BytesIO()
+ while True:
+ line = self._file.readline()
+ if line == b'*** EOOH ***' + linesep or not line:
+ break
+ original_headers.write(line.replace(linesep, b'\n'))
+ visible_headers = io.BytesIO()
+ while True:
+ line = self._file.readline()
+ if line == linesep or not line:
+ break
+ visible_headers.write(line.replace(linesep, b'\n'))
+ # Read up to the stop, or to the end
+ n = stop - self._file.tell()
+ assert n >= 0
+ body = self._file.read(n)
+ body = body.replace(linesep, b'\n')
+ msg = BabylMessage(original_headers.getvalue() + body)
+ msg.set_visible(visible_headers.getvalue())
+ if key in self._labels:
+ msg.set_labels(self._labels[key])
+ return msg
+
+ def get_bytes(self, key):
+        """Return a bytes representation or raise a KeyError."""
+ start, stop = self._lookup(key)
+ self._file.seek(start)
+ self._file.readline() # Skip b'1,' line specifying labels.
+ original_headers = io.BytesIO()
+ while True:
+ line = self._file.readline()
+ if line == b'*** EOOH ***' + linesep or not line:
+ break
+ original_headers.write(line.replace(linesep, b'\n'))
+ while True:
+ line = self._file.readline()
+ if line == linesep or not line:
+ break
+ headers = original_headers.getvalue()
+ n = stop - self._file.tell()
+ assert n >= 0
+ data = self._file.read(n)
+ data = data.replace(linesep, b'\n')
+ return headers + data
+
+ def get_file(self, key):
+ """Return a file-like representation or raise a KeyError."""
+ return io.BytesIO(self.get_bytes(key).replace(b'\n', linesep))
+
+ def get_labels(self):
+ """Return a list of user-defined labels in the mailbox."""
+ self._lookup()
+ labels = set()
+ for label_list in self._labels.values():
+ labels.update(label_list)
+ labels.difference_update(self._special_labels)
+ return list(labels)
+
+ def _generate_toc(self):
+ """Generate key-to-(start, stop) table of contents."""
+ starts, stops = [], []
+ self._file.seek(0)
+ next_pos = 0
+ label_lists = []
+ while True:
+ line_pos = next_pos
+ line = self._file.readline()
+ next_pos = self._file.tell()
+ if line == b'\037\014' + linesep:
+ if len(stops) < len(starts):
+ stops.append(line_pos - len(linesep))
+ starts.append(next_pos)
+ labels = [label.strip() for label
+ in self._file.readline()[1:].split(b',')
+ if label.strip()]
+ label_lists.append(labels)
+ elif line == b'\037' or line == b'\037' + linesep:
+ if len(stops) < len(starts):
+ stops.append(line_pos - len(linesep))
+ elif not line:
+ stops.append(line_pos - len(linesep))
+ break
+ self._toc = dict(enumerate(zip(starts, stops)))
+ self._labels = dict(enumerate(label_lists))
+ self._next_key = len(self._toc)
+ self._file.seek(0, 2)
+ self._file_length = self._file.tell()
+
+ def _pre_mailbox_hook(self, f):
+ """Called before writing the mailbox to file f."""
+ babyl = b'BABYL OPTIONS:' + linesep
+ babyl += b'Version: 5' + linesep
+ labels = self.get_labels()
+ labels = (label.encode() for label in labels)
+ babyl += b'Labels:' + b','.join(labels) + linesep
+ babyl += b'\037'
+ f.write(babyl)
+
+ def _pre_message_hook(self, f):
+ """Called before writing each message to file f."""
+ f.write(b'\014' + linesep)
+
+ def _post_message_hook(self, f):
+ """Called after writing each message to file f."""
+ f.write(linesep + b'\037')
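+
+    # Sketch of the Babyl framing produced by the hooks above: the file opens
+    # with a BABYL OPTIONS section terminated by b'\037', and each message is
+    # introduced by b'\014' + linesep and closed by linesep + b'\037':
+    #
+    #     BABYL OPTIONS:
+    #     Version: 5
+    #     Labels:<user-defined labels>
+    #     \037\014
+    #     1,,<labels line>
+    #     <original headers>
+    #     *** EOOH ***
+    #     <visible headers>
+    #
+    #     <body>
+    #     \037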
+
+ def _install_message(self, message):
+ """Write message contents and return (start, stop)."""
+ start = self._file.tell()
+ if isinstance(message, BabylMessage):
+ special_labels = []
+ labels = []
+ for label in message.get_labels():
+ if label in self._special_labels:
+ special_labels.append(label)
+ else:
+ labels.append(label)
+ self._file.write(b'1')
+ for label in special_labels:
+ self._file.write(b', ' + label.encode())
+ self._file.write(b',,')
+ for label in labels:
+ self._file.write(b' ' + label.encode() + b',')
+ self._file.write(linesep)
+ else:
+ self._file.write(b'1,,' + linesep)
+ if isinstance(message, email.message.Message):
+ orig_buffer = io.BytesIO()
+ orig_generator = email.generator.BytesGenerator(orig_buffer, False, 0)
+ orig_generator.flatten(message)
+ orig_buffer.seek(0)
+ while True:
+ line = orig_buffer.readline()
+ self._file.write(line.replace(b'\n', linesep))
+ if line == b'\n' or not line:
+ break
+ self._file.write(b'*** EOOH ***' + linesep)
+ if isinstance(message, BabylMessage):
+ vis_buffer = io.BytesIO()
+ vis_generator = email.generator.BytesGenerator(vis_buffer, False, 0)
+ vis_generator.flatten(message.get_visible())
+ while True:
+ line = vis_buffer.readline()
+ self._file.write(line.replace(b'\n', linesep))
+ if line == b'\n' or not line:
+ break
+ else:
+ orig_buffer.seek(0)
+ while True:
+ line = orig_buffer.readline()
+ self._file.write(line.replace(b'\n', linesep))
+ if line == b'\n' or not line:
+ break
+ while True:
+ buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
+ if not buffer:
+ break
+ self._file.write(buffer.replace(b'\n', linesep))
+ elif isinstance(message, (bytes, str, io.StringIO)):
+ if isinstance(message, io.StringIO):
+ warnings.warn("Use of StringIO input is deprecated, "
+ "use BytesIO instead", DeprecationWarning, 3)
+ message = message.getvalue()
+ if isinstance(message, str):
+ message = self._string_to_bytes(message)
+ body_start = message.find(b'\n\n') + 2
+ if body_start - 2 != -1:
+ self._file.write(message[:body_start].replace(b'\n', linesep))
+ self._file.write(b'*** EOOH ***' + linesep)
+ self._file.write(message[:body_start].replace(b'\n', linesep))
+ self._file.write(message[body_start:].replace(b'\n', linesep))
+ else:
+ self._file.write(b'*** EOOH ***' + linesep + linesep)
+ self._file.write(message.replace(b'\n', linesep))
+ elif hasattr(message, 'readline'):
+ if hasattr(message, 'buffer'):
+ warnings.warn("Use of text mode files is deprecated, "
+ "use a binary mode file instead", DeprecationWarning, 3)
+ message = message.buffer
+ original_pos = message.tell()
+ first_pass = True
+ while True:
+ line = message.readline()
+ # Universal newline support.
+ if line.endswith(b'\r\n'):
+ line = line[:-2] + b'\n'
+ elif line.endswith(b'\r'):
+ line = line[:-1] + b'\n'
+ self._file.write(line.replace(b'\n', linesep))
+ if line == b'\n' or not line:
+ if first_pass:
+ first_pass = False
+ self._file.write(b'*** EOOH ***' + linesep)
+ message.seek(original_pos)
+ else:
+ break
+ while True:
+ line = message.readline()
+ if not line:
+ break
+ # Universal newline support.
+ if line.endswith(b'\r\n'):
+ line = line[:-2] + linesep
+ elif line.endswith(b'\r'):
+ line = line[:-1] + linesep
+ elif line.endswith(b'\n'):
+ line = line[:-1] + linesep
+ self._file.write(line)
+ else:
+ raise TypeError('Invalid message type: %s' % type(message))
+ stop = self._file.tell()
+ return (start, stop)
+
+
+class Message(email.message.Message):
+ """Message with mailbox-format-specific properties."""
+
+ def __init__(self, message=None):
+ """Initialize a Message instance."""
+ if isinstance(message, email.message.Message):
+ self._become_message(copy.deepcopy(message))
+ if isinstance(message, Message):
+ message._explain_to(self)
+ elif isinstance(message, bytes):
+ self._become_message(email.message_from_bytes(message))
+ elif isinstance(message, str):
+ self._become_message(email.message_from_string(message))
+ elif isinstance(message, io.TextIOWrapper):
+ self._become_message(email.message_from_file(message))
+ elif hasattr(message, "read"):
+ self._become_message(email.message_from_binary_file(message))
+ elif message is None:
+ email.message.Message.__init__(self)
+ else:
+ raise TypeError('Invalid message type: %s' % type(message))
+
+ def _become_message(self, message):
+ """Assume the non-format-specific state of message."""
+ type_specific = getattr(message, '_type_specific_attributes', [])
+ for name in message.__dict__:
+ if name not in type_specific:
+ self.__dict__[name] = message.__dict__[name]
+
+ def _explain_to(self, message):
+ """Copy format-specific state to message insofar as possible."""
+ if isinstance(message, Message):
+ return # There's nothing format-specific to explain.
+ else:
+ raise TypeError('Cannot convert to specified type')
+
+
+class MaildirMessage(Message):
+ """Message with Maildir-specific properties."""
+
+ _type_specific_attributes = ['_subdir', '_info', '_date']
+
+ def __init__(self, message=None):
+ """Initialize a MaildirMessage instance."""
+ self._subdir = 'new'
+ self._info = ''
+ self._date = time.time()
+ Message.__init__(self, message)
+
+ def get_subdir(self):
+ """Return 'new' or 'cur'."""
+ return self._subdir
+
+ def set_subdir(self, subdir):
+ """Set subdir to 'new' or 'cur'."""
+ if subdir == 'new' or subdir == 'cur':
+ self._subdir = subdir
+ else:
+ raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
+
+ def get_flags(self):
+ """Return as a string the flags that are set."""
+ if self._info.startswith('2,'):
+ return self._info[2:]
+ else:
+ return ''
+
+ def set_flags(self, flags):
+ """Set the given flags and unset all others."""
+ self._info = '2,' + ''.join(sorted(flags))
+
+ def add_flag(self, flag):
+ """Set the given flag(s) without changing others."""
+ self.set_flags(''.join(set(self.get_flags()) | set(flag)))
+
+ def remove_flag(self, flag):
+ """Unset the given string flag(s) without changing others."""
+ if self.get_flags():
+ self.set_flags(''.join(set(self.get_flags()) - set(flag)))
+
+ def get_date(self):
+ """Return delivery date of message, in seconds since the epoch."""
+ return self._date
+
+ def set_date(self, date):
+ """Set delivery date of message, in seconds since the epoch."""
+ try:
+ self._date = float(date)
+ except ValueError:
+ raise TypeError("can't convert to float: %s" % date) from None
+
+ def get_info(self):
+ """Get the message's "info" as a string."""
+ return self._info
+
+ def set_info(self, info):
+ """Set the message's "info" string."""
+ if isinstance(info, str):
+ self._info = info
+ else:
+ raise TypeError('info must be a string: %s' % type(info))
+
+ def _explain_to(self, message):
+ """Copy Maildir-specific state to message insofar as possible."""
+ if isinstance(message, MaildirMessage):
+ message.set_flags(self.get_flags())
+ message.set_subdir(self.get_subdir())
+ message.set_date(self.get_date())
+ elif isinstance(message, _mboxMMDFMessage):
+ flags = set(self.get_flags())
+ if 'S' in flags:
+ message.add_flag('R')
+ if self.get_subdir() == 'cur':
+ message.add_flag('O')
+ if 'T' in flags:
+ message.add_flag('D')
+ if 'F' in flags:
+ message.add_flag('F')
+ if 'R' in flags:
+ message.add_flag('A')
+ message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
+ elif isinstance(message, MHMessage):
+ flags = set(self.get_flags())
+ if 'S' not in flags:
+ message.add_sequence('unseen')
+ if 'R' in flags:
+ message.add_sequence('replied')
+ if 'F' in flags:
+ message.add_sequence('flagged')
+ elif isinstance(message, BabylMessage):
+ flags = set(self.get_flags())
+ if 'S' not in flags:
+ message.add_label('unseen')
+ if 'T' in flags:
+ message.add_label('deleted')
+ if 'R' in flags:
+ message.add_label('answered')
+ if 'P' in flags:
+ message.add_label('forwarded')
+ elif isinstance(message, Message):
+ pass
+ else:
+ raise TypeError('Cannot convert to specified type: %s' %
+ type(message))
+
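+# Illustrative conversion (a sketch, not part of the module; the flag
+# mapping follows the _explain_to() tables above):
+#
+#     >>> src = MaildirMessage()
+#     >>> src.set_flags('FS')        # flagged ('F') and seen ('S')
+#     >>> dst = mboxMessage(src)     # conversion runs src._explain_to(dst)
+#     >>> sorted(dst.get_flags())
+#     ['F', 'R']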
+
+class _mboxMMDFMessage(Message):
+ """Message with mbox- or MMDF-specific properties."""
+
+ _type_specific_attributes = ['_from']
+
+ def __init__(self, message=None):
+ """Initialize an mboxMMDFMessage instance."""
+ self.set_from('MAILER-DAEMON', True)
+ if isinstance(message, email.message.Message):
+ unixfrom = message.get_unixfrom()
+ if unixfrom is not None and unixfrom.startswith('From '):
+ self.set_from(unixfrom[5:])
+ Message.__init__(self, message)
+
+ def get_from(self):
+ """Return contents of "From " line."""
+ return self._from
+
+ def set_from(self, from_, time_=None):
+ """Set "From " line, formatting and appending time_ if specified."""
+ if time_ is not None:
+ if time_ is True:
+ time_ = time.gmtime()
+ from_ += ' ' + time.asctime(time_)
+ self._from = from_
+
+ def get_flags(self):
+ """Return as a string the flags that are set."""
+ return self.get('Status', '') + self.get('X-Status', '')
+
+ def set_flags(self, flags):
+ """Set the given flags and unset all others."""
+ flags = set(flags)
+ status_flags, xstatus_flags = '', ''
+ for flag in ('R', 'O'):
+ if flag in flags:
+ status_flags += flag
+ flags.remove(flag)
+ for flag in ('D', 'F', 'A'):
+ if flag in flags:
+ xstatus_flags += flag
+ flags.remove(flag)
+ xstatus_flags += ''.join(sorted(flags))
+ try:
+ self.replace_header('Status', status_flags)
+ except KeyError:
+ self.add_header('Status', status_flags)
+ try:
+ self.replace_header('X-Status', xstatus_flags)
+ except KeyError:
+ self.add_header('X-Status', xstatus_flags)
+
+ def add_flag(self, flag):
+ """Set the given flag(s) without changing others."""
+ self.set_flags(''.join(set(self.get_flags()) | set(flag)))
+
+ def remove_flag(self, flag):
+ """Unset the given string flag(s) without changing others."""
+ if 'Status' in self or 'X-Status' in self:
+ self.set_flags(''.join(set(self.get_flags()) - set(flag)))
+
+ def _explain_to(self, message):
+ """Copy mbox- or MMDF-specific state to message insofar as possible."""
+ if isinstance(message, MaildirMessage):
+ flags = set(self.get_flags())
+ if 'O' in flags:
+ message.set_subdir('cur')
+ if 'F' in flags:
+ message.add_flag('F')
+ if 'A' in flags:
+ message.add_flag('R')
+ if 'R' in flags:
+ message.add_flag('S')
+ if 'D' in flags:
+ message.add_flag('T')
+ del message['status']
+ del message['x-status']
+ maybe_date = ' '.join(self.get_from().split()[-5:])
+ try:
+ message.set_date(calendar.timegm(time.strptime(maybe_date,
+ '%a %b %d %H:%M:%S %Y')))
+ except (ValueError, OverflowError):
+ pass
+ elif isinstance(message, _mboxMMDFMessage):
+ message.set_flags(self.get_flags())
+ message.set_from(self.get_from())
+ elif isinstance(message, MHMessage):
+ flags = set(self.get_flags())
+ if 'R' not in flags:
+ message.add_sequence('unseen')
+ if 'A' in flags:
+ message.add_sequence('replied')
+ if 'F' in flags:
+ message.add_sequence('flagged')
+ del message['status']
+ del message['x-status']
+ elif isinstance(message, BabylMessage):
+ flags = set(self.get_flags())
+ if 'R' not in flags:
+ message.add_label('unseen')
+ if 'D' in flags:
+ message.add_label('deleted')
+ if 'A' in flags:
+ message.add_label('answered')
+ del message['status']
+ del message['x-status']
+ elif isinstance(message, Message):
+ pass
+ else:
+ raise TypeError('Cannot convert to specified type: %s' %
+ type(message))
+
+
+class mboxMessage(_mboxMMDFMessage):
+ """Message with mbox-specific properties."""
+
+
+class MHMessage(Message):
+ """Message with MH-specific properties."""
+
+ _type_specific_attributes = ['_sequences']
+
+ def __init__(self, message=None):
+ """Initialize an MHMessage instance."""
+ self._sequences = []
+ Message.__init__(self, message)
+
+ def get_sequences(self):
+ """Return a list of sequences that include the message."""
+ return self._sequences[:]
+
+ def set_sequences(self, sequences):
+ """Set the list of sequences that include the message."""
+ self._sequences = list(sequences)
+
+ def add_sequence(self, sequence):
+ """Add sequence to list of sequences including the message."""
+ if isinstance(sequence, str):
+            if sequence not in self._sequences:
+ self._sequences.append(sequence)
+ else:
+ raise TypeError('sequence type must be str: %s' % type(sequence))
+
+ def remove_sequence(self, sequence):
+ """Remove sequence from the list of sequences including the message."""
+ try:
+ self._sequences.remove(sequence)
+ except ValueError:
+ pass
+
+ def _explain_to(self, message):
+ """Copy MH-specific state to message insofar as possible."""
+ if isinstance(message, MaildirMessage):
+ sequences = set(self.get_sequences())
+ if 'unseen' in sequences:
+ message.set_subdir('cur')
+ else:
+ message.set_subdir('cur')
+ message.add_flag('S')
+ if 'flagged' in sequences:
+ message.add_flag('F')
+ if 'replied' in sequences:
+ message.add_flag('R')
+ elif isinstance(message, _mboxMMDFMessage):
+ sequences = set(self.get_sequences())
+ if 'unseen' not in sequences:
+ message.add_flag('RO')
+ else:
+ message.add_flag('O')
+ if 'flagged' in sequences:
+ message.add_flag('F')
+ if 'replied' in sequences:
+ message.add_flag('A')
+ elif isinstance(message, MHMessage):
+ for sequence in self.get_sequences():
+ message.add_sequence(sequence)
+ elif isinstance(message, BabylMessage):
+ sequences = set(self.get_sequences())
+ if 'unseen' in sequences:
+ message.add_label('unseen')
+ if 'replied' in sequences:
+ message.add_label('answered')
+ elif isinstance(message, Message):
+ pass
+ else:
+ raise TypeError('Cannot convert to specified type: %s' %
+ type(message))
+
+
+class BabylMessage(Message):
+ """Message with Babyl-specific properties."""
+
+ _type_specific_attributes = ['_labels', '_visible']
+
+ def __init__(self, message=None):
+ """Initialize a BabylMessage instance."""
+ self._labels = []
+ self._visible = Message()
+ Message.__init__(self, message)
+
+ def get_labels(self):
+ """Return a list of labels on the message."""
+ return self._labels[:]
+
+ def set_labels(self, labels):
+ """Set the list of labels on the message."""
+ self._labels = list(labels)
+
+ def add_label(self, label):
+ """Add label to list of labels on the message."""
+ if isinstance(label, str):
+ if label not in self._labels:
+ self._labels.append(label)
+ else:
+ raise TypeError('label must be a string: %s' % type(label))
+
+ def remove_label(self, label):
+ """Remove label from the list of labels on the message."""
+ try:
+ self._labels.remove(label)
+ except ValueError:
+ pass
+
+ def get_visible(self):
+ """Return a Message representation of visible headers."""
+ return Message(self._visible)
+
+ def set_visible(self, visible):
+ """Set the Message representation of visible headers."""
+ self._visible = Message(visible)
+
+ def update_visible(self):
+ """Update and/or sensibly generate a set of visible headers."""
+ for header in self._visible.keys():
+ if header in self:
+ self._visible.replace_header(header, self[header])
+ else:
+ del self._visible[header]
+ for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
+ if header in self and header not in self._visible:
+ self._visible[header] = self[header]
+
+ def _explain_to(self, message):
+ """Copy Babyl-specific state to message insofar as possible."""
+ if isinstance(message, MaildirMessage):
+ labels = set(self.get_labels())
+ if 'unseen' in labels:
+ message.set_subdir('cur')
+ else:
+ message.set_subdir('cur')
+ message.add_flag('S')
+ if 'forwarded' in labels or 'resent' in labels:
+ message.add_flag('P')
+ if 'answered' in labels:
+ message.add_flag('R')
+ if 'deleted' in labels:
+ message.add_flag('T')
+ elif isinstance(message, _mboxMMDFMessage):
+ labels = set(self.get_labels())
+ if 'unseen' not in labels:
+ message.add_flag('RO')
+ else:
+ message.add_flag('O')
+ if 'deleted' in labels:
+ message.add_flag('D')
+ if 'answered' in labels:
+ message.add_flag('A')
+ elif isinstance(message, MHMessage):
+ labels = set(self.get_labels())
+ if 'unseen' in labels:
+ message.add_sequence('unseen')
+ if 'answered' in labels:
+ message.add_sequence('replied')
+ elif isinstance(message, BabylMessage):
+ message.set_visible(self.get_visible())
+ for label in self.get_labels():
+ message.add_label(label)
+ elif isinstance(message, Message):
+ pass
+ else:
+ raise TypeError('Cannot convert to specified type: %s' %
+ type(message))
+
+
+class MMDFMessage(_mboxMMDFMessage):
+ """Message with MMDF-specific properties."""
+
+
+class _ProxyFile:
+ """A read-only wrapper of a file."""
+
+ def __init__(self, f, pos=None):
+ """Initialize a _ProxyFile."""
+ self._file = f
+ if pos is None:
+ self._pos = f.tell()
+ else:
+ self._pos = pos
+
+ def read(self, size=None):
+ """Read bytes."""
+ return self._read(size, self._file.read)
+
+ def read1(self, size=None):
+ """Read bytes."""
+ return self._read(size, self._file.read1)
+
+ def readline(self, size=None):
+ """Read a line."""
+ return self._read(size, self._file.readline)
+
+ def readlines(self, sizehint=None):
+ """Read multiple lines."""
+ result = []
+ for line in self:
+ result.append(line)
+ if sizehint is not None:
+ sizehint -= len(line)
+ if sizehint <= 0:
+ break
+ return result
+
+ def __iter__(self):
+ """Iterate over lines."""
+ while True:
+ line = self.readline()
+ if not line:
+ return
+ yield line
+
+ def tell(self):
+ """Return the position."""
+ return self._pos
+
+ def seek(self, offset, whence=0):
+ """Change position."""
+ if whence == 1:
+ self._file.seek(self._pos)
+ self._file.seek(offset, whence)
+ self._pos = self._file.tell()
+
+ def close(self):
+ """Close the file."""
+ if hasattr(self, '_file'):
+ try:
+ if hasattr(self._file, 'close'):
+ self._file.close()
+ finally:
+ del self._file
+
+ def _read(self, size, read_method):
+ """Read size bytes using read_method."""
+ if size is None:
+ size = -1
+ self._file.seek(self._pos)
+ result = read_method(size)
+ self._pos = self._file.tell()
+ return result
+
+ def __enter__(self):
+ """Context management protocol support."""
+ return self
+
+ def __exit__(self, *exc):
+ self.close()
+
+ def readable(self):
+ return self._file.readable()
+
+ def writable(self):
+ return self._file.writable()
+
+ def seekable(self):
+ return self._file.seekable()
+
+ def flush(self):
+ return self._file.flush()
+
+ @property
+ def closed(self):
+ if not hasattr(self, '_file'):
+ return True
+ if not hasattr(self._file, 'closed'):
+ return False
+ return self._file.closed
+
+ __class_getitem__ = classmethod(GenericAlias)
+
+
+class _PartialFile(_ProxyFile):
+ """A read-only wrapper of part of a file."""
+
+ def __init__(self, f, start=None, stop=None):
+ """Initialize a _PartialFile."""
+ _ProxyFile.__init__(self, f, start)
+ self._start = start
+ self._stop = stop
+
+ def tell(self):
+ """Return the position with respect to start."""
+ return _ProxyFile.tell(self) - self._start
+
+ def seek(self, offset, whence=0):
+ """Change position, possibly with respect to start or stop."""
+ if whence == 0:
+ self._pos = self._start
+ whence = 1
+ elif whence == 2:
+ self._pos = self._stop
+ whence = 1
+ _ProxyFile.seek(self, offset, whence)
+
+ def _read(self, size, read_method):
+ """Read size bytes using read_method, honoring start and stop."""
+ remaining = self._stop - self._pos
+ if remaining <= 0:
+ return b''
+ if size is None or size < 0 or size > remaining:
+ size = remaining
+ return _ProxyFile._read(self, size, read_method)
+
+ def close(self):
+ # do *not* close the underlying file object for partial files,
+ # since it's global to the mailbox object
+ if hasattr(self, '_file'):
+ del self._file
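+
+# Note (illustrative): the single-file mailbox formats hand out _PartialFile
+# objects from their get_file() methods, one per message, each restricted to
+# that message's (start, stop) byte span recorded in the table of contents.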
+
+
+def _lock_file(f, dotlock=True):
+ """Lock file f using lockf and dot locking."""
+ dotlock_done = False
+ try:
+ if fcntl:
+ try:
+ fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except OSError as e:
+ if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS):
+ raise ExternalClashError('lockf: lock unavailable: %s' %
+ f.name)
+ else:
+ raise
+ if dotlock:
+ try:
+ pre_lock = _create_temporary(f.name + '.lock')
+ pre_lock.close()
+ except OSError as e:
+ if e.errno in (errno.EACCES, errno.EROFS):
+ return # Without write access, just skip dotlocking.
+ else:
+ raise
+ try:
+ try:
+ os.link(pre_lock.name, f.name + '.lock')
+ dotlock_done = True
+ except (AttributeError, PermissionError):
+ os.rename(pre_lock.name, f.name + '.lock')
+ dotlock_done = True
+ else:
+ os.unlink(pre_lock.name)
+ except FileExistsError:
+ os.remove(pre_lock.name)
+ raise ExternalClashError('dot lock unavailable: %s' %
+ f.name)
+ except:
+ if fcntl:
+ fcntl.lockf(f, fcntl.LOCK_UN)
+ if dotlock_done:
+ os.remove(f.name + '.lock')
+ raise
+
+def _unlock_file(f):
+ """Unlock file f using lockf and dot locking."""
+ if fcntl:
+ fcntl.lockf(f, fcntl.LOCK_UN)
+ if os.path.exists(f.name + '.lock'):
+ os.remove(f.name + '.lock')
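+
+# Illustrative pairing (a sketch; the path is hypothetical and these helpers
+# are module-private):
+#
+#     f = open('/tmp/box.mbox', 'rb+')
+#     _lock_file(f)
+#     try:
+#         ...  # read or modify the mailbox
+#     finally:
+#         _unlock_file(f)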
+
+def _create_carefully(path):
+ """Create a file if it doesn't exist and open for reading and writing."""
+ fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o666)
+ try:
+ return open(path, 'rb+')
+ finally:
+ os.close(fd)
+
+def _create_temporary(path):
+ """Create a temp file based on path and open for reading and writing."""
+ return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
+ socket.gethostname(),
+ os.getpid()))
+
+def _sync_flush(f):
+ """Ensure changes to file f are physically on disk."""
+ f.flush()
+ if hasattr(os, 'fsync'):
+ os.fsync(f.fileno())
+
+def _sync_close(f):
+ """Close file f, ensuring all changes are physically on disk."""
+ _sync_flush(f)
+ f.close()
+
+
+class Error(Exception):
+ """Raised for module-specific errors."""
+
+class NoSuchMailboxError(Error):
+ """The specified mailbox does not exist and won't be created."""
+
+class NotEmptyError(Error):
+ """The specified mailbox is not empty and deletion was requested."""
+
+class ExternalClashError(Error):
+ """Another process caused an action to fail."""
+
+class FormatError(Error):
+ """A file appears to have an invalid format."""
diff --git a/infer_4_37_2/lib/python3.10/modulefinder.py b/infer_4_37_2/lib/python3.10/modulefinder.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb455f40c4d7894ef73ab25bed6659e917565394
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/modulefinder.py
@@ -0,0 +1,685 @@
+"""Find modules used by a script, using introspection."""
+
+import dis
+import importlib._bootstrap_external
+import importlib.machinery
+import marshal
+import os
+import io
+import sys
+
+
+LOAD_CONST = dis.opmap['LOAD_CONST']
+IMPORT_NAME = dis.opmap['IMPORT_NAME']
+STORE_NAME = dis.opmap['STORE_NAME']
+STORE_GLOBAL = dis.opmap['STORE_GLOBAL']
+STORE_OPS = STORE_NAME, STORE_GLOBAL
+EXTENDED_ARG = dis.EXTENDED_ARG
+
+# Old imp constants:
+
+_SEARCH_ERROR = 0
+_PY_SOURCE = 1
+_PY_COMPILED = 2
+_C_EXTENSION = 3
+_PKG_DIRECTORY = 5
+_C_BUILTIN = 6
+_PY_FROZEN = 7
+
+# Modulefinder does a good job of simulating Python's import machinery, but
+# it cannot handle __path__ modifications that packages make at runtime.
+# Therefore there is a mechanism whereby you can register extra paths in
+# this map for a package, and they will be honored.
+
+# Note this is a mapping from package names to lists of paths.
+packagePathMap = {}
+
+# A public interface
+def AddPackagePath(packagename, path):
+ packagePathMap.setdefault(packagename, []).append(path)
+
+replacePackageMap = {}
+
+# This ReplacePackage mechanism allows modulefinder to work around
+# situations in which a package injects itself under the name
+# of another package into sys.modules at runtime by calling
+# ReplacePackage("real_package_name", "faked_package_name")
+# before running ModuleFinder.
+
+def ReplacePackage(oldname, newname):
+ replacePackageMap[oldname] = newname
+
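+# Illustrative usage (a sketch; `_xmlplus` is the classic PyXML example, and
+# the package name, extra path, and script below are hypothetical):
+#
+#     ReplacePackage("_xmlplus", "xml")
+#     AddPackagePath("mypkg", "/extra/path/for/mypkg")
+#     ModuleFinder().run_script("myscript.py")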
+
+def _find_module(name, path=None):
+ """An importlib reimplementation of imp.find_module (for our purposes)."""
+
+ # It's necessary to clear the caches for our Finder first, in case any
+ # modules are being added/deleted/modified at runtime. In particular,
+ # test_modulefinder.py changes file tree contents in a cache-breaking way:
+
+ importlib.machinery.PathFinder.invalidate_caches()
+
+ spec = importlib.machinery.PathFinder.find_spec(name, path)
+
+ if spec is None:
+ raise ImportError("No module named {name!r}".format(name=name), name=name)
+
+ # Some special cases:
+
+ if spec.loader is importlib.machinery.BuiltinImporter:
+ return None, None, ("", "", _C_BUILTIN)
+
+ if spec.loader is importlib.machinery.FrozenImporter:
+ return None, None, ("", "", _PY_FROZEN)
+
+ file_path = spec.origin
+
+ if spec.loader.is_package(name):
+ return None, os.path.dirname(file_path), ("", "", _PKG_DIRECTORY)
+
+ if isinstance(spec.loader, importlib.machinery.SourceFileLoader):
+ kind = _PY_SOURCE
+
+ elif isinstance(spec.loader, importlib.machinery.ExtensionFileLoader):
+ kind = _C_EXTENSION
+
+ elif isinstance(spec.loader, importlib.machinery.SourcelessFileLoader):
+ kind = _PY_COMPILED
+
+ else: # Should never happen.
+ return None, None, ("", "", _SEARCH_ERROR)
+
+ file = io.open_code(file_path)
+ suffix = os.path.splitext(file_path)[-1]
+
+ return file, file_path, (suffix, "rb", kind)
+
+
+class Module:
+
+ def __init__(self, name, file=None, path=None):
+ self.__name__ = name
+ self.__file__ = file
+ self.__path__ = path
+ self.__code__ = None
+ # The set of global names that are assigned to in the module.
+ # This includes those names imported through starimports of
+ # Python modules.
+ self.globalnames = {}
+ # The set of starimports this module did that could not be
+        # resolved, i.e. a starimport from a non-Python module.
+ self.starimports = {}
+
+ def __repr__(self):
+ s = "Module(%r" % (self.__name__,)
+ if self.__file__ is not None:
+ s = s + ", %r" % (self.__file__,)
+ if self.__path__ is not None:
+ s = s + ", %r" % (self.__path__,)
+ s = s + ")"
+ return s
+
+class ModuleFinder:
+
+ def __init__(self, path=None, debug=0, excludes=None, replace_paths=None):
+ if path is None:
+ path = sys.path
+ self.path = path
+ self.modules = {}
+ self.badmodules = {}
+ self.debug = debug
+ self.indent = 0
+ self.excludes = excludes if excludes is not None else []
+ self.replace_paths = replace_paths if replace_paths is not None else []
+ self.processed_paths = [] # Used in debugging only
+
+ def msg(self, level, str, *args):
+ if level <= self.debug:
+ for i in range(self.indent):
+ print(" ", end=' ')
+ print(str, end=' ')
+ for arg in args:
+ print(repr(arg), end=' ')
+ print()
+
+ def msgin(self, *args):
+ level = args[0]
+ if level <= self.debug:
+ self.indent = self.indent + 1
+ self.msg(*args)
+
+ def msgout(self, *args):
+ level = args[0]
+ if level <= self.debug:
+ self.indent = self.indent - 1
+ self.msg(*args)
+
+ def run_script(self, pathname):
+ self.msg(2, "run_script", pathname)
+ with io.open_code(pathname) as fp:
+ stuff = ("", "rb", _PY_SOURCE)
+ self.load_module('__main__', fp, pathname, stuff)
+
+ def load_file(self, pathname):
+ dir, name = os.path.split(pathname)
+ name, ext = os.path.splitext(name)
+ with io.open_code(pathname) as fp:
+ stuff = (ext, "rb", _PY_SOURCE)
+ self.load_module(name, fp, pathname, stuff)
+
+ def import_hook(self, name, caller=None, fromlist=None, level=-1):
+ self.msg(3, "import_hook", name, caller, fromlist, level)
+ parent = self.determine_parent(caller, level=level)
+ q, tail = self.find_head_package(parent, name)
+ m = self.load_tail(q, tail)
+ if not fromlist:
+ return q
+ if m.__path__:
+ self.ensure_fromlist(m, fromlist)
+ return None
+
+ def determine_parent(self, caller, level=-1):
+ self.msgin(4, "determine_parent", caller, level)
+ if not caller or level == 0:
+ self.msgout(4, "determine_parent -> None")
+ return None
+ pname = caller.__name__
+ if level >= 1: # relative import
+ if caller.__path__:
+ level -= 1
+ if level == 0:
+ parent = self.modules[pname]
+ assert parent is caller
+ self.msgout(4, "determine_parent ->", parent)
+ return parent
+ if pname.count(".") < level:
+ raise ImportError("relative importpath too deep")
+ pname = ".".join(pname.split(".")[:-level])
+ parent = self.modules[pname]
+ self.msgout(4, "determine_parent ->", parent)
+ return parent
+ if caller.__path__:
+ parent = self.modules[pname]
+ assert caller is parent
+ self.msgout(4, "determine_parent ->", parent)
+ return parent
+ if '.' in pname:
+ i = pname.rfind('.')
+ pname = pname[:i]
+ parent = self.modules[pname]
+ assert parent.__name__ == pname
+ self.msgout(4, "determine_parent ->", parent)
+ return parent
+ self.msgout(4, "determine_parent -> None")
+ return None
+
+ def find_head_package(self, parent, name):
+ self.msgin(4, "find_head_package", parent, name)
+ if '.' in name:
+ i = name.find('.')
+ head = name[:i]
+ tail = name[i+1:]
+ else:
+ head = name
+ tail = ""
+ if parent:
+ qname = "%s.%s" % (parent.__name__, head)
+ else:
+ qname = head
+ q = self.import_module(head, qname, parent)
+ if q:
+ self.msgout(4, "find_head_package ->", (q, tail))
+ return q, tail
+ if parent:
+ qname = head
+ parent = None
+ q = self.import_module(head, qname, parent)
+ if q:
+ self.msgout(4, "find_head_package ->", (q, tail))
+ return q, tail
+ self.msgout(4, "raise ImportError: No module named", qname)
+ raise ImportError("No module named " + qname)
+
+ def load_tail(self, q, tail):
+ self.msgin(4, "load_tail", q, tail)
+ m = q
+ while tail:
+ i = tail.find('.')
+ if i < 0: i = len(tail)
+ head, tail = tail[:i], tail[i+1:]
+ mname = "%s.%s" % (m.__name__, head)
+ m = self.import_module(head, mname, m)
+ if not m:
+ self.msgout(4, "raise ImportError: No module named", mname)
+ raise ImportError("No module named " + mname)
+ self.msgout(4, "load_tail ->", m)
+ return m
+
+ def ensure_fromlist(self, m, fromlist, recursive=0):
+ self.msg(4, "ensure_fromlist", m, fromlist, recursive)
+ for sub in fromlist:
+ if sub == "*":
+ if not recursive:
+ all = self.find_all_submodules(m)
+ if all:
+ self.ensure_fromlist(m, all, 1)
+ elif not hasattr(m, sub):
+ subname = "%s.%s" % (m.__name__, sub)
+ submod = self.import_module(sub, subname, m)
+ if not submod:
+ raise ImportError("No module named " + subname)
+
+ def find_all_submodules(self, m):
+ if not m.__path__:
+ return
+ modules = {}
+ # 'suffixes' used to be a list hardcoded to [".py", ".pyc"].
+ # But we must also collect Python extension modules - although
+ # we cannot separate normal dlls from Python extensions.
+ suffixes = []
+ suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
+ suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
+ suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
+ for dir in m.__path__:
+ try:
+ names = os.listdir(dir)
+ except OSError:
+ self.msg(2, "can't list directory", dir)
+ continue
+ for name in names:
+ mod = None
+ for suff in suffixes:
+ n = len(suff)
+ if name[-n:] == suff:
+ mod = name[:-n]
+ break
+ if mod and mod != "__init__":
+ modules[mod] = mod
+ return modules.keys()
+
+ def import_module(self, partname, fqname, parent):
+ self.msgin(3, "import_module", partname, fqname, parent)
+ try:
+ m = self.modules[fqname]
+ except KeyError:
+ pass
+ else:
+ self.msgout(3, "import_module ->", m)
+ return m
+ if fqname in self.badmodules:
+ self.msgout(3, "import_module -> None")
+ return None
+ if parent and parent.__path__ is None:
+ self.msgout(3, "import_module -> None")
+ return None
+ try:
+ fp, pathname, stuff = self.find_module(partname,
+ parent and parent.__path__, parent)
+ except ImportError:
+ self.msgout(3, "import_module ->", None)
+ return None
+
+ try:
+ m = self.load_module(fqname, fp, pathname, stuff)
+ finally:
+ if fp:
+ fp.close()
+ if parent:
+ setattr(parent, partname, m)
+ self.msgout(3, "import_module ->", m)
+ return m
+
+ def load_module(self, fqname, fp, pathname, file_info):
+ suffix, mode, type = file_info
+ self.msgin(2, "load_module", fqname, fp and "fp", pathname)
+ if type == _PKG_DIRECTORY:
+ m = self.load_package(fqname, pathname)
+ self.msgout(2, "load_module ->", m)
+ return m
+ if type == _PY_SOURCE:
+ co = compile(fp.read(), pathname, 'exec')
+ elif type == _PY_COMPILED:
+ try:
+ data = fp.read()
+ importlib._bootstrap_external._classify_pyc(data, fqname, {})
+ except ImportError as exc:
+ self.msgout(2, "raise ImportError: " + str(exc), pathname)
+ raise
+ co = marshal.loads(memoryview(data)[16:])
+ else:
+ co = None
+ m = self.add_module(fqname)
+ m.__file__ = pathname
+ if co:
+ if self.replace_paths:
+ co = self.replace_paths_in_code(co)
+ m.__code__ = co
+ self.scan_code(co, m)
+ self.msgout(2, "load_module ->", m)
+ return m
+
+ def _add_badmodule(self, name, caller):
+ if name not in self.badmodules:
+ self.badmodules[name] = {}
+ if caller:
+ self.badmodules[name][caller.__name__] = 1
+ else:
+ self.badmodules[name]["-"] = 1
+
+ def _safe_import_hook(self, name, caller, fromlist, level=-1):
+ # wrapper for self.import_hook() that won't raise ImportError
+ if name in self.badmodules:
+ self._add_badmodule(name, caller)
+ return
+ try:
+ self.import_hook(name, caller, level=level)
+ except ImportError as msg:
+ self.msg(2, "ImportError:", str(msg))
+ self._add_badmodule(name, caller)
+ except SyntaxError as msg:
+ self.msg(2, "SyntaxError:", str(msg))
+ self._add_badmodule(name, caller)
+ else:
+ if fromlist:
+ for sub in fromlist:
+ fullname = name + "." + sub
+ if fullname in self.badmodules:
+ self._add_badmodule(fullname, caller)
+ continue
+ try:
+ self.import_hook(name, caller, [sub], level=level)
+ except ImportError as msg:
+ self.msg(2, "ImportError:", str(msg))
+ self._add_badmodule(fullname, caller)
+
+ def scan_opcodes(self, co):
+ # Scan the code, and yield 'interesting' opcode combinations
+ code = co.co_code
+ names = co.co_names
+ consts = co.co_consts
+ opargs = [(op, arg) for _, op, arg in dis._unpack_opargs(code)
+ if op != EXTENDED_ARG]
+ for i, (op, oparg) in enumerate(opargs):
+ if op in STORE_OPS:
+ yield "store", (names[oparg],)
+ continue
+ if (op == IMPORT_NAME and i >= 2
+ and opargs[i-1][0] == opargs[i-2][0] == LOAD_CONST):
+ level = consts[opargs[i-2][1]]
+ fromlist = consts[opargs[i-1][1]]
+ if level == 0: # absolute import
+ yield "absolute_import", (fromlist, names[oparg])
+ else: # relative import
+ yield "relative_import", (level, fromlist, names[oparg])
+ continue
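+
+    # Illustrative note (CPython 3.10 bytecode): a statement such as
+    # `from os import path` compiles to the pattern matched above,
+    #
+    #     LOAD_CONST   0          (the `level` argument; 0 => absolute)
+    #     LOAD_CONST   ('path',)  (the `fromlist` argument)
+    #     IMPORT_NAME  os
+    #
+    # which is why the two LOAD_CONST operands immediately preceding
+    # IMPORT_NAME are read back as (level, fromlist).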
+
+ def scan_code(self, co, m):
+ code = co.co_code
+ scanner = self.scan_opcodes
+ for what, args in scanner(co):
+ if what == "store":
+ name, = args
+ m.globalnames[name] = 1
+ elif what == "absolute_import":
+ fromlist, name = args
+ have_star = 0
+ if fromlist is not None:
+ if "*" in fromlist:
+ have_star = 1
+ fromlist = [f for f in fromlist if f != "*"]
+ self._safe_import_hook(name, m, fromlist, level=0)
+ if have_star:
+ # We've encountered an "import *". If it is a Python module,
+ # the code has already been parsed and we can suck out the
+ # global names.
+ mm = None
+ if m.__path__:
+ # At this point we don't know whether 'name' is a
+ # submodule of 'm' or a global module. Let's just try
+ # the full name first.
+ mm = self.modules.get(m.__name__ + "." + name)
+ if mm is None:
+ mm = self.modules.get(name)
+ if mm is not None:
+ m.globalnames.update(mm.globalnames)
+ m.starimports.update(mm.starimports)
+ if mm.__code__ is None:
+ m.starimports[name] = 1
+ else:
+ m.starimports[name] = 1
+ elif what == "relative_import":
+ level, fromlist, name = args
+ if name:
+ self._safe_import_hook(name, m, fromlist, level=level)
+ else:
+ parent = self.determine_parent(m, level=level)
+ self._safe_import_hook(parent.__name__, None, fromlist, level=0)
+ else:
+ # We don't expect anything else from the generator.
+ raise RuntimeError(what)
+
+ for c in co.co_consts:
+ if isinstance(c, type(co)):
+ self.scan_code(c, m)
+
+ def load_package(self, fqname, pathname):
+ self.msgin(2, "load_package", fqname, pathname)
+ newname = replacePackageMap.get(fqname)
+ if newname:
+ fqname = newname
+ m = self.add_module(fqname)
+ m.__file__ = pathname
+ m.__path__ = [pathname]
+
+ # As per comment at top of file, simulate runtime __path__ additions.
+ m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
+
+ fp, buf, stuff = self.find_module("__init__", m.__path__)
+ try:
+ self.load_module(fqname, fp, buf, stuff)
+ self.msgout(2, "load_package ->", m)
+ return m
+ finally:
+ if fp:
+ fp.close()
+
+ def add_module(self, fqname):
+ if fqname in self.modules:
+ return self.modules[fqname]
+ self.modules[fqname] = m = Module(fqname)
+ return m
+
+ def find_module(self, name, path, parent=None):
+ if parent is not None:
+ # assert path is not None
+ fullname = parent.__name__+'.'+name
+ else:
+ fullname = name
+ if fullname in self.excludes:
+ self.msgout(3, "find_module -> Excluded", fullname)
+ raise ImportError(name)
+
+ if path is None:
+ if name in sys.builtin_module_names:
+ return (None, None, ("", "", _C_BUILTIN))
+
+ path = self.path
+
+ return _find_module(name, path)
+
+ def report(self):
+ """Print a report to stdout, listing the found modules with their
+ paths, as well as modules that are missing, or seem to be missing.
+ """
+ print()
+ print(" %-25s %s" % ("Name", "File"))
+ print(" %-25s %s" % ("----", "----"))
+ # Print modules found
+ keys = sorted(self.modules.keys())
+ for key in keys:
+ m = self.modules[key]
+ if m.__path__:
+ print("P", end=' ')
+ else:
+ print("m", end=' ')
+ print("%-25s" % key, m.__file__ or "")
+
+ # Print missing modules
+ missing, maybe = self.any_missing_maybe()
+ if missing:
+ print()
+ print("Missing modules:")
+ for name in missing:
+ mods = sorted(self.badmodules[name].keys())
+ print("?", name, "imported from", ', '.join(mods))
+ # Print modules that may be missing, but then again, maybe not...
+ if maybe:
+ print()
+ print("Submodules that appear to be missing, but could also be", end=' ')
+ print("global names in the parent package:")
+ for name in maybe:
+ mods = sorted(self.badmodules[name].keys())
+ print("?", name, "imported from", ', '.join(mods))
+
+ def any_missing(self):
+ """Return a list of modules that appear to be missing. Use
+ any_missing_maybe() if you want to know which modules are
+ certain to be missing, and which *may* be missing.
+ """
+ missing, maybe = self.any_missing_maybe()
+ return missing + maybe
+
+ def any_missing_maybe(self):
+ """Return two lists, one with modules that are certainly missing
+ and one with modules that *may* be missing. The latter names could
+ either be submodules *or* just global names in the package.
+
+ The reason it can't always be determined is that it's impossible to
+ tell which names are imported when "from module import *" is done
+ with an extension module, short of actually importing it.
+ """
+ missing = []
+ maybe = []
+ for name in self.badmodules:
+ if name in self.excludes:
+ continue
+ i = name.rfind(".")
+ if i < 0:
+ missing.append(name)
+ continue
+ subname = name[i+1:]
+ pkgname = name[:i]
+ pkg = self.modules.get(pkgname)
+ if pkg is not None:
+ if pkgname in self.badmodules[name]:
+ # The package tried to import this module itself and
+ # failed. It's definitely missing.
+ missing.append(name)
+ elif subname in pkg.globalnames:
+ # It's a global in the package: definitely not missing.
+ pass
+ elif pkg.starimports:
+ # It could be missing, but the package did an "import *"
+ # from a non-Python module, so we simply can't be sure.
+ maybe.append(name)
+ else:
+ # It's not a global in the package, the package didn't
+ # do funny star imports, it's very likely to be missing.
+ # The symbol could be inserted into the package from the
+ # outside, but since that's not good style we simply list
+ # it missing.
+ missing.append(name)
+ else:
+ missing.append(name)
+ missing.sort()
+ maybe.sort()
+ return missing, maybe
+
+ def replace_paths_in_code(self, co):
+ new_filename = original_filename = os.path.normpath(co.co_filename)
+ for f, r in self.replace_paths:
+ if original_filename.startswith(f):
+ new_filename = r + original_filename[len(f):]
+ break
+
+ if self.debug and original_filename not in self.processed_paths:
+ if new_filename != original_filename:
+                self.msgout(2, "co_filename %r changed to %r"
+                            % (original_filename, new_filename))
+            else:
+                self.msgout(2, "co_filename %r remains unchanged"
+                            % (original_filename,))
+ self.processed_paths.append(original_filename)
+
+ consts = list(co.co_consts)
+ for i in range(len(consts)):
+ if isinstance(consts[i], type(co)):
+ consts[i] = self.replace_paths_in_code(consts[i])
+
+ return co.replace(co_consts=tuple(consts), co_filename=new_filename)
+
+
+def test():
+ # Parse command line
+ import getopt
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
+ except getopt.error as msg:
+ print(msg)
+ return
+
+ # Process options
+ debug = 1
+ domods = 0
+ addpath = []
+ exclude = []
+ for o, a in opts:
+ if o == '-d':
+ debug = debug + 1
+ if o == '-m':
+ domods = 1
+ if o == '-p':
+ addpath = addpath + a.split(os.pathsep)
+ if o == '-q':
+ debug = 0
+ if o == '-x':
+ exclude.append(a)
+
+ # Provide default arguments
+ if not args:
+ script = "hello.py"
+ else:
+ script = args[0]
+
+ # Set the path based on sys.path and the script directory
+ path = sys.path[:]
+ path[0] = os.path.dirname(script)
+ path = addpath + path
+ if debug > 1:
+ print("path:")
+ for item in path:
+ print(" ", repr(item))
+
+ # Create the module finder and turn its crank
+ mf = ModuleFinder(path, debug, exclude)
+ for arg in args[1:]:
+ if arg == '-m':
+ domods = 1
+ continue
+ if domods:
+ if arg[-2:] == '.*':
+ mf.import_hook(arg[:-2], None, ["*"])
+ else:
+ mf.import_hook(arg)
+ else:
+ mf.load_file(arg)
+ mf.run_script(script)
+ mf.report()
+ return mf # for -i debugging
+
+
+if __name__ == '__main__':
+ try:
+ mf = test()
+ except KeyboardInterrupt:
+ print("\n[interrupted]")
diff --git a/infer_4_37_2/lib/python3.10/ntpath.py b/infer_4_37_2/lib/python3.10/ntpath.py
new file mode 100644
index 0000000000000000000000000000000000000000..c14e5c7ceca5deb5a712145b6fe27354e4842b9f
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/ntpath.py
@@ -0,0 +1,838 @@
+# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
+"""Common pathname manipulations, WindowsNT/95 version.
+
+Instead of importing this module directly, import os and refer to this
+module as os.path.
+"""
+
+# strings representing various path-related bits and pieces
+# These are primarily for export; internally, they are hardcoded.
+# They should be set before the imports below to resolve a cyclic dependency.
+curdir = '.'
+pardir = '..'
+extsep = '.'
+sep = '\\'
+pathsep = ';'
+altsep = '/'
+defpath = '.;C:\\bin'
+devnull = 'nul'
+
+import os
+import sys
+import stat
+import genericpath
+from genericpath import *
+
+
+__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
+ "basename","dirname","commonprefix","getsize","getmtime",
+ "getatime","getctime", "islink","exists","lexists","isdir","isfile",
+ "ismount", "expanduser","expandvars","normpath","abspath",
+ "curdir","pardir","sep","pathsep","defpath","altsep",
+ "extsep","devnull","realpath","supports_unicode_filenames","relpath",
+ "samefile", "sameopenfile", "samestat", "commonpath"]
+
+def _get_bothseps(path):
+ if isinstance(path, bytes):
+ return b'\\/'
+ else:
+ return '\\/'
+
+# Normalize the case of a pathname and map slashes to backslashes.
+# Other normalizations (such as optimizing '../' away) are not done
+# (this is done by normpath).
+
+try:
+ from _winapi import (
+ LCMapStringEx as _LCMapStringEx,
+ LOCALE_NAME_INVARIANT as _LOCALE_NAME_INVARIANT,
+ LCMAP_LOWERCASE as _LCMAP_LOWERCASE)
+
+ def normcase(s):
+ """Normalize case of pathname.
+
+ Makes all characters lowercase and all slashes into backslashes.
+ """
+ s = os.fspath(s)
+ if not s:
+ return s
+ if isinstance(s, bytes):
+ encoding = sys.getfilesystemencoding()
+ s = s.decode(encoding, 'surrogateescape').replace('/', '\\')
+ s = _LCMapStringEx(_LOCALE_NAME_INVARIANT,
+ _LCMAP_LOWERCASE, s)
+ return s.encode(encoding, 'surrogateescape')
+ else:
+ return _LCMapStringEx(_LOCALE_NAME_INVARIANT,
+ _LCMAP_LOWERCASE,
+ s.replace('/', '\\'))
+except ImportError:
+ def normcase(s):
+ """Normalize case of pathname.
+
+ Makes all characters lowercase and all slashes into backslashes.
+ """
+ s = os.fspath(s)
+ if isinstance(s, bytes):
+ return os.fsencode(os.fsdecode(s).replace('/', '\\').lower())
+ return s.replace('/', '\\').lower()
+
+
+# Return whether a path is absolute.
+# Trivial in Posix, harder on Windows.
+# For Windows it is absolute if it starts with a slash or backslash (current
+# volume), or if a pathname after the volume-letter-and-colon or UNC-resource
+# starts with a slash or backslash.
+
+def isabs(s):
+ """Test whether a path is absolute"""
+ s = os.fspath(s)
+ # Paths beginning with \\?\ are always absolute, but do not
+ # necessarily contain a drive.
+ if isinstance(s, bytes):
+ if s.replace(b'/', b'\\').startswith(b'\\\\?\\'):
+ return True
+ else:
+ if s.replace('/', '\\').startswith('\\\\?\\'):
+ return True
+ s = splitdrive(s)[1]
+ return len(s) > 0 and s[0] in _get_bothseps(s)
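+
+# Illustrative examples (a sketch; backslashes doubled for Python string
+# syntax):
+#
+#     >>> isabs('c:\\foo')
+#     True
+#     >>> isabs('c:foo')    # drive-relative, hence not absolute
+#     False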
+
+
+# Join two (or more) paths.
+def join(path, *paths):
+ path = os.fspath(path)
+ if isinstance(path, bytes):
+ sep = b'\\'
+ seps = b'\\/'
+ colon = b':'
+ else:
+ sep = '\\'
+ seps = '\\/'
+ colon = ':'
+ try:
+ if not paths:
+ path[:0] + sep #23780: Ensure compatible data type even if p is null.
+ result_drive, result_path = splitdrive(path)
+ for p in map(os.fspath, paths):
+ p_drive, p_path = splitdrive(p)
+ if p_path and p_path[0] in seps:
+ # Second path is absolute
+ if p_drive or not result_drive:
+ result_drive = p_drive
+ result_path = p_path
+ continue
+ elif p_drive and p_drive != result_drive:
+ if p_drive.lower() != result_drive.lower():
+ # Different drives => ignore the first path entirely
+ result_drive = p_drive
+ result_path = p_path
+ continue
+ # Same drive in different case
+ result_drive = p_drive
+ # Second path is relative to the first
+ if result_path and result_path[-1] not in seps:
+ result_path = result_path + sep
+ result_path = result_path + p_path
+ ## add separator between UNC and non-absolute path
+ if (result_path and result_path[0] not in seps and
+ result_drive and result_drive[-1:] != colon):
+ return result_drive + sep + result_path
+ return result_drive + result_path
+ except (TypeError, AttributeError, BytesWarning):
+ genericpath._check_arg_types('join', path, *paths)
+ raise
+
+
+# Split a path in a drive specification (a drive letter followed by a
+# colon) and the path specification.
+# It is always true that drivespec + pathspec == p
+def splitdrive(p):
+ """Split a pathname into drive/UNC sharepoint and relative path specifiers.
+ Returns a 2-tuple (drive_or_unc, path); either part may be empty.
+
+ If you assign
+ result = splitdrive(p)
+ It is always true that:
+ result[0] + result[1] == p
+
+ If the path contained a drive letter, drive_or_unc will contain everything
+ up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir")
+
+ If the path contained a UNC path, the drive_or_unc will contain the host name
+ and share up to but not including the fourth directory separator character.
+ e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir")
+
+ Paths cannot contain both a drive letter and a UNC path.
+
+ """
+ p = os.fspath(p)
+ if len(p) >= 2:
+ if isinstance(p, bytes):
+ sep = b'\\'
+ altsep = b'/'
+ colon = b':'
+ else:
+ sep = '\\'
+ altsep = '/'
+ colon = ':'
+ normp = p.replace(altsep, sep)
+ if (normp[0:2] == sep*2) and (normp[2:3] != sep):
+ # is a UNC path:
+ # vvvvvvvvvvvvvvvvvvvv drive letter or UNC path
+ # \\machine\mountpoint\directory\etc\...
+ # directory ^^^^^^^^^^^^^^^
+ index = normp.find(sep, 2)
+ if index == -1:
+ return p[:0], p
+ index2 = normp.find(sep, index + 1)
+ # a UNC path can't have two slashes in a row
+ # (after the initial two)
+ if index2 == index + 1:
+ return p[:0], p
+ if index2 == -1:
+ index2 = len(p)
+ return p[:index2], p[index2:]
+ if normp[1:2] == colon:
+ return p[:2], p[2:]
+ return p[:0], p
+
+
+# Split a path in head (everything up to the last '/') and tail (the
+# rest). After the trailing '/' is stripped, the invariant
+# join(head, tail) == p holds.
+# The resulting head won't end in '/' unless it is the root.
+
+def split(p):
+ """Split a pathname.
+
+ Return tuple (head, tail) where tail is everything after the final slash.
+ Either part may be empty."""
+ p = os.fspath(p)
+ seps = _get_bothseps(p)
+ d, p = splitdrive(p)
+ # set i to index beyond p's last slash
+ i = len(p)
+ while i and p[i-1] not in seps:
+ i -= 1
+ head, tail = p[:i], p[i:] # now tail has no slashes
+ # remove trailing slashes from head, unless it's all slashes
+ head = head.rstrip(seps) or head
+ return d + head, tail
+
+
+# Split a path in root and extension.
+# The extension is everything starting at the last dot in the last
+# pathname component; the root is everything before that.
+# It is always true that root + ext == p.
+
+def splitext(p):
+ p = os.fspath(p)
+ if isinstance(p, bytes):
+ return genericpath._splitext(p, b'\\', b'/', b'.')
+ else:
+ return genericpath._splitext(p, '\\', '/', '.')
+splitext.__doc__ = genericpath._splitext.__doc__
+
+
+# Return the tail (basename) part of a path.
+
+def basename(p):
+ """Returns the final component of a pathname"""
+ return split(p)[1]
+
+
+# Return the head (dirname) part of a path.
+
+def dirname(p):
+ """Returns the directory component of a pathname"""
+ return split(p)[0]
+
+# Is a path a symbolic link?
+# This will always return false on systems where os.lstat doesn't exist.
+
+def islink(path):
+ """Test whether a path is a symbolic link.
+ This will always return false for Windows prior to 6.0.
+ """
+ try:
+ st = os.lstat(path)
+ except (OSError, ValueError, AttributeError):
+ return False
+ return stat.S_ISLNK(st.st_mode)
+
+# Being true for dangling symbolic links is also useful.
+
+def lexists(path):
+ """Test whether a path exists. Returns True for broken symbolic links"""
+ try:
+ st = os.lstat(path)
+ except (OSError, ValueError):
+ return False
+ return True
+
+# Is a path a mount point?
+# Any drive letter root (eg c:\)
+# Any share UNC (eg \\server\share)
+# Any volume mounted on a filesystem folder
+#
+# No one method detects all three situations. Historically we've lexically
+# detected drive letter roots and share UNCs. The canonical approach to
+# detecting mounted volumes (querying the reparse tag) fails for the most
+# common case: drive letter roots. The alternative which uses GetVolumePathName
+# fails if the drive letter is the result of a SUBST.
+try:
+ from nt import _getvolumepathname
+except ImportError:
+ _getvolumepathname = None
+def ismount(path):
+ """Test whether a path is a mount point (a drive root, the root of a
+ share, or a mounted volume)"""
+ path = os.fspath(path)
+ seps = _get_bothseps(path)
+ path = abspath(path)
+ root, rest = splitdrive(path)
+ if root and root[0] in seps:
+ return (not rest) or (rest in seps)
+ if rest in seps:
+ return True
+
+ if _getvolumepathname:
+ return path.rstrip(seps) == _getvolumepathname(path).rstrip(seps)
+ else:
+ return False
+
+
+# Expand paths beginning with '~' or '~user'.
+# '~' means $HOME; '~user' means that user's home directory.
+# If the path doesn't begin with '~', or if the user or $HOME is unknown,
+# the path is returned unchanged (leaving error reporting to whatever
+# function is called with the expanded path as argument).
+# See also module 'glob' for expansion of *, ? and [...] in pathnames.
+# (A function should also be defined to do full *sh-style environment
+# variable expansion.)
+
+def expanduser(path):
+ """Expand ~ and ~user constructs.
+
+ If user or $HOME is unknown, do nothing."""
+ path = os.fspath(path)
+ if isinstance(path, bytes):
+ tilde = b'~'
+ else:
+ tilde = '~'
+ if not path.startswith(tilde):
+ return path
+ i, n = 1, len(path)
+ while i < n and path[i] not in _get_bothseps(path):
+ i += 1
+
+ if 'USERPROFILE' in os.environ:
+ userhome = os.environ['USERPROFILE']
+    elif 'HOMEPATH' not in os.environ:
+ return path
+ else:
+ try:
+ drive = os.environ['HOMEDRIVE']
+ except KeyError:
+ drive = ''
+ userhome = join(drive, os.environ['HOMEPATH'])
+
+ if i != 1: #~user
+ target_user = path[1:i]
+ if isinstance(target_user, bytes):
+ target_user = os.fsdecode(target_user)
+ current_user = os.environ.get('USERNAME')
+
+ if target_user != current_user:
+ # Try to guess user home directory. By default all user
+ # profile directories are located in the same place and are
+ # named by corresponding usernames. If userhome isn't a
+ # normal profile directory, this guess is likely wrong,
+ # so we bail out.
+ if current_user != basename(userhome):
+ return path
+ userhome = join(dirname(userhome), target_user)
+
+ if isinstance(path, bytes):
+ userhome = os.fsencode(userhome)
+
+ return userhome + path[i:]
+
+
+# Expand paths containing shell variable substitutions.
+# The following rules apply:
+# - no expansion within single quotes
+# - '$$' is translated into '$'
+# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
+# - ${varname} is accepted.
+# - $varname is accepted.
+# - %varname% is accepted.
+# - varnames can be made out of letters, digits and the characters '_-'
+#   (though this is not verified in the ${varname} and %varname% cases)
+# XXX With COMMAND.COM you can use any characters in a variable name,
+# XXX except '^|<>='.
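+#
+# Illustrative examples for the function below (a sketch; the second assumes
+# NO_SUCH_VAR_12345 is unset in the environment):
+#
+#     >>> expandvars('$$ and %%')
+#     '$ and %'
+#     >>> expandvars('%NO_SUCH_VAR_12345%')
+#     '%NO_SUCH_VAR_12345%'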
+
+def expandvars(path):
+ """Expand shell variables of the forms $var, ${var} and %var%.
+
+ Unknown variables are left unchanged."""
+ path = os.fspath(path)
+ if isinstance(path, bytes):
+ if b'$' not in path and b'%' not in path:
+ return path
+ import string
+ varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii')
+ quote = b'\''
+ percent = b'%'
+ brace = b'{'
+ rbrace = b'}'
+ dollar = b'$'
+ environ = getattr(os, 'environb', None)
+ else:
+ if '$' not in path and '%' not in path:
+ return path
+ import string
+ varchars = string.ascii_letters + string.digits + '_-'
+ quote = '\''
+ percent = '%'
+ brace = '{'
+ rbrace = '}'
+ dollar = '$'
+ environ = os.environ
+ res = path[:0]
+ index = 0
+ pathlen = len(path)
+ while index < pathlen:
+ c = path[index:index+1]
+ if c == quote: # no expansion within single quotes
+ path = path[index + 1:]
+ pathlen = len(path)
+ try:
+ index = path.index(c)
+ res += c + path[:index + 1]
+ except ValueError:
+ res += c + path
+ index = pathlen - 1
+ elif c == percent: # variable or '%'
+ if path[index + 1:index + 2] == percent:
+ res += c
+ index += 1
+ else:
+ path = path[index+1:]
+ pathlen = len(path)
+ try:
+ index = path.index(percent)
+ except ValueError:
+ res += percent + path
+ index = pathlen - 1
+ else:
+ var = path[:index]
+ try:
+ if environ is None:
+ value = os.fsencode(os.environ[os.fsdecode(var)])
+ else:
+ value = environ[var]
+ except KeyError:
+ value = percent + var + percent
+ res += value
+ elif c == dollar: # variable or '$$'
+ if path[index + 1:index + 2] == dollar:
+ res += c
+ index += 1
+ elif path[index + 1:index + 2] == brace:
+ path = path[index+2:]
+ pathlen = len(path)
+ try:
+ index = path.index(rbrace)
+ except ValueError:
+ res += dollar + brace + path
+ index = pathlen - 1
+ else:
+ var = path[:index]
+ try:
+ if environ is None:
+ value = os.fsencode(os.environ[os.fsdecode(var)])
+ else:
+ value = environ[var]
+ except KeyError:
+ value = dollar + brace + var + rbrace
+ res += value
+ else:
+ var = path[:0]
+ index += 1
+ c = path[index:index + 1]
+ while c and c in varchars:
+ var += c
+ index += 1
+ c = path[index:index + 1]
+ try:
+ if environ is None:
+ value = os.fsencode(os.environ[os.fsdecode(var)])
+ else:
+ value = environ[var]
+ except KeyError:
+ value = dollar + var
+ res += value
+ if c:
+ index -= 1
+ else:
+ res += c
+ index += 1
+ return res
+
+
+# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
+# Previously, this function also truncated pathnames to 8+3 format,
+# but as this module is called "ntpath", that's obviously wrong!
+
+def normpath(path):
+ """Normalize path, eliminating double slashes, etc."""
+ path = os.fspath(path)
+ if isinstance(path, bytes):
+ sep = b'\\'
+ altsep = b'/'
+ curdir = b'.'
+ pardir = b'..'
+ special_prefixes = (b'\\\\.\\', b'\\\\?\\')
+ else:
+ sep = '\\'
+ altsep = '/'
+ curdir = '.'
+ pardir = '..'
+ special_prefixes = ('\\\\.\\', '\\\\?\\')
+ if path.startswith(special_prefixes):
+ # in the case of paths with these prefixes:
+ # \\.\ -> device names
+ # \\?\ -> literal paths
+ # do not do any normalization, but return the path
+ # unchanged apart from the call to os.fspath()
+ return path
+ path = path.replace(altsep, sep)
+ prefix, path = splitdrive(path)
+
+ # collapse initial backslashes
+ if path.startswith(sep):
+ prefix += sep
+ path = path.lstrip(sep)
+
+ comps = path.split(sep)
+ i = 0
+ while i < len(comps):
+ if not comps[i] or comps[i] == curdir:
+ del comps[i]
+ elif comps[i] == pardir:
+ if i > 0 and comps[i-1] != pardir:
+ del comps[i-1:i+1]
+ i -= 1
+ elif i == 0 and prefix.endswith(sep):
+ del comps[i]
+ else:
+ i += 1
+ else:
+ i += 1
+ # If the path is now empty, substitute '.'
+ if not prefix and not comps:
+ comps.append(curdir)
+ return prefix + sep.join(comps)
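+
+# Illustrative example (a sketch; backslashes doubled for Python string
+# syntax):
+#
+#     >>> normpath('A//B/./C/../D')
+#     'A\\B\\D'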
+
+def _abspath_fallback(path):
+ """Return the absolute version of a path as a fallback function in case
+ `nt._getfullpathname` is not available or raises OSError. See bpo-31047 for
+ more.
+
+ """
+
+ path = os.fspath(path)
+ if not isabs(path):
+ if isinstance(path, bytes):
+ cwd = os.getcwdb()
+ else:
+ cwd = os.getcwd()
+ path = join(cwd, path)
+ return normpath(path)
+
+# Return an absolute path.
+try:
+ from nt import _getfullpathname
+
+except ImportError: # not running on Windows - mock up something sensible
+ abspath = _abspath_fallback
+
+else: # use native Windows method on Windows
+ def abspath(path):
+ """Return the absolute version of a path."""
+ try:
+ return normpath(_getfullpathname(path))
+ except (OSError, ValueError):
+ return _abspath_fallback(path)
+
+try:
+ from nt import _getfinalpathname, readlink as _nt_readlink
+except ImportError:
+ # realpath is a no-op on systems without _getfinalpathname support.
+ realpath = abspath
+else:
+ def _readlink_deep(path):
+ # These error codes indicate that we should stop reading links and
+ # return the path we currently have.
+ # 1: ERROR_INVALID_FUNCTION
+ # 2: ERROR_FILE_NOT_FOUND
+ # 3: ERROR_DIRECTORY_NOT_FOUND
+ # 5: ERROR_ACCESS_DENIED
+ # 21: ERROR_NOT_READY (implies drive with no media)
+ # 32: ERROR_SHARING_VIOLATION (probably an NTFS paging file)
+ # 50: ERROR_NOT_SUPPORTED (implies no support for reparse points)
+ # 67: ERROR_BAD_NET_NAME (implies remote server unavailable)
+ # 87: ERROR_INVALID_PARAMETER
+ # 4390: ERROR_NOT_A_REPARSE_POINT
+ # 4392: ERROR_INVALID_REPARSE_DATA
+ # 4393: ERROR_REPARSE_TAG_INVALID
+ allowed_winerror = 1, 2, 3, 5, 21, 32, 50, 67, 87, 4390, 4392, 4393
+
+ seen = set()
+ while normcase(path) not in seen:
+ seen.add(normcase(path))
+ try:
+ old_path = path
+ path = _nt_readlink(path)
+ # Links may be relative, so resolve them against their
+ # own location
+ if not isabs(path):
+ # If it's something other than a symlink, we don't know
+ # what it's actually going to be resolved against, so
+ # just return the old path.
+ if not islink(old_path):
+ path = old_path
+ break
+ path = normpath(join(dirname(old_path), path))
+ except OSError as ex:
+ if ex.winerror in allowed_winerror:
+ break
+ raise
+ except ValueError:
+ # Stop on reparse points that are not symlinks
+ break
+ return path
+
+ def _getfinalpathname_nonstrict(path):
+ # These error codes indicate that we should stop resolving the path
+ # and return the value we currently have.
+ # 1: ERROR_INVALID_FUNCTION
+ # 2: ERROR_FILE_NOT_FOUND
+ # 3: ERROR_DIRECTORY_NOT_FOUND
+ # 5: ERROR_ACCESS_DENIED
+ # 21: ERROR_NOT_READY (implies drive with no media)
+ # 32: ERROR_SHARING_VIOLATION (probably an NTFS paging file)
+ # 50: ERROR_NOT_SUPPORTED
+ # 53: ERROR_BAD_NETPATH
+ # 65: ERROR_NETWORK_ACCESS_DENIED
+ # 67: ERROR_BAD_NET_NAME (implies remote server unavailable)
+ # 87: ERROR_INVALID_PARAMETER
+ # 123: ERROR_INVALID_NAME
+ # 161: ERROR_BAD_PATHNAME
+ # 1920: ERROR_CANT_ACCESS_FILE
+ # 1921: ERROR_CANT_RESOLVE_FILENAME (implies unfollowable symlink)
+ allowed_winerror = 1, 2, 3, 5, 21, 32, 50, 53, 65, 67, 87, 123, 161, 1920, 1921
+
+ # Non-strict algorithm is to find as much of the target directory
+ # as we can and join the rest.
+ tail = ''
+ while path:
+ try:
+ path = _getfinalpathname(path)
+ return join(path, tail) if tail else path
+ except OSError as ex:
+ if ex.winerror not in allowed_winerror:
+ raise
+ try:
+ # The OS could not resolve this path fully, so we attempt
+ # to follow the link ourselves. If we succeed, join the tail
+ # and return.
+ new_path = _readlink_deep(path)
+ if new_path != path:
+ return join(new_path, tail) if tail else new_path
+ except OSError:
+ # If we fail to readlink(), let's keep traversing
+ pass
+ path, name = split(path)
+ # TODO (bpo-38186): Request the real file name from the directory
+ # entry using FindFirstFileW. For now, we will return the path
+ # as best we have it
+ if path and not name:
+ return path + tail
+ tail = join(name, tail) if tail else name
+ return tail
+
+ def realpath(path, *, strict=False):
+ path = normpath(path)
+ if isinstance(path, bytes):
+ prefix = b'\\\\?\\'
+ unc_prefix = b'\\\\?\\UNC\\'
+ new_unc_prefix = b'\\\\'
+ cwd = os.getcwdb()
+ # bpo-38081: Special case for realpath(b'nul')
+ if normcase(path) == normcase(os.fsencode(devnull)):
+ return b'\\\\.\\NUL'
+ else:
+ prefix = '\\\\?\\'
+ unc_prefix = '\\\\?\\UNC\\'
+ new_unc_prefix = '\\\\'
+ cwd = os.getcwd()
+ # bpo-38081: Special case for realpath('nul')
+ if normcase(path) == normcase(devnull):
+ return '\\\\.\\NUL'
+ had_prefix = path.startswith(prefix)
+ if not had_prefix and not isabs(path):
+ path = join(cwd, path)
+ try:
+ path = _getfinalpathname(path)
+ initial_winerror = 0
+ except OSError as ex:
+ if strict:
+ raise
+ initial_winerror = ex.winerror
+ path = _getfinalpathname_nonstrict(path)
+ # The path returned by _getfinalpathname will always start with \\?\ -
+ # strip off that prefix unless it was already provided on the original
+ # path.
+ if not had_prefix and path.startswith(prefix):
+ # For UNC paths, the prefix will actually be \\?\UNC\
+ # Handle that case as well.
+ if path.startswith(unc_prefix):
+ spath = new_unc_prefix + path[len(unc_prefix):]
+ else:
+ spath = path[len(prefix):]
+ # Ensure that the non-prefixed path resolves to the same path
+ try:
+ if _getfinalpathname(spath) == path:
+ path = spath
+ except OSError as ex:
+ # If the path does not exist and originally did not exist, then
+ # strip the prefix anyway.
+ if ex.winerror == initial_winerror:
+ path = spath
+ return path
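+
+# Usage sketch (illustrative, not part of the upstream module; assumes a
+# Windows host where this module is os.path): symlinks and '..' components
+# are resolved, and the \\?\ prefix added by _getfinalpathname is stripped
+# again unless the caller supplied it.
+#
+#     >>> import os
+#     >>> os.path.realpath('C:/Windows/System32/..')   # doctest: +SKIP
+#     'C:\\Windows'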
+
+
+# Win9x family and earlier have no Unicode filename support.
+supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
+ sys.getwindowsversion()[3] >= 2)
+
+def relpath(path, start=None):
+ """Return a relative version of a path"""
+ path = os.fspath(path)
+ if isinstance(path, bytes):
+ sep = b'\\'
+ curdir = b'.'
+ pardir = b'..'
+ else:
+ sep = '\\'
+ curdir = '.'
+ pardir = '..'
+
+ if start is None:
+ start = curdir
+
+ if not path:
+ raise ValueError("no path specified")
+
+ start = os.fspath(start)
+ try:
+ start_abs = abspath(normpath(start))
+ path_abs = abspath(normpath(path))
+ start_drive, start_rest = splitdrive(start_abs)
+ path_drive, path_rest = splitdrive(path_abs)
+ if normcase(start_drive) != normcase(path_drive):
+ raise ValueError("path is on mount %r, start on mount %r" % (
+ path_drive, start_drive))
+
+ start_list = [x for x in start_rest.split(sep) if x]
+ path_list = [x for x in path_rest.split(sep) if x]
+ # Work out how much of the filepath is shared by start and path.
+ i = 0
+ for e1, e2 in zip(start_list, path_list):
+ if normcase(e1) != normcase(e2):
+ break
+ i += 1
+
+ rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return curdir
+ return join(*rel_list)
+ except (TypeError, ValueError, AttributeError, BytesWarning, DeprecationWarning):
+ genericpath._check_arg_types('relpath', path, start)
+ raise
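+
+# Usage sketch (illustrative, not part of the upstream module): the shared
+# prefix is matched case-insensitively, the rest of start is backed out
+# with '..' components, and paths on different drives raise ValueError.
+#
+#     >>> import ntpath
+#     >>> ntpath.relpath('C:\\a\\b\\c', 'C:\\a\\x')
+#     '..\\b\\c'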
+
+
+# Return the longest common sub-path of the sequence of paths given as input.
+# The function is case-insensitive and 'separator-insensitive', i.e. if the
+# only difference between two paths is the use of '\' versus '/' as separator,
+# they are deemed to be equal.
+#
+# However, the returned path will have the standard '\' separator (even if the
+# given paths had the alternative '/' separator) and will have the case of the
+# first path given in the sequence. Additionally, any trailing separator is
+# stripped from the returned path.
+
+def commonpath(paths):
+ """Given a sequence of path names, returns the longest common sub-path."""
+
+ if not paths:
+ raise ValueError('commonpath() arg is an empty sequence')
+
+ paths = tuple(map(os.fspath, paths))
+ if isinstance(paths[0], bytes):
+ sep = b'\\'
+ altsep = b'/'
+ curdir = b'.'
+ else:
+ sep = '\\'
+ altsep = '/'
+ curdir = '.'
+
+ try:
+ drivesplits = [splitdrive(p.replace(altsep, sep).lower()) for p in paths]
+ split_paths = [p.split(sep) for d, p in drivesplits]
+
+ try:
+ isabs, = set(p[:1] == sep for d, p in drivesplits)
+ except ValueError:
+ raise ValueError("Can't mix absolute and relative paths") from None
+
+ # Check that all drive letters or UNC paths match. The check is made only
+ # now, otherwise type errors for mixing strings and bytes would not be
+ # caught.
+ if len(set(d for d, p in drivesplits)) != 1:
+ raise ValueError("Paths don't have the same drive")
+
+ drive, path = splitdrive(paths[0].replace(altsep, sep))
+ common = path.split(sep)
+ common = [c for c in common if c and c != curdir]
+
+ split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
+ s1 = min(split_paths)
+ s2 = max(split_paths)
+ for i, c in enumerate(s1):
+ if c != s2[i]:
+ common = common[:i]
+ break
+ else:
+ common = common[:len(s1)]
+
+ prefix = drive + sep if isabs else drive
+ return prefix + sep.join(common)
+ except (TypeError, AttributeError):
+ genericpath._check_arg_types('commonpath', *paths)
+ raise
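+
+# Usage sketch (illustrative, not part of the upstream module): components
+# and drives are compared case-insensitively and '/' counts as '\\', but
+# the result keeps the first path's casing and the '\\' separator.
+#
+#     >>> import ntpath
+#     >>> ntpath.commonpath(['C:\\Foo\\bar', 'c:/foo/baz'])
+#     'C:\\Foo'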
+
+
+try:
+ # The genericpath.isdir implementation uses os.stat and checks the mode
+ # attribute to tell whether or not the path is a directory.
+ # This is overkill on Windows - just pass the path to GetFileAttributes
+ # and check the attribute from there.
+ from nt import _isdir as isdir
+except ImportError:
+ # Use genericpath.isdir as imported above.
+ pass
diff --git a/infer_4_37_2/lib/python3.10/numbers.py b/infer_4_37_2/lib/python3.10/numbers.py
new file mode 100644
index 0000000000000000000000000000000000000000..0985dd85f60a781603f867b7a4bede1c0b313fb1
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/numbers.py
@@ -0,0 +1,393 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
+
+TODO: Fill out more detailed documentation on the operators."""
+
+from abc import ABCMeta, abstractmethod
+
+__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
+
+class Number(metaclass=ABCMeta):
+ """All numbers inherit from this class.
+
+ If you just want to check if an argument x is a number, without
+ caring what kind, use isinstance(x, Number).
+ """
+ __slots__ = ()
+
+ # Concrete numeric types must provide their own hash implementation
+ __hash__ = None
+
+
+## Notes on Decimal
+## ----------------
+## Decimal has all of the methods specified by the Real abc, but it should
+## not be registered as a Real because decimals do not interoperate with
+## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
+## abstract reals are expected to interoperate (i.e. R1 + R2 should be
+## expected to work if R1 and R2 are both Reals).
+
+class Complex(Number):
+ """Complex defines the operations that work on the builtin complex type.
+
+ In short, those are: a conversion to complex, .real, .imag, +, -,
+ *, /, **, abs(), .conjugate, ==, and !=.
+
+ If it is given heterogeneous arguments, and doesn't have special
+ knowledge about them, it should fall back to the builtin complex
+ type as described below.
+ """
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __complex__(self):
+ """Return a builtin complex instance. Called for complex(self)."""
+
+ def __bool__(self):
+ """True if self != 0. Called for bool(self)."""
+ return self != 0
+
+ @property
+ @abstractmethod
+ def real(self):
+ """Retrieve the real component of this number.
+
+ This should subclass Real.
+ """
+ raise NotImplementedError
+
+ @property
+ @abstractmethod
+ def imag(self):
+ """Retrieve the imaginary component of this number.
+
+ This should subclass Real.
+ """
+ raise NotImplementedError
+
+ @abstractmethod
+ def __add__(self, other):
+ """self + other"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __radd__(self, other):
+ """other + self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __neg__(self):
+ """-self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __pos__(self):
+ """+self"""
+ raise NotImplementedError
+
+ def __sub__(self, other):
+ """self - other"""
+ return self + -other
+
+ def __rsub__(self, other):
+ """other - self"""
+ return -self + other
+
+ @abstractmethod
+ def __mul__(self, other):
+ """self * other"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rmul__(self, other):
+ """other * self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __truediv__(self, other):
+ """self / other: Should promote to float when necessary."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rtruediv__(self, other):
+ """other / self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __pow__(self, exponent):
+ """self**exponent; should promote to float or complex when necessary."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rpow__(self, base):
+ """base ** self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __abs__(self):
+ """Returns the Real distance from 0. Called for abs(self)."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def conjugate(self):
+ """(x+y*i).conjugate() returns (x-y*i)."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __eq__(self, other):
+ """self == other"""
+ raise NotImplementedError
+
+Complex.register(complex)
+
+
+class Real(Complex):
+ """To Complex, Real adds the operations that work on real numbers.
+
+ In short, those are: a conversion to float, trunc(), divmod,
+ %, <, <=, >, and >=.
+
+ Real also provides defaults for the derived operations.
+ """
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __float__(self):
+ """Any Real can be converted to a native float object.
+
+ Called for float(self)."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __trunc__(self):
+ """trunc(self): Truncates self to an Integral.
+
+ Returns an Integral i such that:
+ * i>0 iff self>0;
+ * abs(i) <= abs(self);
+ * for any Integral j satisfying the first two conditions,
+ abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
+ i.e. "truncate towards 0".
+ """
+ raise NotImplementedError
+
+ @abstractmethod
+ def __floor__(self):
+ """Finds the greatest Integral <= self."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __ceil__(self):
+ """Finds the least Integral >= self."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __round__(self, ndigits=None):
+ """Rounds self to ndigits decimal places, defaulting to 0.
+
+ If ndigits is omitted or None, returns an Integral, otherwise
+ returns a Real. Rounds half toward even.
+ """
+ raise NotImplementedError
+
+ def __divmod__(self, other):
+ """divmod(self, other): The pair (self // other, self % other).
+
+ Sometimes this can be computed faster than the pair of
+ operations.
+ """
+ return (self // other, self % other)
+
+ def __rdivmod__(self, other):
+ """divmod(other, self): The pair (self // other, self % other).
+
+ Sometimes this can be computed faster than the pair of
+ operations.
+ """
+ return (other // self, other % self)
+
+ @abstractmethod
+ def __floordiv__(self, other):
+ """self // other: The floor() of self/other."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rfloordiv__(self, other):
+ """other // self: The floor() of other/self."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __mod__(self, other):
+ """self % other"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rmod__(self, other):
+ """other % self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __lt__(self, other):
+ """self < other
+
+ < on Reals defines a total ordering, except perhaps for NaN."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __le__(self, other):
+ """self <= other"""
+ raise NotImplementedError
+
+ # Concrete implementations of Complex abstract methods.
+ def __complex__(self):
+ """complex(self) == complex(float(self), 0)"""
+ return complex(float(self))
+
+ @property
+ def real(self):
+ """Real numbers are their real component."""
+ return +self
+
+ @property
+ def imag(self):
+ """Real numbers have no imaginary component."""
+ return 0
+
+ def conjugate(self):
+ """Conjugate is a no-op for Reals."""
+ return +self
+
+Real.register(float)
+
+
+class Rational(Real):
+ """.numerator and .denominator should be in lowest terms."""
+
+ __slots__ = ()
+
+ @property
+ @abstractmethod
+ def numerator(self):
+ raise NotImplementedError
+
+ @property
+ @abstractmethod
+ def denominator(self):
+ raise NotImplementedError
+
+ # Concrete implementation of Real's conversion to float.
+ def __float__(self):
+ """float(self) = self.numerator / self.denominator
+
+ It's important that this conversion use the integer's "true"
+ division rather than casting one side to float before dividing
+ so that ratios of huge integers convert without overflowing.
+
+ """
+ return int(self.numerator) / int(self.denominator)
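+
+# Worked example (illustrative, not part of the upstream module): int's
+# true division is correctly rounded even when neither operand fits in a
+# float, which is exactly the case a float-first conversion would break.
+#
+#     >>> (10**400) / (2 * 10**400)
+#     0.5
+#     >>> float(10**400)
+#     Traceback (most recent call last):
+#       ...
+#     OverflowError: int too large to convert to float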
+
+
+class Integral(Rational):
+ """Integral adds methods that work on integral numbers.
+
+ In short, these are conversion to int, pow with modulus, and the
+ bit-string operations.
+ """
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __int__(self):
+ """int(self)"""
+ raise NotImplementedError
+
+ def __index__(self):
+ """Called whenever an index is needed, such as in slicing"""
+ return int(self)
+
+ @abstractmethod
+ def __pow__(self, exponent, modulus=None):
+ """self ** exponent % modulus, but maybe faster.
+
+ Accept the modulus argument if you want to support the
+ 3-argument version of pow(). Raise a TypeError if exponent < 0
+ or any argument isn't Integral. Otherwise, just implement the
+ 2-argument version described in Complex.
+ """
+ raise NotImplementedError
+
+ @abstractmethod
+ def __lshift__(self, other):
+ """self << other"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rlshift__(self, other):
+ """other << self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rshift__(self, other):
+ """self >> other"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rrshift__(self, other):
+ """other >> self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __and__(self, other):
+ """self & other"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rand__(self, other):
+ """other & self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __xor__(self, other):
+ """self ^ other"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __rxor__(self, other):
+ """other ^ self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __or__(self, other):
+ """self | other"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __ror__(self, other):
+ """other | self"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def __invert__(self):
+ """~self"""
+ raise NotImplementedError
+
+ # Concrete implementations of Rational and Real abstract methods.
+ def __float__(self):
+ """float(self) == float(int(self))"""
+ return float(int(self))
+
+ @property
+ def numerator(self):
+ """Integers are their own numerators."""
+ return +self
+
+ @property
+ def denominator(self):
+ """Integers have a denominator of 1."""
+ return 1
+
+Integral.register(int)
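+
+# Usage sketch (illustrative, not part of the upstream module): with the
+# registrations above, the builtins slot into the tower
+# Number > Complex > Real > Rational > Integral.
+#
+#     >>> import numbers
+#     >>> isinstance(3, numbers.Integral), isinstance(3, numbers.Complex)
+#     (True, True)
+#     >>> isinstance(3.14, numbers.Real), isinstance(3.14, numbers.Integral)
+#     (True, False)
+#     >>> isinstance(1j, numbers.Complex), isinstance(1j, numbers.Real)
+#     (True, False)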
diff --git a/infer_4_37_2/lib/python3.10/operator.py b/infer_4_37_2/lib/python3.10/operator.py
new file mode 100644
index 0000000000000000000000000000000000000000..241fdbb679e7c136808336539cd97a458543662d
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/operator.py
@@ -0,0 +1,460 @@
+"""
+Operator Interface
+
+This module exports a set of functions corresponding to the intrinsic
+operators of Python. For example, operator.add(x, y) is equivalent
+to the expression x+y. The function names are those used for special
+methods; variants without leading and trailing '__' are also provided
+for convenience.
+
+This is the pure Python implementation of the module.
+"""
+
+__all__ = ['abs', 'add', 'and_', 'attrgetter', 'concat', 'contains', 'countOf',
+ 'delitem', 'eq', 'floordiv', 'ge', 'getitem', 'gt', 'iadd', 'iand',
+ 'iconcat', 'ifloordiv', 'ilshift', 'imatmul', 'imod', 'imul',
+ 'index', 'indexOf', 'inv', 'invert', 'ior', 'ipow', 'irshift',
+ 'is_', 'is_not', 'isub', 'itemgetter', 'itruediv', 'ixor', 'le',
+ 'length_hint', 'lshift', 'lt', 'matmul', 'methodcaller', 'mod',
+ 'mul', 'ne', 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift',
+ 'setitem', 'sub', 'truediv', 'truth', 'xor']
+
+from builtins import abs as _abs
+
+
+# Comparison Operations *******************************************************#
+
+def lt(a, b):
+ "Same as a < b."
+ return a < b
+
+def le(a, b):
+ "Same as a <= b."
+ return a <= b
+
+def eq(a, b):
+ "Same as a == b."
+ return a == b
+
+def ne(a, b):
+ "Same as a != b."
+ return a != b
+
+def ge(a, b):
+ "Same as a >= b."
+ return a >= b
+
+def gt(a, b):
+ "Same as a > b."
+ return a > b
+
+# Logical Operations **********************************************************#
+
+def not_(a):
+ "Same as not a."
+ return not a
+
+def truth(a):
+ "Return True if a is true, False otherwise."
+ return True if a else False
+
+def is_(a, b):
+ "Same as a is b."
+ return a is b
+
+def is_not(a, b):
+ "Same as a is not b."
+ return a is not b
+
+# Mathematical/Bitwise Operations *********************************************#
+
+def abs(a):
+ "Same as abs(a)."
+ return _abs(a)
+
+def add(a, b):
+ "Same as a + b."
+ return a + b
+
+def and_(a, b):
+ "Same as a & b."
+ return a & b
+
+def floordiv(a, b):
+ "Same as a // b."
+ return a // b
+
+def index(a):
+ "Same as a.__index__()."
+ return a.__index__()
+
+def inv(a):
+ "Same as ~a."
+ return ~a
+invert = inv
+
+def lshift(a, b):
+ "Same as a << b."
+ return a << b
+
+def mod(a, b):
+ "Same as a % b."
+ return a % b
+
+def mul(a, b):
+ "Same as a * b."
+ return a * b
+
+def matmul(a, b):
+ "Same as a @ b."
+ return a @ b
+
+def neg(a):
+ "Same as -a."
+ return -a
+
+def or_(a, b):
+ "Same as a | b."
+ return a | b
+
+def pos(a):
+ "Same as +a."
+ return +a
+
+def pow(a, b):
+ "Same as a ** b."
+ return a ** b
+
+def rshift(a, b):
+ "Same as a >> b."
+ return a >> b
+
+def sub(a, b):
+ "Same as a - b."
+ return a - b
+
+def truediv(a, b):
+ "Same as a / b."
+ return a / b
+
+def xor(a, b):
+ "Same as a ^ b."
+ return a ^ b
+
+# Sequence Operations *********************************************************#
+
+def concat(a, b):
+ "Same as a + b, for a and b sequences."
+ if not hasattr(a, '__getitem__'):
+ msg = "'%s' object can't be concatenated" % type(a).__name__
+ raise TypeError(msg)
+ return a + b
+
+def contains(a, b):
+ "Same as b in a (note reversed operands)."
+ return b in a
+
+def countOf(a, b):
+ "Return the number of items in a which are, or which equal, b."
+ count = 0
+ for i in a:
+ if i is b or i == b:
+ count += 1
+ return count
+
+def delitem(a, b):
+ "Same as del a[b]."
+ del a[b]
+
+def getitem(a, b):
+ "Same as a[b]."
+ return a[b]
+
+def indexOf(a, b):
+ "Return the first index of b in a."
+ for i, j in enumerate(a):
+ if j is b or j == b:
+ return i
+ else:
+ raise ValueError('sequence.index(x): x not in sequence')
+
+def setitem(a, b, c):
+ "Same as a[b] = c."
+ a[b] = c
+
+def length_hint(obj, default=0):
+ """
+ Return an estimate of the number of items in obj.
+ This is useful for presizing containers when building from an iterable.
+
+ If the object supports len(), the result will be exact. Otherwise, it may
+ over- or under-estimate by an arbitrary amount. The result will be an
+ integer >= 0.
+ """
+ if not isinstance(default, int):
+ msg = ("'%s' object cannot be interpreted as an integer" %
+ type(default).__name__)
+ raise TypeError(msg)
+
+ try:
+ return len(obj)
+ except TypeError:
+ pass
+
+ try:
+ hint = type(obj).__length_hint__
+ except AttributeError:
+ return default
+
+ try:
+ val = hint(obj)
+ except TypeError:
+ return default
+ if val is NotImplemented:
+ return default
+ if not isinstance(val, int):
+ msg = ('__length_hint__ must be integer, not %s' %
+ type(val).__name__)
+ raise TypeError(msg)
+ if val < 0:
+ msg = '__length_hint__() should return >= 0'
+ raise ValueError(msg)
+ return val
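+
+# Usage sketch (illustrative, not part of the upstream module): len() is
+# preferred, then __length_hint__, then the supplied default.
+#
+#     >>> import operator
+#     >>> operator.length_hint([1, 2, 3])
+#     3
+#     >>> operator.length_hint(iter(range(10)))
+#     10
+#     >>> operator.length_hint(object(), 7)
+#     7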
+
+# Generalized Lookup Objects **************************************************#
+
+class attrgetter:
+ """
+ Return a callable object that fetches the given attribute(s) from its operand.
+ After f = attrgetter('name'), the call f(r) returns r.name.
+ After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).
+ After h = attrgetter('name.first', 'name.last'), the call h(r) returns
+ (r.name.first, r.name.last).
+ """
+ __slots__ = ('_attrs', '_call')
+
+ def __init__(self, attr, *attrs):
+ if not attrs:
+ if not isinstance(attr, str):
+ raise TypeError('attribute name must be a string')
+ self._attrs = (attr,)
+ names = attr.split('.')
+ def func(obj):
+ for name in names:
+ obj = getattr(obj, name)
+ return obj
+ self._call = func
+ else:
+ self._attrs = (attr,) + attrs
+ getters = tuple(map(attrgetter, self._attrs))
+ def func(obj):
+ return tuple(getter(obj) for getter in getters)
+ self._call = func
+
+ def __call__(self, obj):
+ return self._call(obj)
+
+ def __repr__(self):
+ return '%s.%s(%s)' % (self.__class__.__module__,
+ self.__class__.__qualname__,
+ ', '.join(map(repr, self._attrs)))
+
+ def __reduce__(self):
+ return self.__class__, self._attrs
+
+class itemgetter:
+ """
+ Return a callable object that fetches the given item(s) from its operand.
+ After f = itemgetter(2), the call f(r) returns r[2].
+ After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3])
+ """
+ __slots__ = ('_items', '_call')
+
+ def __init__(self, item, *items):
+ if not items:
+ self._items = (item,)
+ def func(obj):
+ return obj[item]
+ self._call = func
+ else:
+ self._items = items = (item,) + items
+ def func(obj):
+ return tuple(obj[i] for i in items)
+ self._call = func
+
+ def __call__(self, obj):
+ return self._call(obj)
+
+ def __repr__(self):
+ return '%s.%s(%s)' % (self.__class__.__module__,
+ self.__class__.__name__,
+ ', '.join(map(repr, self._items)))
+
+ def __reduce__(self):
+ return self.__class__, self._items
+
+class methodcaller:
+ """
+ Return a callable object that calls the given method on its operand.
+ After f = methodcaller('name'), the call f(r) returns r.name().
+ After g = methodcaller('name', 'date', foo=1), the call g(r) returns
+ r.name('date', foo=1).
+ """
+ __slots__ = ('_name', '_args', '_kwargs')
+
+ def __init__(self, name, /, *args, **kwargs):
+ self._name = name
+ if not isinstance(self._name, str):
+ raise TypeError('method name must be a string')
+ self._args = args
+ self._kwargs = kwargs
+
+ def __call__(self, obj):
+ return getattr(obj, self._name)(*self._args, **self._kwargs)
+
+ def __repr__(self):
+ args = [repr(self._name)]
+ args.extend(map(repr, self._args))
+ args.extend('%s=%r' % (k, v) for k, v in self._kwargs.items())
+ return '%s.%s(%s)' % (self.__class__.__module__,
+ self.__class__.__name__,
+ ', '.join(args))
+
+ def __reduce__(self):
+ if not self._kwargs:
+ return self.__class__, (self._name,) + self._args
+ else:
+ from functools import partial
+ return partial(self.__class__, self._name, **self._kwargs), self._args
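+
+# Usage sketch (illustrative, not part of the upstream module): each of the
+# three factories above builds a reusable callable, handy as a sort key.
+#
+#     >>> from operator import attrgetter, itemgetter, methodcaller
+#     >>> itemgetter(1)(['a', 'b', 'c'])
+#     'b'
+#     >>> attrgetter('imag')(3 + 4j)
+#     4.0
+#     >>> methodcaller('split', ',')('a,b')
+#     ['a', 'b']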
+
+
+# In-place Operations *********************************************************#
+
+def iadd(a, b):
+ "Same as a += b."
+ a += b
+ return a
+
+def iand(a, b):
+ "Same as a &= b."
+ a &= b
+ return a
+
+def iconcat(a, b):
+ "Same as a += b, for a and b sequences."
+ if not hasattr(a, '__getitem__'):
+ msg = "'%s' object can't be concatenated" % type(a).__name__
+ raise TypeError(msg)
+ a += b
+ return a
+
+def ifloordiv(a, b):
+ "Same as a //= b."
+ a //= b
+ return a
+
+def ilshift(a, b):
+ "Same as a <<= b."
+ a <<= b
+ return a
+
+def imod(a, b):
+ "Same as a %= b."
+ a %= b
+ return a
+
+def imul(a, b):
+ "Same as a *= b."
+ a *= b
+ return a
+
+def imatmul(a, b):
+ "Same as a @= b."
+ a @= b
+ return a
+
+def ior(a, b):
+ "Same as a |= b."
+ a |= b
+ return a
+
+def ipow(a, b):
+ "Same as a **= b."
+ a **= b
+ return a
+
+def irshift(a, b):
+ "Same as a >>= b."
+ a >>= b
+ return a
+
+def isub(a, b):
+ "Same as a -= b."
+ a -= b
+ return a
+
+def itruediv(a, b):
+ "Same as a /= b."
+ a /= b
+ return a
+
+def ixor(a, b):
+ "Same as a ^= b."
+ a ^= b
+ return a
+
+
+try:
+ from _operator import *
+except ImportError:
+ pass
+else:
+ from _operator import __doc__
+
+# All of these "__func__ = func" assignments have to happen after importing
+# from _operator to make sure they're set to the right function
+__lt__ = lt
+__le__ = le
+__eq__ = eq
+__ne__ = ne
+__ge__ = ge
+__gt__ = gt
+__not__ = not_
+__abs__ = abs
+__add__ = add
+__and__ = and_
+__floordiv__ = floordiv
+__index__ = index
+__inv__ = inv
+__invert__ = invert
+__lshift__ = lshift
+__mod__ = mod
+__mul__ = mul
+__matmul__ = matmul
+__neg__ = neg
+__or__ = or_
+__pos__ = pos
+__pow__ = pow
+__rshift__ = rshift
+__sub__ = sub
+__truediv__ = truediv
+__xor__ = xor
+__concat__ = concat
+__contains__ = contains
+__delitem__ = delitem
+__getitem__ = getitem
+__setitem__ = setitem
+__iadd__ = iadd
+__iand__ = iand
+__iconcat__ = iconcat
+__ifloordiv__ = ifloordiv
+__ilshift__ = ilshift
+__imod__ = imod
+__imul__ = imul
+__imatmul__ = imatmul
+__ior__ = ior
+__ipow__ = ipow
+__irshift__ = irshift
+__isub__ = isub
+__itruediv__ = itruediv
+__ixor__ = ixor
diff --git a/infer_4_37_2/lib/python3.10/optparse.py b/infer_4_37_2/lib/python3.10/optparse.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c450c6fcbe3b62b2247c2fb25a8112f6abca6f6
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/optparse.py
@@ -0,0 +1,1681 @@
+"""A powerful, extensible, and easy-to-use option parser.
+
+By Greg Ward
+
+Originally distributed as Optik.
+
+For support, use the optik-users@lists.sourceforge.net mailing list
+(http://lists.sourceforge.net/lists/listinfo/optik-users).
+
+Simple usage example:
+
+ from optparse import OptionParser
+
+ parser = OptionParser()
+ parser.add_option("-f", "--file", dest="filename",
+ help="write report to FILE", metavar="FILE")
+ parser.add_option("-q", "--quiet",
+ action="store_false", dest="verbose", default=True,
+ help="don't print status messages to stdout")
+
+ (options, args) = parser.parse_args()
+"""
+
+__version__ = "1.5.3"
+
+__all__ = ['Option',
+ 'make_option',
+ 'SUPPRESS_HELP',
+ 'SUPPRESS_USAGE',
+ 'Values',
+ 'OptionContainer',
+ 'OptionGroup',
+ 'OptionParser',
+ 'HelpFormatter',
+ 'IndentedHelpFormatter',
+ 'TitledHelpFormatter',
+ 'OptParseError',
+ 'OptionError',
+ 'OptionConflictError',
+ 'OptionValueError',
+ 'BadOptionError',
+ 'check_choice']
+
+__copyright__ = """
+Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
+Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the author nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import sys, os
+import textwrap
+
+def _repr(self):
+ return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
+
+
+# This file was generated from:
+# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
+# Id: option.py 522 2006-06-11 16:22:03Z gward
+# Id: help.py 527 2006-07-23 15:21:30Z greg
+# Id: errors.py 509 2006-04-20 00:58:24Z gward
+
+try:
+ from gettext import gettext, ngettext
+except ImportError:
+ def gettext(message):
+ return message
+
+ def ngettext(singular, plural, n):
+ if n == 1:
+ return singular
+ return plural
+
+_ = gettext
+
+
+class OptParseError (Exception):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+class OptionError (OptParseError):
+ """
+ Raised if an Option instance is created with invalid or
+ inconsistent arguments.
+ """
+
+ def __init__(self, msg, option):
+ self.msg = msg
+ self.option_id = str(option)
+
+ def __str__(self):
+ if self.option_id:
+ return "option %s: %s" % (self.option_id, self.msg)
+ else:
+ return self.msg
+
+class OptionConflictError (OptionError):
+ """
+ Raised if conflicting options are added to an OptionParser.
+ """
+
+class OptionValueError (OptParseError):
+ """
+ Raised if an invalid option value is encountered on the command
+ line.
+ """
+
+class BadOptionError (OptParseError):
+ """
+ Raised if an invalid option is seen on the command line.
+ """
+ def __init__(self, opt_str):
+ self.opt_str = opt_str
+
+ def __str__(self):
+ return _("no such option: %s") % self.opt_str
+
+class AmbiguousOptionError (BadOptionError):
+ """
+ Raised if an ambiguous option is seen on the command line.
+ """
+ def __init__(self, opt_str, possibilities):
+ BadOptionError.__init__(self, opt_str)
+ self.possibilities = possibilities
+
+ def __str__(self):
+ return (_("ambiguous option: %s (%s?)")
+ % (self.opt_str, ", ".join(self.possibilities)))
+
+
+class HelpFormatter:
+
+ """
+ Abstract base class for formatting option help. OptionParser
+ instances should use one of the HelpFormatter subclasses for
+ formatting help; by default IndentedHelpFormatter is used.
+
+ Instance attributes:
+ parser : OptionParser
+ the controlling OptionParser instance
+ indent_increment : int
+ the number of columns to indent per nesting level
+ max_help_position : int
+ the maximum starting column for option help text
+ help_position : int
+ the calculated starting column for option help text;
+ initially the same as the maximum
+ width : int
+ total number of columns for output (pass None to constructor for
+ this value to be taken from the $COLUMNS environment variable)
+ level : int
+ current indentation level
+ current_indent : int
+ current indentation level (in columns)
+ help_width : int
+ number of columns available for option help text (calculated)
+ default_tag : str
+ text to replace with each option's default value, "%default"
+ by default. Set to false value to disable default value expansion.
+ option_strings : { Option : str }
+ maps Option instances to the snippet of help text explaining
+ the syntax of that option, e.g. "-h, --help" or
+ "-fFILE, --file=FILE"
+ _short_opt_fmt : str
+ format string controlling how short options with values are
+ printed in help text. Must be either "%s%s" ("-fFILE") or
+ "%s %s" ("-f FILE"), because those are the two syntaxes that
+ Optik supports.
+ _long_opt_fmt : str
+ similar but for long options; must be either "%s %s" ("--file FILE")
+ or "%s=%s" ("--file=FILE").
+ """
+
+ NO_DEFAULT_VALUE = "none"
+
+ def __init__(self,
+ indent_increment,
+ max_help_position,
+ width,
+ short_first):
+ self.parser = None
+ self.indent_increment = indent_increment
+ if width is None:
+ try:
+ width = int(os.environ['COLUMNS'])
+ except (KeyError, ValueError):
+ width = 80
+ width -= 2
+ self.width = width
+ self.help_position = self.max_help_position = \
+ min(max_help_position, max(width - 20, indent_increment * 2))
+ self.current_indent = 0
+ self.level = 0
+ self.help_width = None # computed later
+ self.short_first = short_first
+ self.default_tag = "%default"
+ self.option_strings = {}
+ self._short_opt_fmt = "%s %s"
+ self._long_opt_fmt = "%s=%s"
+
+ def set_parser(self, parser):
+ self.parser = parser
+
+ def set_short_opt_delimiter(self, delim):
+ if delim not in ("", " "):
+ raise ValueError(
+ "invalid metavar delimiter for short options: %r" % delim)
+ self._short_opt_fmt = "%s" + delim + "%s"
+
+ def set_long_opt_delimiter(self, delim):
+ if delim not in ("=", " "):
+ raise ValueError(
+ "invalid metavar delimiter for long options: %r" % delim)
+ self._long_opt_fmt = "%s" + delim + "%s"
+
+ def indent(self):
+ self.current_indent += self.indent_increment
+ self.level += 1
+
+ def dedent(self):
+ self.current_indent -= self.indent_increment
+ assert self.current_indent >= 0, "Indent decreased below 0."
+ self.level -= 1
+
+ def format_usage(self, usage):
+ raise NotImplementedError("subclasses must implement")
+
+ def format_heading(self, heading):
+ raise NotImplementedError("subclasses must implement")
+
+ def _format_text(self, text):
+ """
+ Format a paragraph of free-form text for inclusion in the
+ help output at the current indentation level.
+ """
+ text_width = max(self.width - self.current_indent, 11)
+ indent = " "*self.current_indent
+ return textwrap.fill(text,
+ text_width,
+ initial_indent=indent,
+ subsequent_indent=indent)
+
+ def format_description(self, description):
+ if description:
+ return self._format_text(description) + "\n"
+ else:
+ return ""
+
+ def format_epilog(self, epilog):
+ if epilog:
+ return "\n" + self._format_text(epilog) + "\n"
+ else:
+ return ""
+
+
+ def expand_default(self, option):
+ if self.parser is None or not self.default_tag:
+ return option.help
+
+ default_value = self.parser.defaults.get(option.dest)
+ if default_value is NO_DEFAULT or default_value is None:
+ default_value = self.NO_DEFAULT_VALUE
+
+ return option.help.replace(self.default_tag, str(default_value))
+
+ def format_option(self, option):
+ # The help for each option consists of two parts:
+ # * the opt strings and metavars
+ # eg. ("-x", or "-fFILENAME, --file=FILENAME")
+ # * the user-supplied help string
+ # eg. ("turn on expert mode", "read data from FILENAME")
+ #
+ # If possible, we write both of these on the same line:
+ # -x turn on expert mode
+ #
+ # But if the opt string list is too long, we put the help
+ # string on a second line, indented to the same column it would
+ # start in if it fit on the first line.
+ # -fFILENAME, --file=FILENAME
+ # read data from FILENAME
+ result = []
+ opts = self.option_strings[option]
+ opt_width = self.help_position - self.current_indent - 2
+ if len(opts) > opt_width:
+ opts = "%*s%s\n" % (self.current_indent, "", opts)
+ indent_first = self.help_position
+ else: # start help on same line as opts
+ opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
+ indent_first = 0
+ result.append(opts)
+ if option.help:
+ help_text = self.expand_default(option)
+ help_lines = textwrap.wrap(help_text, self.help_width)
+ result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
+ result.extend(["%*s%s\n" % (self.help_position, "", line)
+ for line in help_lines[1:]])
+ elif opts[-1] != "\n":
+ result.append("\n")
+ return "".join(result)
+
+ def store_option_strings(self, parser):
+ self.indent()
+ max_len = 0
+ for opt in parser.option_list:
+ strings = self.format_option_strings(opt)
+ self.option_strings[opt] = strings
+ max_len = max(max_len, len(strings) + self.current_indent)
+ self.indent()
+ for group in parser.option_groups:
+ for opt in group.option_list:
+ strings = self.format_option_strings(opt)
+ self.option_strings[opt] = strings
+ max_len = max(max_len, len(strings) + self.current_indent)
+ self.dedent()
+ self.dedent()
+ self.help_position = min(max_len + 2, self.max_help_position)
+ self.help_width = max(self.width - self.help_position, 11)
+
+ def format_option_strings(self, option):
+ """Return a comma-separated list of option strings & metavariables."""
+ if option.takes_value():
+ metavar = option.metavar or option.dest.upper()
+ short_opts = [self._short_opt_fmt % (sopt, metavar)
+ for sopt in option._short_opts]
+ long_opts = [self._long_opt_fmt % (lopt, metavar)
+ for lopt in option._long_opts]
+ else:
+ short_opts = option._short_opts
+ long_opts = option._long_opts
+
+ if self.short_first:
+ opts = short_opts + long_opts
+ else:
+ opts = long_opts + short_opts
+
+ return ", ".join(opts)
+
+class IndentedHelpFormatter (HelpFormatter):
+ """Format help with indented section bodies.
+ """
+
+ def __init__(self,
+ indent_increment=2,
+ max_help_position=24,
+ width=None,
+ short_first=1):
+ HelpFormatter.__init__(
+ self, indent_increment, max_help_position, width, short_first)
+
+ def format_usage(self, usage):
+ return _("Usage: %s\n") % usage
+
+ def format_heading(self, heading):
+ return "%*s%s:\n" % (self.current_indent, "", heading)
+
+
+class TitledHelpFormatter (HelpFormatter):
+ """Format help with underlined section headers.
+ """
+
+ def __init__(self,
+ indent_increment=0,
+ max_help_position=24,
+ width=None,
+ short_first=0):
+ HelpFormatter.__init__ (
+ self, indent_increment, max_help_position, width, short_first)
+
+ def format_usage(self, usage):
+ return "%s %s\n" % (self.format_heading(_("Usage")), usage)
+
+ def format_heading(self, heading):
+ return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
+
+
+def _parse_num(val, type):
+ if val[:2].lower() == "0x": # hexadecimal
+ radix = 16
+ elif val[:2].lower() == "0b": # binary
+ radix = 2
+ val = val[2:] or "0" # have to remove "0b" prefix
+ elif val[:1] == "0": # octal
+ radix = 8
+ else: # decimal
+ radix = 10
+
+ return type(val, radix)
+
+def _parse_int(val):
+ return _parse_num(val, int)
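+
+# Usage sketch (illustrative, not part of the upstream module): the prefix
+# selects the radix -- "0x" hexadecimal, "0b" binary, a bare leading "0"
+# old-style octal, anything else decimal.
+#
+#     >>> _parse_int("0x10"), _parse_int("0b10"), _parse_int("010"), _parse_int("10")
+#     (16, 2, 8, 10)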
+
+_builtin_cvt = { "int" : (_parse_int, _("integer")),
+ "long" : (_parse_int, _("integer")),
+ "float" : (float, _("floating-point")),
+ "complex" : (complex, _("complex")) }
+
+def check_builtin(option, opt, value):
+ (cvt, what) = _builtin_cvt[option.type]
+ try:
+ return cvt(value)
+ except ValueError:
+ raise OptionValueError(
+ _("option %s: invalid %s value: %r") % (opt, what, value))
+
+def check_choice(option, opt, value):
+ if value in option.choices:
+ return value
+ else:
+ choices = ", ".join(map(repr, option.choices))
+ raise OptionValueError(
+ _("option %s: invalid choice: %r (choose from %s)")
+ % (opt, value, choices))
+
+# Not supplying a default is different from a default of None,
+# so we need an explicit "not supplied" value.
+NO_DEFAULT = ("NO", "DEFAULT")
+
+
+class Option:
+ """
+ Instance attributes:
+ _short_opts : [string]
+ _long_opts : [string]
+
+ action : string
+ type : string
+ dest : string
+ default : any
+ nargs : int
+ const : any
+ choices : [string]
+ callback : function
+ callback_args : (any*)
+ callback_kwargs : { string : any }
+ help : string
+ metavar : string
+ """
+
+ # The list of instance attributes that may be set through
+ # keyword args to the constructor.
+ ATTRS = ['action',
+ 'type',
+ 'dest',
+ 'default',
+ 'nargs',
+ 'const',
+ 'choices',
+ 'callback',
+ 'callback_args',
+ 'callback_kwargs',
+ 'help',
+ 'metavar']
+
+ # The set of actions allowed by option parsers. Explicitly listed
+ # here so the constructor can validate its arguments.
+ ACTIONS = ("store",
+ "store_const",
+ "store_true",
+ "store_false",
+ "append",
+ "append_const",
+ "count",
+ "callback",
+ "help",
+ "version")
+
+ # The set of actions that involve storing a value somewhere;
+ # also listed just for constructor argument validation. (If
+ # the action is one of these, there must be a destination.)
+ STORE_ACTIONS = ("store",
+ "store_const",
+ "store_true",
+ "store_false",
+ "append",
+ "append_const",
+ "count")
+
+ # The set of actions for which it makes sense to supply a value
+ # type, ie. which may consume an argument from the command line.
+ TYPED_ACTIONS = ("store",
+ "append",
+ "callback")
+
+ # The set of actions which *require* a value type, ie. that
+ # always consume an argument from the command line.
+ ALWAYS_TYPED_ACTIONS = ("store",
+ "append")
+
+ # The set of actions which take a 'const' attribute.
+ CONST_ACTIONS = ("store_const",
+ "append_const")
+
+ # The set of known types for option parsers. Again, listed here for
+ # constructor argument validation.
+ TYPES = ("string", "int", "long", "float", "complex", "choice")
+
+ # Dictionary of argument checking functions, which convert and
+ # validate option arguments according to the option type.
+ #
+ # Signature of checking functions is:
+ # check(option : Option, opt : string, value : string) -> any
+ # where
+ # option is the Option instance calling the checker
+ # opt is the actual option seen on the command-line
+ # (eg. "-a", "--file")
+ # value is the option argument seen on the command-line
+ #
+ # The return value should be in the appropriate Python type
+ # for option.type -- eg. an integer if option.type == "int".
+ #
+ # If no checker is defined for a type, arguments will be
+ # unchecked and remain strings.
+ TYPE_CHECKER = { "int" : check_builtin,
+ "long" : check_builtin,
+ "float" : check_builtin,
+ "complex": check_builtin,
+ "choice" : check_choice,
+ }
+
+
+ # CHECK_METHODS is a list of unbound method objects; they are called
+ # by the constructor, in order, after all attributes are
+ # initialized. The list is created and filled in later, after all
+ # the methods are actually defined. (I just put it here because I
+ # like to define and document all class attributes in the same
+ # place.) Subclasses that add another _check_*() method should
+ # define their own CHECK_METHODS list that adds their check method
+ # to those from this class.
+ CHECK_METHODS = None
+
+
+ # -- Constructor/initialization methods ----------------------------
+
+ def __init__(self, *opts, **attrs):
+ # Set _short_opts, _long_opts attrs from 'opts' tuple.
+ # Have to be set now, in case no option strings are supplied.
+ self._short_opts = []
+ self._long_opts = []
+ opts = self._check_opt_strings(opts)
+ self._set_opt_strings(opts)
+
+ # Set all other attrs (action, type, etc.) from 'attrs' dict
+ self._set_attrs(attrs)
+
+ # Check all the attributes we just set. There are lots of
+ # complicated interdependencies, but luckily they can be farmed
+ # out to the _check_*() methods listed in CHECK_METHODS -- which
+ # could be handy for subclasses! The one thing these all share
+ # is that they raise OptionError if they discover a problem.
+ for checker in self.CHECK_METHODS:
+ checker(self)
+
+ def _check_opt_strings(self, opts):
+ # Filter out None because early versions of Optik had exactly
+ # one short option and one long option, either of which
+ # could be None.
+ opts = [opt for opt in opts if opt]
+ if not opts:
+ raise TypeError("at least one option string must be supplied")
+ return opts
+
+ def _set_opt_strings(self, opts):
+ for opt in opts:
+ if len(opt) < 2:
+ raise OptionError(
+ "invalid option string %r: "
+ "must be at least two characters long" % opt, self)
+ elif len(opt) == 2:
+ if not (opt[0] == "-" and opt[1] != "-"):
+ raise OptionError(
+ "invalid short option string %r: "
+ "must be of the form -x, (x any non-dash char)" % opt,
+ self)
+ self._short_opts.append(opt)
+ else:
+ if not (opt[0:2] == "--" and opt[2] != "-"):
+ raise OptionError(
+ "invalid long option string %r: "
+ "must start with --, followed by non-dash" % opt,
+ self)
+ self._long_opts.append(opt)
+
+ def _set_attrs(self, attrs):
+ for attr in self.ATTRS:
+ if attr in attrs:
+ setattr(self, attr, attrs[attr])
+ del attrs[attr]
+ else:
+ if attr == 'default':
+ setattr(self, attr, NO_DEFAULT)
+ else:
+ setattr(self, attr, None)
+ if attrs:
+ attrs = sorted(attrs.keys())
+ raise OptionError(
+ "invalid keyword arguments: %s" % ", ".join(attrs),
+ self)
+
+
+ # -- Constructor validation methods --------------------------------
+
+ def _check_action(self):
+ if self.action is None:
+ self.action = "store"
+ elif self.action not in self.ACTIONS:
+ raise OptionError("invalid action: %r" % self.action, self)
+
+ def _check_type(self):
+ if self.type is None:
+ if self.action in self.ALWAYS_TYPED_ACTIONS:
+ if self.choices is not None:
+ # The "choices" attribute implies "choice" type.
+ self.type = "choice"
+ else:
+ # No type given? "string" is the most sensible default.
+ self.type = "string"
+ else:
+ # Allow type objects or builtin type conversion functions
+ # (int, str, etc.) as an alternative to their names.
+ if isinstance(self.type, type):
+ self.type = self.type.__name__
+
+ if self.type == "str":
+ self.type = "string"
+
+ if self.type not in self.TYPES:
+ raise OptionError("invalid option type: %r" % self.type, self)
+ if self.action not in self.TYPED_ACTIONS:
+ raise OptionError(
+ "must not supply a type for action %r" % self.action, self)
+
+ def _check_choice(self):
+ if self.type == "choice":
+ if self.choices is None:
+ raise OptionError(
+ "must supply a list of choices for type 'choice'", self)
+ elif not isinstance(self.choices, (tuple, list)):
+ raise OptionError(
+ "choices must be a list of strings ('%s' supplied)"
+ % str(type(self.choices)).split("'")[1], self)
+ elif self.choices is not None:
+ raise OptionError(
+ "must not supply choices for type %r" % self.type, self)
+
+ def _check_dest(self):
+ # No destination given, and we need one for this action. The
+ # self.type check is for callbacks that take a value.
+ takes_value = (self.action in self.STORE_ACTIONS or
+ self.type is not None)
+ if self.dest is None and takes_value:
+
+ # Glean a destination from the first long option string,
+ # or from the first short option string if no long options.
+ if self._long_opts:
+ # eg. "--foo-bar" -> "foo_bar"
+ self.dest = self._long_opts[0][2:].replace('-', '_')
+ else:
+ self.dest = self._short_opts[0][1]
+
+ def _check_const(self):
+ if self.action not in self.CONST_ACTIONS and self.const is not None:
+ raise OptionError(
+ "'const' must not be supplied for action %r" % self.action,
+ self)
+
+ def _check_nargs(self):
+ if self.action in self.TYPED_ACTIONS:
+ if self.nargs is None:
+ self.nargs = 1
+ elif self.nargs is not None:
+ raise OptionError(
+ "'nargs' must not be supplied for action %r" % self.action,
+ self)
+
+ def _check_callback(self):
+ if self.action == "callback":
+ if not callable(self.callback):
+ raise OptionError(
+ "callback not callable: %r" % self.callback, self)
+ if (self.callback_args is not None and
+ not isinstance(self.callback_args, tuple)):
+ raise OptionError(
+ "callback_args, if supplied, must be a tuple: not %r"
+ % self.callback_args, self)
+ if (self.callback_kwargs is not None and
+ not isinstance(self.callback_kwargs, dict)):
+ raise OptionError(
+ "callback_kwargs, if supplied, must be a dict: not %r"
+ % self.callback_kwargs, self)
+ else:
+ if self.callback is not None:
+ raise OptionError(
+ "callback supplied (%r) for non-callback option"
+ % self.callback, self)
+ if self.callback_args is not None:
+ raise OptionError(
+ "callback_args supplied for non-callback option", self)
+ if self.callback_kwargs is not None:
+ raise OptionError(
+ "callback_kwargs supplied for non-callback option", self)
+
+
+ CHECK_METHODS = [_check_action,
+ _check_type,
+ _check_choice,
+ _check_dest,
+ _check_const,
+ _check_nargs,
+ _check_callback]
+
+
+ # -- Miscellaneous methods -----------------------------------------
+
+ def __str__(self):
+ return "/".join(self._short_opts + self._long_opts)
+
+ __repr__ = _repr
+
+ def takes_value(self):
+ return self.type is not None
+
+ def get_opt_string(self):
+ if self._long_opts:
+ return self._long_opts[0]
+ else:
+ return self._short_opts[0]
+
+
+ # -- Processing methods --------------------------------------------
+
+ def check_value(self, opt, value):
+ checker = self.TYPE_CHECKER.get(self.type)
+ if checker is None:
+ return value
+ else:
+ return checker(self, opt, value)
+
+ def convert_value(self, opt, value):
+ if value is not None:
+ if self.nargs == 1:
+ return self.check_value(opt, value)
+ else:
+ return tuple([self.check_value(opt, v) for v in value])
+
+ def process(self, opt, value, values, parser):
+
+ # First, convert the value(s) to the right type. Howl if any
+ # value(s) are bogus.
+ value = self.convert_value(opt, value)
+
+ # And then take whatever action is expected of us.
+ # This is a separate method to make life easier for
+ # subclasses to add new actions.
+ return self.take_action(
+ self.action, self.dest, opt, value, values, parser)
+
+ def take_action(self, action, dest, opt, value, values, parser):
+ if action == "store":
+ setattr(values, dest, value)
+ elif action == "store_const":
+ setattr(values, dest, self.const)
+ elif action == "store_true":
+ setattr(values, dest, True)
+ elif action == "store_false":
+ setattr(values, dest, False)
+ elif action == "append":
+ values.ensure_value(dest, []).append(value)
+ elif action == "append_const":
+ values.ensure_value(dest, []).append(self.const)
+ elif action == "count":
+ setattr(values, dest, values.ensure_value(dest, 0) + 1)
+ elif action == "callback":
+ args = self.callback_args or ()
+ kwargs = self.callback_kwargs or {}
+ self.callback(self, opt, value, parser, *args, **kwargs)
+ elif action == "help":
+ parser.print_help()
+ parser.exit()
+ elif action == "version":
+ parser.print_version()
+ parser.exit()
+ else:
+ raise ValueError("unknown action %r" % self.action)
+
+ return 1
+
+# class Option
+
+
+SUPPRESS_HELP = "SUPPRESS"+"HELP"
+SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
+
+class Values:
+
+ def __init__(self, defaults=None):
+ if defaults:
+ for (attr, val) in defaults.items():
+ setattr(self, attr, val)
+
+ def __str__(self):
+ return str(self.__dict__)
+
+ __repr__ = _repr
+
+ def __eq__(self, other):
+ if isinstance(other, Values):
+ return self.__dict__ == other.__dict__
+ elif isinstance(other, dict):
+ return self.__dict__ == other
+ else:
+ return NotImplemented
+
+ def _update_careful(self, dict):
+ """
+ Update the option values from an arbitrary dictionary, but only
+ use keys from dict that already have a corresponding attribute
+ in self. Any keys in dict without a corresponding attribute
+ are silently ignored.
+ """
+ for attr in dir(self):
+ if attr in dict:
+ dval = dict[attr]
+ if dval is not None:
+ setattr(self, attr, dval)
+
+ def _update_loose(self, dict):
+ """
+ Update the option values from an arbitrary dictionary,
+ using all keys from the dictionary regardless of whether
+ they have a corresponding attribute in self or not.
+ """
+ self.__dict__.update(dict)
+
+ def _update(self, dict, mode):
+ if mode == "careful":
+ self._update_careful(dict)
+ elif mode == "loose":
+ self._update_loose(dict)
+ else:
+ raise ValueError("invalid update mode: %r" % mode)
+
+ def read_module(self, modname, mode="careful"):
+ __import__(modname)
+ mod = sys.modules[modname]
+ self._update(vars(mod), mode)
+
+ def read_file(self, filename, mode="careful"):
+ vars = {}
+ exec(open(filename).read(), vars)
+ self._update(vars, mode)
+
+ def ensure_value(self, attr, value):
+ if not hasattr(self, attr) or getattr(self, attr) is None:
+ setattr(self, attr, value)
+ return getattr(self, attr)
+
+
+class OptionContainer:
+
+ """
+ Abstract base class.
+
+ Class attributes:
+ standard_option_list : [Option]
+ list of standard options that will be accepted by all instances
+ of this parser class (intended to be overridden by subclasses).
+
+ Instance attributes:
+ option_list : [Option]
+ the list of Option objects contained by this OptionContainer
+ _short_opt : { string : Option }
+ dictionary mapping short option strings, eg. "-f" or "-X",
+ to the Option instances that implement them. If an Option
+ has multiple short option strings, it will appear in this
+ dictionary multiple times. [1]
+ _long_opt : { string : Option }
+ dictionary mapping long option strings, eg. "--file" or
+ "--exclude", to the Option instances that implement them.
+ Again, a given Option can occur multiple times in this
+ dictionary. [1]
+ defaults : { string : any }
+ dictionary mapping option destination names to default
+ values for each destination [1]
+
+ [1] These mappings are common to (shared by) all components of the
+ controlling OptionParser, where they are initially created.
+
+ """
+
+ def __init__(self, option_class, conflict_handler, description):
+ # Initialize the option list and related data structures.
+ # This method must be provided by subclasses, and it must
+ # initialize at least the following instance attributes:
+ # option_list, _short_opt, _long_opt, defaults.
+ self._create_option_list()
+
+ self.option_class = option_class
+ self.set_conflict_handler(conflict_handler)
+ self.set_description(description)
+
+ def _create_option_mappings(self):
+ # For use by OptionParser constructor -- create the main
+ # option mappings used by this OptionParser and all
+ # OptionGroups that it owns.
+ self._short_opt = {} # single letter -> Option instance
+ self._long_opt = {} # long option -> Option instance
+ self.defaults = {} # maps option dest -> default value
+
+
+ def _share_option_mappings(self, parser):
+ # For use by OptionGroup constructor -- use shared option
+ # mappings from the OptionParser that owns this OptionGroup.
+ self._short_opt = parser._short_opt
+ self._long_opt = parser._long_opt
+ self.defaults = parser.defaults
+
+ def set_conflict_handler(self, handler):
+ if handler not in ("error", "resolve"):
+ raise ValueError("invalid conflict_resolution value %r" % handler)
+ self.conflict_handler = handler
+
+ def set_description(self, description):
+ self.description = description
+
+ def get_description(self):
+ return self.description
+
+
+ def destroy(self):
+ """see OptionParser.destroy()."""
+ del self._short_opt
+ del self._long_opt
+ del self.defaults
+
+
+ # -- Option-adding methods -----------------------------------------
+
+ def _check_conflict(self, option):
+ conflict_opts = []
+ for opt in option._short_opts:
+ if opt in self._short_opt:
+ conflict_opts.append((opt, self._short_opt[opt]))
+ for opt in option._long_opts:
+ if opt in self._long_opt:
+ conflict_opts.append((opt, self._long_opt[opt]))
+
+ if conflict_opts:
+ handler = self.conflict_handler
+ if handler == "error":
+ raise OptionConflictError(
+ "conflicting option string(s): %s"
+ % ", ".join([co[0] for co in conflict_opts]),
+ option)
+ elif handler == "resolve":
+ for (opt, c_option) in conflict_opts:
+ if opt.startswith("--"):
+ c_option._long_opts.remove(opt)
+ del self._long_opt[opt]
+ else:
+ c_option._short_opts.remove(opt)
+ del self._short_opt[opt]
+ if not (c_option._short_opts or c_option._long_opts):
+ c_option.container.option_list.remove(c_option)
+
+ def add_option(self, *args, **kwargs):
+ """add_option(Option)
+ add_option(opt_str, ..., kwarg=val, ...)
+ """
+ if isinstance(args[0], str):
+ option = self.option_class(*args, **kwargs)
+ elif len(args) == 1 and not kwargs:
+ option = args[0]
+ if not isinstance(option, Option):
+ raise TypeError("not an Option instance: %r" % option)
+ else:
+ raise TypeError("invalid arguments")
+
+ self._check_conflict(option)
+
+ self.option_list.append(option)
+ option.container = self
+ for opt in option._short_opts:
+ self._short_opt[opt] = option
+ for opt in option._long_opts:
+ self._long_opt[opt] = option
+
+ if option.dest is not None: # option has a dest, we need a default
+ if option.default is not NO_DEFAULT:
+ self.defaults[option.dest] = option.default
+ elif option.dest not in self.defaults:
+ self.defaults[option.dest] = None
+
+ return option
+
+ def add_options(self, option_list):
+ for option in option_list:
+ self.add_option(option)
+
+ # -- Option query/removal methods ----------------------------------
+
+ def get_option(self, opt_str):
+ return (self._short_opt.get(opt_str) or
+ self._long_opt.get(opt_str))
+
+ def has_option(self, opt_str):
+ return (opt_str in self._short_opt or
+ opt_str in self._long_opt)
+
+ def remove_option(self, opt_str):
+ option = self._short_opt.get(opt_str)
+ if option is None:
+ option = self._long_opt.get(opt_str)
+ if option is None:
+ raise ValueError("no such option %r" % opt_str)
+
+ for opt in option._short_opts:
+ del self._short_opt[opt]
+ for opt in option._long_opts:
+ del self._long_opt[opt]
+ option.container.option_list.remove(option)
+
+
+ # -- Help-formatting methods ---------------------------------------
+
+ def format_option_help(self, formatter):
+ if not self.option_list:
+ return ""
+ result = []
+ for option in self.option_list:
+            if option.help is not SUPPRESS_HELP:
+ result.append(formatter.format_option(option))
+ return "".join(result)
+
+ def format_description(self, formatter):
+ return formatter.format_description(self.get_description())
+
+ def format_help(self, formatter):
+ result = []
+ if self.description:
+ result.append(self.format_description(formatter))
+ if self.option_list:
+ result.append(self.format_option_help(formatter))
+ return "\n".join(result)
+
+
+class OptionGroup (OptionContainer):
+
+ def __init__(self, parser, title, description=None):
+ self.parser = parser
+ OptionContainer.__init__(
+ self, parser.option_class, parser.conflict_handler, description)
+ self.title = title
+
+ def _create_option_list(self):
+ self.option_list = []
+ self._share_option_mappings(self.parser)
+
+ def set_title(self, title):
+ self.title = title
+
+ def destroy(self):
+ """see OptionParser.destroy()."""
+ OptionContainer.destroy(self)
+ del self.option_list
+
+ # -- Help-formatting methods ---------------------------------------
+
+ def format_help(self, formatter):
+ result = formatter.format_heading(self.title)
+ formatter.indent()
+ result += OptionContainer.format_help(self, formatter)
+ formatter.dedent()
+ return result
+
+
+class OptionParser (OptionContainer):
+
+ """
+ Class attributes:
+ standard_option_list : [Option]
+ list of standard options that will be accepted by all instances
+ of this parser class (intended to be overridden by subclasses).
+
+ Instance attributes:
+ usage : string
+ a usage string for your program. Before it is displayed
+ to the user, "%prog" will be expanded to the name of
+ your program (self.prog or os.path.basename(sys.argv[0])).
+ prog : string
+ the name of the current program (to override
+ os.path.basename(sys.argv[0])).
+ description : string
+ A paragraph of text giving a brief overview of your program.
+ optparse reformats this paragraph to fit the current terminal
+ width and prints it when the user requests help (after usage,
+ but before the list of options).
+ epilog : string
+ paragraph of help text to print after option help
+
+ option_groups : [OptionGroup]
+ list of option groups in this parser (option groups are
+ irrelevant for parsing the command-line, but very useful
+ for generating help)
+
+ allow_interspersed_args : bool = true
+ if true, positional arguments may be interspersed with options.
+ Assuming -a and -b each take a single argument, the command-line
+ -ablah foo bar -bboo baz
+ will be interpreted the same as
+ -ablah -bboo -- foo bar baz
+ If this flag were false, that command line would be interpreted as
+ -ablah -- foo bar -bboo baz
+ -- ie. we stop processing options as soon as we see the first
+ non-option argument. (This is the tradition followed by
+ Python's getopt module, Perl's Getopt::Std, and other argument-
+ parsing libraries, but it is generally annoying to users.)
+
+ process_default_values : bool = true
+ if true, option default values are processed similarly to option
+ values from the command line: that is, they are passed to the
+ type-checking function for the option's type (as long as the
+ default value is a string). (This really only matters if you
+ have defined custom types; see SF bug #955889.) Set it to false
+ to restore the behaviour of Optik 1.4.1 and earlier.
+
+ rargs : [string]
+ the argument list currently being parsed. Only set when
+ parse_args() is active, and continually trimmed down as
+ we consume arguments. Mainly there for the benefit of
+ callback options.
+ largs : [string]
+ the list of leftover arguments that we have skipped while
+ parsing options. If allow_interspersed_args is false, this
+ list is always empty.
+ values : Values
+ the set of option values currently being accumulated. Only
+ set when parse_args() is active. Also mainly for callbacks.
+
+ Because of the 'rargs', 'largs', and 'values' attributes,
+ OptionParser is not thread-safe. If, for some perverse reason, you
+ need to parse command-line arguments simultaneously in different
+ threads, use different OptionParser instances.
+
+ """
+
+ standard_option_list = []
+
+ def __init__(self,
+ usage=None,
+ option_list=None,
+ option_class=Option,
+ version=None,
+ conflict_handler="error",
+ description=None,
+ formatter=None,
+ add_help_option=True,
+ prog=None,
+ epilog=None):
+ OptionContainer.__init__(
+ self, option_class, conflict_handler, description)
+ self.set_usage(usage)
+ self.prog = prog
+ self.version = version
+ self.allow_interspersed_args = True
+ self.process_default_values = True
+ if formatter is None:
+ formatter = IndentedHelpFormatter()
+ self.formatter = formatter
+ self.formatter.set_parser(self)
+ self.epilog = epilog
+
+ # Populate the option list; initial sources are the
+ # standard_option_list class attribute, the 'option_list'
+ # argument, and (if applicable) the _add_version_option() and
+ # _add_help_option() methods.
+ self._populate_option_list(option_list,
+ add_help=add_help_option)
+
+ self._init_parsing_state()
+
+
+ def destroy(self):
+ """
+ Declare that you are done with this OptionParser. This cleans up
+ reference cycles so the OptionParser (and all objects referenced by
+ it) can be garbage-collected promptly. After calling destroy(), the
+ OptionParser is unusable.
+ """
+ OptionContainer.destroy(self)
+ for group in self.option_groups:
+ group.destroy()
+ del self.option_list
+ del self.option_groups
+ del self.formatter
+
+
+ # -- Private methods -----------------------------------------------
+    # (used by our constructor or by OptionContainer's)
+
+ def _create_option_list(self):
+ self.option_list = []
+ self.option_groups = []
+ self._create_option_mappings()
+
+ def _add_help_option(self):
+ self.add_option("-h", "--help",
+ action="help",
+ help=_("show this help message and exit"))
+
+ def _add_version_option(self):
+ self.add_option("--version",
+ action="version",
+ help=_("show program's version number and exit"))
+
+ def _populate_option_list(self, option_list, add_help=True):
+ if self.standard_option_list:
+ self.add_options(self.standard_option_list)
+ if option_list:
+ self.add_options(option_list)
+ if self.version:
+ self._add_version_option()
+ if add_help:
+ self._add_help_option()
+
+ def _init_parsing_state(self):
+ # These are set in parse_args() for the convenience of callbacks.
+ self.rargs = None
+ self.largs = None
+ self.values = None
+
+
+ # -- Simple modifier methods ---------------------------------------
+
+ def set_usage(self, usage):
+ if usage is None:
+ self.usage = _("%prog [options]")
+ elif usage is SUPPRESS_USAGE:
+ self.usage = None
+ # For backwards compatibility with Optik 1.3 and earlier.
+ elif usage.lower().startswith("usage: "):
+ self.usage = usage[7:]
+ else:
+ self.usage = usage
+
+ def enable_interspersed_args(self):
+ """Set parsing to not stop on the first non-option, allowing
+ interspersing switches with command arguments. This is the
+ default behavior. See also disable_interspersed_args() and the
+ class documentation description of the attribute
+ allow_interspersed_args."""
+ self.allow_interspersed_args = True
+
+ def disable_interspersed_args(self):
+ """Set parsing to stop on the first non-option. Use this if
+ you have a command processor which runs another command that
+ has options of its own and you want to make sure these options
+ don't get confused.
+ """
+ self.allow_interspersed_args = False
+
+ def set_process_default_values(self, process):
+ self.process_default_values = process
+
+ def set_default(self, dest, value):
+ self.defaults[dest] = value
+
+ def set_defaults(self, **kwargs):
+ self.defaults.update(kwargs)
+
+ def _get_all_options(self):
+ options = self.option_list[:]
+ for group in self.option_groups:
+ options.extend(group.option_list)
+ return options
+
+ def get_default_values(self):
+ if not self.process_default_values:
+ # Old, pre-Optik 1.5 behaviour.
+ return Values(self.defaults)
+
+ defaults = self.defaults.copy()
+ for option in self._get_all_options():
+ default = defaults.get(option.dest)
+ if isinstance(default, str):
+ opt_str = option.get_opt_string()
+ defaults[option.dest] = option.check_value(opt_str, default)
+
+ return Values(defaults)
+
+
+ # -- OptionGroup methods -------------------------------------------
+
+ def add_option_group(self, *args, **kwargs):
+ # XXX lots of overlap with OptionContainer.add_option()
+ if isinstance(args[0], str):
+ group = OptionGroup(self, *args, **kwargs)
+ elif len(args) == 1 and not kwargs:
+ group = args[0]
+ if not isinstance(group, OptionGroup):
+ raise TypeError("not an OptionGroup instance: %r" % group)
+ if group.parser is not self:
+ raise ValueError("invalid OptionGroup (wrong parser)")
+ else:
+ raise TypeError("invalid arguments")
+
+ self.option_groups.append(group)
+ return group
+
+ def get_option_group(self, opt_str):
+ option = (self._short_opt.get(opt_str) or
+ self._long_opt.get(opt_str))
+ if option and option.container is not self:
+ return option.container
+ return None
+
+
+ # -- Option-parsing methods ----------------------------------------
+
+ def _get_args(self, args):
+ if args is None:
+ return sys.argv[1:]
+ else:
+ return args[:] # don't modify caller's list
+
+ def parse_args(self, args=None, values=None):
+ """
+ parse_args(args : [string] = sys.argv[1:],
+ values : Values = None)
+ -> (values : Values, args : [string])
+
+ Parse the command-line options found in 'args' (default:
+ sys.argv[1:]). Any errors result in a call to 'error()', which
+ by default prints the usage message to stderr and calls
+ sys.exit() with an error message. On success returns a pair
+ (values, args) where 'values' is a Values instance (with all
+ your option values) and 'args' is the list of arguments left
+ over after parsing options.
+ """
+ rargs = self._get_args(args)
+ if values is None:
+ values = self.get_default_values()
+
+ # Store the halves of the argument list as attributes for the
+ # convenience of callbacks:
+ # rargs
+ # the rest of the command-line (the "r" stands for
+ # "remaining" or "right-hand")
+ # largs
+ # the leftover arguments -- ie. what's left after removing
+ # options and their arguments (the "l" stands for "leftover"
+ # or "left-hand")
+ self.rargs = rargs
+ self.largs = largs = []
+ self.values = values
+
+ try:
+ stop = self._process_args(largs, rargs, values)
+ except (BadOptionError, OptionValueError) as err:
+ self.error(str(err))
+
+ args = largs + rargs
+ return self.check_values(values, args)
+
+ def check_values(self, values, args):
+ """
+ check_values(values : Values, args : [string])
+ -> (values : Values, args : [string])
+
+ Check that the supplied option values and leftover arguments are
+ valid. Returns the option values and leftover arguments
+ (possibly adjusted, possibly completely new -- whatever you
+ like). Default implementation just returns the passed-in
+ values; subclasses may override as desired.
+ """
+ return (values, args)
+
+ def _process_args(self, largs, rargs, values):
+ """_process_args(largs : [string],
+ rargs : [string],
+ values : Values)
+
+ Process command-line arguments and populate 'values', consuming
+ options and arguments from 'rargs'. If 'allow_interspersed_args' is
+ false, stop at the first non-option argument. If true, accumulate any
+ interspersed non-option arguments in 'largs'.
+ """
+ while rargs:
+ arg = rargs[0]
+ # We handle bare "--" explicitly, and bare "-" is handled by the
+ # standard arg handler since the short arg case ensures that the
+ # len of the opt string is greater than 1.
+ if arg == "--":
+ del rargs[0]
+ return
+ elif arg[0:2] == "--":
+ # process a single long option (possibly with value(s))
+ self._process_long_opt(rargs, values)
+ elif arg[:1] == "-" and len(arg) > 1:
+ # process a cluster of short options (possibly with
+ # value(s) for the last one only)
+ self._process_short_opts(rargs, values)
+ elif self.allow_interspersed_args:
+ largs.append(arg)
+ del rargs[0]
+ else:
+ return # stop now, leave this arg in rargs
+
+ # Say this is the original argument list:
+ # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
+ # ^
+ # (we are about to process arg(i)).
+ #
+ # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
+ # [arg0, ..., arg(i-1)] (any options and their arguments will have
+ # been removed from largs).
+ #
+ # The while loop will usually consume 1 or more arguments per pass.
+ # If it consumes 1 (eg. arg is an option that takes no arguments),
+        # then after that pass is done the situation is:
+ #
+ # largs = subset of [arg0, ..., arg(i)]
+ # rargs = [arg(i+1), ..., arg(N-1)]
+ #
+ # If allow_interspersed_args is false, largs will always be
+ # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
+ # not a very interesting subset!
+
+ def _match_long_opt(self, opt):
+ """_match_long_opt(opt : string) -> string
+
+ Determine which long option string 'opt' matches, ie. which one
+ it is an unambiguous abbreviation for. Raises BadOptionError if
+ 'opt' doesn't unambiguously match any long option string.
+ """
+ return _match_abbrev(opt, self._long_opt)
+
+ def _process_long_opt(self, rargs, values):
+ arg = rargs.pop(0)
+
+ # Value explicitly attached to arg? Pretend it's the next
+ # argument.
+ if "=" in arg:
+ (opt, next_arg) = arg.split("=", 1)
+ rargs.insert(0, next_arg)
+ had_explicit_value = True
+ else:
+ opt = arg
+ had_explicit_value = False
+
+ opt = self._match_long_opt(opt)
+ option = self._long_opt[opt]
+ if option.takes_value():
+ nargs = option.nargs
+ if len(rargs) < nargs:
+ self.error(ngettext(
+ "%(option)s option requires %(number)d argument",
+ "%(option)s option requires %(number)d arguments",
+ nargs) % {"option": opt, "number": nargs})
+ elif nargs == 1:
+ value = rargs.pop(0)
+ else:
+ value = tuple(rargs[0:nargs])
+ del rargs[0:nargs]
+
+ elif had_explicit_value:
+ self.error(_("%s option does not take a value") % opt)
+
+ else:
+ value = None
+
+ option.process(opt, value, values, self)
+
+ def _process_short_opts(self, rargs, values):
+ arg = rargs.pop(0)
+ stop = False
+ i = 1
+ for ch in arg[1:]:
+ opt = "-" + ch
+ option = self._short_opt.get(opt)
+ i += 1 # we have consumed a character
+
+ if not option:
+ raise BadOptionError(opt)
+ if option.takes_value():
+ # Any characters left in arg? Pretend they're the
+ # next arg, and stop consuming characters of arg.
+ if i < len(arg):
+ rargs.insert(0, arg[i:])
+ stop = True
+
+ nargs = option.nargs
+ if len(rargs) < nargs:
+ self.error(ngettext(
+ "%(option)s option requires %(number)d argument",
+ "%(option)s option requires %(number)d arguments",
+ nargs) % {"option": opt, "number": nargs})
+ elif nargs == 1:
+ value = rargs.pop(0)
+ else:
+ value = tuple(rargs[0:nargs])
+ del rargs[0:nargs]
+
+ else: # option doesn't take a value
+ value = None
+
+ option.process(opt, value, values, self)
+
+ if stop:
+ break
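+
+    # Clustering sketch (hypothetical flags): with "-a" a boolean flag and
+    # "-f" taking one value, the single argument "-afvalue" is consumed as
+    # "-a" followed by "-f" with the value "value".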
+
+
+ # -- Feedback methods ----------------------------------------------
+
+ def get_prog_name(self):
+ if self.prog is None:
+ return os.path.basename(sys.argv[0])
+ else:
+ return self.prog
+
+ def expand_prog_name(self, s):
+ return s.replace("%prog", self.get_prog_name())
+
+ def get_description(self):
+ return self.expand_prog_name(self.description)
+
+ def exit(self, status=0, msg=None):
+ if msg:
+ sys.stderr.write(msg)
+ sys.exit(status)
+
+ def error(self, msg):
+ """error(msg : string)
+
+ Print a usage message incorporating 'msg' to stderr and exit.
+ If you override this in a subclass, it should not return -- it
+ should either exit or raise an exception.
+ """
+ self.print_usage(sys.stderr)
+ self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
+
+ def get_usage(self):
+ if self.usage:
+ return self.formatter.format_usage(
+ self.expand_prog_name(self.usage))
+ else:
+ return ""
+
+ def print_usage(self, file=None):
+ """print_usage(file : file = stdout)
+
+ Print the usage message for the current program (self.usage) to
+ 'file' (default stdout). Any occurrence of the string "%prog" in
+ self.usage is replaced with the name of the current program
+ (basename of sys.argv[0]). Does nothing if self.usage is empty
+ or not defined.
+ """
+ if self.usage:
+ print(self.get_usage(), file=file)
+
+ def get_version(self):
+ if self.version:
+ return self.expand_prog_name(self.version)
+ else:
+ return ""
+
+ def print_version(self, file=None):
+ """print_version(file : file = stdout)
+
+ Print the version message for this program (self.version) to
+ 'file' (default stdout). As with print_usage(), any occurrence
+ of "%prog" in self.version is replaced by the current program's
+ name. Does nothing if self.version is empty or undefined.
+ """
+ if self.version:
+ print(self.get_version(), file=file)
+
+ def format_option_help(self, formatter=None):
+ if formatter is None:
+ formatter = self.formatter
+ formatter.store_option_strings(self)
+ result = []
+ result.append(formatter.format_heading(_("Options")))
+ formatter.indent()
+ if self.option_list:
+ result.append(OptionContainer.format_option_help(self, formatter))
+ result.append("\n")
+ for group in self.option_groups:
+ result.append(group.format_help(formatter))
+ result.append("\n")
+ formatter.dedent()
+ # Drop the last "\n", or the header if no options or option groups:
+ return "".join(result[:-1])
+
+ def format_epilog(self, formatter):
+ return formatter.format_epilog(self.epilog)
+
+ def format_help(self, formatter=None):
+ if formatter is None:
+ formatter = self.formatter
+ result = []
+ if self.usage:
+ result.append(self.get_usage() + "\n")
+ if self.description:
+ result.append(self.format_description(formatter) + "\n")
+ result.append(self.format_option_help(formatter))
+ result.append(self.format_epilog(formatter))
+ return "".join(result)
+
+ def print_help(self, file=None):
+ """print_help(file : file = stdout)
+
+ Print an extended help message, listing all options and any
+ help text provided with them, to 'file' (default stdout).
+ """
+ if file is None:
+ file = sys.stdout
+ file.write(self.format_help())
+
+# class OptionParser
+
+
+def _match_abbrev(s, wordmap):
+ """_match_abbrev(s : string, wordmap : {string : Option}) -> string
+
+ Return the string key in 'wordmap' for which 's' is an unambiguous
+ abbreviation. If 's' is found to be ambiguous or doesn't match any of
+    'wordmap', raise BadOptionError.
+ """
+ # Is there an exact match?
+ if s in wordmap:
+ return s
+ else:
+ # Isolate all words with s as a prefix.
+ possibilities = [word for word in wordmap.keys()
+ if word.startswith(s)]
+ # No exact match, so there had better be just one possibility.
+ if len(possibilities) == 1:
+ return possibilities[0]
+ elif not possibilities:
+ raise BadOptionError(s)
+ else:
+ # More than one possible completion: ambiguous prefix.
+ possibilities.sort()
+ raise AmbiguousOptionError(s, possibilities)
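+
+# Illustration (assumed option strings): with wordmap keys "--verbose" and
+# "--version", _match_abbrev("--verb", wordmap) returns "--verbose",
+# _match_abbrev("--ver", wordmap) raises AmbiguousOptionError, and
+# _match_abbrev("--bogus", wordmap) raises BadOptionError.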
+
+
+# Some day, there might be many Option classes. As of Optik 1.3, the
+# preferred way to instantiate Options is indirectly, via make_option(),
+# which will become a factory function when there are many Option
+# classes.
+make_option = Option
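+
+# A minimal end-to-end sketch (illustrative; the option names and argv below
+# are hypothetical):
+#
+#     parser = OptionParser(usage="%prog [options] file...")
+#     parser.add_option("-v", "--verbose", action="store_true",
+#                       dest="verbose", default=False)
+#     parser.add_option("-n", "--count", type="int", dest="count", default=1)
+#     options, args = parser.parse_args(["-v", "-n", "3", "input.txt"])
+#     # options.verbose == True, options.count == 3, args == ["input.txt"]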
diff --git a/infer_4_37_2/lib/python3.10/os.py b/infer_4_37_2/lib/python3.10/os.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f2ffceaaf383556921b8d18c05998563eaff4bb
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/os.py
@@ -0,0 +1,1123 @@
+r"""OS routines for NT or Posix depending on what system we're on.
+
+This exports:
+ - all functions from posix or nt, e.g. unlink, stat, etc.
+ - os.path is either posixpath or ntpath
+ - os.name is either 'posix' or 'nt'
+ - os.curdir is a string representing the current directory (always '.')
+ - os.pardir is a string representing the parent directory (always '..')
+ - os.sep is the (or a most common) pathname separator ('/' or '\\')
+ - os.extsep is the extension separator (always '.')
+ - os.altsep is the alternate pathname separator (None or '/')
+ - os.pathsep is the component separator used in $PATH etc
+ - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
+ - os.defpath is the default search path for executables
+ - os.devnull is the file path of the null device ('/dev/null', etc.)
+
+Programs that import and use 'os' stand a better chance of being
+portable between different platforms. Of course, they must then
+only use functions that are defined by all platforms (e.g., unlink
+and opendir), and leave all pathname manipulation to os.path
+(e.g., split and join).
+"""
+
+#'
+import abc
+import sys
+import stat as st
+
+from _collections_abc import _check_methods
+
+GenericAlias = type(list[int])
+
+_names = sys.builtin_module_names
+
+# Note: more names are added to __all__ later.
+__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
+ "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
+ "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
+ "extsep"]
+
+def _exists(name):
+ return name in globals()
+
+def _get_exports_list(module):
+ try:
+ return list(module.__all__)
+ except AttributeError:
+ return [n for n in dir(module) if n[0] != '_']
+
+# Any new dependencies of the os module and/or changes in the path separator
+# require updating importlib as well.
+if 'posix' in _names:
+ name = 'posix'
+ linesep = '\n'
+ from posix import *
+ try:
+ from posix import _exit
+ __all__.append('_exit')
+ except ImportError:
+ pass
+ import posixpath as path
+
+ try:
+ from posix import _have_functions
+ except ImportError:
+ pass
+
+ import posix
+ __all__.extend(_get_exports_list(posix))
+ del posix
+
+elif 'nt' in _names:
+ name = 'nt'
+ linesep = '\r\n'
+ from nt import *
+ try:
+ from nt import _exit
+ __all__.append('_exit')
+ except ImportError:
+ pass
+ import ntpath as path
+
+ import nt
+ __all__.extend(_get_exports_list(nt))
+ del nt
+
+ try:
+ from nt import _have_functions
+ except ImportError:
+ pass
+
+else:
+ raise ImportError('no os specific module found')
+
+sys.modules['os.path'] = path
+from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
+ devnull)
+
+del _names
+
+
+if _exists("_have_functions"):
+ _globals = globals()
+ def _add(str, fn):
+ if (fn in _globals) and (str in _have_functions):
+ _set.add(_globals[fn])
+
+ _set = set()
+ _add("HAVE_FACCESSAT", "access")
+ _add("HAVE_FCHMODAT", "chmod")
+ _add("HAVE_FCHOWNAT", "chown")
+ _add("HAVE_FSTATAT", "stat")
+ _add("HAVE_FUTIMESAT", "utime")
+ _add("HAVE_LINKAT", "link")
+ _add("HAVE_MKDIRAT", "mkdir")
+ _add("HAVE_MKFIFOAT", "mkfifo")
+ _add("HAVE_MKNODAT", "mknod")
+ _add("HAVE_OPENAT", "open")
+ _add("HAVE_READLINKAT", "readlink")
+ _add("HAVE_RENAMEAT", "rename")
+ _add("HAVE_SYMLINKAT", "symlink")
+ _add("HAVE_UNLINKAT", "unlink")
+ _add("HAVE_UNLINKAT", "rmdir")
+ _add("HAVE_UTIMENSAT", "utime")
+ supports_dir_fd = _set
+
+ _set = set()
+ _add("HAVE_FACCESSAT", "access")
+ supports_effective_ids = _set
+
+ _set = set()
+ _add("HAVE_FCHDIR", "chdir")
+ _add("HAVE_FCHMOD", "chmod")
+ _add("HAVE_FCHOWN", "chown")
+ _add("HAVE_FDOPENDIR", "listdir")
+ _add("HAVE_FDOPENDIR", "scandir")
+ _add("HAVE_FEXECVE", "execve")
+ _set.add(stat) # fstat always works
+ _add("HAVE_FTRUNCATE", "truncate")
+ _add("HAVE_FUTIMENS", "utime")
+ _add("HAVE_FUTIMES", "utime")
+ _add("HAVE_FPATHCONF", "pathconf")
+ if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
+ _add("HAVE_FSTATVFS", "statvfs")
+ supports_fd = _set
+
+ _set = set()
+ _add("HAVE_FACCESSAT", "access")
+    # Some platforms don't support lchmod(). Often the function exists
+    # anyway, as a stub that always returns ENOTSUP or perhaps EOPNOTSUPP.
+ # (No, I don't know why that's a good design.) ./configure will detect
+ # this and reject it--so HAVE_LCHMOD still won't be defined on such
+ # platforms. This is Very Helpful.
+ #
+ # However, sometimes platforms without a working lchmod() *do* have
+ # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
+ # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
+ # it behave like lchmod(). So in theory it would be a suitable
+ # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
+ # flag doesn't work *either*. Sadly ./configure isn't sophisticated
+ # enough to detect this condition--it only determines whether or not
+ # fchmodat() minimally works.
+ #
+ # Therefore we simply ignore fchmodat() when deciding whether or not
+ # os.chmod supports follow_symlinks. Just checking lchmod() is
+ # sufficient. After all--if you have a working fchmodat(), your
+ # lchmod() almost certainly works too.
+ #
+ # _add("HAVE_FCHMODAT", "chmod")
+ _add("HAVE_FCHOWNAT", "chown")
+ _add("HAVE_FSTATAT", "stat")
+ _add("HAVE_LCHFLAGS", "chflags")
+ _add("HAVE_LCHMOD", "chmod")
+ if _exists("lchown"): # mac os x10.3
+ _add("HAVE_LCHOWN", "chown")
+ _add("HAVE_LINKAT", "link")
+ _add("HAVE_LUTIMES", "utime")
+ _add("HAVE_LSTAT", "stat")
+ _add("HAVE_FSTATAT", "stat")
+ _add("HAVE_UTIMENSAT", "utime")
+ _add("MS_WINDOWS", "stat")
+ supports_follow_symlinks = _set
+
+ del _set
+ del _have_functions
+ del _globals
+ del _add
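+
+# Capability sketch: callers can probe these sets before using the extended
+# keyword arguments, e.g. `if os.stat in os.supports_dir_fd:` guards a
+# stat(..., dir_fd=fd) call on platforms lacking the *at() family.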
+
+
+# Python uses fixed values for the SEEK_ constants; they are mapped
+# to native constants if necessary in posixmodule.c
+# Other possible SEEK values are directly imported from posixmodule.c
+SEEK_SET = 0
+SEEK_CUR = 1
+SEEK_END = 2
+
+# Super directory utilities.
+# (Inspired by Eric Raymond; the doc strings are mostly his)
+
+def makedirs(name, mode=0o777, exist_ok=False):
+ """makedirs(name [, mode=0o777][, exist_ok=False])
+
+ Super-mkdir; create a leaf directory and all intermediate ones. Works like
+ mkdir, except that any intermediate path segment (not just the rightmost)
+ will be created if it does not exist. If the target directory already
+ exists, raise an OSError if exist_ok is False. Otherwise no exception is
+ raised. This is recursive.
+
+ """
+ head, tail = path.split(name)
+ if not tail:
+ head, tail = path.split(head)
+ if head and tail and not path.exists(head):
+ try:
+ makedirs(head, exist_ok=exist_ok)
+ except FileExistsError:
+ # Defeats race condition when another thread created the path
+ pass
+ cdir = curdir
+ if isinstance(tail, bytes):
+ cdir = bytes(curdir, 'ASCII')
+ if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
+ return
+ try:
+ mkdir(name, mode)
+ except OSError:
+ # Cannot rely on checking for EEXIST, since the operating system
+ # could give priority to other errors like EACCES or EROFS
+ if not exist_ok or not path.isdir(name):
+ raise
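+
+# Usage sketch (hypothetical path): makedirs("a/b/c", exist_ok=True) creates
+# "a", then "a/b", then "a/b/c", and is a no-op when "a/b/c" already exists
+# as a directory.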
+
+def removedirs(name):
+ """removedirs(name)
+
+ Super-rmdir; remove a leaf directory and all empty intermediate
+ ones. Works like rmdir except that, if the leaf directory is
+ successfully removed, directories corresponding to rightmost path
+ segments will be pruned away until either the whole path is
+ consumed or an error occurs. Errors during this latter phase are
+ ignored -- they generally mean that a directory was not empty.
+
+ """
+ rmdir(name)
+ head, tail = path.split(name)
+ if not tail:
+ head, tail = path.split(head)
+ while head and tail:
+ try:
+ rmdir(head)
+ except OSError:
+ break
+ head, tail = path.split(head)
+
+def renames(old, new):
+ """renames(old, new)
+
+ Super-rename; create directories as necessary and delete any left
+ empty. Works like rename, except creation of any intermediate
+ directories needed to make the new pathname good is attempted
+ first. After the rename, directories corresponding to rightmost
+ path segments of the old name will be pruned until either the
+ whole path is consumed or a nonempty directory is found.
+
+    Note: this function can fail, leaving the new directory structure in
+    place, if you lack the permissions needed to unlink the leaf directory
+    or file.
+
+ """
+ head, tail = path.split(new)
+ if head and tail and not path.exists(head):
+ makedirs(head)
+ rename(old, new)
+ head, tail = path.split(old)
+ if head and tail:
+ try:
+ removedirs(head)
+ except OSError:
+ pass
+
+__all__.extend(["makedirs", "removedirs", "renames"])
+
+def walk(top, topdown=True, onerror=None, followlinks=False):
+ """Directory tree generator.
+
+ For each directory in the directory tree rooted at top (including top
+ itself, but excluding '.' and '..'), yields a 3-tuple
+
+ dirpath, dirnames, filenames
+
+ dirpath is a string, the path to the directory. dirnames is a list of
+ the names of the subdirectories in dirpath (including symlinks to directories,
+ and excluding '.' and '..').
+ filenames is a list of the names of the non-directory files in dirpath.
+ Note that the names in the lists are just names, with no path components.
+ To get a full path (which begins with top) to a file or directory in
+ dirpath, do os.path.join(dirpath, name).
+
+ If optional arg 'topdown' is true or not specified, the triple for a
+ directory is generated before the triples for any of its subdirectories
+ (directories are generated top down). If topdown is false, the triple
+ for a directory is generated after the triples for all of its
+ subdirectories (directories are generated bottom up).
+
+ When topdown is true, the caller can modify the dirnames list in-place
+ (e.g., via del or slice assignment), and walk will only recurse into the
+ subdirectories whose names remain in dirnames; this can be used to prune the
+ search, or to impose a specific order of visiting. Modifying dirnames when
+ topdown is false has no effect on the behavior of os.walk(), since the
+ directories in dirnames have already been generated by the time dirnames
+ itself is generated. No matter the value of topdown, the list of
+ subdirectories is retrieved before the tuples for the directory and its
+ subdirectories are generated.
+
+ By default errors from the os.scandir() call are ignored. If
+ optional arg 'onerror' is specified, it should be a function; it
+ will be called with one argument, an OSError instance. It can
+ report the error to continue with the walk, or raise the exception
+ to abort the walk. Note that the filename is available as the
+ filename attribute of the exception object.
+
+ By default, os.walk does not follow symbolic links to subdirectories on
+ systems that support them. In order to get this functionality, set the
+ optional argument 'followlinks' to true.
+
+ Caution: if you pass a relative pathname for top, don't change the
+ current working directory between resumptions of walk. walk never
+ changes the current directory, and assumes that the client doesn't
+ either.
+
+ Example:
+
+ import os
+ from os.path import join, getsize
+ for root, dirs, files in os.walk('python/Lib/email'):
+ print(root, "consumes", end="")
+ print(sum(getsize(join(root, name)) for name in files), end="")
+ print("bytes in", len(files), "non-directory files")
+ if 'CVS' in dirs:
+ dirs.remove('CVS') # don't visit CVS directories
+
+ """
+ sys.audit("os.walk", top, topdown, onerror, followlinks)
+ return _walk(fspath(top), topdown, onerror, followlinks)
+
+def _walk(top, topdown, onerror, followlinks):
+ dirs = []
+ nondirs = []
+ walk_dirs = []
+
+ # We may not have read permission for top, in which case we can't
+ # get a list of the files the directory contains. os.walk
+ # always suppressed the exception then, rather than blow up for a
+ # minor reason when (say) a thousand readable directories are still
+ # left to visit. That logic is copied here.
+ try:
+ # Note that scandir is global in this module due
+ # to earlier import-*.
+ scandir_it = scandir(top)
+ except OSError as error:
+ if onerror is not None:
+ onerror(error)
+ return
+
+ with scandir_it:
+ while True:
+ try:
+ try:
+ entry = next(scandir_it)
+ except StopIteration:
+ break
+ except OSError as error:
+ if onerror is not None:
+ onerror(error)
+ return
+
+ try:
+ is_dir = entry.is_dir()
+ except OSError:
+                # If is_dir() raises an OSError, consider that the entry is
+                # not a directory, same behaviour as os.path.isdir().
+ is_dir = False
+
+ if is_dir:
+ dirs.append(entry.name)
+ else:
+ nondirs.append(entry.name)
+
+ if not topdown and is_dir:
+ # Bottom-up: recurse into sub-directory, but exclude symlinks to
+ # directories if followlinks is False
+ if followlinks:
+ walk_into = True
+ else:
+ try:
+ is_symlink = entry.is_symlink()
+ except OSError:
+                        # If is_symlink() raises an OSError, consider that
+                        # the entry is not a symbolic link, same behaviour
+                        # as os.path.islink().
+ is_symlink = False
+ walk_into = not is_symlink
+
+ if walk_into:
+ walk_dirs.append(entry.path)
+
+ # Yield before recursion if going top down
+ if topdown:
+ yield top, dirs, nondirs
+
+ # Recurse into sub-directories
+ islink, join = path.islink, path.join
+ for dirname in dirs:
+ new_path = join(top, dirname)
+ # Issue #23605: os.path.islink() is used instead of caching
+ # entry.is_symlink() result during the loop on os.scandir() because
+ # the caller can replace the directory entry during the "yield"
+ # above.
+ if followlinks or not islink(new_path):
+ yield from _walk(new_path, topdown, onerror, followlinks)
+ else:
+ # Recurse into sub-directories
+ for new_path in walk_dirs:
+ yield from _walk(new_path, topdown, onerror, followlinks)
+ # Yield after recursion if going bottom up
+ yield top, dirs, nondirs
+
+__all__.append("walk")
+
+if {open, stat} <= supports_dir_fd and {scandir, stat} <= supports_fd:
+
+ def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
+ """Directory tree generator.
+
+ This behaves exactly like walk(), except that it yields a 4-tuple
+
+ dirpath, dirnames, filenames, dirfd
+
+ `dirpath`, `dirnames` and `filenames` are identical to walk() output,
+ and `dirfd` is a file descriptor referring to the directory `dirpath`.
+
+ The advantage of fwalk() over walk() is that it's safe against symlink
+ races (when follow_symlinks is False).
+
+ If dir_fd is not None, it should be a file descriptor open to a directory,
+ and top should be relative; top will then be relative to that directory.
+ (dir_fd is always supported for fwalk.)
+
+ Caution:
+ Since fwalk() yields file descriptors, those are only valid until the
+ next iteration step, so you should dup() them if you want to keep them
+ for a longer period.
+
+ Example:
+
+ import os
+ for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
+ print(root, "consumes", end="")
+ print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files),
+ end="")
+ print("bytes in", len(files), "non-directory files")
+ if 'CVS' in dirs:
+ dirs.remove('CVS') # don't visit CVS directories
+ """
+ sys.audit("os.fwalk", top, topdown, onerror, follow_symlinks, dir_fd)
+ if not isinstance(top, int) or not hasattr(top, '__index__'):
+ top = fspath(top)
+ # Note: To guard against symlink races, we use the standard
+ # lstat()/open()/fstat() trick.
+ if not follow_symlinks:
+ orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
+ topfd = open(top, O_RDONLY, dir_fd=dir_fd)
+ try:
+ if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
+ path.samestat(orig_st, stat(topfd)))):
+ yield from _fwalk(topfd, top, isinstance(top, bytes),
+ topdown, onerror, follow_symlinks)
+ finally:
+ close(topfd)
+
+ def _fwalk(topfd, toppath, isbytes, topdown, onerror, follow_symlinks):
+ # Note: This uses O(depth of the directory tree) file descriptors: if
+ # necessary, it can be adapted to only require O(1) FDs, see issue
+ # #13734.
+
+ scandir_it = scandir(topfd)
+ dirs = []
+ nondirs = []
+ entries = None if topdown or follow_symlinks else []
+ for entry in scandir_it:
+ name = entry.name
+ if isbytes:
+ name = fsencode(name)
+ try:
+ if entry.is_dir():
+ dirs.append(name)
+ if entries is not None:
+ entries.append(entry)
+ else:
+ nondirs.append(name)
+ except OSError:
+ try:
+ # Add dangling symlinks, ignore disappeared files
+ if entry.is_symlink():
+ nondirs.append(name)
+ except OSError:
+ pass
+
+ if topdown:
+ yield toppath, dirs, nondirs, topfd
+
+ for name in dirs if entries is None else zip(dirs, entries):
+ try:
+ if not follow_symlinks:
+ if topdown:
+ orig_st = stat(name, dir_fd=topfd, follow_symlinks=False)
+ else:
+ assert entries is not None
+ name, entry = name
+ orig_st = entry.stat(follow_symlinks=False)
+ dirfd = open(name, O_RDONLY, dir_fd=topfd)
+ except OSError as err:
+ if onerror is not None:
+ onerror(err)
+ continue
+ try:
+ if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
+ dirpath = path.join(toppath, name)
+ yield from _fwalk(dirfd, dirpath, isbytes,
+ topdown, onerror, follow_symlinks)
+ finally:
+ close(dirfd)
+
+ if not topdown:
+ yield toppath, dirs, nondirs, topfd
+
+ __all__.append("fwalk")
+
+def execl(file, *args):
+ """execl(file, *args)
+
+ Execute the executable file with argument list args, replacing the
+ current process. """
+ execv(file, args)
+
+def execle(file, *args):
+ """execle(file, *args, env)
+
+ Execute the executable file with argument list args and
+ environment env, replacing the current process. """
+ env = args[-1]
+ execve(file, args[:-1], env)
+
+def execlp(file, *args):
+ """execlp(file, *args)
+
+ Execute the executable file (which is searched for along $PATH)
+ with argument list args, replacing the current process. """
+ execvp(file, args)
+
+def execlpe(file, *args):
+ """execlpe(file, *args, env)
+
+ Execute the executable file (which is searched for along $PATH)
+ with argument list args and environment env, replacing the current
+ process. """
+ env = args[-1]
+ execvpe(file, args[:-1], env)
+
+def execvp(file, args):
+ """execvp(file, args)
+
+ Execute the executable file (which is searched for along $PATH)
+ with argument list args, replacing the current process.
+ args may be a list or tuple of strings. """
+ _execvpe(file, args)
+
+def execvpe(file, args, env):
+ """execvpe(file, args, env)
+
+ Execute the executable file (which is searched for along $PATH)
+ with argument list args and environment env, replacing the
+ current process.
+ args may be a list or tuple of strings. """
+ _execvpe(file, args, env)
+
+__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
+
+def _execvpe(file, args, env=None):
+ if env is not None:
+ exec_func = execve
+ argrest = (args, env)
+ else:
+ exec_func = execv
+ argrest = (args,)
+ env = environ
+
+ if path.dirname(file):
+ exec_func(file, *argrest)
+ return
+ saved_exc = None
+ path_list = get_exec_path(env)
+ if name != 'nt':
+ file = fsencode(file)
+ path_list = map(fsencode, path_list)
+ for dir in path_list:
+ fullname = path.join(dir, file)
+ try:
+ exec_func(fullname, *argrest)
+ except (FileNotFoundError, NotADirectoryError) as e:
+ last_exc = e
+ except OSError as e:
+ last_exc = e
+ if saved_exc is None:
+ saved_exc = e
+ if saved_exc is not None:
+ raise saved_exc
+ raise last_exc
+
+
+def get_exec_path(env=None):
+ """Returns the sequence of directories that will be searched for the
+ named executable (similar to a shell) when launching a process.
+
+ *env* must be an environment variable dict or None. If *env* is None,
+ os.environ will be used.
+ """
+ # Use a local import instead of a global import to limit the number of
+ # modules loaded at startup: the os module is always loaded at startup by
+ # Python. It may also avoid a bootstrap issue.
+ import warnings
+
+ if env is None:
+ env = environ
+
+ # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
+ # BytesWarning when using python -b or python -bb: ignore the warning
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", BytesWarning)
+
+ try:
+ path_list = env.get('PATH')
+ except TypeError:
+ path_list = None
+
+ if supports_bytes_environ:
+ try:
+ path_listb = env[b'PATH']
+ except (KeyError, TypeError):
+ pass
+ else:
+ if path_list is not None:
+ raise ValueError(
+ "env cannot contain 'PATH' and b'PATH' keys")
+ path_list = path_listb
+
+ if path_list is not None and isinstance(path_list, bytes):
+ path_list = fsdecode(path_list)
+
+ if path_list is None:
+ path_list = defpath
+ return path_list.split(pathsep)
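+
+# Example (assumed environment): get_exec_path({'PATH': '/usr/bin:/bin'})
+# returns ['/usr/bin', '/bin']; with env=None the live os.environ is
+# consulted, and os.defpath is the fallback when no PATH key is present.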
+
+
+# Change environ to automatically call putenv() and unsetenv()
+from _collections_abc import MutableMapping, Mapping
+
+class _Environ(MutableMapping):
+ def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue):
+ self.encodekey = encodekey
+ self.decodekey = decodekey
+ self.encodevalue = encodevalue
+ self.decodevalue = decodevalue
+ self._data = data
+
+ def __getitem__(self, key):
+ try:
+ value = self._data[self.encodekey(key)]
+ except KeyError:
+ # raise KeyError with the original key value
+ raise KeyError(key) from None
+ return self.decodevalue(value)
+
+ def __setitem__(self, key, value):
+ key = self.encodekey(key)
+ value = self.encodevalue(value)
+ putenv(key, value)
+ self._data[key] = value
+
+ def __delitem__(self, key):
+ encodedkey = self.encodekey(key)
+ unsetenv(encodedkey)
+ try:
+ del self._data[encodedkey]
+ except KeyError:
+ # raise KeyError with the original key value
+ raise KeyError(key) from None
+
+ def __iter__(self):
+ # list() from dict object is an atomic operation
+ keys = list(self._data)
+ for key in keys:
+ yield self.decodekey(key)
+
+ def __len__(self):
+ return len(self._data)
+
+ def __repr__(self):
+ return 'environ({{{}}})'.format(', '.join(
+ ('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
+ for key, value in self._data.items())))
+
+ def copy(self):
+ return dict(self)
+
+ def setdefault(self, key, value):
+ if key not in self:
+ self[key] = value
+ return self[key]
+
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+ def __or__(self, other):
+ if not isinstance(other, Mapping):
+ return NotImplemented
+ new = dict(self)
+ new.update(other)
+ return new
+
+ def __ror__(self, other):
+ if not isinstance(other, Mapping):
+ return NotImplemented
+ new = dict(other)
+ new.update(self)
+ return new
+
+def _createenviron():
+ if name == 'nt':
+ # Where Env Var Names Must Be UPPERCASE
+ def check_str(value):
+ if not isinstance(value, str):
+ raise TypeError("str expected, not %s" % type(value).__name__)
+ return value
+ encode = check_str
+ decode = str
+ def encodekey(key):
+ return encode(key).upper()
+ data = {}
+ for key, value in environ.items():
+ data[encodekey(key)] = value
+ else:
+ # Where Env Var Names Can Be Mixed Case
+ encoding = sys.getfilesystemencoding()
+ def encode(value):
+ if not isinstance(value, str):
+ raise TypeError("str expected, not %s" % type(value).__name__)
+ return value.encode(encoding, 'surrogateescape')
+ def decode(value):
+ return value.decode(encoding, 'surrogateescape')
+ encodekey = encode
+ data = environ
+ return _Environ(data,
+ encodekey, decode,
+ encode, decode)
+
+# unicode environ
+environ = _createenviron()
+del _createenviron
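+
+# Write-through sketch (variable name is hypothetical): environ['MY_VAR'] =
+# 'x' encodes the key and value, calls putenv(), and records the pair in
+# the backing dict, so the mapping and the C-level environment stay in
+# sync; del environ['MY_VAR'] likewise calls unsetenv().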
+
+
+def getenv(key, default=None):
+ """Get an environment variable, return None if it doesn't exist.
+ The optional second argument can specify an alternate default.
+ key, default and the result are str."""
+ return environ.get(key, default)
+
+supports_bytes_environ = (name != 'nt')
+__all__.extend(("getenv", "supports_bytes_environ"))
+
+if supports_bytes_environ:
+ def _check_bytes(value):
+ if not isinstance(value, bytes):
+ raise TypeError("bytes expected, not %s" % type(value).__name__)
+ return value
+
+ # bytes environ
+ environb = _Environ(environ._data,
+ _check_bytes, bytes,
+ _check_bytes, bytes)
+ del _check_bytes
+
+ def getenvb(key, default=None):
+ """Get an environment variable, return None if it doesn't exist.
+ The optional second argument can specify an alternate default.
+ key, default and the result are bytes."""
+ return environb.get(key, default)
+
+ __all__.extend(("environb", "getenvb"))
+
+def _fscodec():
+ encoding = sys.getfilesystemencoding()
+ errors = sys.getfilesystemencodeerrors()
+
+ def fsencode(filename):
+ """Encode filename (an os.PathLike, bytes, or str) to the filesystem
+ encoding with 'surrogateescape' error handler, return bytes unchanged.
+ On Windows, use 'strict' error handler if the file system encoding is
+ 'mbcs' (which is the default encoding).
+ """
+ filename = fspath(filename) # Does type-checking of `filename`.
+ if isinstance(filename, str):
+ return filename.encode(encoding, errors)
+ else:
+ return filename
+
+ def fsdecode(filename):
+ """Decode filename (an os.PathLike, bytes, or str) from the filesystem
+ encoding with 'surrogateescape' error handler, return str unchanged. On
+ Windows, use 'strict' error handler if the file system encoding is
+ 'mbcs' (which is the default encoding).
+ """
+ filename = fspath(filename) # Does type-checking of `filename`.
+ if isinstance(filename, bytes):
+ return filename.decode(encoding, errors)
+ else:
+ return filename
+
+ return fsencode, fsdecode
+
+fsencode, fsdecode = _fscodec()
+del _fscodec
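+
+# Round-trip sketch (assuming a UTF-8 filesystem encoding):
+# fsencode('caf\xe9') == b'caf\xc3\xa9' and fsdecode(b'caf\xc3\xa9') ==
+# 'caf\xe9'; str and bytes inputs pass through fsdecode and fsencode
+# unchanged, respectively.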
+
+# Supply spawn*() (probably only for Unix)
+if _exists("fork") and not _exists("spawnv") and _exists("execv"):
+
+ P_WAIT = 0
+ P_NOWAIT = P_NOWAITO = 1
+
+ __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
+
+ # XXX Should we support P_DETACH? I suppose it could fork()**2
+ # and close the std I/O streams. Also, P_OVERLAY is the same
+ # as execv*()?
+
+ def _spawnvef(mode, file, args, env, func):
+ # Internal helper; func is the exec*() function to use
+ if not isinstance(args, (tuple, list)):
+ raise TypeError('argv must be a tuple or a list')
+ if not args or not args[0]:
+ raise ValueError('argv first element cannot be empty')
+ pid = fork()
+ if not pid:
+ # Child
+ try:
+ if env is None:
+ func(file, args)
+ else:
+ func(file, args, env)
+ except:
+ _exit(127)
+ else:
+ # Parent
+ if mode == P_NOWAIT:
+ return pid # Caller is responsible for waiting!
+ while 1:
+ wpid, sts = waitpid(pid, 0)
+ if WIFSTOPPED(sts):
+ continue
+
+ return waitstatus_to_exitcode(sts)
+
+ def spawnv(mode, file, args):
+ """spawnv(mode, file, args) -> integer
+
+Execute file with arguments from args in a subprocess.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+ return _spawnvef(mode, file, args, None, execv)
+
+ def spawnve(mode, file, args, env):
+ """spawnve(mode, file, args, env) -> integer
+
+Execute file with arguments from args in a subprocess with the
+specified environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+ return _spawnvef(mode, file, args, env, execve)
+
+ # Note: spawnvp[e] isn't currently supported on Windows
+
+ def spawnvp(mode, file, args):
+ """spawnvp(mode, file, args) -> integer
+
+Execute file (which is looked for along $PATH) with arguments from
+args in a subprocess.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+ return _spawnvef(mode, file, args, None, execvp)
+
+ def spawnvpe(mode, file, args, env):
+ """spawnvpe(mode, file, args, env) -> integer
+
+Execute file (which is looked for along $PATH) with arguments from
+args in a subprocess with the supplied environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+ return _spawnvef(mode, file, args, env, execvpe)
+
+
+ __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
+
+
+if _exists("spawnv"):
+ # These aren't supplied by the basic Windows code
+ # but can be easily implemented in Python
+
+ def spawnl(mode, file, *args):
+ """spawnl(mode, file, *args) -> integer
+
+Execute file with arguments from args in a subprocess.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+ return spawnv(mode, file, args)
+
+ def spawnle(mode, file, *args):
+ """spawnle(mode, file, *args, env) -> integer
+
+Execute file with arguments from args in a subprocess with the
+supplied environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+ env = args[-1]
+ return spawnve(mode, file, args[:-1], env)
+
+
+ __all__.extend(["spawnl", "spawnle"])
+
+
+if _exists("spawnvp"):
+ # At the moment, Windows doesn't implement spawnvp[e],
+ # so it won't have spawnlp[e] either.
+ def spawnlp(mode, file, *args):
+ """spawnlp(mode, file, *args) -> integer
+
+Execute file (which is looked for along $PATH) with arguments from
+args in a subprocess with the supplied environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+ return spawnvp(mode, file, args)
+
+ def spawnlpe(mode, file, *args):
+ """spawnlpe(mode, file, *args, env) -> integer
+
+Execute file (which is looked for along $PATH) with arguments from
+args in a subprocess with the supplied environment.
+If mode == P_NOWAIT return the pid of the process.
+If mode == P_WAIT return the process's exit code if it exits normally;
+otherwise return -SIG, where SIG is the signal that killed it. """
+ env = args[-1]
+ return spawnvpe(mode, file, args[:-1], env)
+
+
+ __all__.extend(["spawnlp", "spawnlpe"])
+
+# VxWorks provides no user-space shell. As a result, running a command
+# in a shell can't be supported.
+if sys.platform != 'vxworks':
+ # Supply os.popen()
+ def popen(cmd, mode="r", buffering=-1):
+ if not isinstance(cmd, str):
+ raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
+ if mode not in ("r", "w"):
+ raise ValueError("invalid mode %r" % mode)
+ if buffering == 0 or buffering is None:
+ raise ValueError("popen() does not support unbuffered streams")
+ import subprocess, io
+ if mode == "r":
+ proc = subprocess.Popen(cmd,
+ shell=True, text=True,
+ stdout=subprocess.PIPE,
+ bufsize=buffering)
+ return _wrap_close(proc.stdout, proc)
+ else:
+ proc = subprocess.Popen(cmd,
+ shell=True, text=True,
+ stdin=subprocess.PIPE,
+ bufsize=buffering)
+ return _wrap_close(proc.stdin, proc)
+
+ # Helper for popen() -- a proxy for a file whose close waits for the process
+ class _wrap_close:
+ def __init__(self, stream, proc):
+ self._stream = stream
+ self._proc = proc
+ def close(self):
+ self._stream.close()
+ returncode = self._proc.wait()
+ if returncode == 0:
+ return None
+ if name == 'nt':
+ return returncode
+ else:
+ return returncode << 8 # Shift left to match old behavior
+ def __enter__(self):
+ return self
+ def __exit__(self, *args):
+ self.close()
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+ def __iter__(self):
+ return iter(self._stream)
+
+ __all__.append("popen")
+
+# Supply os.fdopen()
+def fdopen(fd, mode="r", buffering=-1, encoding=None, *args, **kwargs):
+ if not isinstance(fd, int):
+ raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
+ import io
+ if "b" not in mode:
+ encoding = io.text_encoding(encoding)
+ return io.open(fd, mode, buffering, encoding, *args, **kwargs)
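+
+# Sketch (descriptors come from a hypothetical pipe): r, w = pipe();
+# fdopen(r) wraps the read end in a buffered text-mode file object via
+# io.open(), so it can be used like any file returned by open().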
+
+
+# For testing purposes, make sure the function is available when the C
+# implementation exists.
+def _fspath(path):
+ """Return the path representation of a path-like object.
+
+ If str or bytes is passed in, it is returned unchanged. Otherwise the
+ os.PathLike interface is used to get the path representation. If the
+ path representation is not str or bytes, TypeError is raised. If the
+ provided path is not str, bytes, or os.PathLike, TypeError is raised.
+ """
+ if isinstance(path, (str, bytes)):
+ return path
+
+ # Work from the object's type to match method resolution of other magic
+ # methods.
+ path_type = type(path)
+ try:
+ path_repr = path_type.__fspath__(path)
+ except AttributeError:
+ if hasattr(path_type, '__fspath__'):
+ raise
+ else:
+ raise TypeError("expected str, bytes or os.PathLike object, "
+ "not " + path_type.__name__)
+ if isinstance(path_repr, (str, bytes)):
+ return path_repr
+ else:
+ raise TypeError("expected {}.__fspath__() to return str or bytes, "
+ "not {}".format(path_type.__name__,
+ type(path_repr).__name__))
+
+# If there is no C implementation, make the pure Python version the
+# implementation as transparently as possible.
+if not _exists('fspath'):
+ fspath = _fspath
+ fspath.__name__ = "fspath"
+
+
+class PathLike(abc.ABC):
+
+ """Abstract base class for implementing the file system path protocol."""
+
+ @abc.abstractmethod
+ def __fspath__(self):
+ """Return the file system path representation of the object."""
+ raise NotImplementedError
+
+ @classmethod
+ def __subclasshook__(cls, subclass):
+ if cls is PathLike:
+ return _check_methods(subclass, '__fspath__')
+ return NotImplemented
+
+ __class_getitem__ = classmethod(GenericAlias)
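+
+# Note: via __subclasshook__, a class like the Probe sketch above is a
+# virtual subclass -- isinstance(Probe(), PathLike) holds without explicit
+# registration.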
+
+
+if name == 'nt':
+ class _AddedDllDirectory:
+ def __init__(self, path, cookie, remove_dll_directory):
+ self.path = path
+ self._cookie = cookie
+ self._remove_dll_directory = remove_dll_directory
+ def close(self):
+ self._remove_dll_directory(self._cookie)
+ self.path = None
+ def __enter__(self):
+ return self
+ def __exit__(self, *args):
+ self.close()
+ def __repr__(self):
+            if self.path:
+                return "<AddedDllDirectory({!r})>".format(self.path)
+            return "<AddedDllDirectory()>"
+
+ def add_dll_directory(path):
+ """Add a path to the DLL search path.
+
+ This search path is used when resolving dependencies for imported
+ extension modules (the module itself is resolved through sys.path),
+ and also by ctypes.
+
+ Remove the directory by calling close() on the returned object or
+ using it in a with statement.
+ """
+ import nt
+ cookie = nt._add_dll_directory(path)
+ return _AddedDllDirectory(
+ path,
+ cookie,
+ nt._remove_dll_directory
+ )
diff --git a/infer_4_37_2/lib/python3.10/pathlib.py b/infer_4_37_2/lib/python3.10/pathlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..97b23ca45a3a19ccb7824c2b7232730f4860e4e8
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/pathlib.py
@@ -0,0 +1,1461 @@
+import fnmatch
+import functools
+import io
+import ntpath
+import os
+import posixpath
+import re
+import sys
+import warnings
+from _collections_abc import Sequence
+from errno import EINVAL, ENOENT, ENOTDIR, EBADF, ELOOP
+from operator import attrgetter
+from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
+from urllib.parse import quote_from_bytes as urlquote_from_bytes
+
+
+__all__ = [
+ "PurePath", "PurePosixPath", "PureWindowsPath",
+ "Path", "PosixPath", "WindowsPath",
+ ]
+
+#
+# Internals
+#
+
+_WINERROR_NOT_READY = 21 # drive exists but is not accessible
+_WINERROR_INVALID_NAME = 123 # fix for bpo-35306
+_WINERROR_CANT_RESOLVE_FILENAME = 1921 # broken symlink pointing to itself
+
+# EBADF - guard against macOS `stat` throwing EBADF
+_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP)
+
+_IGNORED_WINERRORS = (
+ _WINERROR_NOT_READY,
+ _WINERROR_INVALID_NAME,
+ _WINERROR_CANT_RESOLVE_FILENAME)
+
+def _ignore_error(exception):
+    return (getattr(exception, 'errno', None) in _IGNORED_ERRORS or
+            getattr(exception, 'winerror', None) in _IGNORED_WINERRORS)
+
+
+def _is_wildcard_pattern(pat):
+ # Whether this pattern needs actual matching using fnmatch, or can
+ # be looked up directly as a file.
+ return "*" in pat or "?" in pat or "[" in pat
+
+
+class _Flavour(object):
+ """A flavour implements a particular (platform-specific) set of path
+ semantics."""
+
+ def __init__(self):
+ self.join = self.sep.join
+
+ def parse_parts(self, parts):
+ parsed = []
+ sep = self.sep
+ altsep = self.altsep
+ drv = root = ''
+ it = reversed(parts)
+ for part in it:
+ if not part:
+ continue
+ if altsep:
+ part = part.replace(altsep, sep)
+ drv, root, rel = self.splitroot(part)
+ if sep in rel:
+ for x in reversed(rel.split(sep)):
+ if x and x != '.':
+ parsed.append(sys.intern(x))
+ else:
+ if rel and rel != '.':
+ parsed.append(sys.intern(rel))
+ if drv or root:
+ if not drv:
+ # If no drive is present, try to find one in the previous
+ # parts. This makes the result of parsing e.g.
+ # ("C:", "/", "a") reasonably intuitive.
+ for part in it:
+ if not part:
+ continue
+ if altsep:
+ part = part.replace(altsep, sep)
+ drv = self.splitroot(part)[0]
+ if drv:
+ break
+ break
+ if drv or root:
+ parsed.append(drv + root)
+ parsed.reverse()
+ return drv, root, parsed
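+    # Illustrative trace (Windows flavour): parse_parts(("C:", "/", "a"))
+    # returns ('C:', '\\', ['C:\\', 'a']) -- the root contributed by "/"
+    # picks up the drive found in the earlier "C:" part.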
+
+ def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
+ """
+ Join the two paths represented by the respective
+ (drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
+ """
+ if root2:
+ if not drv2 and drv:
+ return drv, root2, [drv + root2] + parts2[1:]
+ elif drv2:
+ if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
+ # Same drive => second path is relative to the first
+ return drv, root, parts + parts2[1:]
+ else:
+ # Second path is non-anchored (common case)
+ return drv, root, parts + parts2
+ return drv2, root2, parts2
+
+
+class _WindowsFlavour(_Flavour):
+ # Reference for Windows paths can be found at
+ # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx
+
+ sep = '\\'
+ altsep = '/'
+ has_drv = True
+ pathmod = ntpath
+
+ is_supported = (os.name == 'nt')
+
+ drive_letters = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
+ ext_namespace_prefix = '\\\\?\\'
+
+ reserved_names = (
+ {'CON', 'PRN', 'AUX', 'NUL', 'CONIN$', 'CONOUT$'} |
+ {'COM%s' % c for c in '123456789\xb9\xb2\xb3'} |
+ {'LPT%s' % c for c in '123456789\xb9\xb2\xb3'}
+ )
+
+ # Interesting findings about extended paths:
+ # * '\\?\c:\a' is an extended path, which bypasses normal Windows API
+ # path processing. Thus relative paths are not resolved and slash is not
+ # translated to backslash. It has the native NT path limit of 32767
+ # characters, but a bit less after resolving device symbolic links,
+ # such as '\??\C:' => '\Device\HarddiskVolume2'.
+ # * '\\?\c:/a' looks for a device named 'C:/a' because slash is a
+ # regular name character in the object namespace.
+ # * '\\?\c:\foo/bar' is invalid because '/' is illegal in NT filesystems.
+ # The only path separator at the filesystem level is backslash.
+ # * '//?/c:\a' and '//?/c:/a' are effectively equivalent to '\\.\c:\a' and
+ # thus limited to MAX_PATH.
+ # * Prior to Windows 8, ANSI API bytes paths are limited to MAX_PATH,
+ # even with the '\\?\' prefix.
+
+ def splitroot(self, part, sep=sep):
+ first = part[0:1]
+ second = part[1:2]
+ if (second == sep and first == sep):
+ # XXX extended paths should also disable the collapsing of "."
+ # components (according to MSDN docs).
+ prefix, part = self._split_extended_path(part)
+ first = part[0:1]
+ second = part[1:2]
+ else:
+ prefix = ''
+ third = part[2:3]
+ if (second == sep and first == sep and third != sep):
+ # is a UNC path:
+ # vvvvvvvvvvvvvvvvvvvvv root
+ # \\machine\mountpoint\directory\etc\...
+ # directory ^^^^^^^^^^^^^^
+ index = part.find(sep, 2)
+ if index != -1:
+ index2 = part.find(sep, index + 1)
+ # a UNC path can't have two slashes in a row
+ # (after the initial two)
+ if index2 != index + 1:
+ if index2 == -1:
+ index2 = len(part)
+ if prefix:
+ return prefix + part[1:index2], sep, part[index2+1:]
+ else:
+ return part[:index2], sep, part[index2+1:]
+ drv = root = ''
+ if second == ':' and first in self.drive_letters:
+ drv = part[:2]
+ part = part[2:]
+ first = third
+ if first == sep:
+ root = first
+ part = part.lstrip(sep)
+ return prefix + drv, root, part
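+    # Illustrative results of the method above:
+    #   splitroot('c:\\Users\\guido')     -> ('c:', '\\', 'Users\\guido')
+    #   splitroot('\\\\server\\share\\f') -> ('\\\\server\\share', '\\', 'f')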
+
+ def casefold(self, s):
+ return s.lower()
+
+ def casefold_parts(self, parts):
+ return [p.lower() for p in parts]
+
+ def compile_pattern(self, pattern):
+ return re.compile(fnmatch.translate(pattern), re.IGNORECASE).fullmatch
+
+ def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
+ prefix = ''
+ if s.startswith(ext_prefix):
+ prefix = s[:4]
+ s = s[4:]
+ if s.startswith('UNC\\'):
+ prefix += s[:3]
+ s = '\\' + s[3:]
+ return prefix, s
+
+ def is_reserved(self, parts):
+ # NOTE: the rules for reserved names seem somewhat complicated
+ # (e.g. r"..\NUL" is reserved but not r"foo\NUL" if "foo" does not
+ # exist). We err on the side of caution and return True for paths
+ # which are not considered reserved by Windows.
+ if not parts:
+ return False
+ if parts[0].startswith('\\\\'):
+ # UNC paths are never reserved
+ return False
+ name = parts[-1].partition('.')[0].partition(':')[0].rstrip(' ')
+ return name.upper() in self.reserved_names
+
+ def make_uri(self, path):
+ # Under Windows, file URIs use the UTF-8 encoding.
+ drive = path.drive
+ if len(drive) == 2 and drive[1] == ':':
+ # It's a path on a local drive => 'file:///c:/a/b'
+ rest = path.as_posix()[2:].lstrip('/')
+ return 'file:///%s/%s' % (
+ drive, urlquote_from_bytes(rest.encode('utf-8')))
+ else:
+ # It's a path on a network drive => 'file://host/share/a/b'
+ return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
+
+
+class _PosixFlavour(_Flavour):
+ sep = '/'
+ altsep = ''
+ has_drv = False
+ pathmod = posixpath
+
+ is_supported = (os.name != 'nt')
+
+ def splitroot(self, part, sep=sep):
+ if part and part[0] == sep:
+ stripped_part = part.lstrip(sep)
+ # According to POSIX path resolution:
+ # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11
+ # "A pathname that begins with two successive slashes may be
+ # interpreted in an implementation-defined manner, although more
+ # than two leading slashes shall be treated as a single slash".
+ if len(part) - len(stripped_part) == 2:
+ return '', sep * 2, stripped_part
+ else:
+ return '', sep, stripped_part
+ else:
+ return '', '', part
+
+ def casefold(self, s):
+ return s
+
+ def casefold_parts(self, parts):
+ return parts
+
+ def compile_pattern(self, pattern):
+ return re.compile(fnmatch.translate(pattern)).fullmatch
+
+ def is_reserved(self, parts):
+ return False
+
+ def make_uri(self, path):
+ # We represent the path using the local filesystem encoding,
+ # for portability to other applications.
+ bpath = bytes(path)
+ return 'file://' + urlquote_from_bytes(bpath)
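+        # e.g. PosixPath('/home/guido/bin') becomes 'file:///home/guido/bin'.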
+
+
+_windows_flavour = _WindowsFlavour()
+_posix_flavour = _PosixFlavour()
+
+
+class _Accessor:
+ """An accessor implements a particular (system-specific or not) way of
+ accessing paths on the filesystem."""
+
+
+class _NormalAccessor(_Accessor):
+
+ stat = os.stat
+
+ open = io.open
+
+ listdir = os.listdir
+
+ scandir = os.scandir
+
+ chmod = os.chmod
+
+ mkdir = os.mkdir
+
+ unlink = os.unlink
+
+ if hasattr(os, "link"):
+ link = os.link
+ else:
+ def link(self, src, dst):
+ raise NotImplementedError("os.link() not available on this system")
+
+ rmdir = os.rmdir
+
+ rename = os.rename
+
+ replace = os.replace
+
+ if hasattr(os, "symlink"):
+ symlink = os.symlink
+ else:
+ def symlink(self, src, dst, target_is_directory=False):
+ raise NotImplementedError("os.symlink() not available on this system")
+
+ def touch(self, path, mode=0o666, exist_ok=True):
+ if exist_ok:
+ # First try to bump modification time
+ # Implementation note: GNU touch uses the UTIME_NOW option of
+ # the utimensat() / futimens() functions.
+ try:
+ os.utime(path, None)
+ except OSError:
+ # Avoid exception chaining
+ pass
+ else:
+ return
+ flags = os.O_CREAT | os.O_WRONLY
+ if not exist_ok:
+ flags |= os.O_EXCL
+ fd = os.open(path, flags, mode)
+ os.close(fd)
+
+ if hasattr(os, "readlink"):
+ readlink = os.readlink
+ else:
+ def readlink(self, path):
+ raise NotImplementedError("os.readlink() not available on this system")
+
+ def owner(self, path):
+ try:
+ import pwd
+ return pwd.getpwuid(self.stat(path).st_uid).pw_name
+ except ImportError:
+ raise NotImplementedError("Path.owner() is unsupported on this system")
+
+ def group(self, path):
+ try:
+ import grp
+ return grp.getgrgid(self.stat(path).st_gid).gr_name
+ except ImportError:
+ raise NotImplementedError("Path.group() is unsupported on this system")
+
+ getcwd = os.getcwd
+
+ expanduser = staticmethod(os.path.expanduser)
+
+ realpath = staticmethod(os.path.realpath)
+
+
+_normal_accessor = _NormalAccessor()
+
+
+#
+# Globbing helpers
+#
+
+def _make_selector(pattern_parts, flavour):
+ pat = pattern_parts[0]
+ child_parts = pattern_parts[1:]
+ if pat == '**':
+ cls = _RecursiveWildcardSelector
+ elif '**' in pat:
+ raise ValueError("Invalid pattern: '**' can only be an entire path component")
+ elif _is_wildcard_pattern(pat):
+ cls = _WildcardSelector
+ else:
+ cls = _PreciseSelector
+ return cls(pat, child_parts, flavour)
+
+if hasattr(functools, "lru_cache"):
+ _make_selector = functools.lru_cache()(_make_selector)
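+# For example, the pattern parts ('**', '*.py') build a
+# _RecursiveWildcardSelector chained to a _WildcardSelector, which terminates
+# in a _TerminatingSelector that yields each matched path.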
+
+
+class _Selector:
+ """A selector matches a specific glob pattern part against the children
+ of a given path."""
+
+ def __init__(self, child_parts, flavour):
+ self.child_parts = child_parts
+ if child_parts:
+ self.successor = _make_selector(child_parts, flavour)
+ self.dironly = True
+ else:
+ self.successor = _TerminatingSelector()
+ self.dironly = False
+
+ def select_from(self, parent_path):
+ """Iterate over all child paths of `parent_path` matched by this
+ selector. This can contain parent_path itself."""
+ path_cls = type(parent_path)
+ is_dir = path_cls.is_dir
+ exists = path_cls.exists
+ scandir = parent_path._accessor.scandir
+ if not is_dir(parent_path):
+ return iter([])
+ return self._select_from(parent_path, is_dir, exists, scandir)
+
+
+class _TerminatingSelector:
+
+ def _select_from(self, parent_path, is_dir, exists, scandir):
+ yield parent_path
+
+
+class _PreciseSelector(_Selector):
+
+ def __init__(self, name, child_parts, flavour):
+ self.name = name
+ _Selector.__init__(self, child_parts, flavour)
+
+ def _select_from(self, parent_path, is_dir, exists, scandir):
+ try:
+ path = parent_path._make_child_relpath(self.name)
+ if (is_dir if self.dironly else exists)(path):
+ for p in self.successor._select_from(path, is_dir, exists, scandir):
+ yield p
+ except PermissionError:
+ return
+
+
+class _WildcardSelector(_Selector):
+
+ def __init__(self, pat, child_parts, flavour):
+ self.match = flavour.compile_pattern(pat)
+ _Selector.__init__(self, child_parts, flavour)
+
+ def _select_from(self, parent_path, is_dir, exists, scandir):
+ try:
+ with scandir(parent_path) as scandir_it:
+ entries = list(scandir_it)
+ for entry in entries:
+ if self.dironly:
+ try:
+ # "entry.is_dir()" can raise PermissionError
+ # in some cases (see bpo-38894), which is not
+ # among the errors ignored by _ignore_error()
+ if not entry.is_dir():
+ continue
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ continue
+ name = entry.name
+ if self.match(name):
+ path = parent_path._make_child_relpath(name)
+ for p in self.successor._select_from(path, is_dir, exists, scandir):
+ yield p
+ except PermissionError:
+ return
+
+
+class _RecursiveWildcardSelector(_Selector):
+
+ def __init__(self, pat, child_parts, flavour):
+ _Selector.__init__(self, child_parts, flavour)
+
+ def _iterate_directories(self, parent_path, is_dir, scandir):
+ yield parent_path
+ try:
+ with scandir(parent_path) as scandir_it:
+ entries = list(scandir_it)
+ for entry in entries:
+ entry_is_dir = False
+ try:
+ entry_is_dir = entry.is_dir()
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ if entry_is_dir and not entry.is_symlink():
+ path = parent_path._make_child_relpath(entry.name)
+ for p in self._iterate_directories(path, is_dir, scandir):
+ yield p
+ except PermissionError:
+ return
+
+ def _select_from(self, parent_path, is_dir, exists, scandir):
+ try:
+ yielded = set()
+ try:
+ successor_select = self.successor._select_from
+ for starting_point in self._iterate_directories(parent_path, is_dir, scandir):
+ for p in successor_select(starting_point, is_dir, exists, scandir):
+ if p not in yielded:
+ yield p
+ yielded.add(p)
+ finally:
+ yielded.clear()
+ except PermissionError:
+ return
+
+
+#
+# Public API
+#
+
+class _PathParents(Sequence):
+ """This object provides sequence-like access to the logical ancestors
+ of a path. Don't try to construct it yourself."""
+ __slots__ = ('_pathcls', '_drv', '_root', '_parts')
+
+ def __init__(self, path):
+ # We don't store the instance to avoid reference cycles
+ self._pathcls = type(path)
+ self._drv = path._drv
+ self._root = path._root
+ self._parts = path._parts
+
+ def __len__(self):
+ if self._drv or self._root:
+ return len(self._parts) - 1
+ else:
+ return len(self._parts)
+
+ def __getitem__(self, idx):
+ if isinstance(idx, slice):
+ return tuple(self[i] for i in range(*idx.indices(len(self))))
+
+ if idx >= len(self) or idx < -len(self):
+ raise IndexError(idx)
+ if idx < 0:
+ idx += len(self)
+ return self._pathcls._from_parsed_parts(self._drv, self._root,
+ self._parts[:-idx - 1])
+
+ def __repr__(self):
+ return "<{}.parents>".format(self._pathcls.__name__)
+
+
+class PurePath(object):
+ """Base class for manipulating paths without I/O.
+
+ PurePath represents a filesystem path and offers operations which
+ don't imply any actual filesystem I/O. Depending on your system,
+ instantiating a PurePath will return either a PurePosixPath or a
+ PureWindowsPath object. You can also instantiate either of these classes
+ directly, regardless of your system.
+ """
+ __slots__ = (
+ '_drv', '_root', '_parts',
+ '_str', '_hash', '_pparts', '_cached_cparts',
+ )
+
+ def __new__(cls, *args):
+ """Construct a PurePath from one or several strings and or existing
+ PurePath objects. The strings and path objects are combined so as
+ to yield a canonicalized path, which is incorporated into the
+ new PurePath object.
+ """
+ if cls is PurePath:
+ cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
+ return cls._from_parts(args)
+
+ def __reduce__(self):
+ # Using the parts tuple helps share interned path parts
+ # when pickling related paths.
+ return (self.__class__, tuple(self._parts))
+
+ @classmethod
+ def _parse_args(cls, args):
+ # This is useful when you don't want to create an instance, just
+ # canonicalize some constructor arguments.
+ parts = []
+ for a in args:
+ if isinstance(a, PurePath):
+ parts += a._parts
+ else:
+ a = os.fspath(a)
+ if isinstance(a, str):
+ # Force-cast str subclasses to str (issue #21127)
+ parts.append(str(a))
+ else:
+ raise TypeError(
+ "argument should be a str object or an os.PathLike "
+ "object returning str, not %r"
+ % type(a))
+ return cls._flavour.parse_parts(parts)
+
+ @classmethod
+ def _from_parts(cls, args):
+ # We need to call _parse_args on the instance, so as to get the
+ # right flavour.
+ self = object.__new__(cls)
+ drv, root, parts = self._parse_args(args)
+ self._drv = drv
+ self._root = root
+ self._parts = parts
+ return self
+
+ @classmethod
+ def _from_parsed_parts(cls, drv, root, parts):
+ self = object.__new__(cls)
+ self._drv = drv
+ self._root = root
+ self._parts = parts
+ return self
+
+ @classmethod
+ def _format_parsed_parts(cls, drv, root, parts):
+ if drv or root:
+ return drv + root + cls._flavour.join(parts[1:])
+ else:
+ return cls._flavour.join(parts)
+
+ def _make_child(self, args):
+ drv, root, parts = self._parse_args(args)
+ drv, root, parts = self._flavour.join_parsed_parts(
+ self._drv, self._root, self._parts, drv, root, parts)
+ return self._from_parsed_parts(drv, root, parts)
+
+ def __str__(self):
+ """Return the string representation of the path, suitable for
+ passing to system calls."""
+ try:
+ return self._str
+ except AttributeError:
+ self._str = self._format_parsed_parts(self._drv, self._root,
+ self._parts) or '.'
+ return self._str
+
+ def __fspath__(self):
+ return str(self)
+
+ def as_posix(self):
+ """Return the string representation of the path with forward (/)
+ slashes."""
+ f = self._flavour
+ return str(self).replace(f.sep, '/')
+
+ def __bytes__(self):
+ """Return the bytes representation of the path. This is only
+ recommended to use under Unix."""
+ return os.fsencode(self)
+
+ def __repr__(self):
+ return "{}({!r})".format(self.__class__.__name__, self.as_posix())
+
+ def as_uri(self):
+ """Return the path as a 'file' URI."""
+ if not self.is_absolute():
+ raise ValueError("relative path can't be expressed as a file URI")
+ return self._flavour.make_uri(self)
+
+ @property
+ def _cparts(self):
+ # Cached casefolded parts, for hashing and comparison
+ try:
+ return self._cached_cparts
+ except AttributeError:
+ self._cached_cparts = self._flavour.casefold_parts(self._parts)
+ return self._cached_cparts
+
+ def __eq__(self, other):
+ if not isinstance(other, PurePath):
+ return NotImplemented
+ return self._cparts == other._cparts and self._flavour is other._flavour
+
+ def __hash__(self):
+ try:
+ return self._hash
+ except AttributeError:
+ self._hash = hash(tuple(self._cparts))
+ return self._hash
+
+ def __lt__(self, other):
+ if not isinstance(other, PurePath) or self._flavour is not other._flavour:
+ return NotImplemented
+ return self._cparts < other._cparts
+
+ def __le__(self, other):
+ if not isinstance(other, PurePath) or self._flavour is not other._flavour:
+ return NotImplemented
+ return self._cparts <= other._cparts
+
+ def __gt__(self, other):
+ if not isinstance(other, PurePath) or self._flavour is not other._flavour:
+ return NotImplemented
+ return self._cparts > other._cparts
+
+ def __ge__(self, other):
+ if not isinstance(other, PurePath) or self._flavour is not other._flavour:
+ return NotImplemented
+ return self._cparts >= other._cparts
+
+ def __class_getitem__(cls, type):
+ return cls
+
+ drive = property(attrgetter('_drv'),
+ doc="""The drive prefix (letter or UNC path), if any.""")
+
+ root = property(attrgetter('_root'),
+ doc="""The root of the path, if any.""")
+
+ @property
+ def anchor(self):
+ """The concatenation of the drive and root, or ''."""
+ anchor = self._drv + self._root
+ return anchor
+
+ @property
+ def name(self):
+ """The final path component, if any."""
+ parts = self._parts
+ if len(parts) == (1 if (self._drv or self._root) else 0):
+ return ''
+ return parts[-1]
+
+ @property
+ def suffix(self):
+ """
+ The final component's last suffix, if any.
+
+ This includes the leading period. For example: '.txt'
+ """
+ name = self.name
+ i = name.rfind('.')
+ if 0 < i < len(name) - 1:
+ return name[i:]
+ else:
+ return ''
+
+ @property
+ def suffixes(self):
+ """
+ A list of the final component's suffixes, if any.
+
+ These include the leading periods. For example: ['.tar', '.gz']
+ """
+ name = self.name
+ if name.endswith('.'):
+ return []
+ name = name.lstrip('.')
+ return ['.' + suffix for suffix in name.split('.')[1:]]
+
+ @property
+ def stem(self):
+ """The final path component, minus its last suffix."""
+ name = self.name
+ i = name.rfind('.')
+ if 0 < i < len(name) - 1:
+ return name[:i]
+ else:
+ return name
+
+ def with_name(self, name):
+ """Return a new path with the file name changed."""
+ if not self.name:
+ raise ValueError("%r has an empty name" % (self,))
+ drv, root, parts = self._flavour.parse_parts((name,))
+ if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep]
+ or drv or root or len(parts) != 1):
+ raise ValueError("Invalid name %r" % (name))
+ return self._from_parsed_parts(self._drv, self._root,
+ self._parts[:-1] + [name])
+
+ def with_stem(self, stem):
+ """Return a new path with the stem changed."""
+ return self.with_name(stem + self.suffix)
+
+ def with_suffix(self, suffix):
+ """Return a new path with the file suffix changed. If the path
+ has no suffix, add given suffix. If the given suffix is an empty
+ string, remove the suffix from the path.
+ """
+ f = self._flavour
+ if f.sep in suffix or f.altsep and f.altsep in suffix:
+ raise ValueError("Invalid suffix %r" % (suffix,))
+ if suffix and not suffix.startswith('.') or suffix == '.':
+ raise ValueError("Invalid suffix %r" % (suffix))
+ name = self.name
+ if not name:
+ raise ValueError("%r has an empty name" % (self,))
+ old_suffix = self.suffix
+ if not old_suffix:
+ name = name + suffix
+ else:
+ name = name[:-len(old_suffix)] + suffix
+ return self._from_parsed_parts(self._drv, self._root,
+ self._parts[:-1] + [name])
+
+ def relative_to(self, *other):
+ """Return the relative path to another path identified by the passed
+ arguments. If the operation is not possible (because this is not
+ a subpath of the other path), raise ValueError.
+ """
+ # For the purpose of this method, drive and root are considered
+ # separate parts, i.e.:
+ # Path('c:/').relative_to('c:') gives Path('/')
+ # Path('c:/').relative_to('/') raise ValueError
+ if not other:
+ raise TypeError("need at least one argument")
+ parts = self._parts
+ drv = self._drv
+ root = self._root
+ if root:
+ abs_parts = [drv, root] + parts[1:]
+ else:
+ abs_parts = parts
+ to_drv, to_root, to_parts = self._parse_args(other)
+ if to_root:
+ to_abs_parts = [to_drv, to_root] + to_parts[1:]
+ else:
+ to_abs_parts = to_parts
+ n = len(to_abs_parts)
+ cf = self._flavour.casefold_parts
+ if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
+ formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
+ raise ValueError("{!r} is not in the subpath of {!r}"
+ " OR one path is relative and the other is absolute."
+ .format(str(self), str(formatted)))
+ return self._from_parsed_parts('', root if n == 1 else '',
+ abs_parts[n:])
+
+ def is_relative_to(self, *other):
+ """Return True if the path is relative to another path or False.
+ """
+ try:
+ self.relative_to(*other)
+ return True
+ except ValueError:
+ return False
+
+ @property
+ def parts(self):
+ """An object providing sequence-like access to the
+ components in the filesystem path."""
+ # We cache the tuple to avoid building a new one each time .parts
+ # is accessed. XXX is this necessary?
+ try:
+ return self._pparts
+ except AttributeError:
+ self._pparts = tuple(self._parts)
+ return self._pparts
+
+ def joinpath(self, *args):
+ """Combine this path with one or several arguments, and return a
+ new path representing either a subpath (if all arguments are relative
+ paths) or a totally different path (if one of the arguments is
+ anchored).
+ """
+ return self._make_child(args)
+
+ def __truediv__(self, key):
+ try:
+ return self._make_child((key,))
+ except TypeError:
+ return NotImplemented
+
+ def __rtruediv__(self, key):
+ try:
+ return self._from_parts([key] + self._parts)
+ except TypeError:
+ return NotImplemented
+
+ @property
+ def parent(self):
+ """The logical parent of the path."""
+ drv = self._drv
+ root = self._root
+ parts = self._parts
+ if len(parts) == 1 and (drv or root):
+ return self
+ return self._from_parsed_parts(drv, root, parts[:-1])
+
+ @property
+ def parents(self):
+ """A sequence of this path's logical parents."""
+ return _PathParents(self)
+
+ def is_absolute(self):
+ """True if the path is absolute (has both a root and, if applicable,
+ a drive)."""
+ if not self._root:
+ return False
+ return not self._flavour.has_drv or bool(self._drv)
+
+ def is_reserved(self):
+ """Return True if the path contains one of the special names reserved
+ by the system, if any."""
+ return self._flavour.is_reserved(self._parts)
+
+ def match(self, path_pattern):
+ """
+ Return True if this path matches the given pattern.
+ """
+ cf = self._flavour.casefold
+ path_pattern = cf(path_pattern)
+ drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
+ if not pat_parts:
+ raise ValueError("empty pattern")
+ if drv and drv != cf(self._drv):
+ return False
+ if root and root != cf(self._root):
+ return False
+ parts = self._cparts
+ if drv or root:
+ if len(pat_parts) != len(parts):
+ return False
+ pat_parts = pat_parts[1:]
+ elif len(pat_parts) > len(parts):
+ return False
+ for part, pat in zip(reversed(parts), reversed(pat_parts)):
+ if not fnmatch.fnmatchcase(part, pat):
+ return False
+ return True
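+    # Illustrative semantics: a relative pattern matches from the right, so
+    #   PurePath('a/b.py').match('*.py')       -> True
+    #   PurePath('/a/b/c.py').match('b/*.py')  -> True
+    #   PurePath('/a/b/c.py').match('a/*.py')  -> False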
+
+# Can't subclass os.PathLike from PurePath and keep the constructor
+# optimizations in PurePath._parse_args().
+os.PathLike.register(PurePath)
+
+
+class PurePosixPath(PurePath):
+ """PurePath subclass for non-Windows systems.
+
+ On a POSIX system, instantiating a PurePath should return this object.
+ However, you can also instantiate it directly on any system.
+ """
+ _flavour = _posix_flavour
+ __slots__ = ()
+
+
+class PureWindowsPath(PurePath):
+ """PurePath subclass for Windows systems.
+
+ On a Windows system, instantiating a PurePath should return this object.
+ However, you can also instantiate it directly on any system.
+ """
+ _flavour = _windows_flavour
+ __slots__ = ()
+
+
+# Filesystem-accessing classes
+
+
+class Path(PurePath):
+ """PurePath subclass that can make system calls.
+
+ Path represents a filesystem path but unlike PurePath, also offers
+ methods to do system calls on path objects. Depending on your system,
+ instantiating a Path will return either a PosixPath or a WindowsPath
+ object. You can also instantiate a PosixPath or WindowsPath directly,
+ but cannot instantiate a WindowsPath on a POSIX system or vice versa.
+ """
+ _accessor = _normal_accessor
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ if cls is Path:
+ cls = WindowsPath if os.name == 'nt' else PosixPath
+ self = cls._from_parts(args)
+ if not self._flavour.is_supported:
+ raise NotImplementedError("cannot instantiate %r on your system"
+ % (cls.__name__,))
+ return self
+
+ def _make_child_relpath(self, part):
+ # This is an optimization used for dir walking. `part` must be
+ # a single part relative to this path.
+ parts = self._parts + [part]
+ return self._from_parsed_parts(self._drv, self._root, parts)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, t, v, tb):
+ # https://bugs.python.org/issue39682
+ # In previous versions of pathlib, this method marked this path as
+ # closed; subsequent attempts to perform I/O would raise an IOError.
+ # This functionality was never documented, and had the effect of
+ # making Path objects mutable, contrary to PEP 428. In Python 3.9 the
+ # _closed attribute was removed, and this method made a no-op.
+ # This method and __enter__()/__exit__() should be deprecated and
+ # removed in the future.
+ pass
+
+ # Public API
+
+ @classmethod
+ def cwd(cls):
+ """Return a new path pointing to the current working directory
+ (as returned by os.getcwd()).
+ """
+ return cls(cls._accessor.getcwd())
+
+ @classmethod
+ def home(cls):
+ """Return a new path pointing to the user's home directory (as
+ returned by os.path.expanduser('~')).
+ """
+ return cls("~").expanduser()
+
+ def samefile(self, other_path):
+ """Return whether other_path is the same or not as this file
+ (as returned by os.path.samefile()).
+ """
+ st = self.stat()
+ try:
+ other_st = other_path.stat()
+ except AttributeError:
+ other_st = self._accessor.stat(other_path)
+ return os.path.samestat(st, other_st)
+
+ def iterdir(self):
+ """Iterate over the files in this directory. Does not yield any
+ result for the special paths '.' and '..'.
+ """
+ for name in self._accessor.listdir(self):
+ if name in {'.', '..'}:
+ # Yielding a path object for these makes little sense
+ continue
+ yield self._make_child_relpath(name)
+
+ def glob(self, pattern):
+ """Iterate over this subtree and yield all existing files (of any
+ kind, including directories) matching the given relative pattern.
+ """
+ sys.audit("pathlib.Path.glob", self, pattern)
+ if not pattern:
+ raise ValueError("Unacceptable pattern: {!r}".format(pattern))
+ drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
+ if drv or root:
+ raise NotImplementedError("Non-relative patterns are unsupported")
+ selector = _make_selector(tuple(pattern_parts), self._flavour)
+ for p in selector.select_from(self):
+ yield p
+
+ def rglob(self, pattern):
+ """Recursively yield all existing files (of any kind, including
+ directories) matching the given relative pattern, anywhere in
+ this subtree.
+ """
+ sys.audit("pathlib.Path.rglob", self, pattern)
+ drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
+ if drv or root:
+ raise NotImplementedError("Non-relative patterns are unsupported")
+ selector = _make_selector(("**",) + tuple(pattern_parts), self._flavour)
+ for p in selector.select_from(self):
+ yield p
+
+ def absolute(self):
+ """Return an absolute version of this path. This function works
+ even if the path doesn't point to anything.
+
+ No normalization is done, i.e. all '.' and '..' will be kept along.
+ Use resolve() to get the canonical path to a file.
+ """
+ # XXX untested yet!
+ if self.is_absolute():
+ return self
+ # FIXME this must defer to the specific flavour (and, under Windows,
+ # use nt._getfullpathname())
+ return self._from_parts([self._accessor.getcwd()] + self._parts)
+
+ def resolve(self, strict=False):
+ """
+ Make the path absolute, resolving all symlinks on the way and also
+ normalizing it (for example turning slashes into backslashes under
+ Windows).
+ """
+
+ def check_eloop(e):
+ winerror = getattr(e, 'winerror', 0)
+ if e.errno == ELOOP or winerror == _WINERROR_CANT_RESOLVE_FILENAME:
+ raise RuntimeError("Symlink loop from %r" % e.filename)
+
+ try:
+ s = self._accessor.realpath(self, strict=strict)
+ except OSError as e:
+ check_eloop(e)
+ raise
+ p = self._from_parts((s,))
+
+ # In non-strict mode, realpath() doesn't raise on symlink loops.
+ # Ensure we get an exception by calling stat()
+ if not strict:
+ try:
+ p.stat()
+ except OSError as e:
+ check_eloop(e)
+ return p
+
+ def stat(self, *, follow_symlinks=True):
+ """
+ Return the result of the stat() system call on this path, like
+ os.stat() does.
+ """
+ return self._accessor.stat(self, follow_symlinks=follow_symlinks)
+
+ def owner(self):
+ """
+ Return the login name of the file owner.
+ """
+ return self._accessor.owner(self)
+
+ def group(self):
+ """
+ Return the group name of the file gid.
+ """
+ return self._accessor.group(self)
+
+ def open(self, mode='r', buffering=-1, encoding=None,
+ errors=None, newline=None):
+ """
+        Open the file pointed to by this path and return a file object, as
+ the built-in open() function does.
+ """
+ if "b" not in mode:
+ encoding = io.text_encoding(encoding)
+ return self._accessor.open(self, mode, buffering, encoding, errors,
+ newline)
+
+ def read_bytes(self):
+ """
+ Open the file in bytes mode, read it, and close the file.
+ """
+ with self.open(mode='rb') as f:
+ return f.read()
+
+ def read_text(self, encoding=None, errors=None):
+ """
+ Open the file in text mode, read it, and close the file.
+ """
+ encoding = io.text_encoding(encoding)
+ with self.open(mode='r', encoding=encoding, errors=errors) as f:
+ return f.read()
+
+ def write_bytes(self, data):
+ """
+ Open the file in bytes mode, write to it, and close the file.
+ """
+ # type-check for the buffer interface before truncating the file
+ view = memoryview(data)
+ with self.open(mode='wb') as f:
+ return f.write(view)
+
+ def write_text(self, data, encoding=None, errors=None, newline=None):
+ """
+ Open the file in text mode, write to it, and close the file.
+ """
+ if not isinstance(data, str):
+ raise TypeError('data must be str, not %s' %
+ data.__class__.__name__)
+ encoding = io.text_encoding(encoding)
+ with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f:
+ return f.write(data)
+
+ def readlink(self):
+ """
+ Return the path to which the symbolic link points.
+ """
+ path = self._accessor.readlink(self)
+ return self._from_parts((path,))
+
+ def touch(self, mode=0o666, exist_ok=True):
+ """
+ Create this file with the given access mode, if it doesn't exist.
+ """
+ self._accessor.touch(self, mode, exist_ok)
+
+ def mkdir(self, mode=0o777, parents=False, exist_ok=False):
+ """
+ Create a new directory at this given path.
+ """
+ try:
+ self._accessor.mkdir(self, mode)
+ except FileNotFoundError:
+ if not parents or self.parent == self:
+ raise
+ self.parent.mkdir(parents=True, exist_ok=True)
+ self.mkdir(mode, parents=False, exist_ok=exist_ok)
+ except OSError:
+ # Cannot rely on checking for EEXIST, since the operating system
+ # could give priority to other errors like EACCES or EROFS
+ if not exist_ok or not self.is_dir():
+ raise
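+        # e.g. Path('a/b/c').mkdir(parents=True, exist_ok=True) recurses on
+        # FileNotFoundError to create missing ancestors, then retries itself.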
+
+ def chmod(self, mode, *, follow_symlinks=True):
+ """
+ Change the permissions of the path, like os.chmod().
+ """
+ self._accessor.chmod(self, mode, follow_symlinks=follow_symlinks)
+
+ def lchmod(self, mode):
+ """
+ Like chmod(), except if the path points to a symlink, the symlink's
+ permissions are changed, rather than its target's.
+ """
+ self.chmod(mode, follow_symlinks=False)
+
+ def unlink(self, missing_ok=False):
+ """
+ Remove this file or link.
+ If the path is a directory, use rmdir() instead.
+ """
+ try:
+ self._accessor.unlink(self)
+ except FileNotFoundError:
+ if not missing_ok:
+ raise
+
+ def rmdir(self):
+ """
+ Remove this directory. The directory must be empty.
+ """
+ self._accessor.rmdir(self)
+
+ def lstat(self):
+ """
+ Like stat(), except if the path points to a symlink, the symlink's
+ status information is returned, rather than its target's.
+ """
+ return self.stat(follow_symlinks=False)
+
+ def rename(self, target):
+ """
+ Rename this path to the target path.
+
+ The target path may be absolute or relative. Relative paths are
+ interpreted relative to the current working directory, *not* the
+ directory of the Path object.
+
+ Returns the new Path instance pointing to the target path.
+ """
+ self._accessor.rename(self, target)
+ return self.__class__(target)
+
+ def replace(self, target):
+ """
+ Rename this path to the target path, overwriting if that path exists.
+
+ The target path may be absolute or relative. Relative paths are
+ interpreted relative to the current working directory, *not* the
+ directory of the Path object.
+
+ Returns the new Path instance pointing to the target path.
+ """
+ self._accessor.replace(self, target)
+ return self.__class__(target)
+
+ def symlink_to(self, target, target_is_directory=False):
+ """
+ Make this path a symlink pointing to the target path.
+ Note the order of arguments (link, target) is the reverse of os.symlink.
+ """
+ self._accessor.symlink(target, self, target_is_directory)
+
+ def hardlink_to(self, target):
+ """
+ Make this path a hard link pointing to the same file as *target*.
+
+ Note the order of arguments (self, target) is the reverse of os.link's.
+ """
+ self._accessor.link(target, self)
+
+ def link_to(self, target):
+ """
+ Make the target path a hard link pointing to this path.
+
+ Note this function does not make this path a hard link to *target*,
+ despite the implication of the function and argument names. The order
+ of arguments (target, link) is the reverse of Path.symlink_to, but
+ matches that of os.link.
+
+ Deprecated since Python 3.10 and scheduled for removal in Python 3.12.
+ Use `hardlink_to()` instead.
+ """
+ warnings.warn("pathlib.Path.link_to() is deprecated and is scheduled "
+ "for removal in Python 3.12. "
+ "Use pathlib.Path.hardlink_to() instead.",
+ DeprecationWarning, stacklevel=2)
+ self._accessor.link(self, target)
+
+ # Convenience functions for querying the stat results
+
+ def exists(self):
+ """
+ Whether this path exists.
+ """
+ try:
+ self.stat()
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ return False
+ except ValueError:
+ # Non-encodable path
+ return False
+ return True
+
+ def is_dir(self):
+ """
+ Whether this path is a directory.
+ """
+ try:
+ return S_ISDIR(self.stat().st_mode)
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+ return False
+ except ValueError:
+ # Non-encodable path
+ return False
+
+ def is_file(self):
+ """
+ Whether this path is a regular file (also True for symlinks pointing
+ to regular files).
+ """
+ try:
+ return S_ISREG(self.stat().st_mode)
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+ return False
+ except ValueError:
+ # Non-encodable path
+ return False
+
+ def is_mount(self):
+ """
+ Check if this path is a POSIX mount point
+ """
+ # Need to exist and be a dir
+ if not self.exists() or not self.is_dir():
+ return False
+
+ try:
+ parent_dev = self.parent.stat().st_dev
+ except OSError:
+ return False
+
+ dev = self.stat().st_dev
+ if dev != parent_dev:
+ return True
+ ino = self.stat().st_ino
+ parent_ino = self.parent.stat().st_ino
+ return ino == parent_ino
+
+ def is_symlink(self):
+ """
+ Whether this path is a symbolic link.
+ """
+ try:
+ return S_ISLNK(self.lstat().st_mode)
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ # Path doesn't exist
+ return False
+ except ValueError:
+ # Non-encodable path
+ return False
+
+ def is_block_device(self):
+ """
+ Whether this path is a block device.
+ """
+ try:
+ return S_ISBLK(self.stat().st_mode)
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+ return False
+ except ValueError:
+ # Non-encodable path
+ return False
+
+ def is_char_device(self):
+ """
+ Whether this path is a character device.
+ """
+ try:
+ return S_ISCHR(self.stat().st_mode)
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+ return False
+ except ValueError:
+ # Non-encodable path
+ return False
+
+ def is_fifo(self):
+ """
+ Whether this path is a FIFO.
+ """
+ try:
+ return S_ISFIFO(self.stat().st_mode)
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+ return False
+ except ValueError:
+ # Non-encodable path
+ return False
+
+ def is_socket(self):
+ """
+ Whether this path is a socket.
+ """
+ try:
+ return S_ISSOCK(self.stat().st_mode)
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ )
+ return False
+ except ValueError:
+ # Non-encodable path
+ return False
+
+ def expanduser(self):
+ """ Return a new path with expanded ~ and ~user constructs
+ (as returned by os.path.expanduser)
+ """
+ if (not (self._drv or self._root) and
+ self._parts and self._parts[0][:1] == '~'):
+ homedir = self._accessor.expanduser(self._parts[0])
+ if homedir[:1] == "~":
+ raise RuntimeError("Could not determine home directory.")
+ return self._from_parts([homedir] + self._parts[1:])
+
+ return self
+
+
+class PosixPath(Path, PurePosixPath):
+ """Path subclass for non-Windows systems.
+
+ On a POSIX system, instantiating a Path should return this object.
+ """
+ __slots__ = ()
+
+class WindowsPath(Path, PureWindowsPath):
+ """Path subclass for Windows systems.
+
+ On a Windows system, instantiating a Path should return this object.
+ """
+ __slots__ = ()
+
+ def is_mount(self):
+ raise NotImplementedError("Path.is_mount() is unsupported on this system")
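+
+# Quick usage sketch (the file and directory names are illustrative):
+#
+#     >>> p = Path('.')
+#     >>> [child for child in p.iterdir() if child.is_dir()]   # subdirectories
+#     >>> sorted(p.glob('**/*.py'))     # recursive match, same as rglob('*.py')
+#     >>> (p / 'README.txt').read_text(encoding='utf-8')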
diff --git a/infer_4_37_2/lib/python3.10/pickle.py b/infer_4_37_2/lib/python3.10/pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..f027e0432045b762f9661a90d380ebb9f8c1d8d8
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/pickle.py
@@ -0,0 +1,1820 @@
+"""Create portable serialized representations of Python objects.
+
+See module copyreg for a mechanism for registering custom picklers.
+See module pickletools source for extensive comments.
+
+Classes:
+
+ Pickler
+ Unpickler
+
+Functions:
+
+ dump(object, file)
+ dumps(object) -> string
+ load(file) -> object
+ loads(bytes) -> object
+
+Misc variables:
+
+ __version__
+ format_version
+ compatible_formats
+
+"""
+
+from types import FunctionType
+from copyreg import dispatch_table
+from copyreg import _extension_registry, _inverted_registry, _extension_cache
+from itertools import islice
+from functools import partial
+import sys
+from sys import maxsize
+from struct import pack, unpack
+import re
+import io
+import codecs
+import _compat_pickle
+
+__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
+ "Unpickler", "dump", "dumps", "load", "loads"]
+
+try:
+ from _pickle import PickleBuffer
+ __all__.append("PickleBuffer")
+ _HAVE_PICKLE_BUFFER = True
+except ImportError:
+ _HAVE_PICKLE_BUFFER = False
+
+
+# Shortcut for use in isinstance testing
+bytes_types = (bytes, bytearray)
+
+# These are purely informational; no code uses these.
+format_version = "4.0" # File format version we write
+compatible_formats = ["1.0", # Original protocol 0
+ "1.1", # Protocol 0 with INST added
+ "1.2", # Original protocol 1
+ "1.3", # Protocol 1 with BINFLOAT added
+ "2.0", # Protocol 2
+ "3.0", # Protocol 3
+ "4.0", # Protocol 4
+ "5.0", # Protocol 5
+ ] # Old format versions we can read
+
+# This is the highest protocol number we know how to read.
+HIGHEST_PROTOCOL = 5
+
+# The protocol we write by default. May be less than HIGHEST_PROTOCOL.
+# Only bump this if the oldest still supported version of Python already
+# includes it.
+DEFAULT_PROTOCOL = 4
+
+class PickleError(Exception):
+ """A common base class for the other pickling exceptions."""
+ pass
+
+class PicklingError(PickleError):
+ """This exception is raised when an unpicklable object is passed to the
+ dump() method.
+
+ """
+ pass
+
+class UnpicklingError(PickleError):
+ """This exception is raised when there is a problem unpickling an object,
+ such as a security violation.
+
+ Note that other exceptions may also be raised during unpickling, including
+ (but not necessarily limited to) AttributeError, EOFError, ImportError,
+ and IndexError.
+
+ """
+ pass
+
+# An instance of _Stop is raised by Unpickler.load_stop() in response to
+# the STOP opcode, passing the object that is the result of unpickling.
+class _Stop(Exception):
+ def __init__(self, value):
+ self.value = value
+
+# Jython has PyStringMap; it's a dict subclass with string keys
+try:
+ from org.python.core import PyStringMap
+except ImportError:
+ PyStringMap = None
+
+# Pickle opcodes. See pickletools.py for extensive docs. The listing
+# here is in kind-of alphabetical order of 1-character pickle code.
+# pickletools groups them by purpose.
+
+MARK = b'(' # push special markobject on stack
+STOP = b'.' # every pickle ends with STOP
+POP = b'0' # discard topmost stack item
+POP_MARK = b'1' # discard stack top through topmost markobject
+DUP = b'2' # duplicate top stack item
+FLOAT = b'F' # push float object; decimal string argument
+INT = b'I' # push integer or bool; decimal string argument
+BININT = b'J' # push four-byte signed int
+BININT1 = b'K' # push 1-byte unsigned int
+LONG = b'L' # push long; decimal string argument
+BININT2 = b'M' # push 2-byte unsigned int
+NONE = b'N' # push None
+PERSID = b'P' # push persistent object; id is taken from string arg
+BINPERSID = b'Q' # " " " ; " " " " stack
+REDUCE = b'R' # apply callable to argtuple, both on stack
+STRING = b'S' # push string; NL-terminated string argument
+BINSTRING = b'T' # push string; counted binary string argument
+SHORT_BINSTRING= b'U' # " " ; " " " " < 256 bytes
+UNICODE = b'V' # push Unicode string; raw-unicode-escaped'd argument
+BINUNICODE = b'X' # " " " ; counted UTF-8 string argument
+APPEND = b'a' # append stack top to list below it
+BUILD = b'b' # call __setstate__ or __dict__.update()
+GLOBAL = b'c' # push self.find_class(modname, name); 2 string args
+DICT = b'd' # build a dict from stack items
+EMPTY_DICT = b'}' # push empty dict
+APPENDS = b'e' # extend list on stack by topmost stack slice
+GET = b'g' # push item from memo on stack; index is string arg
+BINGET = b'h' # " " " " " " ; " " 1-byte arg
+INST = b'i' # build & push class instance
+LONG_BINGET = b'j' # push item from memo on stack; index is 4-byte arg
+LIST = b'l' # build list from topmost stack items
+EMPTY_LIST = b']' # push empty list
+OBJ = b'o' # build & push class instance
+PUT = b'p' # store stack top in memo; index is string arg
+BINPUT = b'q' # " " " " " ; " " 1-byte arg
+LONG_BINPUT = b'r' # " " " " " ; " " 4-byte arg
+SETITEM = b's' # add key+value pair to dict
+TUPLE = b't' # build tuple from topmost stack items
+EMPTY_TUPLE = b')' # push empty tuple
+SETITEMS = b'u' # modify dict by adding topmost key+value pairs
+BINFLOAT = b'G' # push float; arg is 8-byte float encoding
+
+TRUE = b'I01\n' # not an opcode; see INT docs in pickletools.py
+FALSE = b'I00\n' # not an opcode; see INT docs in pickletools.py
+
+# Protocol 2
+
+PROTO = b'\x80' # identify pickle protocol
+NEWOBJ = b'\x81' # build object by applying cls.__new__ to argtuple
+EXT1 = b'\x82' # push object from extension registry; 1-byte index
+EXT2 = b'\x83' # ditto, but 2-byte index
+EXT4 = b'\x84' # ditto, but 4-byte index
+TUPLE1 = b'\x85' # build 1-tuple from stack top
+TUPLE2 = b'\x86' # build 2-tuple from two topmost stack items
+TUPLE3 = b'\x87' # build 3-tuple from three topmost stack items
+NEWTRUE = b'\x88' # push True
+NEWFALSE = b'\x89' # push False
+LONG1 = b'\x8a' # push long from < 256 bytes
+LONG4 = b'\x8b' # push really big long
+
+_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
+
+# Protocol 3 (Python 3.x)
+
+BINBYTES = b'B' # push bytes; counted binary string argument
+SHORT_BINBYTES = b'C' # " " ; " " " " < 256 bytes
+
+# Protocol 4
+
+SHORT_BINUNICODE = b'\x8c' # push short string; UTF-8 length < 256 bytes
+BINUNICODE8 = b'\x8d' # push very long string
+BINBYTES8 = b'\x8e' # push very long bytes string
+EMPTY_SET = b'\x8f' # push empty set on the stack
+ADDITEMS = b'\x90' # modify set by adding topmost stack items
+FROZENSET = b'\x91' # build frozenset from topmost stack items
+NEWOBJ_EX = b'\x92' # like NEWOBJ but work with keyword only arguments
+STACK_GLOBAL = b'\x93' # same as GLOBAL but using names on the stacks
+MEMOIZE = b'\x94' # store top of the stack in memo
+FRAME = b'\x95' # indicate the beginning of a new frame
+
+# Protocol 5
+
+BYTEARRAY8 = b'\x96' # push bytearray
+NEXT_BUFFER = b'\x97' # push next out-of-band buffer
+READONLY_BUFFER = b'\x98' # make top of stack readonly
+
+__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$", x)])
+
+
+class _Framer:
+
+ _FRAME_SIZE_MIN = 4
+ _FRAME_SIZE_TARGET = 64 * 1024
+
+ def __init__(self, file_write):
+ self.file_write = file_write
+ self.current_frame = None
+
+ def start_framing(self):
+ self.current_frame = io.BytesIO()
+
+ def end_framing(self):
+ if self.current_frame and self.current_frame.tell() > 0:
+ self.commit_frame(force=True)
+ self.current_frame = None
+
+ def commit_frame(self, force=False):
+ if self.current_frame:
+ f = self.current_frame
+ if f.tell() >= self._FRAME_SIZE_TARGET or force:
+ data = f.getbuffer()
+ write = self.file_write
+ if len(data) >= self._FRAME_SIZE_MIN:
+ # Issue a single call to the write method of the underlying
+ # file object for the frame opcode with the size of the
+ # frame. The concatenation is expected to be less expensive
+ # than issuing an additional call to write.
+                    write(FRAME + pack("<Q", len(data)))
+
+                # Issue a separate call to write to append the frame
+                # contents without concatenation to the above or with any
+                # other previously written data.
+                write(data)
+
+                # Start the new frame
+                self.current_frame = io.BytesIO()
+
+    def write(self, data):
+        if self.current_frame:
+            return self.current_frame.write(data)
+        else:
+            return self.file_write(data)
+
+    def write_large_bytes(self, header, payload):
+        write = self.file_write
+        if self.current_frame:
+            # Terminate the current frame and flush it to the file.
+            self.commit_frame(force=True)
+
+        # Perform direct write of the header and payload of the large binary
+        # object. Be careful not to concatenate the header and the payload
+        # prior to calling 'write' as we do not want to allocate a large
+        # temporary bytes object.
+        # We intentionally do not insert a protocol 4 frame opcode to make
+        # it possible to optimize file.read calls in the loader.
+        write(header)
+        write(payload)
+
+
+class _Unframer:
+
+    def __init__(self, file_read, file_readline, file_tell=None):
+        self.file_read = file_read
+        self.file_readline = file_readline
+        self.current_frame = None
+
+    def readinto(self, buf):
+        if self.current_frame:
+            n = self.current_frame.readinto(buf)
+            if n == 0 and len(buf) != 0:
+                self.current_frame = None
+                n = len(buf)
+                buf[:] = self.file_read(n)
+                return n
+            if n < len(buf):
+                raise UnpicklingError(
+                    "pickle exhausted before end of frame")
+            return n
+        else:
+            n = len(buf)
+            buf[:] = self.file_read(n)
+            return n
+
+    def read(self, n):
+        if self.current_frame:
+            data = self.current_frame.read(n)
+            if not data and n != 0:
+                self.current_frame = None
+                return self.file_read(n)
+            if len(data) < n:
+                raise UnpicklingError(
+                    "pickle exhausted before end of frame")
+            return data
+        else:
+            return self.file_read(n)
+
+    def readline(self):
+        if self.current_frame:
+            data = self.current_frame.readline()
+            if not data:
+                self.current_frame = None
+                return self.file_readline()
+            if data[-1] != b'\n'[0]:
+                raise UnpicklingError(
+                    "pickle exhausted before end of frame")
+            return data
+        else:
+            return self.file_readline()
+
+    def load_frame(self, frame_size):
+        if self.current_frame and self.current_frame.read() != b'':
+            raise UnpicklingError(
+                "beginning of a new frame before end of current frame")
+        self.current_frame = io.BytesIO(self.file_read(frame_size))
+
+
+# Tools used for pickling.
+
+def _getattribute(obj, name):
+    for subpath in name.split('.'):
+        if subpath == '<locals>':
+ raise AttributeError("Can't get local attribute {!r} on {!r}"
+ .format(name, obj))
+ try:
+ parent = obj
+ obj = getattr(obj, subpath)
+ except AttributeError:
+ raise AttributeError("Can't get attribute {!r} on {!r}"
+ .format(name, obj)) from None
+ return obj, parent
+
+def whichmodule(obj, name):
+ """Find the module an object belong to."""
+ module_name = getattr(obj, '__module__', None)
+ if module_name is not None:
+ return module_name
+ # Protect the iteration by using a list copy of sys.modules against dynamic
+ # modules that trigger imports of other modules upon calls to getattr.
+ for module_name, module in sys.modules.copy().items():
+ if (module_name == '__main__'
+ or module_name == '__mp_main__' # bpo-42406
+ or module is None):
+ continue
+ try:
+ if _getattribute(module, name)[0] is obj:
+ return module_name
+ except AttributeError:
+ pass
+ return '__main__'
+
+def encode_long(x):
+ r"""Encode a long to a two's complement little-endian binary string.
+ Note that 0 is a special case, returning an empty string, to save a
+ byte in the LONG1 pickling context.
+
+ >>> encode_long(0)
+ b''
+ >>> encode_long(255)
+ b'\xff\x00'
+ >>> encode_long(32767)
+ b'\xff\x7f'
+ >>> encode_long(-256)
+ b'\x00\xff'
+ >>> encode_long(-32768)
+ b'\x00\x80'
+ >>> encode_long(-128)
+ b'\x80'
+ >>> encode_long(127)
+ b'\x7f'
+ >>>
+ """
+ if x == 0:
+ return b''
+ nbytes = (x.bit_length() >> 3) + 1
+ result = x.to_bytes(nbytes, byteorder='little', signed=True)
+ if x < 0 and nbytes > 1:
+ if result[-1] == 0xff and (result[-2] & 0x80) != 0:
+ result = result[:-1]
+ return result
+
+def decode_long(data):
+ r"""Decode a long from a two's complement little-endian binary string.
+
+ >>> decode_long(b'')
+ 0
+ >>> decode_long(b"\xff\x00")
+ 255
+ >>> decode_long(b"\xff\x7f")
+ 32767
+ >>> decode_long(b"\x00\xff")
+ -256
+ >>> decode_long(b"\x00\x80")
+ -32768
+ >>> decode_long(b"\x80")
+ -128
+ >>> decode_long(b"\x7f")
+ 127
+ """
+ return int.from_bytes(data, byteorder='little', signed=True)
+
+
+# Pickling machinery
+
+class _Pickler:
+
+ def __init__(self, file, protocol=None, *, fix_imports=True,
+ buffer_callback=None):
+ """This takes a binary file for writing a pickle data stream.
+
+ The optional *protocol* argument tells the pickler to use the
+ given protocol; supported protocols are 0, 1, 2, 3, 4 and 5.
+ The default protocol is 4. It was introduced in Python 3.4, and
+ is incompatible with previous versions.
+
+ Specifying a negative protocol version selects the highest
+ protocol version supported. The higher the protocol used, the
+ more recent the version of Python needed to read the pickle
+ produced.
+
+ The *file* argument must have a write() method that accepts a
+ single bytes argument. It can thus be a file object opened for
+ binary writing, an io.BytesIO instance, or any other custom
+ object that meets this interface.
+
+ If *fix_imports* is True and *protocol* is less than 3, pickle
+ will try to map the new Python 3 names to the old module names
+ used in Python 2, so that the pickle data stream is readable
+ with Python 2.
+
+ If *buffer_callback* is None (the default), buffer views are
+ serialized into *file* as part of the pickle stream.
+
+ If *buffer_callback* is not None, then it can be called any number
+ of times with a buffer view. If the callback returns a false value
+ (such as None), the given buffer is out-of-band; otherwise the
+ buffer is serialized in-band, i.e. inside the pickle stream.
+
+ It is an error if *buffer_callback* is not None and *protocol*
+ is None or smaller than 5.
+ """
+ if protocol is None:
+ protocol = DEFAULT_PROTOCOL
+ if protocol < 0:
+ protocol = HIGHEST_PROTOCOL
+ elif not 0 <= protocol <= HIGHEST_PROTOCOL:
+ raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
+ if buffer_callback is not None and protocol < 5:
+ raise ValueError("buffer_callback needs protocol >= 5")
+ self._buffer_callback = buffer_callback
+ try:
+ self._file_write = file.write
+ except AttributeError:
+ raise TypeError("file must have a 'write' attribute")
+ self.framer = _Framer(self._file_write)
+ self.write = self.framer.write
+ self._write_large_bytes = self.framer.write_large_bytes
+ self.memo = {}
+ self.proto = int(protocol)
+ self.bin = protocol >= 1
+ self.fast = 0
+ self.fix_imports = fix_imports and protocol < 3
+
+ def clear_memo(self):
+ """Clears the pickler's "memo".
+
+ The memo is the data structure that remembers which objects the
+ pickler has already seen, so that shared or recursive objects
+ are pickled by reference and not by value. This method is
+ useful when re-using picklers.
+ """
+ self.memo.clear()
+
+ def dump(self, obj):
+ """Write a pickled representation of obj to the open file."""
+ # Check whether Pickler was initialized correctly. This is
+ # only needed to mimic the behavior of _pickle.Pickler.dump().
+ if not hasattr(self, "_file_write"):
+ raise PicklingError("Pickler.__init__() was not called by "
+ "%s.__init__()" % (self.__class__.__name__,))
+ if self.proto >= 2:
+            self.write(PROTO + pack("<B", self.proto))
+        if self.proto >= 4:
+ self.framer.start_framing()
+ self.save(obj)
+ self.write(STOP)
+ self.framer.end_framing()
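+
+    # Usage sketch (illustrative):
+    #
+    #     >>> buf = io.BytesIO()
+    #     >>> _Pickler(buf, protocol=DEFAULT_PROTOCOL).dump({'spam': 1})
+    #     >>> loads(buf.getvalue())
+    #     {'spam': 1}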
+
+ def memoize(self, obj):
+ """Store an object in the memo."""
+
+ # The Pickler memo is a dictionary mapping object ids to 2-tuples
+ # that contain the Unpickler memo key and the object being memoized.
+ # The memo key is written to the pickle and will become
+ # the key in the Unpickler's memo. The object is stored in the
+ # Pickler memo so that transient objects are kept alive during
+ # pickling.
+
+ # The use of the Unpickler memo length as the memo key is just a
+ # convention. The only requirement is that the memo values be unique.
+ # But there appears no advantage to any other scheme, and this
+ # scheme allows the Unpickler memo to be implemented as a plain (but
+ # growable) array, indexed by memo key.
+ if self.fast:
+ return
+ assert id(obj) not in self.memo
+ idx = len(self.memo)
+ self.write(self.put(idx))
+ self.memo[id(obj)] = idx, obj
+
+ # Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
+ def put(self, idx):
+ if self.proto >= 4:
+ return MEMOIZE
+ elif self.bin:
+ if idx < 256:
+                return BINPUT + pack("<B", idx)
+            else:
+                return LONG_BINPUT + pack("<I", idx)
+        else:
+            return PUT + repr(idx).encode("ascii") + b'\n'
+
+    # Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
+    def get(self, i):
+        if self.bin:
+            if i < 256:
+                return BINGET + pack("<B", i)
+            else:
+                return LONG_BINGET + pack("<I", i)
+
+        return GET + repr(i).encode("ascii") + b'\n'
+
+    def save(self, obj, save_persistent_id=True):
+        self.framer.commit_frame()
+
+        # Check the persistent id first
+        if save_persistent_id:
+            pid = self.persistent_id(obj)
+            if pid is not None:
+                self.save_pers(pid)
+                return
+
+        # Check the memo
+        x = self.memo.get(id(obj))
+        if x is not None:
+            self.write(self.get(x[0]))
+            return
+
+        rv = NotImplemented
+        reduce = getattr(self, 'reducer_override', None)
+        if reduce is not None:
+            rv = reduce(obj)
+
+        if rv is NotImplemented:
+            # Check the type dispatch table
+            t = type(obj)
+            f = self.dispatch.get(t)
+            if f is not None:
+                f(self, obj)  # Call unbound method with explicit self
+                return
+
+            # Check private dispatch table if any, or else
+            # copyreg.dispatch_table
+            reduce = getattr(self, 'dispatch_table', dispatch_table).get(t)
+            if reduce is not None:
+                rv = reduce(obj)
+            else:
+                # Check for a class with a custom metaclass; treat as regular
+                # class
+                if issubclass(t, type):
+                    self.save_global(obj)
+                    return
+
+                # Check for a __reduce_ex__ method, fall back to __reduce__
+                reduce = getattr(obj, "__reduce_ex__", None)
+                if reduce is not None:
+                    rv = reduce(self.proto)
+                else:
+                    reduce = getattr(obj, "__reduce__", None)
+                    if reduce is not None:
+                        rv = reduce()
+                    else:
+                        raise PicklingError("Can't pickle %r object: %r" %
+                                            (t.__name__, obj))
+
+        # Check for string returned by reduce(), meaning "save as global"
+        if isinstance(rv, str):
+            self.save_global(obj, rv)
+            return
+
+        # Assert that reduce() returned a tuple
+        if not isinstance(rv, tuple):
+            raise PicklingError("%s must return string or tuple" % reduce)
+
+        # Assert that it returned an appropriately sized tuple
+        l = len(rv)
+        if not (2 <= l <= 6):
+            raise PicklingError("Tuple returned by %s must have "
+                                "two to six elements" % reduce)
+
+        # Save the reduce() output and finally memoize the object
+        self.save_reduce(obj=obj, *rv)
+
+    def persistent_id(self, obj):
+        # This exists so a subclass can override it
+        return None
+
+    def save_pers(self, pid):
+        # Save a persistent id reference
+        if self.bin:
+            self.save(pid, save_persistent_id=False)
+            self.write(BINPERSID)
+        else:
+            try:
+                self.write(PERSID + str(pid).encode("ascii") + b'\n')
+            except UnicodeEncodeError:
+                raise PicklingError(
+                    "persistent IDs in protocol 0 must be ASCII strings")
+
+    def save_reduce(self, func, args, state=None, listitems=None,
+                    dictitems=None, state_setter=None, *, obj=None):
+        # This API is called by some subclasses
+
+        if not isinstance(args, tuple):
+            raise PicklingError("args from save_reduce() must be a tuple")
+        if not callable(func):
+            raise PicklingError("func from save_reduce() must be callable")
+
+        save = self.save
+        write = self.write
+
+        func_name = getattr(func, "__name__", "")
+        if self.proto >= 2 and func_name == "__newobj_ex__":
+ cls, args, kwargs = args
+ if not hasattr(cls, "__new__"):
+ raise PicklingError("args[0] from {} args has no __new__"
+ .format(func_name))
+ if obj is not None and cls is not obj.__class__:
+ raise PicklingError("args[0] from {} args has the wrong class"
+ .format(func_name))
+ if self.proto >= 4:
+ save(cls)
+ save(args)
+ save(kwargs)
+ write(NEWOBJ_EX)
+ else:
+ func = partial(cls.__new__, cls, *args, **kwargs)
+ save(func)
+ save(())
+ write(REDUCE)
+ elif self.proto >= 2 and func_name == "__newobj__":
+ # A __reduce__ implementation can direct protocol 2 or newer to
+ # use the more efficient NEWOBJ opcode, while still
+ # allowing protocol 0 and 1 to work normally. For this to
+ # work, the function returned by __reduce__ should be
+ # called __newobj__, and its first argument should be a
+ # class. The implementation for __newobj__
+ # should be as follows, although pickle has no way to
+ # verify this:
+ #
+ # def __newobj__(cls, *args):
+ # return cls.__new__(cls, *args)
+ #
+ # Protocols 0 and 1 will pickle a reference to __newobj__,
+ # while protocol 2 (and above) will pickle a reference to
+ # cls, the remaining args tuple, and the NEWOBJ code,
+ # which calls cls.__new__(cls, *args) at unpickling time
+ # (see load_newobj below). If __reduce__ returns a
+ # three-tuple, the state from the third tuple item will be
+ # pickled regardless of the protocol, calling __setstate__
+ # at unpickling time (see load_build below).
+ #
+ # Note that no standard __newobj__ implementation exists;
+ # you have to provide your own. This is to enforce
+ # compatibility with Python 2.2 (pickles written using
+ # protocol 0 or 1 in Python 2.3 should be unpicklable by
+ # Python 2.2).
+ cls = args[0]
+ if not hasattr(cls, "__new__"):
+ raise PicklingError(
+ "args[0] from __newobj__ args has no __new__")
+ if obj is not None and cls is not obj.__class__:
+ raise PicklingError(
+ "args[0] from __newobj__ args has the wrong class")
+ args = args[1:]
+ save(cls)
+ save(args)
+ write(NEWOBJ)
+ else:
+ save(func)
+ save(args)
+ write(REDUCE)
+
+ if obj is not None:
+ # If the object is already in the memo, this means it is
+ # recursive. In this case, throw away everything we put on the
+ # stack, and fetch the object back from the memo.
+ if id(obj) in self.memo:
+ write(POP + self.get(self.memo[id(obj)][0]))
+ else:
+ self.memoize(obj)
+
+ # More new special cases (that work with older protocols as
+ # well): when __reduce__ returns a tuple with 4 or 5 items,
+ # the 4th and 5th item should be iterators that provide list
+ # items and dict items (as (key, value) tuples), or None.
+
+ if listitems is not None:
+ self._batch_appends(listitems)
+
+ if dictitems is not None:
+ self._batch_setitems(dictitems)
+
+ if state is not None:
+ if state_setter is None:
+ save(state)
+ write(BUILD)
+ else:
+ # If a state_setter is specified, call it instead of load_build
+ # to update obj's with its previous state.
+ # First, push state_setter and its tuple of expected arguments
+ # (obj, state) onto the stack.
+ save(state_setter)
+ save(obj) # simple BINGET opcode as obj is already memoized.
+ save(state)
+ write(TUPLE2)
+ # Trigger a state_setter(obj, state) function call.
+ write(REDUCE)
+                # The purpose of state_setter is to carry out an in-place
+                # modification of obj. We do not care about what the
+ # method might return, so its output is eventually removed from
+ # the stack.
+ write(POP)
+
+ # Methods below this point are dispatched through the dispatch table
+
+ dispatch = {}
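+    # The table is keyed by exact type; save() (not shown in this hunk) looks
+    # up type(obj) here and falls back to the reduce-based paths when there is
+    # no entry. A minimal sketch of that lookup, for orientation only:
+    #
+    #     f = self.dispatch.get(type(obj))
+    #     if f is not None:
+    #         f(self, obj)  # call unbound method with explicit self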
+
+ def save_none(self, obj):
+ self.write(NONE)
+ dispatch[type(None)] = save_none
+
+ def save_bool(self, obj):
+ if self.proto >= 2:
+ self.write(NEWTRUE if obj else NEWFALSE)
+ else:
+ self.write(TRUE if obj else FALSE)
+ dispatch[bool] = save_bool
+
+ def save_long(self, obj):
+ if self.bin:
+ # If the int is small enough to fit in a signed 4-byte 2's-comp
+ # format, we can store it more efficiently than the general
+ # case.
+ # First one- and two-byte unsigned ints:
+ if obj >= 0:
+ if obj <= 0xff:
+                    self.write(BININT1 + pack("<B", obj))
+                    return
+                if obj <= 0xffff:
+                    self.write(BININT2 + pack("<H", obj))
+                    return
+            # Next check for 4-byte signed ints:
+            if -0x80000000 <= obj <= 0x7fffffff:
+                self.write(BININT + pack("<i", obj))
+                return
+        if self.proto >= 2:
+ encoded = encode_long(obj)
+ n = len(encoded)
+ if n < 256:
+                self.write(LONG1 + pack("<B", n) + encoded)
+            else:
+                self.write(LONG4 + pack("<i", n) + encoded)
+            return
+        if -0x80000000 <= obj <= 0x7fffffff:
+            self.write(INT + repr(obj).encode("ascii") + b'\n')
+        else:
+            self.write(INT + repr(obj).encode("ascii") + b'L\n')
+    dispatch[int] = save_long
+
+    def save_float(self, obj):
+        if self.bin:
+            self.write(BINFLOAT + pack('>d', obj))
+ else:
+ self.write(FLOAT + repr(obj).encode("ascii") + b'\n')
+ dispatch[float] = save_float
+
+ def save_bytes(self, obj):
+ if self.proto < 3:
+ if not obj: # bytes object is empty
+ self.save_reduce(bytes, (), obj=obj)
+ else:
+ self.save_reduce(codecs.encode,
+ (str(obj, 'latin1'), 'latin1'), obj=obj)
+ return
+ n = len(obj)
+ if n <= 0xff:
+            self.write(SHORT_BINBYTES + pack("<B", n) + obj)
+        elif n > 0xffffffff and self.proto >= 4:
+            self._write_large_bytes(BINBYTES8 + pack("<Q", n), obj)
+        elif n >= self.framer._FRAME_SIZE_TARGET:
+            self._write_large_bytes(BINBYTES + pack("<I", n), obj)
+        else:
+            self.write(BINBYTES + pack("<I", n) + obj)
+        self.memoize(obj)
+    dispatch[bytes] = save_bytes
+
+    def save_bytearray(self, obj):
+        if self.proto < 5:
+            if not obj:  # bytearray is empty
+                self.save_reduce(bytearray, (), obj=obj)
+            else:
+                self.save_reduce(bytearray, (bytes(obj),), obj=obj)
+            return
+        n = len(obj)
+        if n >= self.framer._FRAME_SIZE_TARGET:
+            self._write_large_bytes(BYTEARRAY8 + pack("<Q", n), obj)
+        else:
+            self.write(BYTEARRAY8 + pack("<Q", n) + obj)
+        self.memoize(obj)
+    dispatch[bytearray] = save_bytearray
+
+    def save_picklebuffer(self, obj):
+        if self.proto < 5:
+            raise PicklingError("PickleBuffer can only be pickled with "
+                                "protocol >= 5")
+ with obj.raw() as m:
+ if not m.contiguous:
+ raise PicklingError("PickleBuffer can not be pickled when "
+ "pointing to a non-contiguous buffer")
+ in_band = True
+ if self._buffer_callback is not None:
+ in_band = bool(self._buffer_callback(obj))
+ if in_band:
+ # Write data in-band
+ # XXX The C implementation avoids a copy here
+ if m.readonly:
+ self.save_bytes(m.tobytes())
+ else:
+ self.save_bytearray(m.tobytes())
+ else:
+ # Write data out-of-band
+ self.write(NEXT_BUFFER)
+ if m.readonly:
+ self.write(READONLY_BUFFER)
+
+ dispatch[PickleBuffer] = save_picklebuffer
+
+ def save_str(self, obj):
+ if self.bin:
+ encoded = obj.encode('utf-8', 'surrogatepass')
+ n = len(encoded)
+ if n <= 0xff and self.proto >= 4:
+                self.write(SHORT_BINUNICODE + pack("<B", n) + encoded)
+            elif n > 0xffffffff and self.proto >= 4:
+                self._write_large_bytes(BINUNICODE8 + pack("<Q", n), encoded)
+            elif n >= self.framer._FRAME_SIZE_TARGET:
+                self._write_large_bytes(BINUNICODE + pack("<I", n), encoded)
+            else:
+                self.write(BINUNICODE + pack("<I", n) + encoded)
+        else:
+            obj = obj.replace("\\", "\\\\")
+            obj = obj.replace("\n", "\\n")
+            self.write(UNICODE + obj.encode('raw-unicode-escape') +
+                       b'\n')
+        self.memoize(obj)
+    dispatch[str] = save_str
+
+    def save_tuple(self, obj):
+        if not obj:  # tuple is empty
+            if self.bin:
+                self.write(EMPTY_TUPLE)
+            else:
+                self.write(MARK + TUPLE)
+            return
+
+        n = len(obj)
+        save = self.save
+        memo = self.memo
+        if n <= 3 and self.proto >= 2:
+ for element in obj:
+ save(element)
+ # Subtle. Same as in the big comment below.
+ if id(obj) in memo:
+ get = self.get(memo[id(obj)][0])
+ self.write(POP * n + get)
+ else:
+ self.write(_tuplesize2code[n])
+ self.memoize(obj)
+ return
+
+ # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
+ # has more than 3 elements.
+ write = self.write
+ write(MARK)
+ for element in obj:
+ save(element)
+
+ if id(obj) in memo:
+ # Subtle. d was not in memo when we entered save_tuple(), so
+ # the process of saving the tuple's elements must have saved
+ # the tuple itself: the tuple is recursive. The proper action
+ # now is to throw away everything we put on the stack, and
+ # simply GET the tuple (it's already constructed). This check
+ # could have been done in the "for element" loop instead, but
+ # recursive tuples are a rare thing.
+ get = self.get(memo[id(obj)][0])
+ if self.bin:
+ write(POP_MARK + get)
+ else: # proto 0 -- POP_MARK not available
+ write(POP * (n+1) + get)
+ return
+
+ # No recursion.
+ write(TUPLE)
+ self.memoize(obj)
+
+ dispatch[tuple] = save_tuple
+
+ def save_list(self, obj):
+ if self.bin:
+ self.write(EMPTY_LIST)
+ else: # proto 0 -- can't use EMPTY_LIST
+ self.write(MARK + LIST)
+
+ self.memoize(obj)
+ self._batch_appends(obj)
+
+ dispatch[list] = save_list
+
+ _BATCHSIZE = 1000
+
+ def _batch_appends(self, items):
+ # Helper to batch up APPENDS sequences
+ save = self.save
+ write = self.write
+
+ if not self.bin:
+ for x in items:
+ save(x)
+ write(APPEND)
+ return
+
+ it = iter(items)
+ while True:
+ tmp = list(islice(it, self._BATCHSIZE))
+ n = len(tmp)
+ if n > 1:
+ write(MARK)
+ for x in tmp:
+ save(x)
+ write(APPENDS)
+ elif n:
+ save(tmp[0])
+ write(APPEND)
+ # else tmp is empty, and we're done
+ if n < self._BATCHSIZE:
+ return
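+    # For instance, pickling [1, 2] at protocol >= 1 emits roughly:
+    #     EMPTY_LIST, MEMOIZE (or BINPUT), MARK, BININT1 1, BININT1 2, APPENDS
+    # so at most _BATCHSIZE items are gathered per APPENDS opcode, keeping
+    # memory bounded for huge lists (a sketch of the stream, not exact bytes).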
+
+ def save_dict(self, obj):
+ if self.bin:
+ self.write(EMPTY_DICT)
+ else: # proto 0 -- can't use EMPTY_DICT
+ self.write(MARK + DICT)
+
+ self.memoize(obj)
+ self._batch_setitems(obj.items())
+
+ dispatch[dict] = save_dict
+ if PyStringMap is not None:
+ dispatch[PyStringMap] = save_dict
+
+ def _batch_setitems(self, items):
+ # Helper to batch up SETITEMS sequences; proto >= 1 only
+ save = self.save
+ write = self.write
+
+ if not self.bin:
+ for k, v in items:
+ save(k)
+ save(v)
+ write(SETITEM)
+ return
+
+ it = iter(items)
+ while True:
+ tmp = list(islice(it, self._BATCHSIZE))
+ n = len(tmp)
+ if n > 1:
+ write(MARK)
+ for k, v in tmp:
+ save(k)
+ save(v)
+ write(SETITEMS)
+ elif n:
+ k, v = tmp[0]
+ save(k)
+ save(v)
+ write(SETITEM)
+ # else tmp is empty, and we're done
+ if n < self._BATCHSIZE:
+ return
+
+ def save_set(self, obj):
+ save = self.save
+ write = self.write
+
+ if self.proto < 4:
+ self.save_reduce(set, (list(obj),), obj=obj)
+ return
+
+ write(EMPTY_SET)
+ self.memoize(obj)
+
+ it = iter(obj)
+ while True:
+ batch = list(islice(it, self._BATCHSIZE))
+ n = len(batch)
+ if n > 0:
+ write(MARK)
+ for item in batch:
+ save(item)
+ write(ADDITEMS)
+ if n < self._BATCHSIZE:
+ return
+ dispatch[set] = save_set
+
+ def save_frozenset(self, obj):
+ save = self.save
+ write = self.write
+
+ if self.proto < 4:
+ self.save_reduce(frozenset, (list(obj),), obj=obj)
+ return
+
+ write(MARK)
+ for item in obj:
+ save(item)
+
+ if id(obj) in self.memo:
+ # If the object is already in the memo, this means it is
+ # recursive. In this case, throw away everything we put on the
+ # stack, and fetch the object back from the memo.
+ write(POP_MARK + self.get(self.memo[id(obj)][0]))
+ return
+
+ write(FROZENSET)
+ self.memoize(obj)
+ dispatch[frozenset] = save_frozenset
+
+ def save_global(self, obj, name=None):
+ write = self.write
+ memo = self.memo
+
+ if name is None:
+ name = getattr(obj, '__qualname__', None)
+ if name is None:
+ name = obj.__name__
+
+ module_name = whichmodule(obj, name)
+ try:
+ __import__(module_name, level=0)
+ module = sys.modules[module_name]
+ obj2, parent = _getattribute(module, name)
+ except (ImportError, KeyError, AttributeError):
+ raise PicklingError(
+ "Can't pickle %r: it's not found as %s.%s" %
+ (obj, module_name, name)) from None
+ else:
+ if obj2 is not obj:
+ raise PicklingError(
+ "Can't pickle %r: it's not the same object as %s.%s" %
+ (obj, module_name, name))
+
+ if self.proto >= 2:
+ code = _extension_registry.get((module_name, name))
+ if code:
+ assert code > 0
+ if code <= 0xff:
+                    write(EXT1 + pack("<B", code))
+                elif code <= 0xffff:
+                    write(EXT2 + pack("<H", code))
+                else:
+                    write(EXT4 + pack("<i", code))
+                return
+
+        lastname = name.rpartition('.')[2]
+        if parent is module:
+            name = lastname
+        # Non-ASCII identifiers are supported only with protocols >= 3.
+ if self.proto >= 4:
+ self.save(module_name)
+ self.save(name)
+ write(STACK_GLOBAL)
+ elif parent is not module:
+ self.save_reduce(getattr, (parent, lastname))
+ elif self.proto >= 3:
+ write(GLOBAL + bytes(module_name, "utf-8") + b'\n' +
+ bytes(name, "utf-8") + b'\n')
+ else:
+ if self.fix_imports:
+ r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
+ r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
+ if (module_name, name) in r_name_mapping:
+ module_name, name = r_name_mapping[(module_name, name)]
+ elif module_name in r_import_mapping:
+ module_name = r_import_mapping[module_name]
+ try:
+ write(GLOBAL + bytes(module_name, "ascii") + b'\n' +
+ bytes(name, "ascii") + b'\n')
+ except UnicodeEncodeError:
+ raise PicklingError(
+ "can't pickle global identifier '%s.%s' using "
+ "pickle protocol %i" % (module, name, self.proto)) from None
+
+ self.memoize(obj)
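+    # For example, pickling json.dumps at protocol 4 produces (a sketch of
+    # the opcode sequence, not exact bytes):
+    #     SHORT_BINUNICODE 'json', MEMOIZE, SHORT_BINUNICODE 'dumps',
+    #     MEMOIZE, STACK_GLOBAL, MEMOIZE
+    # whereas protocols 0-3 write the text-based GLOBAL opcode above.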
+
+ def save_type(self, obj):
+ if obj is type(None):
+ return self.save_reduce(type, (None,), obj=obj)
+ elif obj is type(NotImplemented):
+ return self.save_reduce(type, (NotImplemented,), obj=obj)
+ elif obj is type(...):
+ return self.save_reduce(type, (...,), obj=obj)
+ return self.save_global(obj)
+
+ dispatch[FunctionType] = save_global
+ dispatch[type] = save_type
+
+
+# Unpickling machinery
+
+class _Unpickler:
+
+ def __init__(self, file, *, fix_imports=True,
+ encoding="ASCII", errors="strict", buffers=None):
+ """This takes a binary file for reading a pickle data stream.
+
+ The protocol version of the pickle is detected automatically, so
+ no proto argument is needed.
+
+ The argument *file* must have two methods, a read() method that
+ takes an integer argument, and a readline() method that requires
+ no arguments. Both methods should return bytes. Thus *file*
+ can be a binary file object opened for reading, an io.BytesIO
+ object, or any other custom object that meets this interface.
+
+ If *buffers* is not None, it should be an iterable of buffer-enabled
+ objects that is consumed each time the pickle stream references
+ an out-of-band buffer view. Such buffers have been given in order
+ to the *buffer_callback* of a Pickler object.
+
+ If *buffers* is None (the default), then the buffers are taken
+ from the pickle stream, assuming they are serialized there.
+ It is an error for *buffers* to be None if the pickle stream
+ was produced with a non-None *buffer_callback*.
+
+ Other optional arguments are *fix_imports*, *encoding* and
+ *errors*, which are used to control compatibility support for
+ pickle stream generated by Python 2. If *fix_imports* is True,
+ pickle will try to map the old Python 2 names to the new names
+ used in Python 3. The *encoding* and *errors* tell pickle how
+ to decode 8-bit string instances pickled by Python 2; these
+ default to 'ASCII' and 'strict', respectively. *encoding* can be
+ 'bytes' to read these 8-bit string instances as bytes objects.
+ """
+ self._buffers = iter(buffers) if buffers is not None else None
+ self._file_readline = file.readline
+ self._file_read = file.read
+ self.memo = {}
+ self.encoding = encoding
+ self.errors = errors
+ self.proto = 0
+ self.fix_imports = fix_imports
+
+ def load(self):
+ """Read a pickled object representation from the open file.
+
+ Return the reconstituted object hierarchy specified in the file.
+ """
+ # Check whether Unpickler was initialized correctly. This is
+        # only needed to mimic the behavior of _pickle.Unpickler.load().
+ if not hasattr(self, "_file_read"):
+ raise UnpicklingError("Unpickler.__init__() was not called by "
+ "%s.__init__()" % (self.__class__.__name__,))
+ self._unframer = _Unframer(self._file_read, self._file_readline)
+ self.read = self._unframer.read
+ self.readinto = self._unframer.readinto
+ self.readline = self._unframer.readline
+ self.metastack = []
+ self.stack = []
+ self.append = self.stack.append
+ self.proto = 0
+ read = self.read
+ dispatch = self.dispatch
+ try:
+ while True:
+ key = read(1)
+ if not key:
+ raise EOFError
+ assert isinstance(key, bytes_types)
+ dispatch[key[0]](self)
+ except _Stop as stopinst:
+ return stopinst.value
+
+ # Return a list of items pushed in the stack after last MARK instruction.
+ def pop_mark(self):
+ items = self.stack
+ self.stack = self.metastack.pop()
+ self.append = self.stack.append
+ return items
+
+ def persistent_load(self, pid):
+ raise UnpicklingError("unsupported persistent id encountered")
+
+ dispatch = {}
+
+ def load_proto(self):
+ proto = self.read(1)[0]
+ if not 0 <= proto <= HIGHEST_PROTOCOL:
+ raise ValueError("unsupported pickle protocol: %d" % proto)
+ self.proto = proto
+ dispatch[PROTO[0]] = load_proto
+
+ def load_frame(self):
+        frame_size, = unpack('<Q', self.read(8))
+        if frame_size > sys.maxsize:
+ raise ValueError("frame size > sys.maxsize: %d" % frame_size)
+ self._unframer.load_frame(frame_size)
+ dispatch[FRAME[0]] = load_frame
+
+ def load_persid(self):
+ try:
+ pid = self.readline()[:-1].decode("ascii")
+ except UnicodeDecodeError:
+ raise UnpicklingError(
+ "persistent IDs in protocol 0 must be ASCII strings")
+ self.append(self.persistent_load(pid))
+ dispatch[PERSID[0]] = load_persid
+
+ def load_binpersid(self):
+ pid = self.stack.pop()
+ self.append(self.persistent_load(pid))
+ dispatch[BINPERSID[0]] = load_binpersid
+
+ def load_none(self):
+ self.append(None)
+ dispatch[NONE[0]] = load_none
+
+ def load_false(self):
+ self.append(False)
+ dispatch[NEWFALSE[0]] = load_false
+
+ def load_true(self):
+ self.append(True)
+ dispatch[NEWTRUE[0]] = load_true
+
+ def load_int(self):
+ data = self.readline()
+ if data == FALSE[1:]:
+ val = False
+ elif data == TRUE[1:]:
+ val = True
+ else:
+ val = int(data, 0)
+ self.append(val)
+ dispatch[INT[0]] = load_int
+
+ def load_binint(self):
+        self.append(unpack('<i', self.read(4))[0])
+    dispatch[BININT[0]] = load_binint
+
+    def load_binint1(self):
+        self.append(self.read(1)[0])
+    dispatch[BININT1[0]] = load_binint1
+
+    def load_binint2(self):
+        self.append(unpack('<H', self.read(2))[0])
+    dispatch[BININT2[0]] = load_binint2
+
+    def load_long(self):
+        val = self.readline()[:-1]
+        if val and val[-1] == b'L'[0]:
+            val = val[:-1]
+        self.append(int(val, 0))
+    dispatch[LONG[0]] = load_long
+
+    def load_long1(self):
+        n = self.read(1)[0]
+        data = self.read(n)
+        self.append(decode_long(data))
+    dispatch[LONG1[0]] = load_long1
+
+    def load_long4(self):
+        n, = unpack('<i', self.read(4))
+        if n < 0:
+            # Corrupt or hostile pickle -- we never write one like this
+            raise UnpicklingError("LONG pickle has negative byte count")
+        data = self.read(n)
+        self.append(decode_long(data))
+    dispatch[LONG4[0]] = load_long4
+
+    def load_float(self):
+        self.append(float(self.readline()[:-1]))
+    dispatch[FLOAT[0]] = load_float
+
+    def load_binfloat(self):
+        self.append(unpack('>d', self.read(8))[0])
+ dispatch[BINFLOAT[0]] = load_binfloat
+
+ def _decode_string(self, value):
+ # Used to allow strings from Python 2 to be decoded either as
+ # bytes or Unicode strings. This should be used only with the
+ # STRING, BINSTRING and SHORT_BINSTRING opcodes.
+ if self.encoding == "bytes":
+ return value
+ else:
+ return value.decode(self.encoding, self.errors)
+
+ def load_string(self):
+ data = self.readline()[:-1]
+ # Strip outermost quotes
+ if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
+ data = data[1:-1]
+ else:
+ raise UnpicklingError("the STRING opcode argument must be quoted")
+ self.append(self._decode_string(codecs.escape_decode(data)[0]))
+ dispatch[STRING[0]] = load_string
+
+ def load_binstring(self):
+ # Deprecated BINSTRING uses signed 32-bit length
+        len, = unpack('<i', self.read(4))
+        if len < 0:
+            raise UnpicklingError("BINSTRING pickle has negative byte count")
+        data = self.read(len)
+        self.append(self._decode_string(data))
+    dispatch[BINSTRING[0]] = load_binstring
+
+    def load_binbytes(self):
+        len, = unpack('<I', self.read(4))
+        if len > maxsize:
+ raise UnpicklingError("BINBYTES exceeds system's maximum size "
+ "of %d bytes" % maxsize)
+ self.append(self.read(len))
+ dispatch[BINBYTES[0]] = load_binbytes
+
+ def load_unicode(self):
+ self.append(str(self.readline()[:-1], 'raw-unicode-escape'))
+ dispatch[UNICODE[0]] = load_unicode
+
+ def load_binunicode(self):
+        len, = unpack('<I', self.read(4))
+        if len > maxsize:
+ raise UnpicklingError("BINUNICODE exceeds system's maximum size "
+ "of %d bytes" % maxsize)
+ self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+ dispatch[BINUNICODE[0]] = load_binunicode
+
+ def load_binunicode8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+ raise UnpicklingError("BINUNICODE8 exceeds system's maximum size "
+ "of %d bytes" % maxsize)
+ self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+ dispatch[BINUNICODE8[0]] = load_binunicode8
+
+ def load_binbytes8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+ raise UnpicklingError("BINBYTES8 exceeds system's maximum size "
+ "of %d bytes" % maxsize)
+ self.append(self.read(len))
+ dispatch[BINBYTES8[0]] = load_binbytes8
+
+ def load_bytearray8(self):
+        len, = unpack('<Q', self.read(8))
+        if len > maxsize:
+ raise UnpicklingError("BYTEARRAY8 exceeds system's maximum size "
+ "of %d bytes" % maxsize)
+ b = bytearray(len)
+ self.readinto(b)
+ self.append(b)
+ dispatch[BYTEARRAY8[0]] = load_bytearray8
+
+ def load_next_buffer(self):
+ if self._buffers is None:
+ raise UnpicklingError("pickle stream refers to out-of-band data "
+ "but no *buffers* argument was given")
+ try:
+ buf = next(self._buffers)
+ except StopIteration:
+ raise UnpicklingError("not enough out-of-band buffers")
+ self.append(buf)
+ dispatch[NEXT_BUFFER[0]] = load_next_buffer
+
+ def load_readonly_buffer(self):
+ buf = self.stack[-1]
+ with memoryview(buf) as m:
+ if not m.readonly:
+ self.stack[-1] = m.toreadonly()
+ dispatch[READONLY_BUFFER[0]] = load_readonly_buffer
+
+ def load_short_binstring(self):
+ len = self.read(1)[0]
+ data = self.read(len)
+ self.append(self._decode_string(data))
+ dispatch[SHORT_BINSTRING[0]] = load_short_binstring
+
+ def load_short_binbytes(self):
+ len = self.read(1)[0]
+ self.append(self.read(len))
+ dispatch[SHORT_BINBYTES[0]] = load_short_binbytes
+
+ def load_short_binunicode(self):
+ len = self.read(1)[0]
+ self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
+ dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode
+
+ def load_tuple(self):
+ items = self.pop_mark()
+ self.append(tuple(items))
+ dispatch[TUPLE[0]] = load_tuple
+
+ def load_empty_tuple(self):
+ self.append(())
+ dispatch[EMPTY_TUPLE[0]] = load_empty_tuple
+
+ def load_tuple1(self):
+ self.stack[-1] = (self.stack[-1],)
+ dispatch[TUPLE1[0]] = load_tuple1
+
+ def load_tuple2(self):
+ self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
+ dispatch[TUPLE2[0]] = load_tuple2
+
+ def load_tuple3(self):
+ self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
+ dispatch[TUPLE3[0]] = load_tuple3
+
+ def load_empty_list(self):
+ self.append([])
+ dispatch[EMPTY_LIST[0]] = load_empty_list
+
+ def load_empty_dictionary(self):
+ self.append({})
+ dispatch[EMPTY_DICT[0]] = load_empty_dictionary
+
+ def load_empty_set(self):
+ self.append(set())
+ dispatch[EMPTY_SET[0]] = load_empty_set
+
+ def load_frozenset(self):
+ items = self.pop_mark()
+ self.append(frozenset(items))
+ dispatch[FROZENSET[0]] = load_frozenset
+
+ def load_list(self):
+ items = self.pop_mark()
+ self.append(items)
+ dispatch[LIST[0]] = load_list
+
+ def load_dict(self):
+ items = self.pop_mark()
+ d = {items[i]: items[i+1]
+ for i in range(0, len(items), 2)}
+ self.append(d)
+ dispatch[DICT[0]] = load_dict
+
+ # INST and OBJ differ only in how they get a class object. It's not
+ # only sensible to do the rest in a common routine, the two routines
+ # previously diverged and grew different bugs.
+    # klass is the class to instantiate, and args are the arguments
+    # for klass.__init__, gathered from the stack.
+ def _instantiate(self, klass, args):
+ if (args or not isinstance(klass, type) or
+ hasattr(klass, "__getinitargs__")):
+ try:
+ value = klass(*args)
+ except TypeError as err:
+ raise TypeError("in constructor for %s: %s" %
+ (klass.__name__, str(err)), sys.exc_info()[2])
+ else:
+ value = klass.__new__(klass)
+ self.append(value)
+
+ def load_inst(self):
+ module = self.readline()[:-1].decode("ascii")
+ name = self.readline()[:-1].decode("ascii")
+ klass = self.find_class(module, name)
+ self._instantiate(klass, self.pop_mark())
+ dispatch[INST[0]] = load_inst
+
+ def load_obj(self):
+ # Stack is ... markobject classobject arg1 arg2 ...
+ args = self.pop_mark()
+ cls = args.pop(0)
+ self._instantiate(cls, args)
+ dispatch[OBJ[0]] = load_obj
+
+ def load_newobj(self):
+ args = self.stack.pop()
+ cls = self.stack.pop()
+ obj = cls.__new__(cls, *args)
+ self.append(obj)
+ dispatch[NEWOBJ[0]] = load_newobj
+
+ def load_newobj_ex(self):
+ kwargs = self.stack.pop()
+ args = self.stack.pop()
+ cls = self.stack.pop()
+ obj = cls.__new__(cls, *args, **kwargs)
+ self.append(obj)
+ dispatch[NEWOBJ_EX[0]] = load_newobj_ex
+
+ def load_global(self):
+ module = self.readline()[:-1].decode("utf-8")
+ name = self.readline()[:-1].decode("utf-8")
+ klass = self.find_class(module, name)
+ self.append(klass)
+ dispatch[GLOBAL[0]] = load_global
+
+ def load_stack_global(self):
+ name = self.stack.pop()
+ module = self.stack.pop()
+ if type(name) is not str or type(module) is not str:
+ raise UnpicklingError("STACK_GLOBAL requires str")
+ self.append(self.find_class(module, name))
+ dispatch[STACK_GLOBAL[0]] = load_stack_global
+
+ def load_ext1(self):
+ code = self.read(1)[0]
+ self.get_extension(code)
+ dispatch[EXT1[0]] = load_ext1
+
+ def load_ext2(self):
+        code, = unpack('<H', self.read(2))
+        self.get_extension(code)
+    dispatch[EXT2[0]] = load_ext2
+
+    def load_ext4(self):
+        code, = unpack('<i', self.read(4))
+        self.get_extension(code)
+    dispatch[EXT4[0]] = load_ext4
+
+    def get_extension(self, code):
+        nil = []
+        obj = _extension_cache.get(code, nil)
+        if obj is not nil:
+            self.append(obj)
+            return
+        key = _inverted_registry.get(code)
+        if not key:
+            if code <= 0:  # note that 0 is forbidden
+                # Corrupt or hostile pickle.
+                raise UnpicklingError("EXT specifies code <= 0")
+            raise ValueError("unregistered extension code %d" % code)
+        obj = self.find_class(*key)
+        _extension_cache[code] = obj
+        self.append(obj)
+
+    def find_class(self, module, name):
+        # Subclasses may override this.
+        sys.audit('pickle.find_class', module, name)
+        if self.proto < 3 and self.fix_imports:
+            if (module, name) in _compat_pickle.NAME_MAPPING:
+                module, name = _compat_pickle.NAME_MAPPING[(module, name)]
+            elif module in _compat_pickle.IMPORT_MAPPING:
+                module = _compat_pickle.IMPORT_MAPPING[module]
+        __import__(module, level=0)
+        if self.proto >= 4:
+ return _getattribute(sys.modules[module], name)[0]
+ else:
+ return getattr(sys.modules[module], name)
+
+ def load_reduce(self):
+ stack = self.stack
+ args = stack.pop()
+ func = stack[-1]
+ stack[-1] = func(*args)
+ dispatch[REDUCE[0]] = load_reduce
+
+ def load_pop(self):
+ if self.stack:
+ del self.stack[-1]
+ else:
+ self.pop_mark()
+ dispatch[POP[0]] = load_pop
+
+ def load_pop_mark(self):
+ self.pop_mark()
+ dispatch[POP_MARK[0]] = load_pop_mark
+
+ def load_dup(self):
+ self.append(self.stack[-1])
+ dispatch[DUP[0]] = load_dup
+
+ def load_get(self):
+ i = int(self.readline()[:-1])
+ try:
+ self.append(self.memo[i])
+ except KeyError:
+ msg = f'Memo value not found at index {i}'
+ raise UnpicklingError(msg) from None
+ dispatch[GET[0]] = load_get
+
+ def load_binget(self):
+ i = self.read(1)[0]
+ try:
+ self.append(self.memo[i])
+ except KeyError as exc:
+ msg = f'Memo value not found at index {i}'
+ raise UnpicklingError(msg) from None
+ dispatch[BINGET[0]] = load_binget
+
+ def load_long_binget(self):
+        i, = unpack('<I', self.read(4))
+        try:
+            self.append(self.memo[i])
+        except KeyError as exc:
+            msg = f'Memo value not found at index {i}'
+            raise UnpicklingError(msg) from None
+    dispatch[LONG_BINGET[0]] = load_long_binget
+
+    def load_put(self):
+        i = int(self.readline()[:-1])
+        if i < 0:
+            raise ValueError("negative PUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[PUT[0]] = load_put
+
+    def load_binput(self):
+        i = self.read(1)[0]
+        if i < 0:
+            raise ValueError("negative BINPUT argument")
+        self.memo[i] = self.stack[-1]
+    dispatch[BINPUT[0]] = load_binput
+
+    def load_long_binput(self):
+        i, = unpack('<I', self.read(4))
+        if i > maxsize:
+ raise ValueError("negative LONG_BINPUT argument")
+ self.memo[i] = self.stack[-1]
+ dispatch[LONG_BINPUT[0]] = load_long_binput
+
+ def load_memoize(self):
+ memo = self.memo
+ memo[len(memo)] = self.stack[-1]
+ dispatch[MEMOIZE[0]] = load_memoize
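+    # MEMOIZE (protocol 4+) implicitly numbers entries in creation order, so
+    # reader and writer agree on indices without explicit PUT arguments; a
+    # later BINGET 0, for example, fetches the first object memoized here.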
+
+ def load_append(self):
+ stack = self.stack
+ value = stack.pop()
+ list = stack[-1]
+ list.append(value)
+ dispatch[APPEND[0]] = load_append
+
+ def load_appends(self):
+ items = self.pop_mark()
+ list_obj = self.stack[-1]
+ try:
+ extend = list_obj.extend
+ except AttributeError:
+ pass
+ else:
+ extend(items)
+ return
+        # Even though PEP 307 requires extend() and append() methods,
+ # fall back on append() if the object has no extend() method
+ # for backward compatibility.
+ append = list_obj.append
+ for item in items:
+ append(item)
+ dispatch[APPENDS[0]] = load_appends
+
+ def load_setitem(self):
+ stack = self.stack
+ value = stack.pop()
+ key = stack.pop()
+ dict = stack[-1]
+ dict[key] = value
+ dispatch[SETITEM[0]] = load_setitem
+
+ def load_setitems(self):
+ items = self.pop_mark()
+ dict = self.stack[-1]
+ for i in range(0, len(items), 2):
+ dict[items[i]] = items[i + 1]
+ dispatch[SETITEMS[0]] = load_setitems
+
+ def load_additems(self):
+ items = self.pop_mark()
+ set_obj = self.stack[-1]
+ if isinstance(set_obj, set):
+ set_obj.update(items)
+ else:
+ add = set_obj.add
+ for item in items:
+ add(item)
+ dispatch[ADDITEMS[0]] = load_additems
+
+ def load_build(self):
+ stack = self.stack
+ state = stack.pop()
+ inst = stack[-1]
+ setstate = getattr(inst, "__setstate__", None)
+ if setstate is not None:
+ setstate(state)
+ return
+ slotstate = None
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if state:
+ inst_dict = inst.__dict__
+ intern = sys.intern
+ for k, v in state.items():
+ if type(k) is str:
+ inst_dict[intern(k)] = v
+ else:
+ inst_dict[k] = v
+ if slotstate:
+ for k, v in slotstate.items():
+ setattr(inst, k, v)
+ dispatch[BUILD[0]] = load_build
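+    # BUILD consumes the state pushed just before it: an instance defining
+    # __setstate__ receives that state verbatim; otherwise a dict updates
+    # inst.__dict__, and a (dict, slots_dict) pair additionally sets each
+    # slot attribute via setattr, e.g. state = ({'x': 1}, {'y': 2}).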
+
+ def load_mark(self):
+ self.metastack.append(self.stack)
+ self.stack = []
+ self.append = self.stack.append
+ dispatch[MARK[0]] = load_mark
+
+ def load_stop(self):
+ value = self.stack.pop()
+ raise _Stop(value)
+ dispatch[STOP[0]] = load_stop
+
+
+# Shorthands
+
+def _dump(obj, file, protocol=None, *, fix_imports=True, buffer_callback=None):
+ _Pickler(file, protocol, fix_imports=fix_imports,
+ buffer_callback=buffer_callback).dump(obj)
+
+def _dumps(obj, protocol=None, *, fix_imports=True, buffer_callback=None):
+ f = io.BytesIO()
+ _Pickler(f, protocol, fix_imports=fix_imports,
+ buffer_callback=buffer_callback).dump(obj)
+ res = f.getvalue()
+ assert isinstance(res, bytes_types)
+ return res
+
+def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict",
+ buffers=None):
+ return _Unpickler(file, fix_imports=fix_imports, buffers=buffers,
+ encoding=encoding, errors=errors).load()
+
+def _loads(s, /, *, fix_imports=True, encoding="ASCII", errors="strict",
+ buffers=None):
+ if isinstance(s, str):
+ raise TypeError("Can't load pickle from unicode string")
+ file = io.BytesIO(s)
+ return _Unpickler(file, fix_imports=fix_imports, buffers=buffers,
+ encoding=encoding, errors=errors).load()
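+
+# Round-trip sanity sketch for the pure-Python shorthands above
+# (illustrative only, not part of the upstream module):
+#     data = _dumps([1, 2, {'a': (3,)}])
+#     assert _loads(data) == [1, 2, {'a': (3,)}]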
+
+# Use the faster _pickle if possible
+try:
+ from _pickle import (
+ PickleError,
+ PicklingError,
+ UnpicklingError,
+ Pickler,
+ Unpickler,
+ dump,
+ dumps,
+ load,
+ loads
+ )
+except ImportError:
+ Pickler, Unpickler = _Pickler, _Unpickler
+ dump, dumps, load, loads = _dump, _dumps, _load, _loads
+
+# Doctest
+def _test():
+ import doctest
+ return doctest.testmod()
+
+if __name__ == "__main__":
+ import argparse
+ parser = argparse.ArgumentParser(
+ description='display contents of the pickle files')
+ parser.add_argument(
+ 'pickle_file', type=argparse.FileType('br'),
+ nargs='*', help='the pickle file')
+ parser.add_argument(
+ '-t', '--test', action='store_true',
+ help='run self-test suite')
+ parser.add_argument(
+ '-v', action='store_true',
+ help='run verbosely; only affects self-test run')
+ args = parser.parse_args()
+ if args.test:
+ _test()
+ else:
+ if not args.pickle_file:
+ parser.print_help()
+ else:
+ import pprint
+ for f in args.pickle_file:
+ obj = load(f)
+ pprint.pprint(obj)
diff --git a/infer_4_37_2/lib/python3.10/pipes.py b/infer_4_37_2/lib/python3.10/pipes.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cc74b0f1f781b15a095f33ce0d2bdd88ccfe008
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/pipes.py
@@ -0,0 +1,247 @@
+"""Conversion pipeline templates.
+
+The problem:
+------------
+
+Suppose you have some data that you want to convert to another format,
+such as from GIF image format to PPM image format. Maybe the
+conversion involves several steps (e.g. piping it through compress or
+uuencode). Some of the conversion steps may require that their input
+is a disk file, others may be able to read standard input; similar for
+their output. The input to the entire conversion may also be read
+from a disk file or from an open file, and similar for its output.
+
+The module lets you construct a pipeline template by sticking one or
+more conversion steps together. It will take care of creating and
+removing temporary files if they are necessary to hold intermediate
+data. You can then use the template to do conversions from many
+different sources to many different destinations. The temporary
+file names used are different each time the template is used.
+
+The templates are objects so you can create templates for many
+different conversion steps and store them in a dictionary, for
+instance.
+
+
+Directions:
+-----------
+
+To create a template:
+ t = Template()
+
+To add a conversion step to a template:
+ t.append(command, kind)
+where kind is a string of two characters: the first is '-' if the
+command reads its standard input or 'f' if it requires a file; the
+second likewise for the output. The command must be valid /bin/sh
+syntax. If input or output files are required, they are passed as
+$IN and $OUT; otherwise, it must be possible to use the command in
+a pipeline.
+
+To add a conversion step at the beginning:
+ t.prepend(command, kind)
+
+To convert a file to another file using a template:
+ sts = t.copy(infile, outfile)
+If infile or outfile are the empty string, standard input is read or
+standard output is written, respectively. The return value is the
+exit status of the conversion pipeline.
+
+To open a file for reading or writing through a conversion pipeline:
+ fp = t.open(file, mode)
+where mode is 'r' to read the file, or 'w' to write it -- just like
+for the built-in function open() or for os.popen().
+
+To create a new template object initialized to a given one:
+ t2 = t.clone()
+""" # '
+
+
+import re
+import os
+import tempfile
+# we import the quote function rather than the module for backward compat
+# (quote used to be an undocumented but used function in pipes)
+from shlex import quote
+
+__all__ = ["Template"]
+
+# Conversion step kinds
+
+FILEIN_FILEOUT = 'ff' # Must read & write real files
+STDIN_FILEOUT = '-f' # Must write a real file
+FILEIN_STDOUT = 'f-' # Must read a real file
+STDIN_STDOUT = '--' # Normal pipeline element
+SOURCE = '.-' # Must be first, writes stdout
+SINK = '-.' # Must be last, reads stdin
+
+stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
+ SOURCE, SINK]
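+
+# Illustrative use of the step kinds above (a sketch; 'tr' is assumed to be
+# available as a standard POSIX tool, and 'somefile.txt' is a placeholder):
+#
+#     t = Template()
+#     t.append('tr a-z A-Z', STDIN_STDOUT)  # plain '--' pipeline element
+#     f = t.open('somefile.txt', 'r')       # reads the file upper-cased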
+
+
+class Template:
+ """Class representing a pipeline template."""
+
+ def __init__(self):
+ """Template() returns a fresh pipeline template."""
+ self.debugging = 0
+ self.reset()
+
+ def __repr__(self):
+ """t.__repr__() implements repr(t)."""
+        return '<Template instance, steps=%r>' % (self.steps,)
+
+ def reset(self):
+ """t.reset() restores a pipeline template to its initial state."""
+ self.steps = []
+
+ def clone(self):
+ """t.clone() returns a new pipeline template with identical
+ initial state as the current one."""
+ t = Template()
+ t.steps = self.steps[:]
+ t.debugging = self.debugging
+ return t
+
+ def debug(self, flag):
+ """t.debug(flag) turns debugging on or off."""
+ self.debugging = flag
+
+ def append(self, cmd, kind):
+ """t.append(cmd, kind) adds a new step at the end."""
+ if not isinstance(cmd, str):
+ raise TypeError('Template.append: cmd must be a string')
+ if kind not in stepkinds:
+ raise ValueError('Template.append: bad kind %r' % (kind,))
+ if kind == SOURCE:
+ raise ValueError('Template.append: SOURCE can only be prepended')
+ if self.steps and self.steps[-1][1] == SINK:
+ raise ValueError('Template.append: already ends with SINK')
+ if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
+ raise ValueError('Template.append: missing $IN in cmd')
+ if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
+ raise ValueError('Template.append: missing $OUT in cmd')
+ self.steps.append((cmd, kind))
+
+ def prepend(self, cmd, kind):
+ """t.prepend(cmd, kind) adds a new step at the front."""
+ if not isinstance(cmd, str):
+ raise TypeError('Template.prepend: cmd must be a string')
+ if kind not in stepkinds:
+ raise ValueError('Template.prepend: bad kind %r' % (kind,))
+ if kind == SINK:
+ raise ValueError('Template.prepend: SINK can only be appended')
+ if self.steps and self.steps[0][1] == SOURCE:
+ raise ValueError('Template.prepend: already begins with SOURCE')
+ if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
+ raise ValueError('Template.prepend: missing $IN in cmd')
+ if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
+ raise ValueError('Template.prepend: missing $OUT in cmd')
+ self.steps.insert(0, (cmd, kind))
+
+ def open(self, file, rw):
+ """t.open(file, rw) returns a pipe or file object open for
+ reading or writing; the file is the other end of the pipeline."""
+ if rw == 'r':
+ return self.open_r(file)
+ if rw == 'w':
+ return self.open_w(file)
+ raise ValueError('Template.open: rw must be \'r\' or \'w\', not %r'
+ % (rw,))
+
+ def open_r(self, file):
+ """t.open_r(file) and t.open_w(file) implement
+ t.open(file, 'r') and t.open(file, 'w') respectively."""
+ if not self.steps:
+ return open(file, 'r')
+ if self.steps[-1][1] == SINK:
+            raise ValueError('Template.open_r: pipeline ends with SINK')
+ cmd = self.makepipeline(file, '')
+ return os.popen(cmd, 'r')
+
+ def open_w(self, file):
+ if not self.steps:
+ return open(file, 'w')
+ if self.steps[0][1] == SOURCE:
+ raise ValueError('Template.open_w: pipeline begins with SOURCE')
+ cmd = self.makepipeline('', file)
+ return os.popen(cmd, 'w')
+
+ def copy(self, infile, outfile):
+ return os.system(self.makepipeline(infile, outfile))
+
+ def makepipeline(self, infile, outfile):
+ cmd = makepipeline(infile, self.steps, outfile)
+ if self.debugging:
+ print(cmd)
+ cmd = 'set -x; ' + cmd
+ return cmd
+
+
+def makepipeline(infile, steps, outfile):
+    # Build a list containing, for each command:
+ # [input filename or '', command string, kind, output filename or '']
+
+ list = []
+ for cmd, kind in steps:
+ list.append(['', cmd, kind, ''])
+ #
+ # Make sure there is at least one step
+ #
+ if not list:
+ list.append(['', 'cat', '--', ''])
+ #
+ # Take care of the input and output ends
+ #
+ [cmd, kind] = list[0][1:3]
+ if kind[0] == 'f' and not infile:
+ list.insert(0, ['', 'cat', '--', ''])
+ list[0][0] = infile
+ #
+ [cmd, kind] = list[-1][1:3]
+ if kind[1] == 'f' and not outfile:
+ list.append(['', 'cat', '--', ''])
+ list[-1][-1] = outfile
+ #
+ # Invent temporary files to connect stages that need files
+ #
+ garbage = []
+ for i in range(1, len(list)):
+ lkind = list[i-1][2]
+ rkind = list[i][2]
+ if lkind[1] == 'f' or rkind[0] == 'f':
+ (fd, temp) = tempfile.mkstemp()
+ os.close(fd)
+ garbage.append(temp)
+ list[i-1][-1] = list[i][0] = temp
+ #
+ for item in list:
+ [inf, cmd, kind, outf] = item
+ if kind[1] == 'f':
+ cmd = 'OUT=' + quote(outf) + '; ' + cmd
+ if kind[0] == 'f':
+ cmd = 'IN=' + quote(inf) + '; ' + cmd
+ if kind[0] == '-' and inf:
+ cmd = cmd + ' <' + quote(inf)
+ if kind[1] == '-' and outf:
+ cmd = cmd + ' >' + quote(outf)
+ item[1] = cmd
+ #
+ cmdlist = list[0][1]
+ for item in list[1:]:
+ [cmd, kind] = item[1:3]
+ if item[0] == '':
+ if 'f' in kind:
+ cmd = '{ ' + cmd + '; }'
+ cmdlist = cmdlist + ' |\n' + cmd
+ else:
+ cmdlist = cmdlist + '\n' + cmd
+ #
+ if garbage:
+ rmcmd = 'rm -f'
+ for file in garbage:
+ rmcmd = rmcmd + ' ' + quote(file)
+ trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
+ cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
+ #
+ return cmdlist
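+
+# For instance (a sketch), two STDIN_STDOUT steps 'tr a-z A-Z' and 'sort'
+# with both infile and outfile set come out roughly as:
+#     tr a-z A-Z <infile |
+#     sort >outfile
+# with the filenames passed through shlex.quote().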
diff --git a/infer_4_37_2/lib/python3.10/pkgutil.py b/infer_4_37_2/lib/python3.10/pkgutil.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e010c79c12668c36ca700a6926f0cce54e5eb83
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/pkgutil.py
@@ -0,0 +1,715 @@
+"""Utilities to support packages."""
+
+from collections import namedtuple
+from functools import singledispatch as simplegeneric
+import importlib
+import importlib.util
+import importlib.machinery
+import os
+import os.path
+import sys
+from types import ModuleType
+import warnings
+
+__all__ = [
+ 'get_importer', 'iter_importers', 'get_loader', 'find_loader',
+ 'walk_packages', 'iter_modules', 'get_data',
+ 'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
+ 'ModuleInfo',
+]
+
+
+ModuleInfo = namedtuple('ModuleInfo', 'module_finder name ispkg')
+ModuleInfo.__doc__ = 'A namedtuple with minimal info about a module.'
+
+
+def _get_spec(finder, name):
+ """Return the finder-specific module spec."""
+ # Works with legacy finders.
+ try:
+ find_spec = finder.find_spec
+ except AttributeError:
+ loader = finder.find_module(name)
+ if loader is None:
+ return None
+ return importlib.util.spec_from_loader(name, loader)
+ else:
+ return find_spec(name)
+
+
+def read_code(stream):
+ # This helper is needed in order for the PEP 302 emulation to
+ # correctly handle compiled files
+ import marshal
+
+ magic = stream.read(4)
+ if magic != importlib.util.MAGIC_NUMBER:
+ return None
+
+ stream.read(12) # Skip rest of the header
+ return marshal.load(stream)
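+
+# The 16-byte .pyc header (PEP 552) is MAGIC_NUMBER plus a 4-byte flags word
+# and two more 4-byte fields (timestamp or source hash, and source size),
+# which is why 12 bytes are skipped above before unmarshalling the code
+# object.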
+
+
+def walk_packages(path=None, prefix='', onerror=None):
+ """Yields ModuleInfo for all modules recursively
+ on path, or, if path is None, all accessible modules.
+
+ 'path' should be either None or a list of paths to look for
+ modules in.
+
+ 'prefix' is a string to output on the front of every module name
+ on output.
+
+ Note that this function must import all *packages* (NOT all
+ modules!) on the given path, in order to access the __path__
+ attribute to find submodules.
+
+ 'onerror' is a function which gets called with one argument (the
+ name of the package which was being imported) if any exception
+ occurs while trying to import a package. If no onerror function is
+ supplied, ImportErrors are caught and ignored, while all other
+ exceptions are propagated, terminating the search.
+
+ Examples:
+
+ # list all modules python can access
+ walk_packages()
+
+ # list all submodules of ctypes
+ walk_packages(ctypes.__path__, ctypes.__name__+'.')
+ """
+
+ def seen(p, m={}):
+ if p in m:
+ return True
+ m[p] = True
+
+ for info in iter_modules(path, prefix):
+ yield info
+
+ if info.ispkg:
+ try:
+ __import__(info.name)
+ except ImportError:
+ if onerror is not None:
+ onerror(info.name)
+ except Exception:
+ if onerror is not None:
+ onerror(info.name)
+ else:
+ raise
+ else:
+ path = getattr(sys.modules[info.name], '__path__', None) or []
+
+ # don't traverse path items we've seen before
+ path = [p for p in path if not seen(p)]
+
+ yield from walk_packages(path, info.name+'.', onerror)
+
+
+def iter_modules(path=None, prefix=''):
+ """Yields ModuleInfo for all submodules on path,
+ or, if path is None, all top-level modules on sys.path.
+
+ 'path' should be either None or a list of paths to look for
+ modules in.
+
+ 'prefix' is a string to output on the front of every module name
+ on output.
+ """
+ if path is None:
+ importers = iter_importers()
+ elif isinstance(path, str):
+ raise ValueError("path must be None or list of paths to look for "
+ "modules in")
+ else:
+ importers = map(get_importer, path)
+
+ yielded = {}
+ for i in importers:
+ for name, ispkg in iter_importer_modules(i, prefix):
+ if name not in yielded:
+ yielded[name] = 1
+ yield ModuleInfo(i, name, ispkg)
+
+
+@simplegeneric
+def iter_importer_modules(importer, prefix=''):
+ if not hasattr(importer, 'iter_modules'):
+ return []
+ return importer.iter_modules(prefix)
+
+
+# Implement a file walker for the normal importlib path hook
+def _iter_file_finder_modules(importer, prefix=''):
+ if importer.path is None or not os.path.isdir(importer.path):
+ return
+
+ yielded = {}
+ import inspect
+ try:
+ filenames = os.listdir(importer.path)
+ except OSError:
+ # ignore unreadable directories like import does
+ filenames = []
+ filenames.sort() # handle packages before same-named modules
+
+ for fn in filenames:
+ modname = inspect.getmodulename(fn)
+ if modname=='__init__' or modname in yielded:
+ continue
+
+ path = os.path.join(importer.path, fn)
+ ispkg = False
+
+ if not modname and os.path.isdir(path) and '.' not in fn:
+ modname = fn
+ try:
+ dircontents = os.listdir(path)
+ except OSError:
+ # ignore unreadable directories like import does
+ dircontents = []
+ for fn in dircontents:
+ subname = inspect.getmodulename(fn)
+ if subname=='__init__':
+ ispkg = True
+ break
+ else:
+ continue # not a package
+
+ if modname and '.' not in modname:
+ yielded[modname] = 1
+ yield prefix + modname, ispkg
+
+iter_importer_modules.register(
+ importlib.machinery.FileFinder, _iter_file_finder_modules)
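+
+# Because iter_importer_modules is a functools.singledispatch generic, the
+# registration above routes FileFinder instances to
+# _iter_file_finder_modules, while unregistered finder types fall back to
+# the generic body, which defers to the finder's own iter_modules() method.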
+
+
+def _import_imp():
+ global imp
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', DeprecationWarning)
+ imp = importlib.import_module('imp')
+
+class ImpImporter:
+ """PEP 302 Finder that wraps Python's "classic" import algorithm
+
+ ImpImporter(dirname) produces a PEP 302 finder that searches that
+ directory. ImpImporter(None) produces a PEP 302 finder that searches
+ the current sys.path, plus any modules that are frozen or built-in.
+
+ Note that ImpImporter does not currently support being used by placement
+ on sys.meta_path.
+ """
+
+ def __init__(self, path=None):
+ global imp
+ warnings.warn("This emulation is deprecated and slated for removal "
+ "in Python 3.12; use 'importlib' instead",
+ DeprecationWarning)
+ _import_imp()
+ self.path = path
+
+ def find_module(self, fullname, path=None):
+ # Note: we ignore 'path' argument since it is only used via meta_path
+ subname = fullname.split(".")[-1]
+ if subname != fullname and self.path is None:
+ return None
+ if self.path is None:
+ path = None
+ else:
+ path = [os.path.realpath(self.path)]
+ try:
+ file, filename, etc = imp.find_module(subname, path)
+ except ImportError:
+ return None
+ return ImpLoader(fullname, file, filename, etc)
+
+ def iter_modules(self, prefix=''):
+ if self.path is None or not os.path.isdir(self.path):
+ return
+
+ yielded = {}
+ import inspect
+ try:
+ filenames = os.listdir(self.path)
+ except OSError:
+ # ignore unreadable directories like import does
+ filenames = []
+ filenames.sort() # handle packages before same-named modules
+
+ for fn in filenames:
+ modname = inspect.getmodulename(fn)
+ if modname=='__init__' or modname in yielded:
+ continue
+
+ path = os.path.join(self.path, fn)
+ ispkg = False
+
+ if not modname and os.path.isdir(path) and '.' not in fn:
+ modname = fn
+ try:
+ dircontents = os.listdir(path)
+ except OSError:
+ # ignore unreadable directories like import does
+ dircontents = []
+ for fn in dircontents:
+ subname = inspect.getmodulename(fn)
+ if subname=='__init__':
+ ispkg = True
+ break
+ else:
+ continue # not a package
+
+ if modname and '.' not in modname:
+ yielded[modname] = 1
+ yield prefix + modname, ispkg
+
+
+class ImpLoader:
+ """PEP 302 Loader that wraps Python's "classic" import algorithm
+ """
+ code = source = None
+
+ def __init__(self, fullname, file, filename, etc):
+ warnings.warn("This emulation is deprecated and slated for removal in "
+ "Python 3.12; use 'importlib' instead",
+ DeprecationWarning)
+ _import_imp()
+ self.file = file
+ self.filename = filename
+ self.fullname = fullname
+ self.etc = etc
+
+ def load_module(self, fullname):
+ self._reopen()
+ try:
+ mod = imp.load_module(fullname, self.file, self.filename, self.etc)
+ finally:
+ if self.file:
+ self.file.close()
+ # Note: we don't set __loader__ because we want the module to look
+ # normal; i.e. this is just a wrapper for standard import machinery
+ return mod
+
+ def get_data(self, pathname):
+ with open(pathname, "rb") as file:
+ return file.read()
+
+ def _reopen(self):
+ if self.file and self.file.closed:
+ mod_type = self.etc[2]
+ if mod_type==imp.PY_SOURCE:
+ self.file = open(self.filename, 'r')
+ elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
+ self.file = open(self.filename, 'rb')
+
+ def _fix_name(self, fullname):
+ if fullname is None:
+ fullname = self.fullname
+ elif fullname != self.fullname:
+ raise ImportError("Loader for module %s cannot handle "
+ "module %s" % (self.fullname, fullname))
+ return fullname
+
+ def is_package(self, fullname):
+ fullname = self._fix_name(fullname)
+ return self.etc[2]==imp.PKG_DIRECTORY
+
+ def get_code(self, fullname=None):
+ fullname = self._fix_name(fullname)
+ if self.code is None:
+ mod_type = self.etc[2]
+ if mod_type==imp.PY_SOURCE:
+ source = self.get_source(fullname)
+ self.code = compile(source, self.filename, 'exec')
+ elif mod_type==imp.PY_COMPILED:
+ self._reopen()
+ try:
+ self.code = read_code(self.file)
+ finally:
+ self.file.close()
+ elif mod_type==imp.PKG_DIRECTORY:
+ self.code = self._get_delegate().get_code()
+ return self.code
+
+ def get_source(self, fullname=None):
+ fullname = self._fix_name(fullname)
+ if self.source is None:
+ mod_type = self.etc[2]
+ if mod_type==imp.PY_SOURCE:
+ self._reopen()
+ try:
+ self.source = self.file.read()
+ finally:
+ self.file.close()
+ elif mod_type==imp.PY_COMPILED:
+ if os.path.exists(self.filename[:-1]):
+ with open(self.filename[:-1], 'r') as f:
+ self.source = f.read()
+ elif mod_type==imp.PKG_DIRECTORY:
+ self.source = self._get_delegate().get_source()
+ return self.source
+
+ def _get_delegate(self):
+ finder = ImpImporter(self.filename)
+ spec = _get_spec(finder, '__init__')
+ return spec.loader
+
+ def get_filename(self, fullname=None):
+ fullname = self._fix_name(fullname)
+ mod_type = self.etc[2]
+ if mod_type==imp.PKG_DIRECTORY:
+ return self._get_delegate().get_filename()
+ elif mod_type in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
+ return self.filename
+ return None
+
+
+try:
+ import zipimport
+ from zipimport import zipimporter
+
+ def iter_zipimport_modules(importer, prefix=''):
+ dirlist = sorted(zipimport._zip_directory_cache[importer.archive])
+ _prefix = importer.prefix
+ plen = len(_prefix)
+ yielded = {}
+ import inspect
+ for fn in dirlist:
+ if not fn.startswith(_prefix):
+ continue
+
+ fn = fn[plen:].split(os.sep)
+
+ if len(fn)==2 and fn[1].startswith('__init__.py'):
+ if fn[0] not in yielded:
+ yielded[fn[0]] = 1
+ yield prefix + fn[0], True
+
+ if len(fn)!=1:
+ continue
+
+ modname = inspect.getmodulename(fn[0])
+ if modname=='__init__':
+ continue
+
+ if modname and '.' not in modname and modname not in yielded:
+ yielded[modname] = 1
+ yield prefix + modname, False
+
+ iter_importer_modules.register(zipimporter, iter_zipimport_modules)
+
+except ImportError:
+ pass
+
+
+def get_importer(path_item):
+ """Retrieve a finder for the given path item
+
+ The returned finder is cached in sys.path_importer_cache
+ if it was newly created by a path hook.
+
+ The cache (or part of it) can be cleared manually if a
+ rescan of sys.path_hooks is necessary.
+ """
+ try:
+ importer = sys.path_importer_cache[path_item]
+ except KeyError:
+ for path_hook in sys.path_hooks:
+ try:
+ importer = path_hook(path_item)
+ sys.path_importer_cache.setdefault(path_item, importer)
+ break
+ except ImportError:
+ pass
+ else:
+ importer = None
+ return importer
+
+
+def iter_importers(fullname=""):
+ """Yield finders for the given module name
+
+ If fullname contains a '.', the finders will be for the package
+ containing fullname, otherwise they will be all registered top level
+ finders (i.e. those on both sys.meta_path and sys.path_hooks).
+
+ If the named module is in a package, that package is imported as a side
+ effect of invoking this function.
+
+ If no module name is specified, all top level finders are produced.
+ """
+ if fullname.startswith('.'):
+ msg = "Relative module name {!r} not supported".format(fullname)
+ raise ImportError(msg)
+ if '.' in fullname:
+ # Get the containing package's __path__
+ pkg_name = fullname.rpartition(".")[0]
+ pkg = importlib.import_module(pkg_name)
+ path = getattr(pkg, '__path__', None)
+ if path is None:
+ return
+ else:
+ yield from sys.meta_path
+ path = sys.path
+ for item in path:
+ yield get_importer(item)
+
+
+def get_loader(module_or_name):
+ """Get a "loader" object for module_or_name
+
+ Returns None if the module cannot be found or imported.
+ If the named module is not already imported, its containing package
+ (if any) is imported, in order to establish the package __path__.
+ """
+ if module_or_name in sys.modules:
+ module_or_name = sys.modules[module_or_name]
+ if module_or_name is None:
+ return None
+ if isinstance(module_or_name, ModuleType):
+ module = module_or_name
+ loader = getattr(module, '__loader__', None)
+ if loader is not None:
+ return loader
+ if getattr(module, '__spec__', None) is None:
+ return None
+ fullname = module.__name__
+ else:
+ fullname = module_or_name
+ return find_loader(fullname)
+
+
+def find_loader(fullname):
+ """Find a "loader" object for fullname
+
+ This is a backwards compatibility wrapper around
+ importlib.util.find_spec that converts most failures to ImportError
+ and only returns the loader rather than the full spec
+ """
+ if fullname.startswith('.'):
+ msg = "Relative module name {!r} not supported".format(fullname)
+ raise ImportError(msg)
+ try:
+ spec = importlib.util.find_spec(fullname)
+ except (ImportError, AttributeError, TypeError, ValueError) as ex:
+ # This hack fixes an impedance mismatch between pkgutil and
+ # importlib, where the latter raises other errors for cases where
+ # pkgutil previously raised ImportError
+ msg = "Error while finding loader for {!r} ({}: {})"
+ raise ImportError(msg.format(fullname, type(ex), ex)) from ex
+ return spec.loader if spec is not None else None
+
+
+def extend_path(path, name):
+ """Extend a package's path.
+
+ Intended use is to place the following code in a package's __init__.py:
+
+ from pkgutil import extend_path
+ __path__ = extend_path(__path__, __name__)
+
+ This will add to the package's __path__ all subdirectories of
+ directories on sys.path named after the package. This is useful
+ if one wants to distribute different parts of a single logical
+ package as multiple directories.
+
+ It also looks for *.pkg files beginning where * matches the name
+ argument. This feature is similar to *.pth files (see site.py),
+ except that it doesn't special-case lines starting with 'import'.
+ A *.pkg file is trusted at face value: apart from checking for
+ duplicates, all entries found in a *.pkg file are added to the
+    path, regardless of whether they exist on the filesystem.  (This
+ is a feature.)
+
+ If the input path is not a list (as is the case for frozen
+ packages) it is returned unchanged. The input path is not
+ modified; an extended copy is returned. Items are only appended
+ to the copy at the end.
+
+ It is assumed that sys.path is a sequence. Items of sys.path that
+ are not (unicode or 8-bit) strings referring to existing
+ directories are ignored. Unicode items of sys.path that cause
+ errors when used as filenames may cause this function to raise an
+ exception (in line with os.path.isdir() behavior).
+ """
+
+ if not isinstance(path, list):
+ # This could happen e.g. when this is called from inside a
+ # frozen package. Return the path unchanged in that case.
+ return path
+
+ sname_pkg = name + ".pkg"
+
+ path = path[:] # Start with a copy of the existing path
+
+ parent_package, _, final_name = name.rpartition('.')
+ if parent_package:
+ try:
+ search_path = sys.modules[parent_package].__path__
+ except (KeyError, AttributeError):
+ # We can't do anything: find_loader() returns None when
+ # passed a dotted name.
+ return path
+ else:
+ search_path = sys.path
+
+ for dir in search_path:
+ if not isinstance(dir, str):
+ continue
+
+ finder = get_importer(dir)
+ if finder is not None:
+ portions = []
+ if hasattr(finder, 'find_spec'):
+ spec = finder.find_spec(final_name)
+ if spec is not None:
+ portions = spec.submodule_search_locations or []
+ # Is this finder PEP 420 compliant?
+ elif hasattr(finder, 'find_loader'):
+ _, portions = finder.find_loader(final_name)
+
+ for portion in portions:
+ # XXX This may still add duplicate entries to path on
+ # case-insensitive filesystems
+ if portion not in path:
+ path.append(portion)
+
+ # XXX Is this the right thing for subpackages like zope.app?
+ # It looks for a file named "zope.app.pkg"
+ pkgfile = os.path.join(dir, sname_pkg)
+ if os.path.isfile(pkgfile):
+ try:
+ f = open(pkgfile)
+ except OSError as msg:
+ sys.stderr.write("Can't open %s: %s\n" %
+ (pkgfile, msg))
+ else:
+ with f:
+ for line in f:
+ line = line.rstrip('\n')
+ if not line or line.startswith('#'):
+ continue
+ path.append(line) # Don't check for existence!
+
+ return path
+
+
+def get_data(package, resource):
+ """Get a resource from a package.
+
+ This is a wrapper round the PEP 302 loader get_data API. The package
+ argument should be the name of a package, in standard module format
+ (foo.bar). The resource argument should be in the form of a relative
+ filename, using '/' as the path separator. The parent directory name '..'
+ is not allowed, and nor is a rooted name (starting with a '/').
+
+ The function returns a binary string, which is the contents of the
+ specified resource.
+
+ For packages located in the filesystem, which have already been imported,
+ this is the rough equivalent of
+
+ d = os.path.dirname(sys.modules[package].__file__)
+ data = open(os.path.join(d, resource), 'rb').read()
+
+ If the package cannot be located or loaded, or it uses a PEP 302 loader
+ which does not support get_data(), then None is returned.
+ """
+
+ spec = importlib.util.find_spec(package)
+ if spec is None:
+ return None
+ loader = spec.loader
+ if loader is None or not hasattr(loader, 'get_data'):
+ return None
+ # XXX needs test
+ mod = (sys.modules.get(package) or
+ importlib._bootstrap._load(spec))
+ if mod is None or not hasattr(mod, '__file__'):
+ return None
+
+ # Modify the resource name to be compatible with the loader.get_data
+ # signature - an os.path format "filename" starting with the dirname of
+ # the package's __file__
+ parts = resource.split('/')
+ parts.insert(0, os.path.dirname(mod.__file__))
+ resource_name = os.path.join(*parts)
+ return loader.get_data(resource_name)
+
+
+_NAME_PATTERN = None
+
+def resolve_name(name):
+ """
+ Resolve a name to an object.
+
+ It is expected that `name` will be a string in one of the following
+ formats, where W is shorthand for a valid Python identifier and dot stands
+ for a literal period in these pseudo-regexes:
+
+ W(.W)*
+ W(.W)*:(W(.W)*)?
+
+ The first form is intended for backward compatibility only. It assumes that
+ some part of the dotted name is a package, and the rest is an object
+ somewhere within that package, possibly nested inside other objects.
+ Because the place where the package stops and the object hierarchy starts
+ can't be inferred by inspection, repeated attempts to import must be done
+ with this form.
+
+ In the second form, the caller makes the division point clear through the
+ provision of a single colon: the dotted name to the left of the colon is a
+ package to be imported, and the dotted name to the right is the object
+ hierarchy within that package. Only one import is needed in this form. If
+ it ends with the colon, then a module object is returned.
+
+ The function will return an object (which might be a module), or raise one
+ of the following exceptions:
+
+ ValueError - if `name` isn't in a recognised format
+ ImportError - if an import failed when it shouldn't have
+ AttributeError - if a failure occurred when traversing the object hierarchy
+ within the imported package to get to the desired object.
+ """
+ global _NAME_PATTERN
+ if _NAME_PATTERN is None:
+ # Lazy import to speedup Python startup time
+ import re
+ dotted_words = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
+        _NAME_PATTERN = re.compile(f'^(?P<pkg>{dotted_words})'
+                                   f'(?P<cln>:(?P<obj>{dotted_words})?)?$',
+ re.UNICODE)
+
+ m = _NAME_PATTERN.match(name)
+ if not m:
+ raise ValueError(f'invalid format: {name!r}')
+ gd = m.groupdict()
+ if gd.get('cln'):
+ # there is a colon - a one-step import is all that's needed
+ mod = importlib.import_module(gd['pkg'])
+ parts = gd.get('obj')
+ parts = parts.split('.') if parts else []
+ else:
+ # no colon - have to iterate to find the package boundary
+ parts = name.split('.')
+ modname = parts.pop(0)
+ # first part *must* be a module/package.
+ mod = importlib.import_module(modname)
+ while parts:
+ p = parts[0]
+ s = f'{modname}.{p}'
+ try:
+ mod = importlib.import_module(s)
+ parts.pop(0)
+ modname = s
+ except ImportError:
+ break
+ # if we reach this point, mod is the module, already imported, and
+ # parts is the list of parts in the object hierarchy to be traversed, or
+ # an empty list if just the module is wanted.
+ result = mod
+ for p in parts:
+ result = getattr(result, p)
+ return result
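+
+# Illustrative calls (assuming the named modules are importable):
+#     resolve_name('os.path')                  # -> the os.path module
+#     resolve_name('os.path:join')             # -> the join function
+#     resolve_name('collections:OrderedDict')  # -> the class object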
diff --git a/infer_4_37_2/lib/python3.10/platform.py b/infer_4_37_2/lib/python3.10/platform.py
new file mode 100644
index 0000000000000000000000000000000000000000..0238aeb3650797e07a89678ee1c46bb2580e9c07
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/platform.py
@@ -0,0 +1,1319 @@
+#!/usr/bin/env python3
+
+""" This module tries to retrieve as much platform-identifying data as
+ possible. It makes this information available via function APIs.
+
+    If called from the command line, it prints the platform
+    information concatenated as a single string to stdout.  The output
+    format is usable as part of a filename.
+
+"""
+# This module is maintained by Marc-Andre Lemburg <mal@lemburg.com>.
+# If you find problems, please submit bug reports/patches via the
+# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
+#
+# Still needed:
+# * support for MS-DOS (PythonDX ?)
+# * support for Amiga and other still unsupported platforms running Python
+# * support for additional Linux distributions
+#
+# Many thanks to all those who helped adding platform-specific
+# checks (in no particular order):
+#
+# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
+# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
+# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
+# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
+# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
+# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve
+# Dower
+#
+# History:
+#
+#
+#
+# 1.0.8 - changed Windows support to read version from kernel32.dll
+# 1.0.7 - added DEV_NULL
+# 1.0.6 - added linux_distribution()
+# 1.0.5 - fixed Java support to allow running the module on Jython
+# 1.0.4 - added IronPython support
+# 1.0.3 - added normalization of Windows system name
+# 1.0.2 - added more Windows support
+# 1.0.1 - reformatted to make doc.py happy
+# 1.0.0 - reformatted a bit and checked into Python CVS
+# 0.8.0 - added sys.version parser and various new access
+# APIs (python_version(), python_compiler(), etc.)
+# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
+# 0.7.1 - added support for Caldera OpenLinux
+# 0.7.0 - some fixes for WinCE; untabified the source file
+# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
+# vms_lib.getsyi() configured
+# 0.6.1 - added code to prevent 'uname -p' on platforms which are
+# known not to support it
+# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
+# did some cleanup of the interfaces - some APIs have changed
+#    0.5.5 - fixed another typo in the MacOS code... should have
+# used more coffee today ;-)
+# 0.5.4 - fixed a few typos in the MacOS code
+# 0.5.3 - added experimental MacOS support; added better popen()
+# workarounds in _syscmd_ver() -- still not 100% elegant
+# though
+# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
+# return values (the system uname command tends to return
+# 'unknown' instead of just leaving the field empty)
+# 0.5.1 - included code for slackware dist; added exception handlers
+# to cover up situations where platforms don't have os.popen
+# (e.g. Mac) or fail on socket.gethostname(); fixed libc
+# detection RE
+# 0.5.0 - changed the API names referring to system commands to *syscmd*;
+# added java_ver(); made syscmd_ver() a private
+# API (was system_ver() in previous versions) -- use uname()
+# instead; extended the win32_ver() to also return processor
+# type information
+# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
+# 0.3.4 - fixed a bug in _follow_symlinks()
+# 0.3.3 - fixed popen() and "file" command invocation bugs
+# 0.3.2 - added architecture() API and support for it in platform()
+# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
+# 0.3.0 - added system alias support
+# 0.2.3 - removed 'wince' again... oh well.
+# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
+# 0.2.1 - added cache logic and changed the platform string format
+# 0.2.0 - changed the API to use functions instead of module globals
+# since some action take too long to be run on module import
+# 0.1.0 - first release
+#
+# You can always get the latest version of this module at:
+#
+# http://www.egenix.com/files/python/platform.py
+#
+# If that URL should fail, try contacting the author.
+
+__copyright__ = """
+ Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
+ Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com
+
+ Permission to use, copy, modify, and distribute this software and its
+ documentation for any purpose and without fee or royalty is hereby granted,
+ provided that the above copyright notice appear in all copies and that
+ both that copyright notice and this permission notice appear in
+ supporting documentation or portions thereof, including modifications,
+ that you make.
+
+ EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
+ THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
+ INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+ FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
+
+"""
+
+__version__ = '1.0.8'
+
+import collections
+import os
+import re
+import sys
+import subprocess
+import functools
+import itertools
+
+### Globals & Constants
+
+# Helper for comparing two version number strings.
+# Based on the description of the PHP's version_compare():
+# http://php.net/manual/en/function.version-compare.php
+
+_ver_stages = {
+    # any string not found in this dict gets 0 assigned
+ 'dev': 10,
+ 'alpha': 20, 'a': 20,
+ 'beta': 30, 'b': 30,
+ 'c': 40,
+ 'RC': 50, 'rc': 50,
+    # numeric components get 100 assigned (see _comparable_version)
+ 'pl': 200, 'p': 200,
+}
+
+_component_re = re.compile(r'([0-9]+|[._+-])')
+
+def _comparable_version(version):
+ result = []
+ for v in _component_re.split(version):
+ if v not in '._+-':
+ try:
+ v = int(v, 10)
+ t = 100
+ except ValueError:
+ t = _ver_stages.get(v, 0)
+ result.extend((t, v))
+ return result
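+
+# Illustrative comparison (editor's note, not part of the original module):
+# numeric components compare numerically rather than lexicographically, which
+# is the point of this helper:
+#
+#     >>> _comparable_version('2.28') > _comparable_version('2.9')
+#     True
+#
+# whereas the plain string compare '2.28' > '2.9' is False.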
+
+### Platform specific APIs
+
+_libc_search = re.compile(b'(__libc_init)'
+ b'|'
+ b'(GLIBC_([0-9.]+))'
+ b'|'
+ br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)', re.ASCII)
+
+def libc_ver(executable=None, lib='', version='', chunksize=16384):
+
+ """ Tries to determine the libc version that the file executable
+ (which defaults to the Python interpreter) is linked against.
+
+        Returns a tuple of strings (lib, version) which default to the
+        given parameters in case the lookup fails.
+
+        Note that the function has intimate knowledge of how different
+        libc versions add symbols to the executable and thus is probably
+        only usable for executables compiled using gcc.
+
+ The file is read and scanned in chunks of chunksize bytes.
+
+ """
+ if not executable:
+ try:
+ ver = os.confstr('CS_GNU_LIBC_VERSION')
+ # parse 'glibc 2.28' as ('glibc', '2.28')
+ parts = ver.split(maxsplit=1)
+ if len(parts) == 2:
+ return tuple(parts)
+ except (AttributeError, ValueError, OSError):
+ # os.confstr() or CS_GNU_LIBC_VERSION value not available
+ pass
+
+ executable = sys.executable
+
+ V = _comparable_version
+ if hasattr(os.path, 'realpath'):
+ # Python 2.2 introduced os.path.realpath(); it is used
+ # here to work around problems with Cygwin not being
+ # able to open symlinks for reading
+ executable = os.path.realpath(executable)
+ with open(executable, 'rb') as f:
+ binary = f.read(chunksize)
+ pos = 0
+ while pos < len(binary):
+ if b'libc' in binary or b'GLIBC' in binary:
+ m = _libc_search.search(binary, pos)
+ else:
+ m = None
+ if not m or m.end() == len(binary):
+ chunk = f.read(chunksize)
+ if chunk:
+ binary = binary[max(pos, len(binary) - 1000):] + chunk
+ pos = 0
+ continue
+ if not m:
+ break
+ libcinit, glibc, glibcversion, so, threads, soversion = [
+ s.decode('latin1') if s is not None else s
+ for s in m.groups()]
+ if libcinit and not lib:
+ lib = 'libc'
+ elif glibc:
+ if lib != 'glibc':
+ lib = 'glibc'
+ version = glibcversion
+ elif V(glibcversion) > V(version):
+ version = glibcversion
+ elif so:
+ if lib != 'glibc':
+ lib = 'libc'
+ if soversion and (not version or V(soversion) > V(version)):
+ version = soversion
+ if threads and version[-len(threads):] != threads:
+ version = version + threads
+ pos = m.end()
+ return lib, version
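+
+# Illustrative usage (editor's note, not part of the original module): on a
+# glibc-based Linux system this typically returns a pair along these lines;
+# the exact version varies by distribution:
+#
+#     >>> libc_ver()                                   # doctest: +SKIP
+#     ('glibc', '2.31')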
+
+def _norm_version(version, build=''):
+
+ """ Normalize the version and build strings and return a single
+ version string using the format major.minor.build (or patchlevel).
+ """
+ l = version.split('.')
+ if build:
+ l.append(build)
+ try:
+ strings = list(map(str, map(int, l)))
+ except ValueError:
+ strings = l
+ version = '.'.join(strings[:3])
+ return version
+
+_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
+ r'.*'
+ r'\[.* ([\d.]+)\])')
+
+# Examples of VER command output:
+#
+# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
+# Windows XP: Microsoft Windows XP [Version 5.1.2600]
+# Windows Vista: Microsoft Windows [Version 6.0.6002]
+#
+# Note that the "Version" string gets localized on different
+# Windows versions.
+
+def _syscmd_ver(system='', release='', version='',
+
+ supported_platforms=('win32', 'win16', 'dos')):
+
+ """ Tries to figure out the OS version used and returns
+ a tuple (system, release, version).
+
+ It uses the "ver" shell command for this which is known
+ to exists on Windows, DOS. XXX Others too ?
+
+ In case this fails, the given parameters are used as
+ defaults.
+
+ """
+ if sys.platform not in supported_platforms:
+ return system, release, version
+
+ # Try some common cmd strings
+ import subprocess
+ for cmd in ('ver', 'command /c ver', 'cmd /c ver'):
+ try:
+ info = subprocess.check_output(cmd,
+ stdin=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ text=True,
+ shell=True)
+ except (OSError, subprocess.CalledProcessError) as why:
+ #print('Command %s failed: %s' % (cmd, why))
+ continue
+ else:
+ break
+ else:
+ return system, release, version
+
+ # Parse the output
+ info = info.strip()
+ m = _ver_output.match(info)
+ if m is not None:
+ system, release, version = m.groups()
+ # Strip trailing dots from version and release
+ if release[-1] == '.':
+ release = release[:-1]
+ if version[-1] == '.':
+ version = version[:-1]
+ # Normalize the version and build strings (eliminating additional
+ # zeros)
+ version = _norm_version(version)
+ return system, release, version
+
+_WIN32_CLIENT_RELEASES = {
+ (5, 0): "2000",
+ (5, 1): "XP",
+ # Strictly, 5.2 client is XP 64-bit, but platform.py historically
+ # has always called it 2003 Server
+ (5, 2): "2003Server",
+ (5, None): "post2003",
+
+ (6, 0): "Vista",
+ (6, 1): "7",
+ (6, 2): "8",
+ (6, 3): "8.1",
+ (6, None): "post8.1",
+
+ (10, 0): "10",
+ (10, None): "post10",
+}
+
+# Server release name lookup will default to client names if necessary
+_WIN32_SERVER_RELEASES = {
+ (5, 2): "2003Server",
+
+ (6, 0): "2008Server",
+ (6, 1): "2008ServerR2",
+ (6, 2): "2012Server",
+ (6, 3): "2012ServerR2",
+ (6, None): "post2012ServerR2",
+}
+
+def win32_is_iot():
+ return win32_edition() in ('IoTUAP', 'NanoServer', 'WindowsCoreHeadless', 'IoTEdgeOS')
+
+def win32_edition():
+ try:
+ try:
+ import winreg
+ except ImportError:
+ import _winreg as winreg
+ except ImportError:
+ pass
+ else:
+ try:
+ cvkey = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion'
+ with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, cvkey) as key:
+ return winreg.QueryValueEx(key, 'EditionId')[0]
+ except OSError:
+ pass
+
+ return None
+
+def win32_ver(release='', version='', csd='', ptype=''):
+ try:
+ from sys import getwindowsversion
+ except ImportError:
+ return release, version, csd, ptype
+
+ winver = getwindowsversion()
+ try:
+ major, minor, build = map(int, _syscmd_ver()[2].split('.'))
+ except ValueError:
+ major, minor, build = winver.platform_version or winver[:3]
+ version = '{0}.{1}.{2}'.format(major, minor, build)
+
+ release = (_WIN32_CLIENT_RELEASES.get((major, minor)) or
+ _WIN32_CLIENT_RELEASES.get((major, None)) or
+ release)
+
+ # getwindowsversion() reflect the compatibility mode Python is
+ # running under, and so the service pack value is only going to be
+ # valid if the versions match.
+ if winver[:2] == (major, minor):
+ try:
+ csd = 'SP{}'.format(winver.service_pack_major)
+ except AttributeError:
+ if csd[:13] == 'Service Pack ':
+ csd = 'SP' + csd[13:]
+
+ # VER_NT_SERVER = 3
+ if getattr(winver, 'product_type', None) == 3:
+ release = (_WIN32_SERVER_RELEASES.get((major, minor)) or
+ _WIN32_SERVER_RELEASES.get((major, None)) or
+ release)
+
+ try:
+ try:
+ import winreg
+ except ImportError:
+ import _winreg as winreg
+ except ImportError:
+ pass
+ else:
+ try:
+ cvkey = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion'
+ with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, cvkey) as key:
+ ptype = winreg.QueryValueEx(key, 'CurrentType')[0]
+ except OSError:
+ pass
+
+ return release, version, csd, ptype
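+
+# Illustrative behaviour (editor's note, not part of the original module): on
+# any non-Windows platform the sys.getwindowsversion import fails and the
+# given defaults are returned unchanged:
+#
+#     >>> win32_ver()                                  # doctest: +SKIP
+#     ('', '', '', '')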
+
+
+def _mac_ver_xml():
+ fn = '/System/Library/CoreServices/SystemVersion.plist'
+ if not os.path.exists(fn):
+ if 'SDKROOT' in os.environ:
+ fn = os.environ['SDKROOT'] + fn
+ if not os.path.exists(fn):
+ return None
+ else:
+ return None
+
+ try:
+ import plistlib
+ except ImportError:
+ return None
+
+ with open(fn, 'rb') as f:
+ pl = plistlib.load(f)
+ release = pl['ProductVersion']
+ versioninfo = ('', '', '')
+ machine = os.uname().machine
+ if machine in ('ppc', 'Power Macintosh'):
+ # Canonical name
+ machine = 'PowerPC'
+
+ return release, versioninfo, machine
+
+
+def mac_ver(release='', versioninfo=('', '', ''), machine=''):
+
+ """ Get macOS version information and return it as tuple (release,
+ versioninfo, machine) with versioninfo being a tuple (version,
+ dev_stage, non_release_version).
+
+ Entries which cannot be determined are set to the parameter values
+ which default to ''. All tuple entries are strings.
+ """
+
+ # First try reading the information from an XML file which should
+ # always be present
+ info = _mac_ver_xml()
+ if info is not None:
+ return info
+
+ # If that also doesn't work return the default values
+ return release, versioninfo, machine
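+
+# Illustrative behaviour (editor's note, not part of the original module):
+# anywhere but macOS the SystemVersion.plist lookup fails and the defaults
+# come back unchanged:
+#
+#     >>> mac_ver()                                    # doctest: +SKIP
+#     ('', ('', '', ''), '')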
+
+def _java_getprop(name, default):
+
+ from java.lang import System
+ try:
+ value = System.getProperty(name)
+ if value is None:
+ return default
+ return value
+ except AttributeError:
+ return default
+
+def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):
+
+ """ Version interface for Jython.
+
+ Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being
+ a tuple (vm_name, vm_release, vm_vendor) and osinfo being a
+ tuple (os_name, os_version, os_arch).
+
+ Values which cannot be determined are set to the defaults
+ given as parameters (which all default to '').
+
+ """
+ # Import the needed APIs
+ try:
+ import java.lang
+ except ImportError:
+ return release, vendor, vminfo, osinfo
+
+ vendor = _java_getprop('java.vendor', vendor)
+ release = _java_getprop('java.version', release)
+ vm_name, vm_release, vm_vendor = vminfo
+ vm_name = _java_getprop('java.vm.name', vm_name)
+ vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
+ vm_release = _java_getprop('java.vm.version', vm_release)
+ vminfo = vm_name, vm_release, vm_vendor
+ os_name, os_version, os_arch = osinfo
+ os_arch = _java_getprop('java.os.arch', os_arch)
+ os_name = _java_getprop('java.os.name', os_name)
+ os_version = _java_getprop('java.os.version', os_version)
+ osinfo = os_name, os_version, os_arch
+
+ return release, vendor, vminfo, osinfo
+
+### System name aliasing
+
+def system_alias(system, release, version):
+
+ """ Returns (system, release, version) aliased to common
+ marketing names used for some systems.
+
+ It also does some reordering of the information in some cases
+ where it would otherwise cause confusion.
+
+ """
+ if system == 'SunOS':
+ # Sun's OS
+ if release < '5':
+ # These releases use the old name SunOS
+ return system, release, version
+ # Modify release (marketing release = SunOS release - 3)
+ l = release.split('.')
+ if l:
+ try:
+ major = int(l[0])
+ except ValueError:
+ pass
+ else:
+ major = major - 3
+ l[0] = str(major)
+ release = '.'.join(l)
+ if release < '6':
+ system = 'Solaris'
+ else:
+ # XXX Whatever the new SunOS marketing name is...
+ system = 'Solaris'
+
+ elif system in ('win32', 'win16'):
+ # In case one of the other tricks
+ system = 'Windows'
+
+ # bpo-35516: Don't replace Darwin with macOS since input release and
+ # version arguments can be different than the currently running version.
+
+ return system, release, version
+
+### Various internal helpers
+
+def _platform(*args):
+
+ """ Helper to format the platform string in a filename
+ compatible format e.g. "system-version-machine".
+ """
+ # Format the platform string
+ platform = '-'.join(x.strip() for x in filter(len, args))
+
+ # Cleanup some possible filename obstacles...
+ platform = platform.replace(' ', '_')
+ platform = platform.replace('/', '-')
+ platform = platform.replace('\\', '-')
+ platform = platform.replace(':', '-')
+ platform = platform.replace(';', '-')
+ platform = platform.replace('"', '-')
+ platform = platform.replace('(', '-')
+ platform = platform.replace(')', '-')
+
+ # No need to report 'unknown' information...
+ platform = platform.replace('unknown', '')
+
+ # Fold '--'s and remove trailing '-'
+ while 1:
+ cleaned = platform.replace('--', '-')
+ if cleaned == platform:
+ break
+ platform = cleaned
+ while platform[-1] == '-':
+ platform = platform[:-1]
+
+ return platform
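+
+# Illustrative behaviour (editor's note, not part of the original module):
+# empty and 'unknown' fields are dropped and the rest joined with '-':
+#
+#     >>> _platform('Linux', '5.15.0', 'unknown', 'x86_64')
+#     'Linux-5.15.0-x86_64'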
+
+def _node(default=''):
+
+ """ Helper to determine the node name of this machine.
+ """
+ try:
+ import socket
+ except ImportError:
+ # No sockets...
+ return default
+ try:
+ return socket.gethostname()
+ except OSError:
+ # Still not working...
+ return default
+
+def _follow_symlinks(filepath):
+
+ """ In case filepath is a symlink, follow it until a
+ real file is reached.
+ """
+ filepath = os.path.abspath(filepath)
+ while os.path.islink(filepath):
+ filepath = os.path.normpath(
+ os.path.join(os.path.dirname(filepath), os.readlink(filepath)))
+ return filepath
+
+
+def _syscmd_file(target, default=''):
+
+ """ Interface to the system's file command.
+
+ The function uses the -b option of the file command to have it
+ omit the filename in its output. Follow the symlinks. It returns
+ default in case the command should fail.
+
+ """
+ if sys.platform in ('dos', 'win32', 'win16'):
+ # XXX Others too ?
+ return default
+
+ import subprocess
+ target = _follow_symlinks(target)
+ # "file" output is locale dependent: force the usage of the C locale
+ # to get deterministic behavior.
+ env = dict(os.environ, LC_ALL='C')
+ try:
+ # -b: do not prepend filenames to output lines (brief mode)
+ output = subprocess.check_output(['file', '-b', target],
+ stderr=subprocess.DEVNULL,
+ env=env)
+ except (OSError, subprocess.CalledProcessError):
+ return default
+ if not output:
+ return default
+ # With the C locale, the output should be mostly ASCII-compatible.
+ # Decode from Latin-1 to prevent Unicode decode error.
+ return output.decode('latin-1')
+
+### Information about the used architecture
+
+# Default values for architecture; non-empty strings override the
+# defaults given as parameters
+_default_architecture = {
+ 'win32': ('', 'WindowsPE'),
+ 'win16': ('', 'Windows'),
+ 'dos': ('', 'MSDOS'),
+}
+
+def architecture(executable=sys.executable, bits='', linkage=''):
+
+ """ Queries the given executable (defaults to the Python interpreter
+ binary) for various architecture information.
+
+ Returns a tuple (bits, linkage) which contains information about
+ the bit architecture and the linkage format used for the
+ executable. Both values are returned as strings.
+
+ Values that cannot be determined are returned as given by the
+ parameter presets. If bits is given as '', the sizeof(pointer)
+ (or sizeof(long) on Python version < 1.5.2) is used as
+ indicator for the supported pointer size.
+
+ The function relies on the system's "file" command to do the
+ actual work. This is available on most if not all Unix
+ platforms. On some non-Unix platforms where the "file" command
+ does not exist and the executable is set to the Python interpreter
+ binary defaults from _default_architecture are used.
+
+ """
+ # Use the sizeof(pointer) as default number of bits if nothing
+ # else is given as default.
+ if not bits:
+ import struct
+ size = struct.calcsize('P')
+ bits = str(size * 8) + 'bit'
+
+ # Get data from the 'file' system command
+ if executable:
+ fileout = _syscmd_file(executable, '')
+ else:
+ fileout = ''
+
+ if not fileout and \
+ executable == sys.executable:
+ # "file" command did not return anything; we'll try to provide
+ # some sensible defaults then...
+ if sys.platform in _default_architecture:
+ b, l = _default_architecture[sys.platform]
+ if b:
+ bits = b
+ if l:
+ linkage = l
+ return bits, linkage
+
+ if 'executable' not in fileout and 'shared object' not in fileout:
+ # Format not supported
+ return bits, linkage
+
+ # Bits
+ if '32-bit' in fileout:
+ bits = '32bit'
+ elif '64-bit' in fileout:
+ bits = '64bit'
+
+ # Linkage
+ if 'ELF' in fileout:
+ linkage = 'ELF'
+ elif 'PE' in fileout:
+ # E.g. Windows uses this format
+ if 'Windows' in fileout:
+ linkage = 'WindowsPE'
+ else:
+ linkage = 'PE'
+ elif 'COFF' in fileout:
+ linkage = 'COFF'
+ elif 'MS-DOS' in fileout:
+ linkage = 'MSDOS'
+ else:
+ # XXX the A.OUT format also falls under this class...
+ pass
+
+ return bits, linkage
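+
+# Illustrative usage (editor's note, not part of the original module): on a
+# typical 64-bit Linux build of CPython the "file" probe reports something
+# like:
+#
+#     >>> architecture()                               # doctest: +SKIP
+#     ('64bit', 'ELF')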
+
+
+def _get_machine_win32():
+ # Try to use the PROCESSOR_* environment variables
+ # available on Win XP and later; see
+ # http://support.microsoft.com/kb/888731 and
+ # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
+
+ # WOW64 processes mask the native architecture
+ return (
+ os.environ.get('PROCESSOR_ARCHITEW6432', '') or
+ os.environ.get('PROCESSOR_ARCHITECTURE', '')
+ )
+
+
+class _Processor:
+ @classmethod
+ def get(cls):
+ func = getattr(cls, f'get_{sys.platform}', cls.from_subprocess)
+ return func() or ''
+
+ def get_win32():
+ return os.environ.get('PROCESSOR_IDENTIFIER', _get_machine_win32())
+
+ def get_OpenVMS():
+ try:
+ import vms_lib
+ except ImportError:
+ pass
+ else:
+ csid, cpu_number = vms_lib.getsyi('SYI$_CPU', 0)
+ return 'Alpha' if cpu_number >= 128 else 'VAX'
+
+ def from_subprocess():
+ """
+ Fall back to `uname -p`
+ """
+ try:
+ return subprocess.check_output(
+ ['uname', '-p'],
+ stderr=subprocess.DEVNULL,
+ text=True,
+ ).strip()
+ except (OSError, subprocess.CalledProcessError):
+ pass
+
+
+def _unknown_as_blank(val):
+ return '' if val == 'unknown' else val
+
+
+### Portable uname() interface
+
+class uname_result(
+ collections.namedtuple(
+ "uname_result_base",
+ "system node release version machine")
+ ):
+ """
+ A uname_result that's largely compatible with a
+ simple namedtuple except that 'processor' is
+ resolved late and cached to avoid calling "uname"
+ except when needed.
+ """
+
+ _fields = ('system', 'node', 'release', 'version', 'machine', 'processor')
+
+ @functools.cached_property
+ def processor(self):
+ return _unknown_as_blank(_Processor.get())
+
+ def __iter__(self):
+ return itertools.chain(
+ super().__iter__(),
+ (self.processor,)
+ )
+
+ @classmethod
+ def _make(cls, iterable):
+ # override factory to affect length check
+ num_fields = len(cls._fields) - 1
+ result = cls.__new__(cls, *iterable)
+ if len(result) != num_fields + 1:
+ msg = f'Expected {num_fields} arguments, got {len(result)}'
+ raise TypeError(msg)
+ return result
+
+ def __getitem__(self, key):
+ return tuple(self)[key]
+
+ def __len__(self):
+ return len(tuple(iter(self)))
+
+ def __reduce__(self):
+ return uname_result, tuple(self)[:len(self._fields) - 1]
+
+
+_uname_cache = None
+
+
+def uname():
+
+ """ Fairly portable uname interface. Returns a tuple
+ of strings (system, node, release, version, machine, processor)
+ identifying the underlying platform.
+
+ Note that unlike the os.uname function this also returns
+ possible processor information as an additional tuple entry.
+
+ Entries which cannot be determined are set to ''.
+
+ """
+ global _uname_cache
+
+ if _uname_cache is not None:
+ return _uname_cache
+
+    # Get some info from the builtin os.uname API...
+ try:
+ system, node, release, version, machine = infos = os.uname()
+ except AttributeError:
+ system = sys.platform
+ node = _node()
+ release = version = machine = ''
+ infos = ()
+
+ if not any(infos):
+ # uname is not available
+
+ # Try win32_ver() on win32 platforms
+ if system == 'win32':
+ release, version, csd, ptype = win32_ver()
+ machine = machine or _get_machine_win32()
+
+ # Try the 'ver' system command available on some
+ # platforms
+ if not (release and version):
+ system, release, version = _syscmd_ver(system)
+ # Normalize system to what win32_ver() normally returns
+ # (_syscmd_ver() tends to return the vendor name as well)
+ if system == 'Microsoft Windows':
+ system = 'Windows'
+ elif system == 'Microsoft' and release == 'Windows':
+ # Under Windows Vista and Windows Server 2008,
+ # Microsoft changed the output of the ver command. The
+ # release is no longer printed. This causes the
+ # system and release to be misidentified.
+ system = 'Windows'
+ if '6.0' == version[:3]:
+ release = 'Vista'
+ else:
+ release = ''
+
+ # In case we still don't know anything useful, we'll try to
+ # help ourselves
+ if system in ('win32', 'win16'):
+ if not version:
+ if system == 'win32':
+ version = '32bit'
+ else:
+ version = '16bit'
+ system = 'Windows'
+
+ elif system[:4] == 'java':
+ release, vendor, vminfo, osinfo = java_ver()
+ system = 'Java'
+ version = ', '.join(vminfo)
+ if not version:
+ version = vendor
+
+ # System specific extensions
+ if system == 'OpenVMS':
+ # OpenVMS seems to have release and version mixed up
+ if not release or release == '0':
+ release = version
+ version = ''
+
+ # normalize name
+ if system == 'Microsoft' and release == 'Windows':
+ system = 'Windows'
+ release = 'Vista'
+
+ vals = system, node, release, version, machine
+ # Replace 'unknown' values with the more portable ''
+ _uname_cache = uname_result(*map(_unknown_as_blank, vals))
+ return _uname_cache
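+
+# Illustrative behaviour (editor's note, not part of the original module): the
+# result behaves like the os.uname() tuple plus a lazily computed processor
+# field, so named and positional access agree:
+#
+#     >>> u = uname()                                  # doctest: +SKIP
+#     >>> (u.system, u.machine) == (u[0], u[4])
+#     True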
+
+### Direct interfaces to some of the uname() return values
+
+def system():
+
+ """ Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname().system
+
+def node():
+
+ """ Returns the computer's network name (which may not be fully
+ qualified)
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname().node
+
+def release():
+
+ """ Returns the system's release, e.g. '2.2.0' or 'NT'
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname().release
+
+def version():
+
+ """ Returns the system's release version, e.g. '#3 on degas'
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname().version
+
+def machine():
+
+ """ Returns the machine type, e.g. 'i386'
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname().machine
+
+def processor():
+
+ """ Returns the (true) processor name, e.g. 'amdk6'
+
+ An empty string is returned if the value cannot be
+ determined. Note that many platforms do not provide this
+ information or simply return the same value as for machine(),
+ e.g. NetBSD does this.
+
+ """
+ return uname().processor
+
+### Various APIs for extracting information from sys.version
+
+_sys_version_parser = re.compile(
+ r'([\w.+]+)\s*' # "version"
+ r'\(#?([^,]+)' # "(#buildno"
+ r'(?:,\s*([\w ]*)' # ", builddate"
+ r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)"
+ r'\[([^\]]+)\]?', re.ASCII) # "[compiler]"
+
+_ironpython_sys_version_parser = re.compile(
+ r'IronPython\s*'
+ r'([\d\.]+)'
+ r'(?: \(([\d\.]+)\))?'
+ r' on (.NET [\d\.]+)', re.ASCII)
+
+# IronPython covering 2.6 and 2.7
+_ironpython26_sys_version_parser = re.compile(
+ r'([\d.]+)\s*'
+ r'\(IronPython\s*'
+ r'[\d.]+\s*'
+ r'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
+)
+
+_pypy_sys_version_parser = re.compile(
+ r'([\w.+]+)\s*'
+ r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
+ r'\[PyPy [^\]]+\]?')
+
+_sys_version_cache = {}
+
+def _sys_version(sys_version=None):
+
+ """ Returns a parsed version of Python's sys.version as tuple
+ (name, version, branch, revision, buildno, builddate, compiler)
+ referring to the Python implementation name, version, branch,
+ revision, build number, build date/time as string and the compiler
+ identification string.
+
+ Note that unlike the Python sys.version, the returned value
+ for the Python version will always include the patchlevel (it
+ defaults to '.0').
+
+ The function returns empty strings for tuple entries that
+ cannot be determined.
+
+ sys_version may be given to parse an alternative version
+ string, e.g. if the version was read from a different Python
+ interpreter.
+
+ """
+ # Get the Python version
+ if sys_version is None:
+ sys_version = sys.version
+
+ # Try the cache first
+ result = _sys_version_cache.get(sys_version, None)
+ if result is not None:
+ return result
+
+ # Parse it
+ if 'IronPython' in sys_version:
+ # IronPython
+ name = 'IronPython'
+ if sys_version.startswith('IronPython'):
+ match = _ironpython_sys_version_parser.match(sys_version)
+ else:
+ match = _ironpython26_sys_version_parser.match(sys_version)
+
+ if match is None:
+ raise ValueError(
+ 'failed to parse IronPython sys.version: %s' %
+ repr(sys_version))
+
+ version, alt_version, compiler = match.groups()
+ buildno = ''
+ builddate = ''
+
+ elif sys.platform.startswith('java'):
+ # Jython
+ name = 'Jython'
+ match = _sys_version_parser.match(sys_version)
+ if match is None:
+ raise ValueError(
+ 'failed to parse Jython sys.version: %s' %
+ repr(sys_version))
+ version, buildno, builddate, buildtime, _ = match.groups()
+ if builddate is None:
+ builddate = ''
+ compiler = sys.platform
+
+ elif "PyPy" in sys_version:
+ # PyPy
+ name = "PyPy"
+ match = _pypy_sys_version_parser.match(sys_version)
+ if match is None:
+ raise ValueError("failed to parse PyPy sys.version: %s" %
+ repr(sys_version))
+ version, buildno, builddate, buildtime = match.groups()
+ compiler = ""
+
+ else:
+ # CPython
+ match = _sys_version_parser.match(sys_version)
+ if match is None:
+ raise ValueError(
+ 'failed to parse CPython sys.version: %s' %
+ repr(sys_version))
+ version, buildno, builddate, buildtime, compiler = \
+ match.groups()
+ name = 'CPython'
+ if builddate is None:
+ builddate = ''
+ elif buildtime:
+ builddate = builddate + ' ' + buildtime
+
+ if hasattr(sys, '_git'):
+ _, branch, revision = sys._git
+ elif hasattr(sys, '_mercurial'):
+ _, branch, revision = sys._mercurial
+ else:
+ branch = ''
+ revision = ''
+
+ # Add the patchlevel version if missing
+ l = version.split('.')
+ if len(l) == 2:
+ l.append('0')
+ version = '.'.join(l)
+
+ # Build and cache the result
+ result = (name, version, branch, revision, buildno, builddate, compiler)
+ _sys_version_cache[sys_version] = result
+ return result
+
+def python_implementation():
+
+ """ Returns a string identifying the Python implementation.
+
+ Currently, the following implementations are identified:
+ 'CPython' (C implementation of Python),
+ 'IronPython' (.NET implementation of Python),
+ 'Jython' (Java implementation of Python),
+ 'PyPy' (Python implementation of Python).
+
+ """
+ return _sys_version()[0]
+
+def python_version():
+
+ """ Returns the Python version as string 'major.minor.patchlevel'
+
+ Note that unlike the Python sys.version, the returned value
+ will always include the patchlevel (it defaults to 0).
+
+ """
+ return _sys_version()[1]
+
+def python_version_tuple():
+
+ """ Returns the Python version as tuple (major, minor, patchlevel)
+ of strings.
+
+ Note that unlike the Python sys.version, the returned value
+ will always include the patchlevel (it defaults to 0).
+
+ """
+ return tuple(_sys_version()[1].split('.'))
+
+def python_branch():
+
+ """ Returns a string identifying the Python implementation
+ branch.
+
+ For CPython this is the SCM branch from which the
+ Python binary was built.
+
+ If not available, an empty string is returned.
+
+ """
+
+ return _sys_version()[2]
+
+def python_revision():
+
+ """ Returns a string identifying the Python implementation
+ revision.
+
+ For CPython this is the SCM revision from which the
+ Python binary was built.
+
+ If not available, an empty string is returned.
+
+ """
+ return _sys_version()[3]
+
+def python_build():
+
+ """ Returns a tuple (buildno, builddate) stating the Python
+ build number and date as strings.
+
+ """
+ return _sys_version()[4:6]
+
+def python_compiler():
+
+ """ Returns a string identifying the compiler used for compiling
+ Python.
+
+ """
+ return _sys_version()[6]
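+
+# Illustrative usage of the accessors above (editor's note, not part of the
+# original module); the exact values depend on the running interpreter:
+#
+#     >>> python_implementation(), python_version()    # doctest: +SKIP
+#     ('CPython', '3.10.4')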
+
+### The Opus Magnum of platform strings :-)
+
+_platform_cache = {}
+
+def platform(aliased=0, terse=0):
+
+ """ Returns a single string identifying the underlying platform
+ with as much useful information as possible (but no more :).
+
+ The output is intended to be human readable rather than
+ machine parseable. It may look different on different
+ platforms and this is intended.
+
+ If "aliased" is true, the function will use aliases for
+ various platforms that report system names which differ from
+ their common names, e.g. SunOS will be reported as
+ Solaris. The system_alias() function is used to implement
+ this.
+
+ Setting terse to true causes the function to return only the
+ absolute minimum information needed to identify the platform.
+
+ """
+ result = _platform_cache.get((aliased, terse), None)
+ if result is not None:
+ return result
+
+ # Get uname information and then apply platform specific cosmetics
+ # to it...
+ system, node, release, version, machine, processor = uname()
+ if machine == processor:
+ processor = ''
+ if aliased:
+ system, release, version = system_alias(system, release, version)
+
+ if system == 'Darwin':
+ # macOS (darwin kernel)
+ macos_release = mac_ver()[0]
+ if macos_release:
+ system = 'macOS'
+ release = macos_release
+
+ if system == 'Windows':
+ # MS platforms
+ rel, vers, csd, ptype = win32_ver(version)
+ if terse:
+ platform = _platform(system, release)
+ else:
+ platform = _platform(system, release, version, csd)
+
+ elif system in ('Linux',):
+ # check for libc vs. glibc
+ libcname, libcversion = libc_ver()
+ platform = _platform(system, release, machine, processor,
+ 'with',
+ libcname+libcversion)
+ elif system == 'Java':
+ # Java platforms
+ r, v, vminfo, (os_name, os_version, os_arch) = java_ver()
+ if terse or not os_name:
+ platform = _platform(system, release, version)
+ else:
+ platform = _platform(system, release, version,
+ 'on',
+ os_name, os_version, os_arch)
+
+ else:
+ # Generic handler
+ if terse:
+ platform = _platform(system, release)
+ else:
+ bits, linkage = architecture(sys.executable)
+ platform = _platform(system, release, machine,
+ processor, bits, linkage)
+
+ _platform_cache[(aliased, terse)] = platform
+ return platform
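+
+# Illustrative usage (editor's note, not part of the original module): terse
+# mode trims the result to the bare system/release pair, e.g. something like
+# 'Linux-5.15.0' instead of the full 'Linux-5.15.0-x86_64-with-glibc2.31':
+#
+#     >>> platform(terse=1)                            # doctest: +SKIP
+#     'Linux-5.15.0'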
+
+### freedesktop.org os-release standard
+# https://www.freedesktop.org/software/systemd/man/os-release.html
+
+# NAME=value with optional quotes (' or "). The regular expression is less
+# strict than shell lexer, but that's ok.
+_os_release_line = re.compile(
+ "^(?P[a-zA-Z0-9_]+)=(?P[\"\']?)(?P.*)(?P=quote)$"
+)
+# unescape five special characters mentioned in the standard
+_os_release_unescape = re.compile(r"\\([\\\$\"\'`])")
+# /etc takes precedence over /usr/lib
+_os_release_candidates = ("/etc/os-release", "/usr/lib/os-release")
+_os_release_cache = None
+
+
+def _parse_os_release(lines):
+ # These fields are mandatory fields with well-known defaults
+ # in practice all Linux distributions override NAME, ID, and PRETTY_NAME.
+ info = {
+ "NAME": "Linux",
+ "ID": "linux",
+ "PRETTY_NAME": "Linux",
+ }
+
+ for line in lines:
+ mo = _os_release_line.match(line)
+ if mo is not None:
+ info[mo.group('name')] = _os_release_unescape.sub(
+ r"\1", mo.group('value')
+ )
+
+ return info
+
+
+def freedesktop_os_release():
+ """Return operation system identification from freedesktop.org os-release
+ """
+ global _os_release_cache
+
+ if _os_release_cache is None:
+ errno = None
+ for candidate in _os_release_candidates:
+ try:
+ with open(candidate, encoding="utf-8") as f:
+ _os_release_cache = _parse_os_release(f)
+ break
+ except OSError as e:
+ errno = e.errno
+ else:
+ raise OSError(
+ errno,
+ f"Unable to read files {', '.join(_os_release_candidates)}"
+ )
+
+ return _os_release_cache.copy()
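+
+# Illustrative usage (editor's note, not part of the original module): on a
+# distribution shipping /etc/os-release the mandatory keys are always present:
+#
+#     >>> freedesktop_os_release()['ID']               # doctest: +SKIP
+#     'debian'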
+
+
+### Command line interface
+
+if __name__ == '__main__':
+ # Default is to print the aliased verbose platform string
+ terse = ('terse' in sys.argv or '--terse' in sys.argv)
+ aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
+ print(platform(aliased, terse))
+ sys.exit(0)
diff --git a/infer_4_37_2/lib/python3.10/plistlib.py b/infer_4_37_2/lib/python3.10/plistlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6c997efe9c5f51b4bf5f0f43656e0ccca977f58
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/plistlib.py
@@ -0,0 +1,911 @@
+r"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
+
+The property list (.plist) file format is a simple XML pickle supporting
+basic object types, like dictionaries, lists, numbers and strings.
+Usually the top level object is a dictionary.
+
+To write out a plist file, use the dump(value, file)
+function. 'value' is the top level object, 'file' is
+a (writable) file object.
+
+To parse a plist from a file, use the load(file) function,
+with a (readable) file object as the only argument. It
+returns the top level object (again, usually a dictionary).
+
+To work with plist data in bytes objects, you can use loads()
+and dumps().
+
+Values can be strings, integers, floats, booleans, tuples, lists,
+dictionaries (but only with string keys), Data, bytes, bytearray, or
+datetime.datetime objects.
+
+Generate Plist example:
+
+ import datetime
+ import plistlib
+
+ pl = dict(
+ aString = "Doodah",
+ aList = ["A", "B", 12, 32.1, [1, 2, 3]],
+ aFloat = 0.1,
+ anInt = 728,
+ aDict = dict(
+ anotherString = "",
+ aThirdString = "M\xe4ssig, Ma\xdf",
+ aTrueValue = True,
+ aFalseValue = False,
+ ),
+ someData = b"",
+ someMoreData = b"" * 10,
+ aDate = datetime.datetime.now()
+ )
+ print(plistlib.dumps(pl).decode())
+
+Parse Plist example:
+
+ import plistlib
+
+    plist = b'''<plist version="1.0">
+    <dict>
+        <key>foo</key>
+        <string>bar</string>
+    </dict>
+    </plist>'''
+ pl = plistlib.loads(plist)
+ print(pl["foo"])
+"""
+__all__ = [
+ "InvalidFileException", "FMT_XML", "FMT_BINARY", "load", "dump", "loads", "dumps", "UID"
+]
+
+import binascii
+import codecs
+import datetime
+import enum
+from io import BytesIO
+import itertools
+import os
+import re
+import struct
+from xml.parsers.expat import ParserCreate
+
+
+PlistFormat = enum.Enum('PlistFormat', 'FMT_XML FMT_BINARY', module=__name__)
+globals().update(PlistFormat.__members__)
+
+
+class UID:
+ def __init__(self, data):
+ if not isinstance(data, int):
+ raise TypeError("data must be an int")
+ if data >= 1 << 64:
+ raise ValueError("UIDs cannot be >= 2**64")
+ if data < 0:
+ raise ValueError("UIDs must be positive")
+ self.data = data
+
+ def __index__(self):
+ return self.data
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self.data))
+
+ def __reduce__(self):
+ return self.__class__, (self.data,)
+
+ def __eq__(self, other):
+ if not isinstance(other, UID):
+ return NotImplemented
+ return self.data == other.data
+
+ def __hash__(self):
+ return hash(self.data)
+
+#
+# XML support
+#
+
+
+# XML 'header'
+PLISTHEADER = b"""\
+
+
+"""
+
+
+# Regex to find any control chars, except for \t \n and \r
+_controlCharPat = re.compile(
+ r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
+ r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
+
+def _encode_base64(s, maxlinelength=76):
+ # copied from base64.encodebytes(), with added maxlinelength argument
+ maxbinsize = (maxlinelength//4)*3
+ pieces = []
+ for i in range(0, len(s), maxbinsize):
+ chunk = s[i : i + maxbinsize]
+ pieces.append(binascii.b2a_base64(chunk))
+ return b''.join(pieces)
+
+def _decode_base64(s):
+ if isinstance(s, str):
+ return binascii.a2b_base64(s.encode("utf-8"))
+
+ else:
+ return binascii.a2b_base64(s)
+
+# Contents should conform to a subset of ISO 8601
+# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units
+# may be omitted with a loss of precision)
+_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z", re.ASCII)
+
+
+def _date_from_string(s):
+ order = ('year', 'month', 'day', 'hour', 'minute', 'second')
+ gd = _dateParser.match(s).groupdict()
+ lst = []
+ for key in order:
+ val = gd[key]
+ if val is None:
+ break
+ lst.append(int(val))
+ return datetime.datetime(*lst)
+
+
+def _date_to_string(d):
+ return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
+ d.year, d.month, d.day,
+ d.hour, d.minute, d.second
+ )
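+
+# Illustrative round trip for the two date helpers above (editor's note, not
+# part of the original module):
+#
+#     >>> _date_from_string('2001-02-03T04:05:06Z')
+#     datetime.datetime(2001, 2, 3, 4, 5, 6)
+#     >>> _date_to_string(datetime.datetime(2001, 2, 3, 4, 5, 6))
+#     '2001-02-03T04:05:06Z'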
+
+def _escape(text):
+ m = _controlCharPat.search(text)
+ if m is not None:
+ raise ValueError("strings can't contain control characters; "
+ "use bytes instead")
+ text = text.replace("\r\n", "\n") # convert DOS line endings
+ text = text.replace("\r", "\n") # convert Mac line endings
+ text = text.replace("&", "&") # escape '&'
+ text = text.replace("<", "<") # escape '<'
+ text = text.replace(">", ">") # escape '>'
+ return text
+
+class _PlistParser:
+ def __init__(self, dict_type):
+ self.stack = []
+ self.current_key = None
+ self.root = None
+ self._dict_type = dict_type
+
+ def parse(self, fileobj):
+ self.parser = ParserCreate()
+ self.parser.StartElementHandler = self.handle_begin_element
+ self.parser.EndElementHandler = self.handle_end_element
+ self.parser.CharacterDataHandler = self.handle_data
+ self.parser.EntityDeclHandler = self.handle_entity_decl
+ self.parser.ParseFile(fileobj)
+ return self.root
+
+ def handle_entity_decl(self, entity_name, is_parameter_entity, value, base, system_id, public_id, notation_name):
+        # Reject plist files with entity declarations to avoid XML vulnerabilities in expat.
+        # Regular plist files don't contain those declarations, and Apple's plutil tool does not
+        # accept them either.
+ raise InvalidFileException("XML entity declarations are not supported in plist files")
+
+ def handle_begin_element(self, element, attrs):
+ self.data = []
+ handler = getattr(self, "begin_" + element, None)
+ if handler is not None:
+ handler(attrs)
+
+ def handle_end_element(self, element):
+ handler = getattr(self, "end_" + element, None)
+ if handler is not None:
+ handler()
+
+ def handle_data(self, data):
+ self.data.append(data)
+
+ def add_object(self, value):
+ if self.current_key is not None:
+ if not isinstance(self.stack[-1], type({})):
+ raise ValueError("unexpected element at line %d" %
+ self.parser.CurrentLineNumber)
+ self.stack[-1][self.current_key] = value
+ self.current_key = None
+ elif not self.stack:
+ # this is the root object
+ self.root = value
+ else:
+ if not isinstance(self.stack[-1], type([])):
+ raise ValueError("unexpected element at line %d" %
+ self.parser.CurrentLineNumber)
+ self.stack[-1].append(value)
+
+ def get_data(self):
+ data = ''.join(self.data)
+ self.data = []
+ return data
+
+ # element handlers
+
+ def begin_dict(self, attrs):
+ d = self._dict_type()
+ self.add_object(d)
+ self.stack.append(d)
+
+ def end_dict(self):
+ if self.current_key:
+ raise ValueError("missing value for key '%s' at line %d" %
+ (self.current_key,self.parser.CurrentLineNumber))
+ self.stack.pop()
+
+ def end_key(self):
+ if self.current_key or not isinstance(self.stack[-1], type({})):
+ raise ValueError("unexpected key at line %d" %
+ self.parser.CurrentLineNumber)
+ self.current_key = self.get_data()
+
+ def begin_array(self, attrs):
+ a = []
+ self.add_object(a)
+ self.stack.append(a)
+
+ def end_array(self):
+ self.stack.pop()
+
+ def end_true(self):
+ self.add_object(True)
+
+ def end_false(self):
+ self.add_object(False)
+
+ def end_integer(self):
+ raw = self.get_data()
+ if raw.startswith('0x') or raw.startswith('0X'):
+ self.add_object(int(raw, 16))
+ else:
+ self.add_object(int(raw))
+
+ def end_real(self):
+ self.add_object(float(self.get_data()))
+
+ def end_string(self):
+ self.add_object(self.get_data())
+
+ def end_data(self):
+ self.add_object(_decode_base64(self.get_data()))
+
+ def end_date(self):
+ self.add_object(_date_from_string(self.get_data()))
+
+
+class _DumbXMLWriter:
+ def __init__(self, file, indent_level=0, indent="\t"):
+ self.file = file
+ self.stack = []
+ self._indent_level = indent_level
+ self.indent = indent
+
+ def begin_element(self, element):
+ self.stack.append(element)
+ self.writeln("<%s>" % element)
+ self._indent_level += 1
+
+ def end_element(self, element):
+ assert self._indent_level > 0
+ assert self.stack.pop() == element
+ self._indent_level -= 1
+ self.writeln("%s>" % element)
+
+ def simple_element(self, element, value=None):
+ if value is not None:
+ value = _escape(value)
+ self.writeln("<%s>%s%s>" % (element, value, element))
+
+ else:
+ self.writeln("<%s/>" % element)
+
+ def writeln(self, line):
+ if line:
+ # plist has fixed encoding of utf-8
+
+ # XXX: is this test needed?
+ if isinstance(line, str):
+ line = line.encode('utf-8')
+ self.file.write(self._indent_level * self.indent)
+ self.file.write(line)
+ self.file.write(b'\n')
+
+
+class _PlistWriter(_DumbXMLWriter):
+ def __init__(
+ self, file, indent_level=0, indent=b"\t", writeHeader=1,
+ sort_keys=True, skipkeys=False):
+
+ if writeHeader:
+ file.write(PLISTHEADER)
+ _DumbXMLWriter.__init__(self, file, indent_level, indent)
+ self._sort_keys = sort_keys
+ self._skipkeys = skipkeys
+
+ def write(self, value):
+ self.writeln("")
+ self.write_value(value)
+ self.writeln("")
+
+ def write_value(self, value):
+ if isinstance(value, str):
+ self.simple_element("string", value)
+
+ elif value is True:
+ self.simple_element("true")
+
+ elif value is False:
+ self.simple_element("false")
+
+ elif isinstance(value, int):
+ if -1 << 63 <= value < 1 << 64:
+ self.simple_element("integer", "%d" % value)
+ else:
+ raise OverflowError(value)
+
+ elif isinstance(value, float):
+ self.simple_element("real", repr(value))
+
+ elif isinstance(value, dict):
+ self.write_dict(value)
+
+ elif isinstance(value, (bytes, bytearray)):
+ self.write_bytes(value)
+
+ elif isinstance(value, datetime.datetime):
+ self.simple_element("date", _date_to_string(value))
+
+ elif isinstance(value, (tuple, list)):
+ self.write_array(value)
+
+ else:
+ raise TypeError("unsupported type: %s" % type(value))
+
+ def write_bytes(self, data):
+ self.begin_element("data")
+ self._indent_level -= 1
+ maxlinelength = max(
+ 16,
+ 76 - len(self.indent.replace(b"\t", b" " * 8) * self._indent_level))
+
+ for line in _encode_base64(data, maxlinelength).split(b"\n"):
+ if line:
+ self.writeln(line)
+ self._indent_level += 1
+ self.end_element("data")
+
+ def write_dict(self, d):
+ if d:
+ self.begin_element("dict")
+ if self._sort_keys:
+ items = sorted(d.items())
+ else:
+ items = d.items()
+
+ for key, value in items:
+ if not isinstance(key, str):
+ if self._skipkeys:
+ continue
+ raise TypeError("keys must be strings")
+ self.simple_element("key", key)
+ self.write_value(value)
+ self.end_element("dict")
+
+ else:
+ self.simple_element("dict")
+
+ def write_array(self, array):
+ if array:
+ self.begin_element("array")
+ for value in array:
+ self.write_value(value)
+ self.end_element("array")
+
+ else:
+ self.simple_element("array")
+
+
+def _is_fmt_xml(header):
+    prefixes = (b'<?xml', b'<plist')
+
+    for pfx in prefixes:
+        if header.startswith(pfx):
+            return True
+
+    # Also check for alternative XML encodings, this is slightly
+    # overkill because the Apple tools (and plistlib) will not
+    # generate files with these encodings.
+    for bom, encoding in (
+            (codecs.BOM_UTF8, "utf-8"),
+            (codecs.BOM_UTF16_BE, "utf-16-be"),
+            (codecs.BOM_UTF16_LE, "utf-16-le"),
+            # expat does not support utf-32
+            #(codecs.BOM_UTF32_BE, "utf-32-be"),
+            #(codecs.BOM_UTF32_LE, "utf-32-le"),
+    ):
+        if not header.startswith(bom):
+            continue
+
+        for start in prefixes:
+            prefix = bom + start.decode('ascii').encode(encoding)
+            if header[:len(prefix)] == prefix:
+                return True
+
+    return False
+
+
+#
+# Binary Plist
+#
+
+
+class InvalidFileException (ValueError):
+    def __init__(self, message="Invalid file"):
+        ValueError.__init__(self, message)
+
+_BINARY_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'}
+
+_undefined = object()
+
+class _BinaryPlistParser:
+    """
+    Read or write a binary plist file, following the description of the
+    binary format.  Raise InvalidFileException in case of error, otherwise
+    return the root object.
+
+    see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c
+    """
+    def __init__(self, dict_type):
+        self._dict_type = dict_type
+
+    def parse(self, fp):
+        try:
+            # The basic file format:
+            # HEADER
+            # object...
+            # refid->offset...
+            # TRAILER
+ self._fp = fp
+ self._fp.seek(-32, os.SEEK_END)
+ trailer = self._fp.read(32)
+ if len(trailer) != 32:
+ raise InvalidFileException()
+ (
+ offset_size, self._ref_size, num_objects, top_object,
+ offset_table_offset
+ ) = struct.unpack('>6xBBQQQ', trailer)
+ self._fp.seek(offset_table_offset)
+ self._object_offsets = self._read_ints(num_objects, offset_size)
+ self._objects = [_undefined] * num_objects
+ return self._read_object(top_object)
+
+ except (OSError, IndexError, struct.error, OverflowError,
+ ValueError):
+ raise InvalidFileException()
+
+ def _get_size(self, tokenL):
+ """ return the size of the next object."""
+ if tokenL == 0xF:
+ m = self._fp.read(1)[0] & 0x3
+ s = 1 << m
+ f = '>' + _BINARY_FORMAT[s]
+ return struct.unpack(f, self._fp.read(s))[0]
+
+ return tokenL
+
+ def _read_ints(self, n, size):
+ data = self._fp.read(size * n)
+ if size in _BINARY_FORMAT:
+ return struct.unpack(f'>{n}{_BINARY_FORMAT[size]}', data)
+ else:
+ if not size or len(data) != size * n:
+ raise InvalidFileException()
+ return tuple(int.from_bytes(data[i: i + size], 'big')
+ for i in range(0, size * n, size))
+
+ def _read_refs(self, n):
+ return self._read_ints(n, self._ref_size)
+
+ def _read_object(self, ref):
+ """
+ read the object by reference.
+
+ May recursively read sub-objects (content of an array/dict/set)
+ """
+ result = self._objects[ref]
+ if result is not _undefined:
+ return result
+
+ offset = self._object_offsets[ref]
+ self._fp.seek(offset)
+ token = self._fp.read(1)[0]
+ tokenH, tokenL = token & 0xF0, token & 0x0F
+
+ if token == 0x00:
+ result = None
+
+ elif token == 0x08:
+ result = False
+
+ elif token == 0x09:
+ result = True
+
+ # The referenced source code also mentions URL (0x0c, 0x0d) and
+ # UUID (0x0e), but neither can be generated using the Cocoa libraries.
+
+ elif token == 0x0f:
+ result = b''
+
+ elif tokenH == 0x10: # int
+ result = int.from_bytes(self._fp.read(1 << tokenL),
+ 'big', signed=tokenL >= 3)
+
+ elif token == 0x22: # real
+ result = struct.unpack('>f', self._fp.read(4))[0]
+
+ elif token == 0x23: # real
+ result = struct.unpack('>d', self._fp.read(8))[0]
+
+ elif token == 0x33: # date
+ f = struct.unpack('>d', self._fp.read(8))[0]
+ # timestamp 0 of binary plists corresponds to 1/1/2001
+ # (year of Mac OS X 10.0), instead of 1/1/1970.
+ result = (datetime.datetime(2001, 1, 1) +
+ datetime.timedelta(seconds=f))
+
+ elif tokenH == 0x40: # data
+ s = self._get_size(tokenL)
+ result = self._fp.read(s)
+ if len(result) != s:
+ raise InvalidFileException()
+
+ elif tokenH == 0x50: # ascii string
+ s = self._get_size(tokenL)
+ data = self._fp.read(s)
+ if len(data) != s:
+ raise InvalidFileException()
+ result = data.decode('ascii')
+
+ elif tokenH == 0x60: # unicode string
+ s = self._get_size(tokenL) * 2
+ data = self._fp.read(s)
+ if len(data) != s:
+ raise InvalidFileException()
+ result = data.decode('utf-16be')
+
+ elif tokenH == 0x80: # UID
+ # used by Key-Archiver plist files
+ result = UID(int.from_bytes(self._fp.read(1 + tokenL), 'big'))
+
+ elif tokenH == 0xA0: # array
+ s = self._get_size(tokenL)
+ obj_refs = self._read_refs(s)
+ result = []
+ self._objects[ref] = result
+ result.extend(self._read_object(x) for x in obj_refs)
+
+ # tokenH == 0xB0 is documented as 'ordset', but is not actually
+ # implemented in the Apple reference code.
+
+ # tokenH == 0xC0 is documented as 'set', but sets cannot be used in
+ # plists.
+
+ elif tokenH == 0xD0: # dict
+ s = self._get_size(tokenL)
+ key_refs = self._read_refs(s)
+ obj_refs = self._read_refs(s)
+ result = self._dict_type()
+ self._objects[ref] = result
+ try:
+ for k, o in zip(key_refs, obj_refs):
+ result[self._read_object(k)] = self._read_object(o)
+ except TypeError:
+ raise InvalidFileException()
+ else:
+ raise InvalidFileException()
+
+ self._objects[ref] = result
+ return result
+
+def _count_to_size(count):
+ if count < 1 << 8:
+ return 1
+
+ elif count < 1 << 16:
+ return 2
+
+ elif count < 1 << 32:
+ return 4
+
+ else:
+ return 8
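+
+# Illustrative mapping (editor's note, not part of the original module): the
+# count picks the smallest unsigned integer width that can represent it:
+#
+#     >>> [_count_to_size(n) for n in (255, 256, 65536, 1 << 32)]
+#     [1, 2, 4, 8]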
+
+_scalars = (str, int, float, datetime.datetime, bytes)
+
+class _BinaryPlistWriter (object):
+ def __init__(self, fp, sort_keys, skipkeys):
+ self._fp = fp
+ self._sort_keys = sort_keys
+ self._skipkeys = skipkeys
+
+ def write(self, value):
+
+ # Flattened object list:
+ self._objlist = []
+
+ # Mappings from object->objectid
+ # First dict has (type(object), object) as the key,
+ # second dict is used when object is not hashable and
+ # has id(object) as the key.
+ self._objtable = {}
+ self._objidtable = {}
+
+ # Create list of all objects in the plist
+ self._flatten(value)
+
+ # Size of object references in serialized containers
+ # depends on the number of objects in the plist.
+ num_objects = len(self._objlist)
+ self._object_offsets = [0]*num_objects
+ self._ref_size = _count_to_size(num_objects)
+
+ self._ref_format = _BINARY_FORMAT[self._ref_size]
+
+ # Write file header
+ self._fp.write(b'bplist00')
+
+ # Write object list
+ for obj in self._objlist:
+ self._write_object(obj)
+
+ # Write refnum->object offset table
+ top_object = self._getrefnum(value)
+ offset_table_offset = self._fp.tell()
+ offset_size = _count_to_size(offset_table_offset)
+ offset_format = '>' + _BINARY_FORMAT[offset_size] * num_objects
+ self._fp.write(struct.pack(offset_format, *self._object_offsets))
+
+ # Write trailer
+ sort_version = 0
+ trailer = (
+ sort_version, offset_size, self._ref_size, num_objects,
+ top_object, offset_table_offset
+ )
+ self._fp.write(struct.pack('>5xBBBQQQ', *trailer))
+
+ def _flatten(self, value):
+ # First check if the object is in the object table, not used for
+ # containers to ensure that two subcontainers with the same contents
+ # will be serialized as distinct values.
+ if isinstance(value, _scalars):
+ if (type(value), value) in self._objtable:
+ return
+
+ elif id(value) in self._objidtable:
+ return
+
+ # Add to objectreference map
+ refnum = len(self._objlist)
+ self._objlist.append(value)
+ if isinstance(value, _scalars):
+ self._objtable[(type(value), value)] = refnum
+ else:
+ self._objidtable[id(value)] = refnum
+
+ # And finally recurse into containers
+ if isinstance(value, dict):
+ keys = []
+ values = []
+ items = value.items()
+ if self._sort_keys:
+ items = sorted(items)
+
+ for k, v in items:
+ if not isinstance(k, str):
+ if self._skipkeys:
+ continue
+ raise TypeError("keys must be strings")
+ keys.append(k)
+ values.append(v)
+
+ for o in itertools.chain(keys, values):
+ self._flatten(o)
+
+ elif isinstance(value, (list, tuple)):
+ for o in value:
+ self._flatten(o)
+
+ def _getrefnum(self, value):
+ if isinstance(value, _scalars):
+ return self._objtable[(type(value), value)]
+ else:
+ return self._objidtable[id(value)]
+
+ def _write_size(self, token, size):
+ if size < 15:
+ self._fp.write(struct.pack('>B', token | size))
+
+ elif size < 1 << 8:
+ self._fp.write(struct.pack('>BBB', token | 0xF, 0x10, size))
+
+ elif size < 1 << 16:
+ self._fp.write(struct.pack('>BBH', token | 0xF, 0x11, size))
+
+ elif size < 1 << 32:
+ self._fp.write(struct.pack('>BBL', token | 0xF, 0x12, size))
+
+ else:
+ self._fp.write(struct.pack('>BBQ', token | 0xF, 0x13, size))
+
+ def _write_object(self, value):
+ ref = self._getrefnum(value)
+ self._object_offsets[ref] = self._fp.tell()
+ if value is None:
+ self._fp.write(b'\x00')
+
+ elif value is False:
+ self._fp.write(b'\x08')
+
+ elif value is True:
+ self._fp.write(b'\x09')
+
+ elif isinstance(value, int):
+ if value < 0:
+ try:
+ self._fp.write(struct.pack('>Bq', 0x13, value))
+ except struct.error:
+ raise OverflowError(value) from None
+ elif value < 1 << 8:
+ self._fp.write(struct.pack('>BB', 0x10, value))
+ elif value < 1 << 16:
+ self._fp.write(struct.pack('>BH', 0x11, value))
+ elif value < 1 << 32:
+ self._fp.write(struct.pack('>BL', 0x12, value))
+ elif value < 1 << 63:
+ self._fp.write(struct.pack('>BQ', 0x13, value))
+ elif value < 1 << 64:
+ self._fp.write(b'\x14' + value.to_bytes(16, 'big', signed=True))
+ else:
+ raise OverflowError(value)
+
+ elif isinstance(value, float):
+ self._fp.write(struct.pack('>Bd', 0x23, value))
+
+ elif isinstance(value, datetime.datetime):
+ f = (value - datetime.datetime(2001, 1, 1)).total_seconds()
+ self._fp.write(struct.pack('>Bd', 0x33, f))
+
+ elif isinstance(value, (bytes, bytearray)):
+ self._write_size(0x40, len(value))
+ self._fp.write(value)
+
+ elif isinstance(value, str):
+ try:
+ t = value.encode('ascii')
+ self._write_size(0x50, len(value))
+ except UnicodeEncodeError:
+ t = value.encode('utf-16be')
+ self._write_size(0x60, len(t) // 2)
+
+ self._fp.write(t)
+
+ elif isinstance(value, UID):
+ if value.data < 0:
+ raise ValueError("UIDs must be positive")
+ elif value.data < 1 << 8:
+ self._fp.write(struct.pack('>BB', 0x80, value))
+ elif value.data < 1 << 16:
+ self._fp.write(struct.pack('>BH', 0x81, value))
+ elif value.data < 1 << 32:
+ self._fp.write(struct.pack('>BL', 0x83, value))
+ elif value.data < 1 << 64:
+ self._fp.write(struct.pack('>BQ', 0x87, value))
+ else:
+ raise OverflowError(value)
+
+ elif isinstance(value, (list, tuple)):
+ refs = [self._getrefnum(o) for o in value]
+ s = len(refs)
+ self._write_size(0xA0, s)
+ self._fp.write(struct.pack('>' + self._ref_format * s, *refs))
+
+ elif isinstance(value, dict):
+ keyRefs, valRefs = [], []
+
+ if self._sort_keys:
+ rootItems = sorted(value.items())
+ else:
+ rootItems = value.items()
+
+ for k, v in rootItems:
+ if not isinstance(k, str):
+ if self._skipkeys:
+ continue
+ raise TypeError("keys must be strings")
+ keyRefs.append(self._getrefnum(k))
+ valRefs.append(self._getrefnum(v))
+
+ s = len(keyRefs)
+ self._write_size(0xD0, s)
+ self._fp.write(struct.pack('>' + self._ref_format * s, *keyRefs))
+ self._fp.write(struct.pack('>' + self._ref_format * s, *valRefs))
+
+ else:
+ raise TypeError(value)
+
+
+def _is_fmt_binary(header):
+ return header[:8] == b'bplist00'
+
+
+#
+# Generic bits
+#
+
+_FORMATS={
+ FMT_XML: dict(
+ detect=_is_fmt_xml,
+ parser=_PlistParser,
+ writer=_PlistWriter,
+ ),
+ FMT_BINARY: dict(
+ detect=_is_fmt_binary,
+ parser=_BinaryPlistParser,
+ writer=_BinaryPlistWriter,
+ )
+}
+
+
+def load(fp, *, fmt=None, dict_type=dict):
+ """Read a .plist file. 'fp' should be a readable and binary file object.
+ Return the unpacked root object (which usually is a dictionary).
+ """
+ if fmt is None:
+ header = fp.read(32)
+ fp.seek(0)
+ for info in _FORMATS.values():
+ if info['detect'](header):
+ P = info['parser']
+ break
+
+ else:
+ raise InvalidFileException()
+
+ else:
+ P = _FORMATS[fmt]['parser']
+
+ p = P(dict_type=dict_type)
+ return p.parse(fp)
+
+
+def loads(value, *, fmt=None, dict_type=dict):
+ """Read a .plist file from a bytes object.
+ Return the unpacked root object (which usually is a dictionary).
+ """
+ fp = BytesIO(value)
+ return load(fp, fmt=fmt, dict_type=dict_type)
+
+
+def dump(value, fp, *, fmt=FMT_XML, sort_keys=True, skipkeys=False):
+ """Write 'value' to a .plist file. 'fp' should be a writable,
+ binary file object.
+ """
+ if fmt not in _FORMATS:
+ raise ValueError("Unsupported format: %r"%(fmt,))
+
+ writer = _FORMATS[fmt]["writer"](fp, sort_keys=sort_keys, skipkeys=skipkeys)
+ writer.write(value)
+
+
+def dumps(value, *, fmt=FMT_XML, skipkeys=False, sort_keys=True):
+ """Return a bytes object with the contents for a .plist file.
+ """
+ fp = BytesIO()
+ dump(value, fp, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys)
+ return fp.getvalue()
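+
+# Illustrative round trip through the public API above (editor's note, not
+# part of the original module):
+#
+#     >>> loads(dumps({'a': [1, 2.5, True]}, fmt=FMT_BINARY), fmt=FMT_BINARY)
+#     {'a': [1, 2.5, True]}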
diff --git a/infer_4_37_2/lib/python3.10/poplib.py b/infer_4_37_2/lib/python3.10/poplib.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f8587317c2bbc4c37c79b9112ffc281c67bcebd
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/poplib.py
@@ -0,0 +1,483 @@
+"""A POP3 client class.
+
+Based on the J. Myers POP3 draft, Jan. 96
+"""
+
+# Author: David Ascher
+# [heavily stealing from nntplib.py]
+# Updated: Piers Lauder [Jul '97]
+# String method conversion and test jig improvements by ESR, February 2001.
+# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia Aug 2003
+
+# Example (see the test function at the end of this file)
+
+# Imports
+
+import errno
+import re
+import socket
+import sys
+
+try:
+ import ssl
+ HAVE_SSL = True
+except ImportError:
+ HAVE_SSL = False
+
+__all__ = ["POP3", "error_proto"]
+
+# Exception raised when an error or invalid response is received:
+
+class error_proto(Exception): pass
+
+# Standard Port
+POP3_PORT = 110
+
+# POP SSL PORT
+POP3_SSL_PORT = 995
+
+# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
+CR = b'\r'
+LF = b'\n'
+CRLF = CR+LF
+
+# maximal line length when calling readline(). This is to prevent
+# reading arbitrary length lines. RFC 1939 limits POP3 line length to
+# 512 characters, including CRLF. We have selected 2048 just to be on
+# the safe side.
+_MAXLINE = 2048
+
+
+class POP3:
+
+ """This class supports both the minimal and optional command sets.
+    Arguments can be strings or integers (where appropriate)
+    (e.g.: retr(1) and retr('1') both work equally well).
+
+ Minimal Command Set:
+ USER name user(name)
+ PASS string pass_(string)
+ STAT stat()
+ LIST [msg] list(msg = None)
+ RETR msg retr(msg)
+ DELE msg dele(msg)
+ NOOP noop()
+ RSET rset()
+ QUIT quit()
+
+ Optional Commands (some servers support these):
+ RPOP name rpop(name)
+ APOP name digest apop(name, digest)
+ TOP msg n top(msg, n)
+ UIDL [msg] uidl(msg = None)
+ CAPA capa()
+ STLS stls()
+ UTF8 utf8()
+
+ Raises one exception: 'error_proto'.
+
+ Instantiate with:
+ POP3(hostname, port=110)
+
+ NB: the POP protocol locks the mailbox from user
+ authorization until QUIT, so be sure to get in, suck
+ the messages, and quit, each time you access the
+ mailbox.
+
+    POP is a line-based protocol, which means large mail
+    messages consume lots of Python cycles reading them
+    line-by-line.
+
+    If it's available on your mail server, use IMAP4
+    instead; it doesn't suffer from the two problems
+    above.
+ """
+
+ encoding = 'UTF-8'
+
+ def __init__(self, host, port=POP3_PORT,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
+ self.host = host
+ self.port = port
+ self._tls_established = False
+ sys.audit("poplib.connect", self, host, port)
+ self.sock = self._create_socket(timeout)
+ self.file = self.sock.makefile('rb')
+ self._debugging = 0
+ self.welcome = self._getresp()
+
+ def _create_socket(self, timeout):
+ if timeout is not None and not timeout:
+ raise ValueError('Non-blocking socket (timeout=0) is not supported')
+ return socket.create_connection((self.host, self.port), timeout)
+
+ def _putline(self, line):
+ if self._debugging > 1: print('*put*', repr(line))
+ sys.audit("poplib.putline", self, line)
+ self.sock.sendall(line + CRLF)
+
+
+ # Internal: send one command to the server (through _putline())
+
+ def _putcmd(self, line):
+ if self._debugging: print('*cmd*', repr(line))
+ line = bytes(line, self.encoding)
+ self._putline(line)
+
+
+ # Internal: return one line from the server, stripping CRLF.
+ # This is where all the CPU time of this module is consumed.
+ # Raise error_proto('-ERR EOF') if the connection is closed.
+
+ def _getline(self):
+ line = self.file.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise error_proto('line too long')
+
+ if self._debugging > 1: print('*get*', repr(line))
+ if not line: raise error_proto('-ERR EOF')
+ octets = len(line)
+ # server can send any combination of CR & LF
+ # however, 'readline()' returns lines ending in LF
+ # so only possibilities are ...LF, ...CRLF, CR...LF
+ if line[-2:] == CRLF:
+ return line[:-2], octets
+ if line[:1] == CR:
+ return line[1:-1], octets
+ return line[:-1], octets
+
+
+ # Internal: get a response from the server.
+ # Raise 'error_proto' if the response doesn't start with '+'.
+
+ def _getresp(self):
+ resp, o = self._getline()
+ if self._debugging > 1: print('*resp*', repr(resp))
+ if not resp.startswith(b'+'):
+ raise error_proto(resp)
+ return resp
+
+
+ # Internal: get a response plus following text from the server.
+
+ def _getlongresp(self):
+ resp = self._getresp()
+ list = []; octets = 0
+ line, o = self._getline()
+ while line != b'.':
+ if line.startswith(b'..'):
+ o = o-1
+ line = line[1:]
+ octets = octets + o
+ list.append(line)
+ line, o = self._getline()
+ return resp, list, octets
+
+
+ # Internal: send a command and get the response
+
+ def _shortcmd(self, line):
+ self._putcmd(line)
+ return self._getresp()
+
+
+ # Internal: send a command and get the response plus following text
+
+ def _longcmd(self, line):
+ self._putcmd(line)
+ return self._getlongresp()
+
+
+ # These can be useful:
+
+ def getwelcome(self):
+ return self.welcome
+
+
+ def set_debuglevel(self, level):
+ self._debugging = level
+
+
+ # Here are all the POP commands:
+
+ def user(self, user):
+ """Send user name, return response
+
+ (should indicate password required).
+ """
+ return self._shortcmd('USER %s' % user)
+
+
+ def pass_(self, pswd):
+ """Send password, return response
+
+ (response includes message count, mailbox size).
+
+ NB: mailbox is locked by server from here to 'quit()'
+ """
+ return self._shortcmd('PASS %s' % pswd)
+
+
+ def stat(self):
+ """Get mailbox status.
+
+ Result is tuple of 2 ints (message count, mailbox size)
+ """
+ retval = self._shortcmd('STAT')
+ rets = retval.split()
+ if self._debugging: print('*stat*', repr(rets))
+ numMessages = int(rets[1])
+ sizeMessages = int(rets[2])
+ return (numMessages, sizeMessages)
+
+
+ def list(self, which=None):
+ """Request listing, return result.
+
+ Result without a message number argument is in form
+ ['response', ['mesg_num octets', ...], octets].
+
+ Result when a message number argument is given is a
+ single response: the "scan listing" for that message.
+ """
+ if which is not None:
+ return self._shortcmd('LIST %s' % which)
+ return self._longcmd('LIST')
+
+
+ def retr(self, which):
+ """Retrieve whole message number 'which'.
+
+ Result is in form ['response', ['line', ...], octets].
+ """
+ return self._longcmd('RETR %s' % which)
+
+
+ def dele(self, which):
+ """Delete message number 'which'.
+
+ Result is 'response'.
+ """
+ return self._shortcmd('DELE %s' % which)
+
+
+ def noop(self):
+ """Does nothing.
+
+ One supposes the response indicates the server is alive.
+ """
+ return self._shortcmd('NOOP')
+
+
+ def rset(self):
+ """Unmark all messages marked for deletion."""
+ return self._shortcmd('RSET')
+
+
+ def quit(self):
+ """Signoff: commit changes on server, unlock mailbox, close connection."""
+ resp = self._shortcmd('QUIT')
+ self.close()
+ return resp
+
+ def close(self):
+ """Close the connection without assuming anything about it."""
+ try:
+ file = self.file
+ self.file = None
+ if file is not None:
+ file.close()
+ finally:
+ sock = self.sock
+ self.sock = None
+ if sock is not None:
+ try:
+ sock.shutdown(socket.SHUT_RDWR)
+ except OSError as exc:
+ # The server might already have closed the connection.
+ # On Windows, this may result in WSAEINVAL (error 10022):
+ # An invalid operation was attempted.
+ if (exc.errno != errno.ENOTCONN
+ and getattr(exc, 'winerror', 0) != 10022):
+ raise
+ finally:
+ sock.close()
+
+ #__del__ = quit
+
+
+ # optional commands:
+
+ def rpop(self, user):
+        """Send RPOP command (an obsolete alternative to PASS authentication)."""
+ return self._shortcmd('RPOP %s' % user)
+
+
+ timestamp = re.compile(br'\+OK.[^<]*(<.*>)')
+
+ def apop(self, user, password):
+ """Authorisation
+
+ - only possible if server has supplied a timestamp in initial greeting.
+
+ Args:
+ user - mailbox user;
+ password - mailbox password.
+
+ NB: mailbox is locked by server from here to 'quit()'
+ """
+ secret = bytes(password, self.encoding)
+ m = self.timestamp.match(self.welcome)
+ if not m:
+ raise error_proto('-ERR APOP not supported by server')
+ import hashlib
+ digest = m.group(1)+secret
+ digest = hashlib.md5(digest).hexdigest()
+ return self._shortcmd('APOP %s %s' % (user, digest))
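+
+    # For example (values are placeholders): for a welcome banner of
+    # b'+OK POP3 <123@host>' and password 'secret', the digest sent is
+    # hashlib.md5(b'<123@host>' + b'secret').hexdigest(); the method above
+    # extracts the <...> timestamp from the banner automatically.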
+
+
+ def top(self, which, howmuch):
+ """Retrieve message header of message number 'which'
+ and first 'howmuch' lines of message body.
+
+ Result is in form ['response', ['line', ...], octets].
+ """
+ return self._longcmd('TOP %s %s' % (which, howmuch))
+
+
+ def uidl(self, which=None):
+ """Return message digest (unique id) list.
+
+ If 'which', result contains unique id for that message
+ in the form 'response mesgnum uid', otherwise result is
+ the list ['response', ['mesgnum uid', ...], octets]
+ """
+ if which is not None:
+ return self._shortcmd('UIDL %s' % which)
+ return self._longcmd('UIDL')
+
+
+ def utf8(self):
+ """Try to enter UTF-8 mode (see RFC 6856). Returns server response.
+ """
+ return self._shortcmd('UTF8')
+
+
+ def capa(self):
+ """Return server capabilities (RFC 2449) as a dictionary
+ >>> c=poplib.POP3('localhost')
+ >>> c.capa()
+ {'IMPLEMENTATION': ['Cyrus', 'POP3', 'server', 'v2.2.12'],
+ 'TOP': [], 'LOGIN-DELAY': ['0'], 'AUTH-RESP-CODE': [],
+ 'EXPIRE': ['NEVER'], 'USER': [], 'STLS': [], 'PIPELINING': [],
+ 'UIDL': [], 'RESP-CODES': []}
+ >>>
+
+ Really, according to RFC 2449, the cyrus folks should avoid
+ having the implementation split into multiple arguments...
+ """
+ def _parsecap(line):
+ lst = line.decode('ascii').split()
+ return lst[0], lst[1:]
+
+ caps = {}
+ try:
+ resp = self._longcmd('CAPA')
+ rawcaps = resp[1]
+ for capline in rawcaps:
+ capnm, capargs = _parsecap(capline)
+ caps[capnm] = capargs
+ except error_proto:
+ raise error_proto('-ERR CAPA not supported by server')
+ return caps
+
+
+ def stls(self, context=None):
+ """Start a TLS session on the active connection as specified in RFC 2595.
+
+ context - a ssl.SSLContext
+ """
+ if not HAVE_SSL:
+ raise error_proto('-ERR TLS support missing')
+ if self._tls_established:
+ raise error_proto('-ERR TLS session already established')
+ caps = self.capa()
+ if not 'STLS' in caps:
+ raise error_proto('-ERR STLS not supported by server')
+ if context is None:
+ context = ssl._create_stdlib_context()
+ resp = self._shortcmd('STLS')
+ self.sock = context.wrap_socket(self.sock,
+ server_hostname=self.host)
+ self.file = self.sock.makefile('rb')
+ self._tls_established = True
+ return resp
+
+
+if HAVE_SSL:
+
+ class POP3_SSL(POP3):
+ """POP3 client class over SSL connection
+
+ Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None,
+ context=None)
+
+ hostname - the hostname of the pop3 over ssl server
+ port - port number
+ keyfile - PEM formatted file that contains your private key
+ certfile - PEM formatted certificate chain file
+ context - a ssl.SSLContext
+
+ See the methods of the parent class POP3 for more documentation.
+ """
+
+ def __init__(self, host, port=POP3_SSL_PORT, keyfile=None, certfile=None,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT, context=None):
+ if context is not None and keyfile is not None:
+ raise ValueError("context and keyfile arguments are mutually "
+ "exclusive")
+ if context is not None and certfile is not None:
+ raise ValueError("context and certfile arguments are mutually "
+ "exclusive")
+ if keyfile is not None or certfile is not None:
+ import warnings
+ warnings.warn("keyfile and certfile are deprecated, use a "
+ "custom context instead", DeprecationWarning, 2)
+ self.keyfile = keyfile
+ self.certfile = certfile
+ if context is None:
+ context = ssl._create_stdlib_context(certfile=certfile,
+ keyfile=keyfile)
+ self.context = context
+ POP3.__init__(self, host, port, timeout)
+
+ def _create_socket(self, timeout):
+ sock = POP3._create_socket(self, timeout)
+ sock = self.context.wrap_socket(sock,
+ server_hostname=self.host)
+ return sock
+
+ def stls(self, keyfile=None, certfile=None, context=None):
+ """The method unconditionally raises an exception since the
+ STLS command doesn't make any sense on an already established
+ SSL/TLS session.
+ """
+ raise error_proto('-ERR TLS session already established')
+
+ __all__.append("POP3_SSL")
+
+if __name__ == "__main__":
+ import sys
+ a = POP3(sys.argv[1])
+ print(a.getwelcome())
+ a.user(sys.argv[2])
+ a.pass_(sys.argv[3])
+ a.list()
+ (numMsgs, totalSize) = a.stat()
+ for i in range(1, numMsgs + 1):
+ (header, msg, octets) = a.retr(i)
+ print("Message %d:" % i)
+ for line in msg:
+ print(' ' + line)
+ print('-----------------------')
+ a.quit()
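+
+# Illustrative usage sketch (not part of the module): a minimal secure
+# session over SSL. The host name and credentials are placeholders.
+#
+#     import poplib
+#     conn = poplib.POP3_SSL('pop.example.com')   # port 995 by default
+#     conn.user('alice')
+#     conn.pass_('secret')                        # mailbox locked from here
+#     num_msgs, total_size = conn.stat()
+#     if num_msgs:
+#         resp, lines, octets = conn.retr(1)      # fetch message 1
+#     conn.quit()                                 # commit and unlock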
diff --git a/infer_4_37_2/lib/python3.10/posixpath.py b/infer_4_37_2/lib/python3.10/posixpath.py
new file mode 100644
index 0000000000000000000000000000000000000000..e550b470da5bebf60cf2ea4efc2b017520e5d670
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/posixpath.py
@@ -0,0 +1,542 @@
+"""Common operations on Posix pathnames.
+
+Instead of importing this module directly, import os and refer to
+this module as os.path. The "os.path" name is an alias for this
+module on Posix systems; on other systems (e.g. Windows),
+os.path provides the same operations in a manner specific to that
+platform, and is an alias to another module (e.g. ntpath).
+
+Some of this can actually be useful on non-Posix systems too, e.g.
+for manipulation of the pathname component of URLs.
+"""
+
+# Strings representing various path-related bits and pieces.
+# These are primarily for export; internally, they are hardcoded.
+# Should be set before imports for resolving cyclic dependency.
+curdir = '.'
+pardir = '..'
+extsep = '.'
+sep = '/'
+pathsep = ':'
+defpath = '/bin:/usr/bin'
+altsep = None
+devnull = '/dev/null'
+
+import os
+import sys
+import stat
+import genericpath
+from genericpath import *
+
+__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
+ "basename","dirname","commonprefix","getsize","getmtime",
+ "getatime","getctime","islink","exists","lexists","isdir","isfile",
+ "ismount", "expanduser","expandvars","normpath","abspath",
+ "samefile","sameopenfile","samestat",
+ "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
+ "devnull","realpath","supports_unicode_filenames","relpath",
+ "commonpath"]
+
+
+def _get_sep(path):
+ if isinstance(path, bytes):
+ return b'/'
+ else:
+ return '/'
+
+# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
+# On MS-DOS this may also turn slashes into backslashes; however, other
+# normalizations (such as optimizing '../' away) are not allowed
+# (another function should be defined to do that).
+
+def normcase(s):
+ """Normalize case of pathname. Has no effect under Posix"""
+ return os.fspath(s)
+
+
+# Return whether a path is absolute.
+# Trivial in Posix, harder on the Mac or MS-DOS.
+
+def isabs(s):
+ """Test whether a path is absolute"""
+ s = os.fspath(s)
+ sep = _get_sep(s)
+ return s.startswith(sep)
+
+
+# Join pathnames.
+# Ignore the previous parts if a part is absolute.
+# Insert a '/' unless the first part is empty or already ends in '/'.
+
+def join(a, *p):
+ """Join two or more pathname components, inserting '/' as needed.
+ If any component is an absolute path, all previous path components
+ will be discarded. An empty last part will result in a path that
+ ends with a separator."""
+ a = os.fspath(a)
+ sep = _get_sep(a)
+ path = a
+ try:
+ if not p:
+ path[:0] + sep #23780: Ensure compatible data type even if p is null.
+ for b in map(os.fspath, p):
+ if b.startswith(sep):
+ path = b
+ elif not path or path.endswith(sep):
+ path += b
+ else:
+ path += sep + b
+ except (TypeError, AttributeError, BytesWarning):
+ genericpath._check_arg_types('join', a, *p)
+ raise
+ return path
+
+
+# Split a path in head (everything up to the last '/') and tail (the
+# rest). If the path ends in '/', tail will be empty. If there is no
+# '/' in the path, head will be empty.
+# Trailing '/'es are stripped from head unless it is the root.
+
+def split(p):
+ """Split a pathname. Returns tuple "(head, tail)" where "tail" is
+ everything after the final slash. Either part may be empty."""
+ p = os.fspath(p)
+ sep = _get_sep(p)
+ i = p.rfind(sep) + 1
+ head, tail = p[:i], p[i:]
+ if head and head != sep*len(head):
+ head = head.rstrip(sep)
+ return head, tail
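+
+# For example (doctest-style):
+#
+#     >>> split('/usr/lib/python3')
+#     ('/usr/lib', 'python3')
+#     >>> split('/usr/lib/')
+#     ('/usr/lib', '')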
+
+
+# Split a path in root and extension.
+# The extension is everything starting at the last dot in the last
+# pathname component; the root is everything before that.
+# It is always true that root + ext == p.
+
+def splitext(p):
+ p = os.fspath(p)
+ if isinstance(p, bytes):
+ sep = b'/'
+ extsep = b'.'
+ else:
+ sep = '/'
+ extsep = '.'
+ return genericpath._splitext(p, sep, None, extsep)
+splitext.__doc__ = genericpath._splitext.__doc__
+
+# Split a pathname into a drive specification and the rest of the
+# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
+
+def splitdrive(p):
+ """Split a pathname into drive and path. On Posix, drive is always
+ empty."""
+ p = os.fspath(p)
+ return p[:0], p
+
+
+# Return the tail (basename) part of a path, same as split(path)[1].
+
+def basename(p):
+ """Returns the final component of a pathname"""
+ p = os.fspath(p)
+ sep = _get_sep(p)
+ i = p.rfind(sep) + 1
+ return p[i:]
+
+
+# Return the head (dirname) part of a path, same as split(path)[0].
+
+def dirname(p):
+ """Returns the directory component of a pathname"""
+ p = os.fspath(p)
+ sep = _get_sep(p)
+ i = p.rfind(sep) + 1
+ head = p[:i]
+ if head and head != sep*len(head):
+ head = head.rstrip(sep)
+ return head
+
+
+# Is a path a symbolic link?
+# This will always return false on systems where os.lstat doesn't exist.
+
+def islink(path):
+ """Test whether a path is a symbolic link"""
+ try:
+ st = os.lstat(path)
+ except (OSError, ValueError, AttributeError):
+ return False
+ return stat.S_ISLNK(st.st_mode)
+
+# Being true for dangling symbolic links is also useful.
+
+def lexists(path):
+ """Test whether a path exists. Returns True for broken symbolic links"""
+ try:
+ os.lstat(path)
+ except (OSError, ValueError):
+ return False
+ return True
+
+
+# Is a path a mount point?
+# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
+
+def ismount(path):
+ """Test whether a path is a mount point"""
+ try:
+ s1 = os.lstat(path)
+ except (OSError, ValueError):
+ # It doesn't exist -- so not a mount point. :-)
+ return False
+ else:
+ # A symlink can never be a mount point
+ if stat.S_ISLNK(s1.st_mode):
+ return False
+
+ path = os.fspath(path)
+ if isinstance(path, bytes):
+ parent = join(path, b'..')
+ else:
+ parent = join(path, '..')
+ parent = realpath(parent)
+ try:
+ s2 = os.lstat(parent)
+ except (OSError, ValueError):
+ return False
+
+ dev1 = s1.st_dev
+ dev2 = s2.st_dev
+ if dev1 != dev2:
+        return True     # path/.. on a different device than path
+ ino1 = s1.st_ino
+ ino2 = s2.st_ino
+ if ino1 == ino2:
+ return True # path/.. is the same i-node as path
+ return False
+
+
+# Expand paths beginning with '~' or '~user'.
+# '~' means $HOME; '~user' means that user's home directory.
+# If the path doesn't begin with '~', or if the user or $HOME is unknown,
+# the path is returned unchanged (leaving error reporting to whatever
+# function is called with the expanded path as argument).
+# See also module 'glob' for expansion of *, ? and [...] in pathnames.
+# (A function should also be defined to do full *sh-style environment
+# variable expansion.)
+
+def expanduser(path):
+ """Expand ~ and ~user constructions. If user or $HOME is unknown,
+ do nothing."""
+ path = os.fspath(path)
+ if isinstance(path, bytes):
+ tilde = b'~'
+ else:
+ tilde = '~'
+ if not path.startswith(tilde):
+ return path
+ sep = _get_sep(path)
+ i = path.find(sep, 1)
+ if i < 0:
+ i = len(path)
+ if i == 1:
+ if 'HOME' not in os.environ:
+ import pwd
+ try:
+ userhome = pwd.getpwuid(os.getuid()).pw_dir
+ except KeyError:
+ # bpo-10496: if the current user identifier doesn't exist in the
+ # password database, return the path unchanged
+ return path
+ else:
+ userhome = os.environ['HOME']
+ else:
+ import pwd
+ name = path[1:i]
+ if isinstance(name, bytes):
+ name = str(name, 'ASCII')
+ try:
+ pwent = pwd.getpwnam(name)
+ except KeyError:
+ # bpo-10496: if the user name from the path doesn't exist in the
+ # password database, return the path unchanged
+ return path
+ userhome = pwent.pw_dir
+ # if no user home, return the path unchanged on VxWorks
+ if userhome is None and sys.platform == "vxworks":
+ return path
+ if isinstance(path, bytes):
+ userhome = os.fsencode(userhome)
+ root = b'/'
+ else:
+ root = '/'
+ userhome = userhome.rstrip(root)
+ return (userhome + path[i:]) or root
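+
+# For example, with $HOME set to '/home/alice' (doctest-style):
+#
+#     >>> expanduser('~/src')
+#     '/home/alice/src'
+#     >>> expanduser('~root/.ssh')    # '~user' consults the password database
+#     '/root/.ssh'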
+
+
+# Expand paths containing shell variable substitutions.
+# This expands the forms $variable and ${variable} only.
+# Non-existent variables are left unchanged.
+
+_varprog = None
+_varprogb = None
+
+def expandvars(path):
+ """Expand shell variables of form $var and ${var}. Unknown variables
+ are left unchanged."""
+ path = os.fspath(path)
+ global _varprog, _varprogb
+ if isinstance(path, bytes):
+ if b'$' not in path:
+ return path
+ if not _varprogb:
+ import re
+ _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
+ search = _varprogb.search
+ start = b'{'
+ end = b'}'
+ environ = getattr(os, 'environb', None)
+ else:
+ if '$' not in path:
+ return path
+ if not _varprog:
+ import re
+ _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
+ search = _varprog.search
+ start = '{'
+ end = '}'
+ environ = os.environ
+ i = 0
+ while True:
+ m = search(path, i)
+ if not m:
+ break
+ i, j = m.span(0)
+ name = m.group(1)
+ if name.startswith(start) and name.endswith(end):
+ name = name[1:-1]
+ try:
+ if environ is None:
+ value = os.fsencode(os.environ[os.fsdecode(name)])
+ else:
+ value = environ[name]
+ except KeyError:
+ i = j
+ else:
+ tail = path[j:]
+ path = path[:i] + value
+ i = len(path)
+ path += tail
+ return path
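+
+# For example, with $HOME set to '/home/alice' (doctest-style):
+#
+#     >>> expandvars('$HOME/src')
+#     '/home/alice/src'
+#     >>> expandvars('${HOME}/src')
+#     '/home/alice/src'
+#     >>> expandvars('$UNDEFINED_VAR/src')   # unknown names are left alone
+#     '$UNDEFINED_VAR/src'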
+
+
+# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
+# It should be understood that this may change the meaning of the path
+# if it contains symbolic links!
+
+def normpath(path):
+ """Normalize path, eliminating double slashes, etc."""
+ path = os.fspath(path)
+ if isinstance(path, bytes):
+ sep = b'/'
+ empty = b''
+ dot = b'.'
+ dotdot = b'..'
+ else:
+ sep = '/'
+ empty = ''
+ dot = '.'
+ dotdot = '..'
+ if path == empty:
+ return dot
+ initial_slashes = path.startswith(sep)
+ # POSIX allows one or two initial slashes, but treats three or more
+ # as single slash.
+ # (see https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13)
+ if (initial_slashes and
+ path.startswith(sep*2) and not path.startswith(sep*3)):
+ initial_slashes = 2
+ comps = path.split(sep)
+ new_comps = []
+ for comp in comps:
+ if comp in (empty, dot):
+ continue
+ if (comp != dotdot or (not initial_slashes and not new_comps) or
+ (new_comps and new_comps[-1] == dotdot)):
+ new_comps.append(comp)
+ elif new_comps:
+ new_comps.pop()
+ comps = new_comps
+ path = sep.join(comps)
+ if initial_slashes:
+ path = sep*initial_slashes + path
+ return path or dot
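+
+# For example (doctest-style):
+#
+#     >>> normpath('A//B/./C/../D')
+#     'A/B/D'
+#     >>> normpath('//usr/lib')     # exactly two leading slashes are kept
+#     '//usr/lib'
+#     >>> normpath('///usr/lib')    # three or more collapse to one
+#     '/usr/lib'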
+
+
+def abspath(path):
+ """Return an absolute path."""
+ path = os.fspath(path)
+ if not isabs(path):
+ if isinstance(path, bytes):
+ cwd = os.getcwdb()
+ else:
+ cwd = os.getcwd()
+ path = join(cwd, path)
+ return normpath(path)
+
+
+# Return a canonical path (i.e. the absolute location of a file on the
+# filesystem).
+
+def realpath(filename, *, strict=False):
+ """Return the canonical path of the specified filename, eliminating any
+symbolic links encountered in the path."""
+ filename = os.fspath(filename)
+ path, ok = _joinrealpath(filename[:0], filename, strict, {})
+ return abspath(path)
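+
+# Illustrative example (assumes a symlink /tmp/link -> /usr/lib exists):
+# realpath('/tmp/link/python3') returns '/usr/lib/python3'; with strict=True,
+# an OSError is raised if any component of the path does not exist.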
+
+# Join two paths, normalizing and eliminating any symbolic links
+# encountered in the second path.
+def _joinrealpath(path, rest, strict, seen):
+ if isinstance(path, bytes):
+ sep = b'/'
+ curdir = b'.'
+ pardir = b'..'
+ else:
+ sep = '/'
+ curdir = '.'
+ pardir = '..'
+
+ if isabs(rest):
+ rest = rest[1:]
+ path = sep
+
+ while rest:
+ name, _, rest = rest.partition(sep)
+ if not name or name == curdir:
+ # current dir
+ continue
+ if name == pardir:
+ # parent dir
+ if path:
+ path, name = split(path)
+ if name == pardir:
+ path = join(path, pardir, pardir)
+ else:
+ path = pardir
+ continue
+ newpath = join(path, name)
+ try:
+ st = os.lstat(newpath)
+ except OSError:
+ if strict:
+ raise
+ is_link = False
+ else:
+ is_link = stat.S_ISLNK(st.st_mode)
+ if not is_link:
+ path = newpath
+ continue
+ # Resolve the symbolic link
+ if newpath in seen:
+ # Already seen this path
+ path = seen[newpath]
+ if path is not None:
+ # use cached value
+ continue
+ # The symlink is not resolved, so we must have a symlink loop.
+ if strict:
+ # Raise OSError(errno.ELOOP)
+ os.stat(newpath)
+ else:
+ # Return already resolved part + rest of the path unchanged.
+ return join(newpath, rest), False
+ seen[newpath] = None # not resolved symlink
+ path, ok = _joinrealpath(path, os.readlink(newpath), strict, seen)
+ if not ok:
+ return join(path, rest), False
+ seen[newpath] = path # resolved symlink
+
+ return path, True
+
+
+supports_unicode_filenames = (sys.platform == 'darwin')
+
+def relpath(path, start=None):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+
+ path = os.fspath(path)
+ if isinstance(path, bytes):
+ curdir = b'.'
+ sep = b'/'
+ pardir = b'..'
+ else:
+ curdir = '.'
+ sep = '/'
+ pardir = '..'
+
+ if start is None:
+ start = curdir
+ else:
+ start = os.fspath(start)
+
+ try:
+ start_list = [x for x in abspath(start).split(sep) if x]
+ path_list = [x for x in abspath(path).split(sep) if x]
+ # Work out how much of the filepath is shared by start and path.
+ i = len(commonprefix([start_list, path_list]))
+
+ rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return curdir
+ return join(*rel_list)
+ except (TypeError, AttributeError, BytesWarning, DeprecationWarning):
+ genericpath._check_arg_types('relpath', path, start)
+ raise
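+
+# For example (doctest-style):
+#
+#     >>> relpath('/usr/var/log', '/usr/lib')
+#     '../var/log'
+#     >>> relpath('/usr/var/log', '/usr/var')
+#     'log'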
+
+
+# Return the longest common sub-path of the sequence of paths given as input.
+# The paths are not normalized before comparing them (this is the
+# responsibility of the caller). Any trailing separator is stripped from the
+# returned path.
+
+def commonpath(paths):
+ """Given a sequence of path names, returns the longest common sub-path."""
+
+ if not paths:
+ raise ValueError('commonpath() arg is an empty sequence')
+
+ paths = tuple(map(os.fspath, paths))
+ if isinstance(paths[0], bytes):
+ sep = b'/'
+ curdir = b'.'
+ else:
+ sep = '/'
+ curdir = '.'
+
+ try:
+ split_paths = [path.split(sep) for path in paths]
+
+ try:
+ isabs, = set(p[:1] == sep for p in paths)
+ except ValueError:
+ raise ValueError("Can't mix absolute and relative paths") from None
+
+ split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
+ s1 = min(split_paths)
+ s2 = max(split_paths)
+ common = s1
+ for i, c in enumerate(s1):
+ if c != s2[i]:
+ common = s1[:i]
+ break
+
+ prefix = sep if isabs else sep[:0]
+ return prefix + sep.join(common)
+ except (TypeError, AttributeError):
+ genericpath._check_arg_types('commonpath', *paths)
+ raise
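+
+# For example (doctest-style):
+#
+#     >>> commonpath(['/usr/lib', '/usr/local/lib'])
+#     '/usr'
+#     >>> commonpath(['spam/egg', 'spam'])
+#     'spam'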
diff --git a/infer_4_37_2/lib/python3.10/pyclbr.py b/infer_4_37_2/lib/python3.10/pyclbr.py
new file mode 100644
index 0000000000000000000000000000000000000000..37f86995d6ce00013643f1b4a8aa387cebaeae72
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/pyclbr.py
@@ -0,0 +1,314 @@
+"""Parse a Python module and describe its classes and functions.
+
+Parse enough of a Python file to recognize imports and class and
+function definitions, and to find out the superclasses of a class.
+
+The interface consists of a single function:
+ readmodule_ex(module, path=None)
+where module is the name of a Python module, and path is an optional
+list of directories where the module is to be searched. If present,
+path is prepended to the system search path sys.path. The return value
+is a dictionary. The keys of the dictionary are the names of the
+classes and functions defined in the module (including classes that are
+defined via the from XXX import YYY construct). The values are
+instances of classes Class and Function. One special key/value pair is
+present for packages: the key '__path__' has a list as its value which
+contains the package search path.
+
+Classes and Functions have a common superclass: _Object. Every instance
+has the following attributes:
+ module -- name of the module;
+ name -- name of the object;
+ file -- file in which the object is defined;
+ lineno -- line in the file where the object's definition starts;
+ end_lineno -- line in the file where the object's definition ends;
+ parent -- parent of this object, if any;
+ children -- nested objects contained in this object.
+The 'children' attribute is a dictionary mapping names to objects.
+
+Instances of Function describe functions with the attributes from _Object,
+plus the following:
+ is_async -- if a function is defined with an 'async' prefix
+
+Instances of Class describe classes with the attributes from _Object,
+plus the following:
+ super -- list of super classes (Class instances if possible);
+ methods -- mapping of method names to beginning line numbers.
+If the name of a super class is not recognized, the corresponding
+entry in the list of super classes is not a class instance but a
+string giving the name of the super class. Since import statements
+are recognized and imported modules are scanned as well, this
+shouldn't happen often.
+"""
+
+import ast
+import sys
+import importlib.util
+
+__all__ = ["readmodule", "readmodule_ex", "Class", "Function"]
+
+_modules = {} # Initialize cache of modules we've seen.
+
+
+class _Object:
+ "Information about Python class or function."
+ def __init__(self, module, name, file, lineno, end_lineno, parent):
+ self.module = module
+ self.name = name
+ self.file = file
+ self.lineno = lineno
+ self.end_lineno = end_lineno
+ self.parent = parent
+ self.children = {}
+ if parent is not None:
+ parent.children[name] = self
+
+
+# Odd Function and Class signatures are for back-compatibility.
+class Function(_Object):
+ "Information about a Python function, including methods."
+ def __init__(self, module, name, file, lineno,
+ parent=None, is_async=False, *, end_lineno=None):
+ super().__init__(module, name, file, lineno, end_lineno, parent)
+ self.is_async = is_async
+ if isinstance(parent, Class):
+ parent.methods[name] = lineno
+
+
+class Class(_Object):
+ "Information about a Python class."
+ def __init__(self, module, name, super_, file, lineno,
+ parent=None, *, end_lineno=None):
+ super().__init__(module, name, file, lineno, end_lineno, parent)
+ self.super = super_ or []
+ self.methods = {}
+
+
+# These two functions are used by the tests in
+# Lib/test/test_pyclbr.py and Lib/idlelib/idle_test/test_browser.py.
+def _nest_function(ob, func_name, lineno, end_lineno, is_async=False):
+ "Return a Function after nesting within ob."
+ return Function(ob.module, func_name, ob.file, lineno,
+ parent=ob, is_async=is_async, end_lineno=end_lineno)
+
+def _nest_class(ob, class_name, lineno, end_lineno, super=None):
+ "Return a Class after nesting within ob."
+ return Class(ob.module, class_name, super, ob.file, lineno,
+ parent=ob, end_lineno=end_lineno)
+
+
+def readmodule(module, path=None):
+ """Return Class objects for the top-level classes in module.
+
+ This is the original interface, before Functions were added.
+ """
+
+ res = {}
+ for key, value in _readmodule(module, path or []).items():
+ if isinstance(value, Class):
+ res[key] = value
+ return res
+
+def readmodule_ex(module, path=None):
+ """Return a dictionary with all functions and classes in module.
+
+ Search for module in PATH + sys.path.
+ If possible, include imported superclasses.
+ Do this by reading source, without importing (and executing) it.
+ """
+ return _readmodule(module, path or [])
+
+
+def _readmodule(module, path, inpackage=None):
+ """Do the hard work for readmodule[_ex].
+
+ If inpackage is given, it must be the dotted name of the package in
+ which we are searching for a submodule, and then PATH must be the
+ package search path; otherwise, we are searching for a top-level
+ module, and path is combined with sys.path.
+ """
+ # Compute the full module name (prepending inpackage if set).
+ if inpackage is not None:
+ fullmodule = "%s.%s" % (inpackage, module)
+ else:
+ fullmodule = module
+
+ # Check in the cache.
+ if fullmodule in _modules:
+ return _modules[fullmodule]
+
+ # Initialize the dict for this module's contents.
+ tree = {}
+
+ # Check if it is a built-in module; we don't do much for these.
+ if module in sys.builtin_module_names and inpackage is None:
+ _modules[module] = tree
+ return tree
+
+ # Check for a dotted module name.
+ i = module.rfind('.')
+ if i >= 0:
+ package = module[:i]
+ submodule = module[i+1:]
+ parent = _readmodule(package, path, inpackage)
+ if inpackage is not None:
+ package = "%s.%s" % (inpackage, package)
+ if not '__path__' in parent:
+ raise ImportError('No package named {}'.format(package))
+ return _readmodule(submodule, parent['__path__'], package)
+
+ # Search the path for the module.
+ f = None
+ if inpackage is not None:
+ search_path = path
+ else:
+ search_path = path + sys.path
+ spec = importlib.util._find_spec_from_path(fullmodule, search_path)
+ if spec is None:
+ raise ModuleNotFoundError(f"no module named {fullmodule!r}", name=fullmodule)
+ _modules[fullmodule] = tree
+ # Is module a package?
+ if spec.submodule_search_locations is not None:
+ tree['__path__'] = spec.submodule_search_locations
+ try:
+ source = spec.loader.get_source(fullmodule)
+ except (AttributeError, ImportError):
+ # If module is not Python source, we cannot do anything.
+ return tree
+ else:
+ if source is None:
+ return tree
+
+ fname = spec.loader.get_filename(fullmodule)
+ return _create_tree(fullmodule, path, fname, source, tree, inpackage)
+
+
+class _ModuleBrowser(ast.NodeVisitor):
+ def __init__(self, module, path, file, tree, inpackage):
+ self.path = path
+ self.tree = tree
+ self.file = file
+ self.module = module
+ self.inpackage = inpackage
+ self.stack = []
+
+ def visit_ClassDef(self, node):
+ bases = []
+ for base in node.bases:
+ name = ast.unparse(base)
+ if name in self.tree:
+ # We know this super class.
+ bases.append(self.tree[name])
+ elif len(names := name.split(".")) > 1:
+ # Super class form is module.class:
+ # look in module for class.
+ *_, module, class_ = names
+ if module in _modules:
+ bases.append(_modules[module].get(class_, name))
+ else:
+ bases.append(name)
+
+ parent = self.stack[-1] if self.stack else None
+ class_ = Class(self.module, node.name, bases, self.file, node.lineno,
+ parent=parent, end_lineno=node.end_lineno)
+ if parent is None:
+ self.tree[node.name] = class_
+ self.stack.append(class_)
+ self.generic_visit(node)
+ self.stack.pop()
+
+ def visit_FunctionDef(self, node, *, is_async=False):
+ parent = self.stack[-1] if self.stack else None
+ function = Function(self.module, node.name, self.file, node.lineno,
+ parent, is_async, end_lineno=node.end_lineno)
+ if parent is None:
+ self.tree[node.name] = function
+ self.stack.append(function)
+ self.generic_visit(node)
+ self.stack.pop()
+
+ def visit_AsyncFunctionDef(self, node):
+ self.visit_FunctionDef(node, is_async=True)
+
+ def visit_Import(self, node):
+ if node.col_offset != 0:
+ return
+
+ for module in node.names:
+ try:
+ try:
+ _readmodule(module.name, self.path, self.inpackage)
+ except ImportError:
+ _readmodule(module.name, [])
+ except (ImportError, SyntaxError):
+ # If we can't find or parse the imported module,
+ # too bad -- don't die here.
+ continue
+
+ def visit_ImportFrom(self, node):
+ if node.col_offset != 0:
+ return
+ try:
+ module = "." * node.level
+ if node.module:
+ module += node.module
+ module = _readmodule(module, self.path, self.inpackage)
+ except (ImportError, SyntaxError):
+ return
+
+ for name in node.names:
+ if name.name in module:
+ self.tree[name.asname or name.name] = module[name.name]
+ elif name.name == "*":
+ for import_name, import_value in module.items():
+ if import_name.startswith("_"):
+ continue
+ self.tree[import_name] = import_value
+
+
+def _create_tree(fullmodule, path, fname, source, tree, inpackage):
+ mbrowser = _ModuleBrowser(fullmodule, path, fname, tree, inpackage)
+ mbrowser.visit(ast.parse(source))
+ return mbrowser.tree
+
+
+def _main():
+ "Print module output (default this file) for quick visual check."
+ import os
+ try:
+ mod = sys.argv[1]
+    except IndexError:
+ mod = __file__
+ if os.path.exists(mod):
+ path = [os.path.dirname(mod)]
+ mod = os.path.basename(mod)
+ if mod.lower().endswith(".py"):
+ mod = mod[:-3]
+ else:
+ path = []
+ tree = readmodule_ex(mod, path)
+ lineno_key = lambda a: getattr(a, 'lineno', 0)
+ objs = sorted(tree.values(), key=lineno_key, reverse=True)
+ indent_level = 2
+ while objs:
+ obj = objs.pop()
+ if isinstance(obj, list):
+ # Value is a __path__ key.
+ continue
+ if not hasattr(obj, 'indent'):
+ obj.indent = 0
+
+ if isinstance(obj, _Object):
+ new_objs = sorted(obj.children.values(),
+ key=lineno_key, reverse=True)
+ for ob in new_objs:
+ ob.indent = obj.indent + indent_level
+ objs.extend(new_objs)
+ if isinstance(obj, Class):
+ print("{}class {} {} {}"
+ .format(' ' * obj.indent, obj.name, obj.super, obj.lineno))
+ elif isinstance(obj, Function):
+ print("{}def {} {}".format(' ' * obj.indent, obj.name, obj.lineno))
+
+if __name__ == "__main__":
+ _main()
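+
+# Illustrative usage sketch (not part of the module):
+#
+#     import pyclbr
+#     tree = pyclbr.readmodule_ex('queue')      # scan Lib/queue.py by source
+#     for name, obj in tree.items():
+#         if isinstance(obj, pyclbr.Class):
+#             print(name, sorted(obj.methods))  # method names, per class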
diff --git a/infer_4_37_2/lib/python3.10/queue.py b/infer_4_37_2/lib/python3.10/queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..55f50088460f9e5450c86d8c68d99b259a4c6d5c
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/queue.py
@@ -0,0 +1,326 @@
+'''A multi-producer, multi-consumer queue.'''
+
+import threading
+import types
+from collections import deque
+from heapq import heappush, heappop
+from time import monotonic as time
+try:
+ from _queue import SimpleQueue
+except ImportError:
+ SimpleQueue = None
+
+__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue', 'SimpleQueue']
+
+
+try:
+ from _queue import Empty
+except ImportError:
+ class Empty(Exception):
+ 'Exception raised by Queue.get(block=0)/get_nowait().'
+ pass
+
+class Full(Exception):
+ 'Exception raised by Queue.put(block=0)/put_nowait().'
+ pass
+
+
+class Queue:
+ '''Create a queue object with a given maximum size.
+
+ If maxsize is <= 0, the queue size is infinite.
+ '''
+
+ def __init__(self, maxsize=0):
+ self.maxsize = maxsize
+ self._init(maxsize)
+
+ # mutex must be held whenever the queue is mutating. All methods
+ # that acquire mutex must release it before returning. mutex
+ # is shared between the three conditions, so acquiring and
+ # releasing the conditions also acquires and releases mutex.
+ self.mutex = threading.Lock()
+
+ # Notify not_empty whenever an item is added to the queue; a
+ # thread waiting to get is notified then.
+ self.not_empty = threading.Condition(self.mutex)
+
+ # Notify not_full whenever an item is removed from the queue;
+ # a thread waiting to put is notified then.
+ self.not_full = threading.Condition(self.mutex)
+
+ # Notify all_tasks_done whenever the number of unfinished tasks
+ # drops to zero; thread waiting to join() is notified to resume
+ self.all_tasks_done = threading.Condition(self.mutex)
+ self.unfinished_tasks = 0
+
+ def task_done(self):
+ '''Indicate that a formerly enqueued task is complete.
+
+ Used by Queue consumer threads. For each get() used to fetch a task,
+ a subsequent call to task_done() tells the queue that the processing
+ on the task is complete.
+
+ If a join() is currently blocking, it will resume when all items
+ have been processed (meaning that a task_done() call was received
+ for every item that had been put() into the queue).
+
+ Raises a ValueError if called more times than there were items
+ placed in the queue.
+ '''
+ with self.all_tasks_done:
+ unfinished = self.unfinished_tasks - 1
+ if unfinished <= 0:
+ if unfinished < 0:
+ raise ValueError('task_done() called too many times')
+ self.all_tasks_done.notify_all()
+ self.unfinished_tasks = unfinished
+
+ def join(self):
+ '''Blocks until all items in the Queue have been gotten and processed.
+
+ The count of unfinished tasks goes up whenever an item is added to the
+ queue. The count goes down whenever a consumer thread calls task_done()
+ to indicate the item was retrieved and all work on it is complete.
+
+ When the count of unfinished tasks drops to zero, join() unblocks.
+ '''
+ with self.all_tasks_done:
+ while self.unfinished_tasks:
+ self.all_tasks_done.wait()
+
+ def qsize(self):
+ '''Return the approximate size of the queue (not reliable!).'''
+ with self.mutex:
+ return self._qsize()
+
+ def empty(self):
+ '''Return True if the queue is empty, False otherwise (not reliable!).
+
+ This method is likely to be removed at some point. Use qsize() == 0
+ as a direct substitute, but be aware that either approach risks a race
+ condition where a queue can grow before the result of empty() or
+ qsize() can be used.
+
+ To create code that needs to wait for all queued tasks to be
+ completed, the preferred technique is to use the join() method.
+ '''
+ with self.mutex:
+ return not self._qsize()
+
+ def full(self):
+ '''Return True if the queue is full, False otherwise (not reliable!).
+
+ This method is likely to be removed at some point. Use qsize() >= n
+ as a direct substitute, but be aware that either approach risks a race
+ condition where a queue can shrink before the result of full() or
+ qsize() can be used.
+ '''
+ with self.mutex:
+ return 0 < self.maxsize <= self._qsize()
+
+ def put(self, item, block=True, timeout=None):
+ '''Put an item into the queue.
+
+ If optional args 'block' is true and 'timeout' is None (the default),
+ block if necessary until a free slot is available. If 'timeout' is
+ a non-negative number, it blocks at most 'timeout' seconds and raises
+ the Full exception if no free slot was available within that time.
+ Otherwise ('block' is false), put an item on the queue if a free slot
+ is immediately available, else raise the Full exception ('timeout'
+ is ignored in that case).
+ '''
+ with self.not_full:
+ if self.maxsize > 0:
+ if not block:
+ if self._qsize() >= self.maxsize:
+ raise Full
+ elif timeout is None:
+ while self._qsize() >= self.maxsize:
+ self.not_full.wait()
+ elif timeout < 0:
+ raise ValueError("'timeout' must be a non-negative number")
+ else:
+ endtime = time() + timeout
+ while self._qsize() >= self.maxsize:
+ remaining = endtime - time()
+ if remaining <= 0.0:
+ raise Full
+ self.not_full.wait(remaining)
+ self._put(item)
+ self.unfinished_tasks += 1
+ self.not_empty.notify()
+
+ def get(self, block=True, timeout=None):
+ '''Remove and return an item from the queue.
+
+ If optional args 'block' is true and 'timeout' is None (the default),
+ block if necessary until an item is available. If 'timeout' is
+ a non-negative number, it blocks at most 'timeout' seconds and raises
+ the Empty exception if no item was available within that time.
+ Otherwise ('block' is false), return an item if one is immediately
+ available, else raise the Empty exception ('timeout' is ignored
+ in that case).
+ '''
+ with self.not_empty:
+ if not block:
+ if not self._qsize():
+ raise Empty
+ elif timeout is None:
+ while not self._qsize():
+ self.not_empty.wait()
+ elif timeout < 0:
+ raise ValueError("'timeout' must be a non-negative number")
+ else:
+ endtime = time() + timeout
+ while not self._qsize():
+ remaining = endtime - time()
+ if remaining <= 0.0:
+ raise Empty
+ self.not_empty.wait(remaining)
+ item = self._get()
+ self.not_full.notify()
+ return item
+
+ def put_nowait(self, item):
+ '''Put an item into the queue without blocking.
+
+ Only enqueue the item if a free slot is immediately available.
+ Otherwise raise the Full exception.
+ '''
+ return self.put(item, block=False)
+
+ def get_nowait(self):
+ '''Remove and return an item from the queue without blocking.
+
+ Only get an item if one is immediately available. Otherwise
+ raise the Empty exception.
+ '''
+ return self.get(block=False)
+
+ # Override these methods to implement other queue organizations
+ # (e.g. stack or priority queue).
+ # These will only be called with appropriate locks held
+
+ # Initialize the queue representation
+ def _init(self, maxsize):
+ self.queue = deque()
+
+ def _qsize(self):
+ return len(self.queue)
+
+ # Put a new item in the queue
+ def _put(self, item):
+ self.queue.append(item)
+
+ # Get an item from the queue
+ def _get(self):
+ return self.queue.popleft()
+
+ __class_getitem__ = classmethod(types.GenericAlias)
+
+
+class PriorityQueue(Queue):
+ '''Variant of Queue that retrieves open entries in priority order (lowest first).
+
+ Entries are typically tuples of the form: (priority number, data).
+ '''
+
+ def _init(self, maxsize):
+ self.queue = []
+
+ def _qsize(self):
+ return len(self.queue)
+
+ def _put(self, item):
+ heappush(self.queue, item)
+
+ def _get(self):
+ return heappop(self.queue)
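+
+# For example (illustrative; the lowest priority number is retrieved first):
+#
+#     pq = PriorityQueue()
+#     pq.put((2, 'code'))
+#     pq.put((1, 'eat'))
+#     pq.put((3, 'sleep'))
+#     assert pq.get() == (1, 'eat')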
+
+
+class LifoQueue(Queue):
+ '''Variant of Queue that retrieves most recently added entries first.'''
+
+ def _init(self, maxsize):
+ self.queue = []
+
+ def _qsize(self):
+ return len(self.queue)
+
+ def _put(self, item):
+ self.queue.append(item)
+
+ def _get(self):
+ return self.queue.pop()
+
+
+class _PySimpleQueue:
+ '''Simple, unbounded FIFO queue.
+
+ This pure Python implementation is not reentrant.
+ '''
+ # Note: while this pure Python version provides fairness
+ # (by using a threading.Semaphore which is itself fair, being based
+ # on threading.Condition), fairness is not part of the API contract.
+ # This allows the C version to use a different implementation.
+
+ def __init__(self):
+ self._queue = deque()
+ self._count = threading.Semaphore(0)
+
+ def put(self, item, block=True, timeout=None):
+ '''Put the item on the queue.
+
+ The optional 'block' and 'timeout' arguments are ignored, as this method
+ never blocks. They are provided for compatibility with the Queue class.
+ '''
+ self._queue.append(item)
+ self._count.release()
+
+ def get(self, block=True, timeout=None):
+ '''Remove and return an item from the queue.
+
+ If optional args 'block' is true and 'timeout' is None (the default),
+ block if necessary until an item is available. If 'timeout' is
+ a non-negative number, it blocks at most 'timeout' seconds and raises
+ the Empty exception if no item was available within that time.
+ Otherwise ('block' is false), return an item if one is immediately
+ available, else raise the Empty exception ('timeout' is ignored
+ in that case).
+ '''
+ if timeout is not None and timeout < 0:
+ raise ValueError("'timeout' must be a non-negative number")
+ if not self._count.acquire(block, timeout):
+ raise Empty
+ return self._queue.popleft()
+
+ def put_nowait(self, item):
+ '''Put an item into the queue without blocking.
+
+ This is exactly equivalent to `put(item, block=False)` and is only provided
+ for compatibility with the Queue class.
+ '''
+ return self.put(item, block=False)
+
+ def get_nowait(self):
+ '''Remove and return an item from the queue without blocking.
+
+ Only get an item if one is immediately available. Otherwise
+ raise the Empty exception.
+ '''
+ return self.get(block=False)
+
+ def empty(self):
+ '''Return True if the queue is empty, False otherwise (not reliable!).'''
+ return len(self._queue) == 0
+
+ def qsize(self):
+ '''Return the approximate size of the queue (not reliable!).'''
+ return len(self._queue)
+
+ __class_getitem__ = classmethod(types.GenericAlias)
+
+
+if SimpleQueue is None:
+ SimpleQueue = _PySimpleQueue
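+
+# Illustrative usage sketch (not part of the module): one producer feeding a
+# small pool of daemon consumers, coordinated with task_done()/join().
+#
+#     import queue, threading
+#
+#     q = queue.Queue(maxsize=8)
+#
+#     def worker():
+#         while True:
+#             item = q.get()
+#             try:
+#                 ...                # process 'item' here
+#             finally:
+#                 q.task_done()
+#
+#     for _ in range(4):
+#         threading.Thread(target=worker, daemon=True).start()
+#     for item in range(100):
+#         q.put(item)
+#     q.join()    # returns once task_done() was called for every item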
diff --git a/infer_4_37_2/lib/python3.10/reprlib.py b/infer_4_37_2/lib/python3.10/reprlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..616b3439b5de30a9a8cef6b1beb100625e825ee4
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/reprlib.py
@@ -0,0 +1,161 @@
+"""Redo the builtin repr() (representation) but with limits on most sizes."""
+
+__all__ = ["Repr", "repr", "recursive_repr"]
+
+import builtins
+from itertools import islice
+from _thread import get_ident
+
+def recursive_repr(fillvalue='...'):
+ 'Decorator to make a repr function return fillvalue for a recursive call'
+
+ def decorating_function(user_function):
+ repr_running = set()
+
+ def wrapper(self):
+ key = id(self), get_ident()
+ if key in repr_running:
+ return fillvalue
+ repr_running.add(key)
+ try:
+ result = user_function(self)
+ finally:
+ repr_running.discard(key)
+ return result
+
+ # Can't use functools.wraps() here because of bootstrap issues
+ wrapper.__module__ = getattr(user_function, '__module__')
+ wrapper.__doc__ = getattr(user_function, '__doc__')
+ wrapper.__name__ = getattr(user_function, '__name__')
+ wrapper.__qualname__ = getattr(user_function, '__qualname__')
+ wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+ return wrapper
+
+ return decorating_function
+
+class Repr:
+
+ def __init__(self):
+ self.maxlevel = 6
+ self.maxtuple = 6
+ self.maxlist = 6
+ self.maxarray = 5
+ self.maxdict = 4
+ self.maxset = 6
+ self.maxfrozenset = 6
+ self.maxdeque = 6
+ self.maxstring = 30
+ self.maxlong = 40
+ self.maxother = 30
+
+ def repr(self, x):
+ return self.repr1(x, self.maxlevel)
+
+ def repr1(self, x, level):
+ typename = type(x).__name__
+ if ' ' in typename:
+ parts = typename.split()
+ typename = '_'.join(parts)
+ if hasattr(self, 'repr_' + typename):
+ return getattr(self, 'repr_' + typename)(x, level)
+ else:
+ return self.repr_instance(x, level)
+
+ def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
+ n = len(x)
+ if level <= 0 and n:
+ s = '...'
+ else:
+ newlevel = level - 1
+ repr1 = self.repr1
+ pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
+ if n > maxiter: pieces.append('...')
+ s = ', '.join(pieces)
+ if n == 1 and trail: right = trail + right
+ return '%s%s%s' % (left, s, right)
+
+ def repr_tuple(self, x, level):
+ return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
+
+ def repr_list(self, x, level):
+ return self._repr_iterable(x, level, '[', ']', self.maxlist)
+
+ def repr_array(self, x, level):
+ if not x:
+ return "array('%s')" % x.typecode
+ header = "array('%s', [" % x.typecode
+ return self._repr_iterable(x, level, header, '])', self.maxarray)
+
+ def repr_set(self, x, level):
+ if not x:
+ return 'set()'
+ x = _possibly_sorted(x)
+ return self._repr_iterable(x, level, '{', '}', self.maxset)
+
+ def repr_frozenset(self, x, level):
+ if not x:
+ return 'frozenset()'
+ x = _possibly_sorted(x)
+ return self._repr_iterable(x, level, 'frozenset({', '})',
+ self.maxfrozenset)
+
+ def repr_deque(self, x, level):
+ return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
+
+ def repr_dict(self, x, level):
+ n = len(x)
+ if n == 0: return '{}'
+ if level <= 0: return '{...}'
+ newlevel = level - 1
+ repr1 = self.repr1
+ pieces = []
+ for key in islice(_possibly_sorted(x), self.maxdict):
+ keyrepr = repr1(key, newlevel)
+ valrepr = repr1(x[key], newlevel)
+ pieces.append('%s: %s' % (keyrepr, valrepr))
+ if n > self.maxdict: pieces.append('...')
+ s = ', '.join(pieces)
+ return '{%s}' % (s,)
+
+ def repr_str(self, x, level):
+ s = builtins.repr(x[:self.maxstring])
+ if len(s) > self.maxstring:
+ i = max(0, (self.maxstring-3)//2)
+ j = max(0, self.maxstring-3-i)
+ s = builtins.repr(x[:i] + x[len(x)-j:])
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+ def repr_int(self, x, level):
+ s = builtins.repr(x) # XXX Hope this isn't too slow...
+ if len(s) > self.maxlong:
+ i = max(0, (self.maxlong-3)//2)
+ j = max(0, self.maxlong-3-i)
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+ def repr_instance(self, x, level):
+ try:
+ s = builtins.repr(x)
+ # Bugs in x.__repr__() can cause arbitrary
+ # exceptions -- then make up something
+ except Exception:
+ return '<%s instance at %#x>' % (x.__class__.__name__, id(x))
+ if len(s) > self.maxother:
+ i = max(0, (self.maxother-3)//2)
+ j = max(0, self.maxother-3-i)
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+
+def _possibly_sorted(x):
+ # Since not all sequences of items can be sorted and comparison
+ # functions may raise arbitrary exceptions, return an unsorted
+ # sequence in that case.
+ try:
+ return sorted(x)
+ except Exception:
+ return list(x)
+
+aRepr = Repr()
+repr = aRepr.repr
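+
+# Illustrative usage sketch (not part of the module):
+#
+#     import reprlib
+#     r = reprlib.Repr()
+#     r.maxlist = 3
+#     r.repr(list(range(100)))    # '[0, 1, 2, ...]'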
diff --git a/infer_4_37_2/lib/python3.10/rlcompleter.py b/infer_4_37_2/lib/python3.10/rlcompleter.py
new file mode 100644
index 0000000000000000000000000000000000000000..98b7930b32fab32e387500dee01c7a2a68eb2e3a
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/rlcompleter.py
@@ -0,0 +1,219 @@
+"""Word completion for GNU readline.
+
+The completer completes keywords, built-ins and globals in a selectable
+namespace (which defaults to __main__); when completing NAME.NAME..., it
+evaluates (!) the expression up to the last dot and completes its attributes.
+
+It's very cool to do "import sys", type "sys.", hit the completion key (twice),
+and see the list of names defined by the sys module!
+
+Tip: to use the tab key as the completion key, call
+
+ readline.parse_and_bind("tab: complete")
+
+Notes:
+
+- Exceptions raised by the completer function are *ignored* (and generally cause
+ the completion to fail). This is a feature -- since readline sets the tty
+ device in raw (or cbreak) mode, printing a traceback wouldn't work well
+ without some complicated hoopla to save, reset and restore the tty state.
+
+- The evaluation of the NAME.NAME... form may cause arbitrary application
+ defined code to be executed if an object with a __getattr__ hook is found.
+ Since it is the responsibility of the application (or the user) to enable this
+ feature, I consider this an acceptable risk. More complicated expressions
+ (e.g. function calls or indexing operations) are *not* evaluated.
+
+- When the original stdin is not a tty device, GNU readline is never
+ used, and this module (and the readline module) are silently inactive.
+
+"""
+
+import atexit
+import builtins
+import inspect
+import __main__
+
+__all__ = ["Completer"]
+
+class Completer:
+ def __init__(self, namespace = None):
+ """Create a new completer for the command line.
+
+ Completer([namespace]) -> completer instance.
+
+ If unspecified, the default namespace where completions are performed
+ is __main__ (technically, __main__.__dict__). Namespaces should be
+ given as dictionaries.
+
+ Completer instances should be used as the completion mechanism of
+ readline via the set_completer() call:
+
+ readline.set_completer(Completer(my_namespace).complete)
+ """
+
+ if namespace and not isinstance(namespace, dict):
+ raise TypeError('namespace must be a dictionary')
+
+ # Don't bind to namespace quite yet, but flag whether the user wants a
+ # specific namespace or to use __main__.__dict__. This will allow us
+ # to bind to __main__.__dict__ at completion time, not now.
+ if namespace is None:
+ self.use_main_ns = 1
+ else:
+ self.use_main_ns = 0
+ self.namespace = namespace
+
+ def complete(self, text, state):
+ """Return the next possible completion for 'text'.
+
+ This is called successively with state == 0, 1, 2, ... until it
+ returns None. The completion should begin with 'text'.
+
+ """
+ if self.use_main_ns:
+ self.namespace = __main__.__dict__
+
+ if not text.strip():
+ if state == 0:
+ if _readline_available:
+ readline.insert_text('\t')
+ readline.redisplay()
+ return ''
+ else:
+ return '\t'
+ else:
+ return None
+
+ if state == 0:
+ if "." in text:
+ self.matches = self.attr_matches(text)
+ else:
+ self.matches = self.global_matches(text)
+ try:
+ return self.matches[state]
+ except IndexError:
+ return None
+
+ def _callable_postfix(self, val, word):
+ if callable(val):
+ word += "("
+ try:
+ if not inspect.signature(val).parameters:
+ word += ")"
+ except ValueError:
+ pass
+
+ return word
+
+ def global_matches(self, text):
+ """Compute matches when text is a simple name.
+
+ Return a list of all keywords, built-in functions and names currently
+ defined in self.namespace that match.
+
+ """
+ import keyword
+ matches = []
+ seen = {"__builtins__"}
+ n = len(text)
+ for word in keyword.kwlist:
+ if word[:n] == text:
+ seen.add(word)
+ if word in {'finally', 'try'}:
+ word = word + ':'
+ elif word not in {'False', 'None', 'True',
+ 'break', 'continue', 'pass',
+ 'else'}:
+ word = word + ' '
+ matches.append(word)
+ for nspace in [self.namespace, builtins.__dict__]:
+ for word, val in nspace.items():
+ if word[:n] == text and word not in seen:
+ seen.add(word)
+ matches.append(self._callable_postfix(val, word))
+ return matches
+
+ def attr_matches(self, text):
+ """Compute matches when text contains a dot.
+
+ Assuming the text is of the form NAME.NAME....[NAME], and is
+ evaluable in self.namespace, it will be evaluated and its attributes
+ (as revealed by dir()) are used as possible completions. (For class
+ instances, class members are also considered.)
+
+ WARNING: this can still invoke arbitrary C code, if an object
+ with a __getattr__ hook is evaluated.
+
+ """
+ import re
+ m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
+ if not m:
+ return []
+ expr, attr = m.group(1, 3)
+ try:
+ thisobject = eval(expr, self.namespace)
+ except Exception:
+ return []
+
+ # get the content of the object, except __builtins__
+ words = set(dir(thisobject))
+ words.discard("__builtins__")
+
+ if hasattr(thisobject, '__class__'):
+ words.add('__class__')
+ words.update(get_class_members(thisobject.__class__))
+ matches = []
+ n = len(attr)
+ if attr == '':
+ noprefix = '_'
+ elif attr == '_':
+ noprefix = '__'
+ else:
+ noprefix = None
+ while True:
+ for word in words:
+ if (word[:n] == attr and
+ not (noprefix and word[:n+1] == noprefix)):
+ match = "%s.%s" % (expr, word)
+ if isinstance(getattr(type(thisobject), word, None),
+ property):
+ # bpo-44752: thisobject.word is a method decorated by
+ # `@property`. What follows applies a postfix if
+                        # thisobject.word is callable, but now we know that
+ # this is not callable (because it is a property).
+ # Also, getattr(thisobject, word) will evaluate the
+ # property method, which is not desirable.
+ matches.append(match)
+ continue
+ if (value := getattr(thisobject, word, None)) is not None:
+ matches.append(self._callable_postfix(value, match))
+ else:
+ matches.append(match)
+ if matches or not noprefix:
+ break
+ if noprefix == '_':
+ noprefix = '__'
+ else:
+ noprefix = None
+ matches.sort()
+ return matches
+
+def get_class_members(klass):
+ ret = dir(klass)
+ if hasattr(klass,'__bases__'):
+ for base in klass.__bases__:
+ ret = ret + get_class_members(base)
+ return ret
+
+try:
+ import readline
+except ImportError:
+ _readline_available = False
+else:
+ readline.set_completer(Completer().complete)
+ # Release references early at shutdown (the readline module's
+ # contents are quasi-immortal, and the completer function holds a
+ # reference to globals).
+ atexit.register(lambda: readline.set_completer(None))
+ _readline_available = True
diff --git a/infer_4_37_2/lib/python3.10/secrets.py b/infer_4_37_2/lib/python3.10/secrets.py
new file mode 100644
index 0000000000000000000000000000000000000000..a546efbdd4204c81d0a8d8917dc539380fa77070
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/secrets.py
@@ -0,0 +1,72 @@
+"""Generate cryptographically strong pseudo-random numbers suitable for
+managing secrets such as account authentication, tokens, and similar.
+
+See PEP 506 for more information.
+https://www.python.org/dev/peps/pep-0506/
+
+"""
+
+__all__ = ['choice', 'randbelow', 'randbits', 'SystemRandom',
+ 'token_bytes', 'token_hex', 'token_urlsafe',
+ 'compare_digest',
+ ]
+
+
+import base64
+import binascii
+
+from hmac import compare_digest
+from random import SystemRandom
+
+_sysrand = SystemRandom()
+
+randbits = _sysrand.getrandbits
+choice = _sysrand.choice
+
+def randbelow(exclusive_upper_bound):
+ """Return a random int in the range [0, n)."""
+ if exclusive_upper_bound <= 0:
+ raise ValueError("Upper bound must be positive.")
+ return _sysrand._randbelow(exclusive_upper_bound)
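+
+# For example (illustrative), randbelow(6) returns an int in range(6), so
+# 1 + randbelow(6) is an unbiased die roll in 1..6.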
+
+DEFAULT_ENTROPY = 32 # number of bytes to return by default
+
+def token_bytes(nbytes=None):
+ """Return a random byte string containing *nbytes* bytes.
+
+ If *nbytes* is ``None`` or not supplied, a reasonable
+ default is used.
+
+ >>> token_bytes(16) #doctest:+SKIP
+ b'\\xebr\\x17D*t\\xae\\xd4\\xe3S\\xb6\\xe2\\xebP1\\x8b'
+
+ """
+ if nbytes is None:
+ nbytes = DEFAULT_ENTROPY
+ return _sysrand.randbytes(nbytes)
+
+def token_hex(nbytes=None):
+ """Return a random text string, in hexadecimal.
+
+ The string has *nbytes* random bytes, each byte converted to two
+ hex digits. If *nbytes* is ``None`` or not supplied, a reasonable
+ default is used.
+
+ >>> token_hex(16) #doctest:+SKIP
+ 'f9bf78b9a18ce6d46a0cd2b0b86df9da'
+
+ """
+ return binascii.hexlify(token_bytes(nbytes)).decode('ascii')
+
+def token_urlsafe(nbytes=None):
+ """Return a random URL-safe text string, in Base64 encoding.
+
+ The string has *nbytes* random bytes. If *nbytes* is ``None``
+ or not supplied, a reasonable default is used.
+
+ >>> token_urlsafe(16) #doctest:+SKIP
+ 'Drmhze6EPcv0fN_81Bj-nA'
+
+ """
+ tok = token_bytes(nbytes)
+ return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii')
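+
+# A short usage sketch (illustrative; `submitted` stands for a hypothetical
+# request value): issue a URL-safe token, then compare in constant time.
+#
+#     token = token_urlsafe(32)                # store server-side
+#     ok = compare_digest(token, submitted)    # timing-safe comparison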
diff --git a/infer_4_37_2/lib/python3.10/shelve.py b/infer_4_37_2/lib/python3.10/shelve.py
new file mode 100644
index 0000000000000000000000000000000000000000..e053c397345a07e69dfa8f72a3d5ebbede86a883
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/shelve.py
@@ -0,0 +1,243 @@
+"""Manage shelves of pickled objects.
+
+A "shelf" is a persistent, dictionary-like object. The difference
+with dbm databases is that the values (not the keys!) in a shelf can
+be essentially arbitrary Python objects -- anything that the "pickle"
+module can handle. This includes most class instances, recursive data
+types, and objects containing lots of shared sub-objects. The keys
+are ordinary strings.
+
+To summarize the interface (key is a string, data is an arbitrary
+object):
+
+ import shelve
+ d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
+
+ d[key] = data # store data at key (overwrites old data if
+ # using an existing key)
+ data = d[key] # retrieve a COPY of the data at key (raise
+ # KeyError if no such key) -- NOTE that this
+ # access returns a *copy* of the entry!
+ del d[key] # delete data stored at key (raises KeyError
+ # if no such key)
+ flag = key in d # true if the key exists
+ list = d.keys() # a list of all existing keys (slow!)
+
+ d.close() # close it
+
+Depending on the implementation, closing a persistent dictionary may
+or may not be necessary to flush changes to disk.
+
+Normally, d[key] returns a COPY of the entry. This needs care when
+mutable entries are mutated: for example, if d[key] is a list,
+ d[key].append(anitem)
+does NOT modify the entry d[key] itself, as stored in the persistent
+mapping -- it only modifies the copy, which is then immediately
+discarded, so that the append has NO effect whatsoever. To append an
+item to d[key] in a way that will affect the persistent mapping, use:
+ data = d[key]
+ data.append(anitem)
+ d[key] = data
+
+To avoid the problem with mutable entries, you may pass the keyword
+argument writeback=True in the call to shelve.open. When you use:
+ d = shelve.open(filename, writeback=True)
+then d keeps a cache of all entries you access, and writes them all back
+to the persistent mapping when you call d.close(). This ensures that
+such usage as d[key].append(anitem) works as intended.
+
+However, using keyword argument writeback=True may consume vast amounts
+of memory for the cache, and it may make d.close() very slow, if you
+access many of d's entries after opening it in this way: d has no way to
+check which of the entries you access are mutable and/or which ones you
+actually mutate, so it must cache, and write back at close, all of the
+entries that you access. You can call d.sync() to write back all the
+entries in the cache, and empty the cache (d.sync() also synchronizes
+the persistent dictionary on disk, if feasible).
+"""
+
+from pickle import DEFAULT_PROTOCOL, Pickler, Unpickler
+from io import BytesIO
+
+import collections.abc
+
+__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
+
+class _ClosedDict(collections.abc.MutableMapping):
+ 'Marker for a closed dict. Access attempts raise a ValueError.'
+
+ def closed(self, *args):
+ raise ValueError('invalid operation on closed shelf')
+ __iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
+
+ def __repr__(self):
+ return ''
+
+
+class Shelf(collections.abc.MutableMapping):
+ """Base class for shelf implementations.
+
+ This is initialized with a dictionary-like object.
+ See the module's __doc__ string for an overview of the interface.
+ """
+
+ def __init__(self, dict, protocol=None, writeback=False,
+ keyencoding="utf-8"):
+ self.dict = dict
+ if protocol is None:
+ protocol = DEFAULT_PROTOCOL
+ self._protocol = protocol
+ self.writeback = writeback
+ self.cache = {}
+ self.keyencoding = keyencoding
+
+ def __iter__(self):
+ for k in self.dict.keys():
+ yield k.decode(self.keyencoding)
+
+ def __len__(self):
+ return len(self.dict)
+
+ def __contains__(self, key):
+ return key.encode(self.keyencoding) in self.dict
+
+ def get(self, key, default=None):
+ if key.encode(self.keyencoding) in self.dict:
+ return self[key]
+ return default
+
+ def __getitem__(self, key):
+ try:
+ value = self.cache[key]
+ except KeyError:
+ f = BytesIO(self.dict[key.encode(self.keyencoding)])
+ value = Unpickler(f).load()
+ if self.writeback:
+ self.cache[key] = value
+ return value
+
+ def __setitem__(self, key, value):
+ if self.writeback:
+ self.cache[key] = value
+ f = BytesIO()
+ p = Pickler(f, self._protocol)
+ p.dump(value)
+ self.dict[key.encode(self.keyencoding)] = f.getvalue()
+
+ def __delitem__(self, key):
+ del self.dict[key.encode(self.keyencoding)]
+ try:
+ del self.cache[key]
+ except KeyError:
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+ def close(self):
+ if self.dict is None:
+ return
+ try:
+ self.sync()
+ try:
+ self.dict.close()
+ except AttributeError:
+ pass
+ finally:
+ # Catch errors that may happen when close is called from __del__
+ # because CPython is in interpreter shutdown.
+ try:
+ self.dict = _ClosedDict()
+ except:
+ self.dict = None
+
+ def __del__(self):
+ if not hasattr(self, 'writeback'):
+ # __init__ didn't succeed, so don't bother closing
+ # see http://bugs.python.org/issue1339007 for details
+ return
+ self.close()
+
+ def sync(self):
+ if self.writeback and self.cache:
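+            # Flush with writeback disabled so the self[key] = entry
+            # assignments below don't re-insert entries into the cache.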
+ self.writeback = False
+ for key, entry in self.cache.items():
+ self[key] = entry
+ self.writeback = True
+ self.cache = {}
+ if hasattr(self.dict, 'sync'):
+ self.dict.sync()
+
+
+class BsdDbShelf(Shelf):
+ """Shelf implementation using the "BSD" db interface.
+
+ This adds methods first(), next(), previous(), last() and
+ set_location() that have no counterpart in [g]dbm databases.
+
+ The actual database must be opened using one of the "bsddb"
+ modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
+ bsddb.rnopen) and passed to the constructor.
+
+ See the module's __doc__ string for an overview of the interface.
+ """
+
+ def __init__(self, dict, protocol=None, writeback=False,
+ keyencoding="utf-8"):
+ Shelf.__init__(self, dict, protocol, writeback, keyencoding)
+
+ def set_location(self, key):
+ (key, value) = self.dict.set_location(key)
+ f = BytesIO(value)
+ return (key.decode(self.keyencoding), Unpickler(f).load())
+
+ def next(self):
+ (key, value) = next(self.dict)
+ f = BytesIO(value)
+ return (key.decode(self.keyencoding), Unpickler(f).load())
+
+ def previous(self):
+ (key, value) = self.dict.previous()
+ f = BytesIO(value)
+ return (key.decode(self.keyencoding), Unpickler(f).load())
+
+ def first(self):
+ (key, value) = self.dict.first()
+ f = BytesIO(value)
+ return (key.decode(self.keyencoding), Unpickler(f).load())
+
+ def last(self):
+ (key, value) = self.dict.last()
+ f = BytesIO(value)
+ return (key.decode(self.keyencoding), Unpickler(f).load())
+
+
+class DbfilenameShelf(Shelf):
+ """Shelf implementation using the "dbm" generic dbm interface.
+
+ This is initialized with the filename for the dbm database.
+ See the module's __doc__ string for an overview of the interface.
+ """
+
+ def __init__(self, filename, flag='c', protocol=None, writeback=False):
+ import dbm
+ Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
+
+
+def open(filename, flag='c', protocol=None, writeback=False):
+ """Open a persistent dictionary for reading and writing.
+
+ The filename parameter is the base filename for the underlying
+ database. As a side-effect, an extension may be added to the
+ filename and more than one file may be created. The optional flag
+ parameter has the same interpretation as the flag parameter of
+ dbm.open(). The optional protocol parameter specifies the
+ version of the pickle protocol.
+
+ See the module's __doc__ string for an overview of the interface.
+ """
+
+ return DbfilenameShelf(filename, flag, protocol, writeback)
diff --git a/infer_4_37_2/lib/python3.10/shlex.py b/infer_4_37_2/lib/python3.10/shlex.py
new file mode 100644
index 0000000000000000000000000000000000000000..4801a6c1d47bd9e0a8ada16089221c8237b777d5
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/shlex.py
@@ -0,0 +1,350 @@
+"""A lexical analyzer class for simple shell-like syntaxes."""
+
+# Module and documentation by Eric S. Raymond, 21 Dec 1998
+# Input stacking and error message cleanup added by ESR, March 2000
+# push_source() and pop_source() made explicit by ESR, January 2001.
+# Posix compliance, split(), string arguments, and
+# iterator interface by Gustavo Niemeyer, April 2003.
+# changes to tokenize more like Posix shells by Vinay Sajip, July 2016.
+
+import os
+import re
+import sys
+from collections import deque
+
+from io import StringIO
+
+__all__ = ["shlex", "split", "quote", "join"]
+
+class shlex:
+ "A lexical analyzer class for simple shell-like syntaxes."
+ def __init__(self, instream=None, infile=None, posix=False,
+ punctuation_chars=False):
+ if isinstance(instream, str):
+ instream = StringIO(instream)
+ if instream is not None:
+ self.instream = instream
+ self.infile = infile
+ else:
+ self.instream = sys.stdin
+ self.infile = None
+ self.posix = posix
+ if posix:
+ self.eof = None
+ else:
+ self.eof = ''
+ self.commenters = '#'
+        self.wordchars = ('abcdefghijklmnopqrstuvwxyz'
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
+ if self.posix:
+ self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
+ 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
+ self.whitespace = ' \t\r\n'
+ self.whitespace_split = False
+ self.quotes = '\'"'
+ self.escape = '\\'
+ self.escapedquotes = '"'
+ self.state = ' '
+ self.pushback = deque()
+ self.lineno = 1
+ self.debug = 0
+ self.token = ''
+ self.filestack = deque()
+ self.source = None
+ if not punctuation_chars:
+ punctuation_chars = ''
+ elif punctuation_chars is True:
+ punctuation_chars = '();<>|&'
+ self._punctuation_chars = punctuation_chars
+ if punctuation_chars:
+ # _pushback_chars is a push back queue used by lookahead logic
+ self._pushback_chars = deque()
+ # these chars added because allowed in file names, args, wildcards
+ self.wordchars += '~-./*?='
+            # remove any punctuation chars from wordchars
+ t = self.wordchars.maketrans(dict.fromkeys(punctuation_chars))
+ self.wordchars = self.wordchars.translate(t)
+
+ @property
+ def punctuation_chars(self):
+ return self._punctuation_chars
+
+ def push_token(self, tok):
+ "Push a token onto the stack popped by the get_token method"
+ if self.debug >= 1:
+ print("shlex: pushing token " + repr(tok))
+ self.pushback.appendleft(tok)
+
+ def push_source(self, newstream, newfile=None):
+ "Push an input source onto the lexer's input source stack."
+ if isinstance(newstream, str):
+ newstream = StringIO(newstream)
+ self.filestack.appendleft((self.infile, self.instream, self.lineno))
+ self.infile = newfile
+ self.instream = newstream
+ self.lineno = 1
+ if self.debug:
+ if newfile is not None:
+ print('shlex: pushing to file %s' % (self.infile,))
+ else:
+ print('shlex: pushing to stream %s' % (self.instream,))
+
+ def pop_source(self):
+ "Pop the input source stack."
+ self.instream.close()
+ (self.infile, self.instream, self.lineno) = self.filestack.popleft()
+ if self.debug:
+ print('shlex: popping to %s, line %d' \
+ % (self.instream, self.lineno))
+ self.state = ' '
+
+ def get_token(self):
+ "Get a token from the input stream (or from stack if it's nonempty)"
+ if self.pushback:
+ tok = self.pushback.popleft()
+ if self.debug >= 1:
+ print("shlex: popping token " + repr(tok))
+ return tok
+ # No pushback. Get a token.
+ raw = self.read_token()
+ # Handle inclusions
+ if self.source is not None:
+ while raw == self.source:
+ spec = self.sourcehook(self.read_token())
+ if spec:
+ (newfile, newstream) = spec
+ self.push_source(newstream, newfile)
+ raw = self.get_token()
+ # Maybe we got EOF instead?
+ while raw == self.eof:
+ if not self.filestack:
+ return self.eof
+ else:
+ self.pop_source()
+ raw = self.get_token()
+ # Neither inclusion nor EOF
+ if self.debug >= 1:
+ if raw != self.eof:
+ print("shlex: token=" + repr(raw))
+ else:
+ print("shlex: token=EOF")
+ return raw
+
+ def read_token(self):
+ quoted = False
+ escapedstate = ' '
+ while True:
+ if self.punctuation_chars and self._pushback_chars:
+ nextchar = self._pushback_chars.pop()
+ else:
+ nextchar = self.instream.read(1)
+ if nextchar == '\n':
+ self.lineno += 1
+ if self.debug >= 3:
+ print("shlex: in state %r I see character: %r" % (self.state,
+ nextchar))
+ if self.state is None:
+ self.token = '' # past end of file
+ break
+ elif self.state == ' ':
+ if not nextchar:
+ self.state = None # end of file
+ break
+ elif nextchar in self.whitespace:
+ if self.debug >= 2:
+ print("shlex: I see whitespace in whitespace state")
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ elif nextchar in self.commenters:
+ self.instream.readline()
+ self.lineno += 1
+ elif self.posix and nextchar in self.escape:
+ escapedstate = 'a'
+ self.state = nextchar
+ elif nextchar in self.wordchars:
+ self.token = nextchar
+ self.state = 'a'
+ elif nextchar in self.punctuation_chars:
+ self.token = nextchar
+ self.state = 'c'
+ elif nextchar in self.quotes:
+ if not self.posix:
+ self.token = nextchar
+ self.state = nextchar
+ elif self.whitespace_split:
+ self.token = nextchar
+ self.state = 'a'
+ else:
+ self.token = nextchar
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ elif self.state in self.quotes:
+ quoted = True
+ if not nextchar: # end of file
+ if self.debug >= 2:
+ print("shlex: I see EOF in quotes state")
+ # XXX what error should be raised here?
+ raise ValueError("No closing quotation")
+ if nextchar == self.state:
+ if not self.posix:
+ self.token += nextchar
+ self.state = ' '
+ break
+ else:
+ self.state = 'a'
+ elif (self.posix and nextchar in self.escape and self.state
+ in self.escapedquotes):
+ escapedstate = self.state
+ self.state = nextchar
+ else:
+ self.token += nextchar
+ elif self.state in self.escape:
+ if not nextchar: # end of file
+ if self.debug >= 2:
+ print("shlex: I see EOF in escape state")
+ # XXX what error should be raised here?
+ raise ValueError("No escaped character")
+ # In posix shells, only the quote itself or the escape
+ # character may be escaped within quotes.
+ if (escapedstate in self.quotes and
+ nextchar != self.state and nextchar != escapedstate):
+ self.token += self.state
+ self.token += nextchar
+ self.state = escapedstate
+ elif self.state in ('a', 'c'):
+ if not nextchar:
+ self.state = None # end of file
+ break
+ elif nextchar in self.whitespace:
+ if self.debug >= 2:
+ print("shlex: I see whitespace in word state")
+ self.state = ' '
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ elif nextchar in self.commenters:
+ self.instream.readline()
+ self.lineno += 1
+ if self.posix:
+ self.state = ' '
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ elif self.state == 'c':
+ if nextchar in self.punctuation_chars:
+ self.token += nextchar
+ else:
+ if nextchar not in self.whitespace:
+ self._pushback_chars.append(nextchar)
+ self.state = ' '
+ break
+ elif self.posix and nextchar in self.quotes:
+ self.state = nextchar
+ elif self.posix and nextchar in self.escape:
+ escapedstate = 'a'
+ self.state = nextchar
+ elif (nextchar in self.wordchars or nextchar in self.quotes
+ or (self.whitespace_split and
+ nextchar not in self.punctuation_chars)):
+ self.token += nextchar
+ else:
+ if self.punctuation_chars:
+ self._pushback_chars.append(nextchar)
+ else:
+ self.pushback.appendleft(nextchar)
+ if self.debug >= 2:
+ print("shlex: I see punctuation in word state")
+ self.state = ' '
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ result = self.token
+ self.token = ''
+ if self.posix and not quoted and result == '':
+ result = None
+ if self.debug > 1:
+ if result:
+ print("shlex: raw token=" + repr(result))
+ else:
+ print("shlex: raw token=EOF")
+ return result
+
+ def sourcehook(self, newfile):
+ "Hook called on a filename to be sourced."
+ if newfile[0] == '"':
+ newfile = newfile[1:-1]
+ # This implements cpp-like semantics for relative-path inclusion.
+ if isinstance(self.infile, str) and not os.path.isabs(newfile):
+ newfile = os.path.join(os.path.dirname(self.infile), newfile)
+ return (newfile, open(newfile, "r"))
+
+ def error_leader(self, infile=None, lineno=None):
+ "Emit a C-compiler-like, Emacs-friendly error-message leader."
+ if infile is None:
+ infile = self.infile
+ if lineno is None:
+ lineno = self.lineno
+ return "\"%s\", line %d: " % (infile, lineno)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ token = self.get_token()
+ if token == self.eof:
+ raise StopIteration
+ return token
+
+def split(s, comments=False, posix=True):
+ """Split the string *s* using shell-like syntax."""
+ if s is None:
+ import warnings
+ warnings.warn("Passing None for 's' to shlex.split() is deprecated.",
+ DeprecationWarning, stacklevel=2)
+ lex = shlex(s, posix=posix)
+ lex.whitespace_split = True
+ if not comments:
+ lex.commenters = ''
+ return list(lex)
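+
+# For example (illustrative):
+#     >>> split('ls -l "My Docs"')
+#     ['ls', '-l', 'My Docs']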
+
+
+def join(split_command):
+ """Return a shell-escaped string from *split_command*."""
+ return ' '.join(quote(arg) for arg in split_command)
+
+
+_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.ASCII).search
+
+def quote(s):
+ """Return a shell-escaped version of the string *s*."""
+ if not s:
+ return "''"
+ if _find_unsafe(s) is None:
+ return s
+
+ # use single quotes, and put single quotes into double quotes
+ # the string $'b is then quoted as '$'"'"'b'
+ return "'" + s.replace("'", "'\"'\"'") + "'"
+
+
+def _print_tokens(lexer):
+ while 1:
+ tt = lexer.get_token()
+ if not tt:
+ break
+ print("Token: " + repr(tt))
+
+if __name__ == '__main__':
+ if len(sys.argv) == 1:
+ _print_tokens(shlex())
+ else:
+ fn = sys.argv[1]
+ with open(fn) as f:
+ _print_tokens(shlex(f, fn))
diff --git a/infer_4_37_2/lib/python3.10/shutil.py b/infer_4_37_2/lib/python3.10/shutil.py
new file mode 100644
index 0000000000000000000000000000000000000000..482ce95a7b23caff1404dcf4b78a494895cb1f03
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/shutil.py
@@ -0,0 +1,1517 @@
+"""Utility functions for copying and archiving files and directory trees.
+
+XXX The functions here don't copy the resource fork or other metadata on Mac.
+
+"""
+
+import os
+import sys
+import stat
+import fnmatch
+import collections
+import errno
+
+try:
+ import zlib
+ del zlib
+ _ZLIB_SUPPORTED = True
+except ImportError:
+ _ZLIB_SUPPORTED = False
+
+try:
+ import bz2
+ del bz2
+ _BZ2_SUPPORTED = True
+except ImportError:
+ _BZ2_SUPPORTED = False
+
+try:
+ import lzma
+ del lzma
+ _LZMA_SUPPORTED = True
+except ImportError:
+ _LZMA_SUPPORTED = False
+
+_WINDOWS = os.name == 'nt'
+posix = nt = None
+if os.name == 'posix':
+ import posix
+elif _WINDOWS:
+ import nt
+
+COPY_BUFSIZE = 1024 * 1024 if _WINDOWS else 64 * 1024
+_USE_CP_SENDFILE = hasattr(os, "sendfile") and sys.platform.startswith("linux")
+_HAS_FCOPYFILE = posix and hasattr(posix, "_fcopyfile") # macOS
+
+# CMD defaults in Windows 10
+_WIN_DEFAULT_PATHEXT = ".COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC"
+
+__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
+ "copytree", "move", "rmtree", "Error", "SpecialFileError",
+ "ExecError", "make_archive", "get_archive_formats",
+ "register_archive_format", "unregister_archive_format",
+ "get_unpack_formats", "register_unpack_format",
+ "unregister_unpack_format", "unpack_archive",
+ "ignore_patterns", "chown", "which", "get_terminal_size",
+ "SameFileError"]
+ # disk_usage is added later, if available on the platform
+
+class Error(OSError):
+ pass
+
+class SameFileError(Error):
+ """Raised when source and destination are the same file."""
+
+class SpecialFileError(OSError):
+ """Raised when trying to do a kind of operation (e.g. copying) which is
+ not supported on a special file (e.g. a named pipe)"""
+
+class ExecError(OSError):
+ """Raised when a command could not be executed"""
+
+class ReadError(OSError):
+ """Raised when an archive cannot be read"""
+
+class RegistryError(Exception):
+ """Raised when a registry operation with the archiving
+ and unpacking registries fails"""
+
+class _GiveupOnFastCopy(Exception):
+ """Raised as a signal to fallback on using raw read()/write()
+ file copy when fast-copy functions fail to do so.
+ """
+
+def _fastcopy_fcopyfile(fsrc, fdst, flags):
+ """Copy a regular file content or metadata by using high-performance
+ fcopyfile(3) syscall (macOS).
+ """
+ try:
+ infd = fsrc.fileno()
+ outfd = fdst.fileno()
+ except Exception as err:
+ raise _GiveupOnFastCopy(err) # not a regular file
+
+ try:
+ posix._fcopyfile(infd, outfd, flags)
+ except OSError as err:
+ err.filename = fsrc.name
+ err.filename2 = fdst.name
+ if err.errno in {errno.EINVAL, errno.ENOTSUP}:
+ raise _GiveupOnFastCopy(err)
+ else:
+ raise err from None
+
+def _fastcopy_sendfile(fsrc, fdst):
+ """Copy data from one regular mmap-like fd to another by using
+ high-performance sendfile(2) syscall.
+ This should work on Linux >= 2.6.33 only.
+ """
+ # Note: copyfileobj() is left alone in order to not introduce any
+ # unexpected breakage. Possible risks by using zero-copy calls
+ # in copyfileobj() are:
+ # - fdst cannot be open in "a"(ppend) mode
+ # - fsrc and fdst may be open in "t"(ext) mode
+ # - fsrc may be a BufferedReader (which hides unread data in a buffer),
+ # GzipFile (which decompresses data), HTTPResponse (which decodes
+ # chunks).
+ # - possibly others (e.g. encrypted fs/partition?)
+ global _USE_CP_SENDFILE
+ try:
+ infd = fsrc.fileno()
+ outfd = fdst.fileno()
+ except Exception as err:
+ raise _GiveupOnFastCopy(err) # not a regular file
+
+ # Hopefully the whole file will be copied in a single call.
+    # sendfile() is called in a loop until EOF is reached (0 return)
+ # so a bufsize smaller or bigger than the actual file size
+ # should not make any difference, also in case the file content
+ # changes while being copied.
+ try:
+ blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8MiB
+ except OSError:
+ blocksize = 2 ** 27 # 128MiB
+ # On 32-bit architectures truncate to 1GiB to avoid OverflowError,
+ # see bpo-38319.
+ if sys.maxsize < 2 ** 32:
+ blocksize = min(blocksize, 2 ** 30)
+
+ offset = 0
+ while True:
+ try:
+ sent = os.sendfile(outfd, infd, offset, blocksize)
+ except OSError as err:
+            # ...in order to have a more informative exception.
+ err.filename = fsrc.name
+ err.filename2 = fdst.name
+
+ if err.errno == errno.ENOTSOCK:
+ # sendfile() on this platform (probably Linux < 2.6.33)
+ # does not support copies between regular files (only
+ # sockets).
+ _USE_CP_SENDFILE = False
+ raise _GiveupOnFastCopy(err)
+
+ if err.errno == errno.ENOSPC: # filesystem is full
+ raise err from None
+
+ # Give up on first call and if no data was copied.
+ if offset == 0 and os.lseek(outfd, 0, os.SEEK_CUR) == 0:
+ raise _GiveupOnFastCopy(err)
+
+ raise err
+ else:
+ if sent == 0:
+ break # EOF
+ offset += sent
+
+def _copyfileobj_readinto(fsrc, fdst, length=COPY_BUFSIZE):
+ """readinto()/memoryview() based variant of copyfileobj().
+ *fsrc* must support readinto() method and both files must be
+ open in binary mode.
+ """
+ # Localize variable access to minimize overhead.
+ fsrc_readinto = fsrc.readinto
+ fdst_write = fdst.write
+ with memoryview(bytearray(length)) as mv:
+ while True:
+ n = fsrc_readinto(mv)
+ if not n:
+ break
+ elif n < length:
+ with mv[:n] as smv:
+                    fdst_write(smv)
+ else:
+ fdst_write(mv)
+
+def copyfileobj(fsrc, fdst, length=0):
+ """copy data from file-like object fsrc to file-like object fdst"""
+ # Localize variable access to minimize overhead.
+ if not length:
+ length = COPY_BUFSIZE
+ fsrc_read = fsrc.read
+ fdst_write = fdst.write
+ while True:
+ buf = fsrc_read(length)
+ if not buf:
+ break
+ fdst_write(buf)
+
+def _samefile(src, dst):
+ # Macintosh, Unix.
+ if isinstance(src, os.DirEntry) and hasattr(os.path, 'samestat'):
+ try:
+ return os.path.samestat(src.stat(), os.stat(dst))
+ except OSError:
+ return False
+
+ if hasattr(os.path, 'samefile'):
+ try:
+ return os.path.samefile(src, dst)
+ except OSError:
+ return False
+
+ # All other platforms: check for same pathname.
+ return (os.path.normcase(os.path.abspath(src)) ==
+ os.path.normcase(os.path.abspath(dst)))
+
+def _stat(fn):
+ return fn.stat() if isinstance(fn, os.DirEntry) else os.stat(fn)
+
+def _islink(fn):
+ return fn.is_symlink() if isinstance(fn, os.DirEntry) else os.path.islink(fn)
+
+def copyfile(src, dst, *, follow_symlinks=True):
+ """Copy data from src to dst in the most efficient way possible.
+
+ If follow_symlinks is not set and src is a symbolic link, a new
+ symlink will be created instead of copying the file it points to.
+
+ """
+ sys.audit("shutil.copyfile", src, dst)
+
+ if _samefile(src, dst):
+ raise SameFileError("{!r} and {!r} are the same file".format(src, dst))
+
+ file_size = 0
+ for i, fn in enumerate([src, dst]):
+ try:
+ st = _stat(fn)
+ except OSError:
+ # File most likely does not exist
+ pass
+ else:
+ # XXX What about other special files? (sockets, devices...)
+ if stat.S_ISFIFO(st.st_mode):
+ fn = fn.path if isinstance(fn, os.DirEntry) else fn
+ raise SpecialFileError("`%s` is a named pipe" % fn)
+ if _WINDOWS and i == 0:
+ file_size = st.st_size
+
+ if not follow_symlinks and _islink(src):
+ os.symlink(os.readlink(src), dst)
+ else:
+ with open(src, 'rb') as fsrc:
+ try:
+ with open(dst, 'wb') as fdst:
+ # macOS
+ if _HAS_FCOPYFILE:
+ try:
+ _fastcopy_fcopyfile(fsrc, fdst, posix._COPYFILE_DATA)
+ return dst
+ except _GiveupOnFastCopy:
+ pass
+ # Linux
+ elif _USE_CP_SENDFILE:
+ try:
+ _fastcopy_sendfile(fsrc, fdst)
+ return dst
+ except _GiveupOnFastCopy:
+ pass
+ # Windows, see:
+ # https://github.com/python/cpython/pull/7160#discussion_r195405230
+ elif _WINDOWS and file_size > 0:
+ _copyfileobj_readinto(fsrc, fdst, min(file_size, COPY_BUFSIZE))
+ return dst
+
+ copyfileobj(fsrc, fdst)
+
+ # Issue 43219, raise a less confusing exception
+ except IsADirectoryError as e:
+ if not os.path.exists(dst):
+ raise FileNotFoundError(f'Directory does not exist: {dst}') from e
+ else:
+ raise
+
+ return dst
+
+def copymode(src, dst, *, follow_symlinks=True):
+ """Copy mode bits from src to dst.
+
+ If follow_symlinks is not set, symlinks aren't followed if and only
+ if both `src` and `dst` are symlinks. If `lchmod` isn't available
+ (e.g. Linux) this method does nothing.
+
+ """
+ sys.audit("shutil.copymode", src, dst)
+
+ if not follow_symlinks and _islink(src) and os.path.islink(dst):
+ if hasattr(os, 'lchmod'):
+ stat_func, chmod_func = os.lstat, os.lchmod
+ else:
+ return
+ else:
+ stat_func, chmod_func = _stat, os.chmod
+
+ st = stat_func(src)
+ chmod_func(dst, stat.S_IMODE(st.st_mode))
+
+if hasattr(os, 'listxattr'):
+ def _copyxattr(src, dst, *, follow_symlinks=True):
+ """Copy extended filesystem attributes from `src` to `dst`.
+
+ Overwrite existing attributes.
+
+ If `follow_symlinks` is false, symlinks won't be followed.
+
+ """
+
+ try:
+ names = os.listxattr(src, follow_symlinks=follow_symlinks)
+ except OSError as e:
+ if e.errno not in (errno.ENOTSUP, errno.ENODATA, errno.EINVAL):
+ raise
+ return
+ for name in names:
+ try:
+ value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
+ os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
+ except OSError as e:
+ if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA,
+ errno.EINVAL):
+ raise
+else:
+ def _copyxattr(*args, **kwargs):
+ pass
+
+def copystat(src, dst, *, follow_symlinks=True):
+ """Copy file metadata
+
+ Copy the permission bits, last access time, last modification time, and
+ flags from `src` to `dst`. On Linux, copystat() also copies the "extended
+ attributes" where possible. The file contents, owner, and group are
+ unaffected. `src` and `dst` are path-like objects or path names given as
+ strings.
+
+ If the optional flag `follow_symlinks` is not set, symlinks aren't
+ followed if and only if both `src` and `dst` are symlinks.
+ """
+ sys.audit("shutil.copystat", src, dst)
+
+ def _nop(*args, ns=None, follow_symlinks=None):
+ pass
+
+ # follow symlinks (aka don't not follow symlinks)
+ follow = follow_symlinks or not (_islink(src) and os.path.islink(dst))
+ if follow:
+ # use the real function if it exists
+ def lookup(name):
+ return getattr(os, name, _nop)
+ else:
+ # use the real function only if it exists
+ # *and* it supports follow_symlinks
+ def lookup(name):
+ fn = getattr(os, name, _nop)
+ if fn in os.supports_follow_symlinks:
+ return fn
+ return _nop
+
+ if isinstance(src, os.DirEntry):
+ st = src.stat(follow_symlinks=follow)
+ else:
+ st = lookup("stat")(src, follow_symlinks=follow)
+ mode = stat.S_IMODE(st.st_mode)
+ lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
+ follow_symlinks=follow)
+ # We must copy extended attributes before the file is (potentially)
+ # chmod()'ed read-only, otherwise setxattr() will error with -EACCES.
+ _copyxattr(src, dst, follow_symlinks=follow)
+ try:
+ lookup("chmod")(dst, mode, follow_symlinks=follow)
+ except NotImplementedError:
+ # if we got a NotImplementedError, it's because
+ # * follow_symlinks=False,
+        #   * lchmod() is unavailable, and
+        #   * either
+        #     * fchmodat() is unavailable or
+        #     * fchmodat() doesn't implement AT_SYMLINK_NOFOLLOW.
+        #       (it returned ENOTSUP.)
+        # therefore we're out of options--we simply cannot chmod the
+        # symlink. give up, suppress the error.
+ # (which is what shutil always did in this circumstance.)
+ pass
+ if hasattr(st, 'st_flags'):
+ try:
+ lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
+ except OSError as why:
+ for err in 'EOPNOTSUPP', 'ENOTSUP':
+ if hasattr(errno, err) and why.errno == getattr(errno, err):
+ break
+ else:
+ raise
+
+def copy(src, dst, *, follow_symlinks=True):
+ """Copy data and mode bits ("cp src dst"). Return the file's destination.
+
+ The destination may be a directory.
+
+ If follow_symlinks is false, symlinks won't be followed. This
+ resembles GNU's "cp -P src dst".
+
+ If source and destination are the same file, a SameFileError will be
+ raised.
+
+ """
+ if os.path.isdir(dst):
+ dst = os.path.join(dst, os.path.basename(src))
+ copyfile(src, dst, follow_symlinks=follow_symlinks)
+ copymode(src, dst, follow_symlinks=follow_symlinks)
+ return dst
+
+def copy2(src, dst, *, follow_symlinks=True):
+ """Copy data and metadata. Return the file's destination.
+
+ Metadata is copied with copystat(). Please see the copystat function
+ for more information.
+
+ The destination may be a directory.
+
+ If follow_symlinks is false, symlinks won't be followed. This
+ resembles GNU's "cp -P src dst".
+ """
+ if os.path.isdir(dst):
+ dst = os.path.join(dst, os.path.basename(src))
+ copyfile(src, dst, follow_symlinks=follow_symlinks)
+ copystat(src, dst, follow_symlinks=follow_symlinks)
+ return dst
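+
+# Quick contrast (hypothetical paths): copy() preserves only the permission
+# bits, while copy2() also carries over timestamps and, where possible,
+# extended attributes:
+#     copy('a.txt', 'b.txt')     # data + mode bits
+#     copy2('a.txt', 'c.txt')    # data + mode bits + timestamps/xattrs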
+
+def ignore_patterns(*patterns):
+ """Function that can be used as copytree() ignore parameter.
+
+ Patterns is a sequence of glob-style patterns
+ that are used to exclude files"""
+ def _ignore_patterns(path, names):
+ ignored_names = []
+ for pattern in patterns:
+ ignored_names.extend(fnmatch.filter(names, pattern))
+ return set(ignored_names)
+ return _ignore_patterns
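+
+# For example (hypothetical tree): skip compiled files and editor backups
+# while copying:
+#     copytree('src', 'dst', ignore=ignore_patterns('*.pyc', '*~'))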
+
+def _copytree(entries, src, dst, symlinks, ignore, copy_function,
+ ignore_dangling_symlinks, dirs_exist_ok=False):
+ if ignore is not None:
+ ignored_names = ignore(os.fspath(src), [x.name for x in entries])
+ else:
+ ignored_names = set()
+
+ os.makedirs(dst, exist_ok=dirs_exist_ok)
+ errors = []
+ use_srcentry = copy_function is copy2 or copy_function is copy
+
+ for srcentry in entries:
+ if srcentry.name in ignored_names:
+ continue
+ srcname = os.path.join(src, srcentry.name)
+ dstname = os.path.join(dst, srcentry.name)
+ srcobj = srcentry if use_srcentry else srcname
+ try:
+ is_symlink = srcentry.is_symlink()
+ if is_symlink and os.name == 'nt':
+ # Special check for directory junctions, which appear as
+ # symlinks but we want to recurse.
+ lstat = srcentry.stat(follow_symlinks=False)
+ if lstat.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT:
+ is_symlink = False
+ if is_symlink:
+ linkto = os.readlink(srcname)
+ if symlinks:
+ # We can't just leave it to `copy_function` because legacy
+ # code with a custom `copy_function` may rely on copytree
+ # doing the right thing.
+ os.symlink(linkto, dstname)
+ copystat(srcobj, dstname, follow_symlinks=not symlinks)
+ else:
+ # ignore dangling symlink if the flag is on
+ if not os.path.exists(linkto) and ignore_dangling_symlinks:
+ continue
+ # otherwise let the copy occur. copy2 will raise an error
+ if srcentry.is_dir():
+ copytree(srcobj, dstname, symlinks, ignore,
+ copy_function, ignore_dangling_symlinks,
+ dirs_exist_ok)
+ else:
+ copy_function(srcobj, dstname)
+ elif srcentry.is_dir():
+ copytree(srcobj, dstname, symlinks, ignore, copy_function,
+ ignore_dangling_symlinks, dirs_exist_ok)
+ else:
+ # Will raise a SpecialFileError for unsupported file types
+ copy_function(srcobj, dstname)
+ # catch the Error from the recursive copytree so that we can
+ # continue with other files
+ except Error as err:
+ errors.extend(err.args[0])
+ except OSError as why:
+ errors.append((srcname, dstname, str(why)))
+ try:
+ copystat(src, dst)
+ except OSError as why:
+ # Copying file access times may fail on Windows
+ if getattr(why, 'winerror', None) is None:
+ errors.append((src, dst, str(why)))
+ if errors:
+ raise Error(errors)
+ return dst
+
+def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
+ ignore_dangling_symlinks=False, dirs_exist_ok=False):
+ """Recursively copy a directory tree and return the destination directory.
+
+ If exception(s) occur, an Error is raised with a list of reasons.
+
+ If the optional symlinks flag is true, symbolic links in the
+ source tree result in symbolic links in the destination tree; if
+ it is false, the contents of the files pointed to by symbolic
+    links are copied. If the file pointed to by the symlink doesn't
+    exist, an exception will be added to the list of errors raised in
+    an Error exception at the end of the copy process.
+
+ You can set the optional ignore_dangling_symlinks flag to true if you
+ want to silence this exception. Notice that this has no effect on
+ platforms that don't support os.symlink.
+
+ The optional ignore argument is a callable. If given, it
+ is called with the `src` parameter, which is the directory
+ being visited by copytree(), and `names` which is the list of
+ `src` contents, as returned by os.listdir():
+
+ callable(src, names) -> ignored_names
+
+ Since copytree() is called recursively, the callable will be
+ called once for each directory that is copied. It returns a
+ list of names relative to the `src` directory that should
+ not be copied.
+
+ The optional copy_function argument is a callable that will be used
+ to copy each file. It will be called with the source path and the
+ destination path as arguments. By default, copy2() is used, but any
+ function that supports the same signature (like copy()) can be used.
+
+ If dirs_exist_ok is false (the default) and `dst` already exists, a
+ `FileExistsError` is raised. If `dirs_exist_ok` is true, the copying
+ operation will continue if it encounters existing directories, and files
+ within the `dst` tree will be overwritten by corresponding files from the
+ `src` tree.
+ """
+ sys.audit("shutil.copytree", src, dst)
+ with os.scandir(src) as itr:
+ entries = list(itr)
+ return _copytree(entries=entries, src=src, dst=dst, symlinks=symlinks,
+ ignore=ignore, copy_function=copy_function,
+ ignore_dangling_symlinks=ignore_dangling_symlinks,
+ dirs_exist_ok=dirs_exist_ok)
+
+if hasattr(os.stat_result, 'st_file_attributes'):
+ # Special handling for directory junctions to make them behave like
+ # symlinks for shutil.rmtree, since in general they do not appear as
+ # regular links.
+ def _rmtree_isdir(entry):
+ try:
+ st = entry.stat(follow_symlinks=False)
+ return (stat.S_ISDIR(st.st_mode) and not
+ (st.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT
+ and st.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT))
+ except OSError:
+ return False
+
+ def _rmtree_islink(path):
+ try:
+ st = os.lstat(path)
+ return (stat.S_ISLNK(st.st_mode) or
+ (st.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT
+ and st.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT))
+ except OSError:
+ return False
+else:
+ def _rmtree_isdir(entry):
+ try:
+ return entry.is_dir(follow_symlinks=False)
+ except OSError:
+ return False
+
+ def _rmtree_islink(path):
+ return os.path.islink(path)
+
+# version vulnerable to race conditions
+def _rmtree_unsafe(path, onerror):
+ try:
+ with os.scandir(path) as scandir_it:
+ entries = list(scandir_it)
+ except OSError:
+ onerror(os.scandir, path, sys.exc_info())
+ entries = []
+ for entry in entries:
+ fullname = entry.path
+ if _rmtree_isdir(entry):
+ try:
+ if entry.is_symlink():
+ # This can only happen if someone replaces
+ # a directory with a symlink after the call to
+ # os.scandir or entry.is_dir above.
+ raise OSError("Cannot call rmtree on a symbolic link")
+ except OSError:
+ onerror(os.path.islink, fullname, sys.exc_info())
+ continue
+ _rmtree_unsafe(fullname, onerror)
+ else:
+ try:
+ os.unlink(fullname)
+ except OSError:
+ onerror(os.unlink, fullname, sys.exc_info())
+ try:
+ os.rmdir(path)
+ except OSError:
+ onerror(os.rmdir, path, sys.exc_info())
+
+# Version using fd-based APIs to protect against races
+def _rmtree_safe_fd(topfd, path, onerror):
+ try:
+ with os.scandir(topfd) as scandir_it:
+ entries = list(scandir_it)
+ except OSError as err:
+ err.filename = path
+ onerror(os.scandir, path, sys.exc_info())
+ return
+ for entry in entries:
+ fullname = os.path.join(path, entry.name)
+ try:
+ is_dir = entry.is_dir(follow_symlinks=False)
+ except OSError:
+ is_dir = False
+ else:
+ if is_dir:
+ try:
+ orig_st = entry.stat(follow_symlinks=False)
+ is_dir = stat.S_ISDIR(orig_st.st_mode)
+ except OSError:
+ onerror(os.lstat, fullname, sys.exc_info())
+ continue
+ if is_dir:
+ try:
+ dirfd = os.open(entry.name, os.O_RDONLY, dir_fd=topfd)
+ dirfd_closed = False
+ except OSError:
+ onerror(os.open, fullname, sys.exc_info())
+ else:
+ try:
+ if os.path.samestat(orig_st, os.fstat(dirfd)):
+ _rmtree_safe_fd(dirfd, fullname, onerror)
+ try:
+ os.close(dirfd)
+ dirfd_closed = True
+ os.rmdir(entry.name, dir_fd=topfd)
+ except OSError:
+ onerror(os.rmdir, fullname, sys.exc_info())
+ else:
+ try:
+ # This can only happen if someone replaces
+ # a directory with a symlink after the call to
+ # os.scandir or stat.S_ISDIR above.
+ raise OSError("Cannot call rmtree on a symbolic "
+ "link")
+ except OSError:
+ onerror(os.path.islink, fullname, sys.exc_info())
+ finally:
+ if not dirfd_closed:
+ os.close(dirfd)
+ else:
+ try:
+ os.unlink(entry.name, dir_fd=topfd)
+ except OSError:
+ onerror(os.unlink, fullname, sys.exc_info())
+
+_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
+ os.supports_dir_fd and
+ os.scandir in os.supports_fd and
+ os.stat in os.supports_follow_symlinks)
+
+def rmtree(path, ignore_errors=False, onerror=None):
+ """Recursively delete a directory tree.
+
+ If ignore_errors is set, errors are ignored; otherwise, if onerror
+ is set, it is called to handle the error with arguments (func,
+ path, exc_info) where func is platform and implementation dependent;
+ path is the argument to that function that caused it to fail; and
+ exc_info is a tuple returned by sys.exc_info(). If ignore_errors
+ is false and onerror is None, an exception is raised.
+
+ """
+ sys.audit("shutil.rmtree", path)
+ if ignore_errors:
+ def onerror(*args):
+ pass
+ elif onerror is None:
+ def onerror(*args):
+ raise
+ if _use_fd_functions:
+ # While the unsafe rmtree works fine on bytes, the fd based does not.
+ if isinstance(path, bytes):
+ path = os.fsdecode(path)
+ # Note: To guard against symlink races, we use the standard
+ # lstat()/open()/fstat() trick.
+ try:
+ orig_st = os.lstat(path)
+ except Exception:
+ onerror(os.lstat, path, sys.exc_info())
+ return
+ try:
+ fd = os.open(path, os.O_RDONLY)
+ fd_closed = False
+ except Exception:
+ onerror(os.open, path, sys.exc_info())
+ return
+ try:
+ if os.path.samestat(orig_st, os.fstat(fd)):
+ _rmtree_safe_fd(fd, path, onerror)
+ try:
+ os.close(fd)
+ fd_closed = True
+ os.rmdir(path)
+ except OSError:
+ onerror(os.rmdir, path, sys.exc_info())
+ else:
+ try:
+ # symlinks to directories are forbidden, see bug #1669
+ raise OSError("Cannot call rmtree on a symbolic link")
+ except OSError:
+ onerror(os.path.islink, path, sys.exc_info())
+ finally:
+ if not fd_closed:
+ os.close(fd)
+ else:
+ try:
+ if _rmtree_islink(path):
+ # symlinks to directories are forbidden, see bug #1669
+ raise OSError("Cannot call rmtree on a symbolic link")
+ except OSError:
+ onerror(os.path.islink, path, sys.exc_info())
+ # can't continue even if onerror hook returns
+ return
+ return _rmtree_unsafe(path, onerror)
+
+# Allow introspection of whether or not the hardening against symlink
+# attacks is supported on the current platform
+rmtree.avoids_symlink_attacks = _use_fd_functions
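+
+# A common onerror sketch (illustrative; mainly useful on Windows, where a
+# read-only file blocks deletion): clear the read-only bit and retry the
+# failing call.
+#
+#     def _force_remove(func, path, exc_info):
+#         os.chmod(path, stat.S_IWRITE)
+#         func(path)
+#
+#     rmtree('build', onerror=_force_remove)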
+
+def _basename(path):
+ """A basename() variant which first strips the trailing slash, if present.
+ Thus we always get the last component of the path, even for directories.
+
+ path: Union[PathLike, str]
+
+ e.g.
+ >>> os.path.basename('/bar/foo')
+ 'foo'
+ >>> os.path.basename('/bar/foo/')
+ ''
+ >>> _basename('/bar/foo/')
+ 'foo'
+ """
+ path = os.fspath(path)
+ sep = os.path.sep + (os.path.altsep or '')
+ return os.path.basename(path.rstrip(sep))
+
+def move(src, dst, copy_function=copy2):
+ """Recursively move a file or directory to another location. This is
+ similar to the Unix "mv" command. Return the file or directory's
+ destination.
+
+ If the destination is a directory or a symlink to a directory, the source
+ is moved inside the directory. The destination path must not already
+ exist.
+
+ If the destination already exists but is not a directory, it may be
+ overwritten depending on os.rename() semantics.
+
+ If the destination is on our current filesystem, then rename() is used.
+ Otherwise, src is copied to the destination and then removed. Symlinks are
+ recreated under the new name if os.rename() fails because of cross
+ filesystem renames.
+
+ The optional `copy_function` argument is a callable that will be used
+ to copy the source or it will be delegated to `copytree`.
+ By default, copy2() is used, but any function that supports the same
+ signature (like copy()) can be used.
+
+    A lot more could be done here... A look at mv.c shows a lot of
+ the issues this implementation glosses over.
+
+ """
+ sys.audit("shutil.move", src, dst)
+ real_dst = dst
+ if os.path.isdir(dst):
+ if _samefile(src, dst):
+ # We might be on a case insensitive filesystem,
+ # perform the rename anyway.
+ os.rename(src, dst)
+ return
+
+ # Using _basename instead of os.path.basename is important, as we must
+ # ignore any trailing slash to avoid the basename returning ''
+ real_dst = os.path.join(dst, _basename(src))
+
+ if os.path.exists(real_dst):
+ raise Error("Destination path '%s' already exists" % real_dst)
+ try:
+ os.rename(src, real_dst)
+ except OSError:
+ if os.path.islink(src):
+ linkto = os.readlink(src)
+ os.symlink(linkto, real_dst)
+ os.unlink(src)
+ elif os.path.isdir(src):
+ if _destinsrc(src, dst):
+ raise Error("Cannot move a directory '%s' into itself"
+ " '%s'." % (src, dst))
+ if (_is_immutable(src)
+ or (not os.access(src, os.W_OK) and os.listdir(src)
+ and sys.platform == 'darwin')):
+ raise PermissionError("Cannot move the non-empty directory "
+ "'%s': Lacking write permission to '%s'."
+ % (src, src))
+ copytree(src, real_dst, copy_function=copy_function,
+ symlinks=True)
+ rmtree(src)
+ else:
+ copy_function(src, real_dst)
+ os.unlink(src)
+ return real_dst
+
+def _destinsrc(src, dst):
+ src = os.path.abspath(src)
+ dst = os.path.abspath(dst)
+ if not src.endswith(os.path.sep):
+ src += os.path.sep
+ if not dst.endswith(os.path.sep):
+ dst += os.path.sep
+ return dst.startswith(src)
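+
+# e.g. _destinsrc('/a/b', '/a/b/c') is True, which is how move() refuses
+# to move a directory into itself.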
+
+def _is_immutable(src):
+ st = _stat(src)
+ immutable_states = [stat.UF_IMMUTABLE, stat.SF_IMMUTABLE]
+ return hasattr(st, 'st_flags') and st.st_flags in immutable_states
+
+def _get_gid(name):
+ """Returns a gid, given a group name."""
+ if name is None:
+ return None
+
+ try:
+ from grp import getgrnam
+ except ImportError:
+ return None
+
+ try:
+ result = getgrnam(name)
+ except KeyError:
+ result = None
+ if result is not None:
+ return result[2]
+ return None
+
+def _get_uid(name):
+ """Returns an uid, given a user name."""
+ if name is None:
+ return None
+
+ try:
+ from pwd import getpwnam
+ except ImportError:
+ return None
+
+ try:
+ result = getpwnam(name)
+ except KeyError:
+ result = None
+ if result is not None:
+ return result[2]
+ return None
+
+def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
+ owner=None, group=None, logger=None, root_dir=None):
+ """Create a (possibly compressed) tar file from all the files under
+ 'base_dir'.
+
+ 'compress' must be "gzip" (the default), "bzip2", "xz", or None.
+
+ 'owner' and 'group' can be used to define an owner and a group for the
+ archive that is being built. If not provided, the current owner and group
+ will be used.
+
+ The output tar file will be named 'base_name' + ".tar", possibly plus
+ the appropriate compression extension (".gz", ".bz2", or ".xz").
+
+ Returns the output filename.
+ """
+ if compress is None:
+ tar_compression = ''
+ elif _ZLIB_SUPPORTED and compress == 'gzip':
+ tar_compression = 'gz'
+ elif _BZ2_SUPPORTED and compress == 'bzip2':
+ tar_compression = 'bz2'
+ elif _LZMA_SUPPORTED and compress == 'xz':
+ tar_compression = 'xz'
+ else:
+ raise ValueError("bad value for 'compress', or compression format not "
+                         "supported: {0}".format(compress))
+
+ import tarfile # late import for breaking circular dependency
+
+ compress_ext = '.' + tar_compression if compress else ''
+ archive_name = base_name + '.tar' + compress_ext
+ archive_dir = os.path.dirname(archive_name)
+
+ if archive_dir and not os.path.exists(archive_dir):
+ if logger is not None:
+ logger.info("creating %s", archive_dir)
+ if not dry_run:
+ os.makedirs(archive_dir)
+
+ # creating the tarball
+ if logger is not None:
+ logger.info('Creating tar archive')
+
+ uid = _get_uid(owner)
+ gid = _get_gid(group)
+
+ def _set_uid_gid(tarinfo):
+ if gid is not None:
+ tarinfo.gid = gid
+ tarinfo.gname = group
+ if uid is not None:
+ tarinfo.uid = uid
+ tarinfo.uname = owner
+ return tarinfo
+
+ if not dry_run:
+ tar = tarfile.open(archive_name, 'w|%s' % tar_compression)
+ arcname = base_dir
+ if root_dir is not None:
+ base_dir = os.path.join(root_dir, base_dir)
+ try:
+ tar.add(base_dir, arcname, filter=_set_uid_gid)
+ finally:
+ tar.close()
+
+ if root_dir is not None:
+ archive_name = os.path.abspath(archive_name)
+ return archive_name
+
+def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0,
+ logger=None, owner=None, group=None, root_dir=None):
+ """Create a zip file from all the files under 'base_dir'.
+
+ The output zip file will be named 'base_name' + ".zip". Returns the
+ name of the output zip file.
+ """
+ import zipfile # late import for breaking circular dependency
+
+ zip_filename = base_name + ".zip"
+ archive_dir = os.path.dirname(base_name)
+
+ if archive_dir and not os.path.exists(archive_dir):
+ if logger is not None:
+ logger.info("creating %s", archive_dir)
+ if not dry_run:
+ os.makedirs(archive_dir)
+
+ if logger is not None:
+ logger.info("creating '%s' and adding '%s' to it",
+ zip_filename, base_dir)
+
+ if not dry_run:
+ with zipfile.ZipFile(zip_filename, "w",
+ compression=zipfile.ZIP_DEFLATED) as zf:
+ arcname = os.path.normpath(base_dir)
+ if root_dir is not None:
+ base_dir = os.path.join(root_dir, base_dir)
+ base_dir = os.path.normpath(base_dir)
+ if arcname != os.curdir:
+ zf.write(base_dir, arcname)
+ if logger is not None:
+ logger.info("adding '%s'", base_dir)
+ for dirpath, dirnames, filenames in os.walk(base_dir):
+ arcdirpath = dirpath
+ if root_dir is not None:
+ arcdirpath = os.path.relpath(arcdirpath, root_dir)
+ arcdirpath = os.path.normpath(arcdirpath)
+ for name in sorted(dirnames):
+ path = os.path.join(dirpath, name)
+ arcname = os.path.join(arcdirpath, name)
+ zf.write(path, arcname)
+ if logger is not None:
+ logger.info("adding '%s'", path)
+ for name in filenames:
+ path = os.path.join(dirpath, name)
+ path = os.path.normpath(path)
+ if os.path.isfile(path):
+ arcname = os.path.join(arcdirpath, name)
+ zf.write(path, arcname)
+ if logger is not None:
+ logger.info("adding '%s'", path)
+
+ if root_dir is not None:
+ zip_filename = os.path.abspath(zip_filename)
+ return zip_filename
+
+# Maps the name of the archive format to a tuple containing:
+# * the archiving function
+# * extra keyword arguments
+# * description
+# * does it support the root_dir argument?
+_ARCHIVE_FORMATS = {
+ 'tar': (_make_tarball, [('compress', None)],
+ "uncompressed tar file", True),
+}
+
+if _ZLIB_SUPPORTED:
+ _ARCHIVE_FORMATS['gztar'] = (_make_tarball, [('compress', 'gzip')],
+ "gzip'ed tar-file", True)
+ _ARCHIVE_FORMATS['zip'] = (_make_zipfile, [], "ZIP file", True)
+
+if _BZ2_SUPPORTED:
+ _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
+ "bzip2'ed tar-file", True)
+
+if _LZMA_SUPPORTED:
+ _ARCHIVE_FORMATS['xztar'] = (_make_tarball, [('compress', 'xz')],
+ "xz'ed tar-file", True)
+
+def get_archive_formats():
+ """Returns a list of supported formats for archiving and unarchiving.
+
+ Each element of the returned sequence is a tuple (name, description)
+ """
+ formats = [(name, registry[2]) for name, registry in
+ _ARCHIVE_FORMATS.items()]
+ formats.sort()
+ return formats
+
+def register_archive_format(name, function, extra_args=None, description=''):
+ """Registers an archive format.
+
+ name is the name of the format. function is the callable that will be
+ used to create archives. If provided, extra_args is a sequence of
+ (name, value) tuples that will be passed as arguments to the callable.
+ description can be provided to describe the format, and will be returned
+ by the get_archive_formats() function.
+ """
+ if extra_args is None:
+ extra_args = []
+ if not callable(function):
+ raise TypeError('The %s object is not callable' % function)
+ if not isinstance(extra_args, (tuple, list)):
+ raise TypeError('extra_args needs to be a sequence')
+ for element in extra_args:
+        if not isinstance(element, (tuple, list)) or len(element) != 2:
+            raise TypeError('extra_args elements are: (arg_name, value)')
+
+ _ARCHIVE_FORMATS[name] = (function, extra_args, description, False)
+
+def unregister_archive_format(name):
+ del _ARCHIVE_FORMATS[name]
+
+def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
+ dry_run=0, owner=None, group=None, logger=None):
+ """Create an archive file (eg. zip or tar).
+
+ 'base_name' is the name of the file to create, minus any format-specific
+ extension; 'format' is the archive format: one of "zip", "tar", "gztar",
+ "bztar", or "xztar". Or any other registered format.
+
+ 'root_dir' is a directory that will be the root directory of the
+    archive; i.e. we typically chdir into 'root_dir' before creating the
+ archive. 'base_dir' is the directory where we start archiving from;
+    i.e. 'base_dir' will be the common prefix of all files and
+ directories in the archive. 'root_dir' and 'base_dir' both default
+ to the current directory. Returns the name of the archive file.
+
+ 'owner' and 'group' are used when creating a tar archive. By default,
+ uses the current owner and group.
+ """
+ sys.audit("shutil.make_archive", base_name, format, root_dir, base_dir)
+ try:
+ format_info = _ARCHIVE_FORMATS[format]
+ except KeyError:
+ raise ValueError("unknown archive format '%s'" % format) from None
+
+ kwargs = {'dry_run': dry_run, 'logger': logger,
+ 'owner': owner, 'group': group}
+
+ func = format_info[0]
+ for arg, val in format_info[1]:
+ kwargs[arg] = val
+
+ if base_dir is None:
+ base_dir = os.curdir
+
+ support_root_dir = format_info[3]
+ save_cwd = None
+ if root_dir is not None:
+ if support_root_dir:
+ # Support path-like base_name here for backwards-compatibility.
+ base_name = os.fspath(base_name)
+ kwargs['root_dir'] = root_dir
+ else:
+ save_cwd = os.getcwd()
+ if logger is not None:
+ logger.debug("changing into '%s'", root_dir)
+ base_name = os.path.abspath(base_name)
+ if not dry_run:
+ os.chdir(root_dir)
+
+ try:
+ filename = func(base_name, base_dir, **kwargs)
+ finally:
+ if save_cwd is not None:
+ if logger is not None:
+ logger.debug("changing back to '%s'", save_cwd)
+ os.chdir(save_cwd)
+
+ return filename
+
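+# A minimal usage sketch (paths are hypothetical):
+#
+#     >>> import shutil
+#     >>> shutil.make_archive('/tmp/backup', 'gztar',
+#     ...                     root_dir='/tmp/project')  # doctest: +SKIP
+#     '/tmp/backup.tar.gz'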
+
+def get_unpack_formats():
+ """Returns a list of supported formats for unpacking.
+
+ Each element of the returned sequence is a tuple
+ (name, extensions, description)
+ """
+ formats = [(name, info[0], info[3]) for name, info in
+ _UNPACK_FORMATS.items()]
+ formats.sort()
+ return formats
+
+def _check_unpack_options(extensions, function, extra_args):
+ """Checks what gets registered as an unpacker."""
+ # first make sure no other unpacker is registered for this extension
+ existing_extensions = {}
+ for name, info in _UNPACK_FORMATS.items():
+ for ext in info[0]:
+ existing_extensions[ext] = name
+
+ for extension in extensions:
+ if extension in existing_extensions:
+ msg = '%s is already registered for "%s"'
+ raise RegistryError(msg % (extension,
+ existing_extensions[extension]))
+
+ if not callable(function):
+ raise TypeError('The registered function must be a callable')
+
+
+def register_unpack_format(name, extensions, function, extra_args=None,
+ description=''):
+ """Registers an unpack format.
+
+ `name` is the name of the format. `extensions` is a list of extensions
+ corresponding to the format.
+
+ `function` is the callable that will be
+ used to unpack archives. The callable will receive archives to unpack.
+ If it's unable to handle an archive, it needs to raise a ReadError
+ exception.
+
+ If provided, `extra_args` is a sequence of
+ (name, value) tuples that will be passed as arguments to the callable.
+ description can be provided to describe the format, and will be returned
+ by the get_unpack_formats() function.
+ """
+ if extra_args is None:
+ extra_args = []
+ _check_unpack_options(extensions, function, extra_args)
+ _UNPACK_FORMATS[name] = extensions, function, extra_args, description
+
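+# A sketch of registering an unpacker for a hypothetical '.my' extension:
+#
+#     >>> def _unpack_my(filename, extract_dir):
+#     ...     # ... read filename and recreate its contents under
+#     ...     # extract_dir; raise ReadError if it is not a '.my' archive ...
+#     ...     pass
+#     >>> register_unpack_format('myfmt', ['.my'], _unpack_my)
+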
+def unregister_unpack_format(name):
+ """Removes the pack format from the registry."""
+ del _UNPACK_FORMATS[name]
+
+def _ensure_directory(path):
+ """Ensure that the parent directory of `path` exists"""
+ dirname = os.path.dirname(path)
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
+
+def _unpack_zipfile(filename, extract_dir):
+ """Unpack zip `filename` to `extract_dir`
+ """
+ import zipfile # late import for breaking circular dependency
+
+ if not zipfile.is_zipfile(filename):
+ raise ReadError("%s is not a zip file" % filename)
+
+ zip = zipfile.ZipFile(filename)
+ try:
+ for info in zip.infolist():
+ name = info.filename
+
+ # don't extract absolute paths or ones with .. in them
+ if name.startswith('/') or '..' in name:
+ continue
+
+ targetpath = os.path.join(extract_dir, *name.split('/'))
+ if not targetpath:
+ continue
+
+ _ensure_directory(targetpath)
+ if not name.endswith('/'):
+ # file
+ with zip.open(name, 'r') as source, \
+ open(targetpath, 'wb') as target:
+ copyfileobj(source, target)
+ finally:
+ zip.close()
+
+def _unpack_tarfile(filename, extract_dir, *, filter=None):
+ """Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir`
+ """
+ import tarfile # late import for breaking circular dependency
+ try:
+ tarobj = tarfile.open(filename)
+ except tarfile.TarError:
+ raise ReadError(
+ "%s is not a compressed or uncompressed tar file" % filename)
+ try:
+ tarobj.extractall(extract_dir, filter=filter)
+ finally:
+ tarobj.close()
+
+# Maps the name of the unpack format to a tuple containing:
+# * extensions
+# * the unpacking function
+# * extra keyword arguments
+# * description
+_UNPACK_FORMATS = {
+ 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
+ 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file"),
+}
+
+if _ZLIB_SUPPORTED:
+ _UNPACK_FORMATS['gztar'] = (['.tar.gz', '.tgz'], _unpack_tarfile, [],
+ "gzip'ed tar-file")
+
+if _BZ2_SUPPORTED:
+ _UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [],
+ "bzip2'ed tar-file")
+
+if _LZMA_SUPPORTED:
+ _UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [],
+ "xz'ed tar-file")
+
+def _find_unpack_format(filename):
+ for name, info in _UNPACK_FORMATS.items():
+ for extension in info[0]:
+ if filename.endswith(extension):
+ return name
+ return None
+
+def unpack_archive(filename, extract_dir=None, format=None, *, filter=None):
+ """Unpack an archive.
+
+ `filename` is the name of the archive.
+
+ `extract_dir` is the name of the target directory, where the archive
+ is unpacked. If not provided, the current working directory is used.
+
+ `format` is the archive format: one of "zip", "tar", "gztar", "bztar",
+ or "xztar", or any other registered format. If not provided,
+ unpack_archive will use the filename extension and see if an unpacker
+ was registered for that extension.
+
+ If an explicit `format` is unknown, a ValueError is raised; if no
+ unpacker is registered for the filename extension, a ReadError is raised.
+
+ If `filter` is given, it is passed to the underlying
+ extraction function.
+ """
+ sys.audit("shutil.unpack_archive", filename, extract_dir, format)
+
+ if extract_dir is None:
+ extract_dir = os.getcwd()
+
+ extract_dir = os.fspath(extract_dir)
+ filename = os.fspath(filename)
+
+ if filter is None:
+ filter_kwargs = {}
+ else:
+ filter_kwargs = {'filter': filter}
+ if format is not None:
+ try:
+ format_info = _UNPACK_FORMATS[format]
+ except KeyError:
+ raise ValueError("Unknown unpack format '{0}'".format(format)) from None
+
+ func = format_info[1]
+ func(filename, extract_dir, **dict(format_info[2]), **filter_kwargs)
+ else:
+ # we need to look at the registered unpackers supported extensions
+ format = _find_unpack_format(filename)
+ if format is None:
+ raise ReadError("Unknown archive format '{0}'".format(filename))
+
+ func = _UNPACK_FORMATS[format][1]
+ kwargs = dict(_UNPACK_FORMATS[format][2]) | filter_kwargs
+ func(filename, extract_dir, **kwargs)
+
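+# A usage sketch (paths are hypothetical; the 'data' filter is only
+# available where tarfile extraction filters are supported):
+#
+#     >>> import shutil
+#     >>> shutil.unpack_archive('/tmp/backup.tar.gz', '/tmp/restored',
+#     ...                       filter='data')  # doctest: +SKIP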
+
+if hasattr(os, 'statvfs'):
+
+ __all__.append('disk_usage')
+ _ntuple_diskusage = collections.namedtuple('usage', 'total used free')
+ _ntuple_diskusage.total.__doc__ = 'Total space in bytes'
+ _ntuple_diskusage.used.__doc__ = 'Used space in bytes'
+ _ntuple_diskusage.free.__doc__ = 'Free space in bytes'
+
+ def disk_usage(path):
+ """Return disk usage statistics about the given path.
+
+ Returned value is a named tuple with attributes 'total', 'used' and
+ 'free', which are the amount of total, used and free space, in bytes.
+ """
+ st = os.statvfs(path)
+ free = st.f_bavail * st.f_frsize
+ total = st.f_blocks * st.f_frsize
+ used = (st.f_blocks - st.f_bfree) * st.f_frsize
+ return _ntuple_diskusage(total, used, free)
+
+elif _WINDOWS:
+
+ __all__.append('disk_usage')
+ _ntuple_diskusage = collections.namedtuple('usage', 'total used free')
+
+ def disk_usage(path):
+ """Return disk usage statistics about the given path.
+
+ Returned value is a named tuple with attributes 'total', 'used' and
+ 'free', which are the amount of total, used and free space, in bytes.
+ """
+ total, free = nt._getdiskusage(path)
+ used = total - free
+ return _ntuple_diskusage(total, used, free)
+
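+# A usage sketch (the reported numbers are made up):
+#
+#     >>> import shutil
+#     >>> usage = shutil.disk_usage('/')  # doctest: +SKIP
+#     >>> (usage.total, usage.used, usage.free)  # bytes
+#     (499963174912, 178392064000, 321571110912)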
+
+def chown(path, user=None, group=None):
+ """Change owner user and group of the given path.
+
+ user and group can be the uid/gid or the user/group names; in the
+ latter case, they are converted to the corresponding uid/gid.
+ """
+ sys.audit('shutil.chown', path, user, group)
+
+ if user is None and group is None:
+ raise ValueError("user and/or group must be set")
+
+ _user = user
+ _group = group
+
+ # -1 means don't change it
+ if user is None:
+ _user = -1
+ # user can either be an int (the uid) or a string (the system username)
+ elif isinstance(user, str):
+ _user = _get_uid(user)
+ if _user is None:
+ raise LookupError("no such user: {!r}".format(user))
+
+ if group is None:
+ _group = -1
+ elif not isinstance(group, int):
+ _group = _get_gid(group)
+ if _group is None:
+ raise LookupError("no such group: {!r}".format(group))
+
+ os.chown(path, _user, _group)
+
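+# A usage sketch (requires sufficient privileges; 'alice' and 'staff' are
+# hypothetical names):
+#
+#     >>> import shutil
+#     >>> shutil.chown('/tmp/somefile', user='alice',
+#     ...              group='staff')  # doctest: +SKIP
+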
+def get_terminal_size(fallback=(80, 24)):
+ """Get the size of the terminal window.
+
+ For each of the two dimensions, the corresponding environment
+ variable (COLUMNS or LINES) is checked. If the variable is defined
+ and its value is a positive integer, it is used.
+
+ When COLUMNS or LINES is not defined, which is the common case,
+ the terminal connected to sys.__stdout__ is queried
+ by invoking os.get_terminal_size.
+
+ If the terminal size cannot be successfully queried, either because
+ the system doesn't support querying, or because we are not
+ connected to a terminal, the value given in the fallback parameter
+ is used. The fallback defaults to (80, 24), which is the default
+ size used by many terminal emulators.
+
+ The value returned is a named tuple of type os.terminal_size.
+ """
+ # columns, lines are the working values
+ try:
+ columns = int(os.environ['COLUMNS'])
+ except (KeyError, ValueError):
+ columns = 0
+
+ try:
+ lines = int(os.environ['LINES'])
+ except (KeyError, ValueError):
+ lines = 0
+
+ # only query if necessary
+ if columns <= 0 or lines <= 0:
+ try:
+ size = os.get_terminal_size(sys.__stdout__.fileno())
+ except (AttributeError, ValueError, OSError):
+ # stdout is None, closed, detached, or not a terminal, or
+ # os.get_terminal_size() is unsupported
+ size = os.terminal_size(fallback)
+ if columns <= 0:
+ columns = size.columns
+ if lines <= 0:
+ lines = size.lines
+
+ return os.terminal_size((columns, lines))
+
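+# Example (the reported size depends on the terminal):
+#
+#     >>> import shutil
+#     >>> shutil.get_terminal_size(fallback=(80, 24))  # doctest: +SKIP
+#     os.terminal_size(columns=120, lines=40)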
+
+# Check that a given file can be accessed with the correct mode.
+# Additionally check that `file` is not a directory, as on Windows
+# directories pass the os.access check.
+def _access_check(fn, mode):
+ return (os.path.exists(fn) and os.access(fn, mode)
+ and not os.path.isdir(fn))
+
+
+def which(cmd, mode=os.F_OK | os.X_OK, path=None):
+ """Given a command, mode, and a PATH string, return the path which
+ conforms to the given mode on the PATH, or None if there is no such
+ file.
+
+ `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+ of os.environ.get("PATH"), or can be overridden with a custom search
+ path.
+
+ """
+ # If we're given a path with a directory part, look it up directly rather
+ # than referring to PATH directories. This includes checking relative to the
+ # current directory, e.g. ./script
+ if os.path.dirname(cmd):
+ if _access_check(cmd, mode):
+ return cmd
+ return None
+
+ use_bytes = isinstance(cmd, bytes)
+
+ if path is None:
+ path = os.environ.get("PATH", None)
+ if path is None:
+ try:
+ path = os.confstr("CS_PATH")
+ except (AttributeError, ValueError):
+ # os.confstr() or CS_PATH is not available
+ path = os.defpath
+ # bpo-35755: Don't use os.defpath if the PATH environment variable is
+ # set to an empty string
+
+ # PATH='' doesn't match, whereas PATH=':' looks in the current directory
+ if not path:
+ return None
+
+ if use_bytes:
+ path = os.fsencode(path)
+ path = path.split(os.fsencode(os.pathsep))
+ else:
+ path = os.fsdecode(path)
+ path = path.split(os.pathsep)
+
+ if sys.platform == "win32":
+ # The current directory takes precedence on Windows.
+ curdir = os.curdir
+ if use_bytes:
+ curdir = os.fsencode(curdir)
+ if curdir not in path:
+ path.insert(0, curdir)
+
+ # PATHEXT is necessary to check on Windows.
+ pathext_source = os.getenv("PATHEXT") or _WIN_DEFAULT_PATHEXT
+ pathext = [ext for ext in pathext_source.split(os.pathsep) if ext]
+
+ if use_bytes:
+ pathext = [os.fsencode(ext) for ext in pathext]
+ # See if the given file matches any of the expected path extensions.
+ # This will allow us to short circuit when given "python.exe".
+ # If it does match, only test that one, otherwise we have to try
+ # others.
+ if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
+ files = [cmd]
+ else:
+ files = [cmd + ext for ext in pathext]
+ else:
+ # On other platforms you don't have things like PATHEXT to tell you
+ # what file suffixes are executable, so just pass on cmd as-is.
+ files = [cmd]
+
+ seen = set()
+ for dir in path:
+ normdir = os.path.normcase(dir)
+ if not normdir in seen:
+ seen.add(normdir)
+ for thefile in files:
+ name = os.path.join(dir, thefile)
+ if _access_check(name, mode):
+ return name
+ return None
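+
+# Example (the resulting path depends on the system):
+#
+#     >>> import shutil
+#     >>> shutil.which('python3')  # doctest: +SKIP
+#     '/usr/bin/python3'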
diff --git a/infer_4_37_2/lib/python3.10/site.py b/infer_4_37_2/lib/python3.10/site.py
new file mode 100644
index 0000000000000000000000000000000000000000..5302037e0bf2c1f42e7e83b6e1697fc885fca638
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/site.py
@@ -0,0 +1,684 @@
+"""Append module search paths for third-party packages to sys.path.
+
+****************************************************************
+* This module is automatically imported during initialization. *
+****************************************************************
+
+This will append site-specific paths to the module search path. On
+Unix (including Mac OSX), it starts with sys.prefix and
+sys.exec_prefix (if different) and appends
+lib/python<version>/site-packages.
+On other platforms (such as Windows), it tries each of the
+prefixes directly, as well as with lib/site-packages appended. The
+resulting directories, if they exist, are appended to sys.path, and
+also inspected for path configuration files.
+
+If a file named "pyvenv.cfg" exists one directory above sys.executable,
+sys.prefix and sys.exec_prefix are set to that directory and
+it is also checked for site-packages (sys.base_prefix and
+sys.base_exec_prefix will always be the "real" prefixes of the Python
+installation). If "pyvenv.cfg" (a bootstrap configuration file) contains
+the key "include-system-site-packages" set to anything other than "false"
+(case-insensitive), the system-level prefixes will still also be
+searched for site-packages; otherwise they won't.
+
+All of the resulting site-specific directories, if they exist, are
+appended to sys.path, and also inspected for path configuration
+files.
+
+A path configuration file is a file whose name has the form
+<package>.pth; its contents are additional directories (one per line)
+to be added to sys.path. Non-existing directories (or
+non-directories) are never added to sys.path; no directory is added to
+sys.path more than once. Blank lines and lines beginning with
+'#' are skipped. Lines starting with 'import' are executed.
+
+For example, suppose sys.prefix and sys.exec_prefix are set to
+/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
+with three subdirectories, foo, bar and spam, and two path
+configuration files, foo.pth and bar.pth. Assume foo.pth contains the
+following:
+
+ # foo package configuration
+ foo
+ bar
+ bletch
+
+and bar.pth contains:
+
+ # bar package configuration
+ bar
+
+Then the following directories are added to sys.path, in this order:
+
+ /usr/local/lib/python2.5/site-packages/bar
+ /usr/local/lib/python2.5/site-packages/foo
+
+Note that bletch is omitted because it doesn't exist; bar precedes foo
+because bar.pth comes alphabetically before foo.pth; and spam is
+omitted because it is not mentioned in either path configuration file.
+
+The readline module is also automatically configured to enable
+completion for systems that support it. This can be overridden in
+sitecustomize, usercustomize or PYTHONSTARTUP. Starting Python in
+isolated mode (-I) disables automatic readline configuration.
+
+After these operations, an attempt is made to import a module
+named sitecustomize, which can perform arbitrary additional
+site-specific customizations. If this import fails with an
+ImportError exception, it is silently ignored.
+"""
+
+import sys
+import os
+import builtins
+import _sitebuiltins
+import io
+import stat
+
+# Prefixes for site-packages; add additional prefixes like /usr/local here
+PREFIXES = [sys.prefix, sys.exec_prefix]
+# Enable per user site-packages directory
+# set it to False to disable the feature or True to force the feature
+ENABLE_USER_SITE = None
+
+# for distutils.commands.install
+# These values are initialized by the getuserbase() and getusersitepackages()
+# functions, through the main() function when Python starts.
+USER_SITE = None
+USER_BASE = None
+
+
+def _trace(message):
+ if sys.flags.verbose:
+ print(message, file=sys.stderr)
+
+
+def makepath(*paths):
+ dir = os.path.join(*paths)
+ try:
+ dir = os.path.abspath(dir)
+ except OSError:
+ pass
+ return dir, os.path.normcase(dir)
+
+
+def abs_paths():
+ """Set all module __file__ and __cached__ attributes to an absolute path"""
+ for m in set(sys.modules.values()):
+ loader_module = None
+ try:
+ loader_module = m.__loader__.__module__
+ except AttributeError:
+ try:
+ loader_module = m.__spec__.loader.__module__
+ except AttributeError:
+ pass
+ if loader_module not in {'_frozen_importlib', '_frozen_importlib_external'}:
+ continue # don't mess with a PEP 302-supplied __file__
+ try:
+ m.__file__ = os.path.abspath(m.__file__)
+ except (AttributeError, OSError, TypeError):
+ pass
+ try:
+ m.__cached__ = os.path.abspath(m.__cached__)
+ except (AttributeError, OSError, TypeError):
+ pass
+
+
+def removeduppaths():
+ """ Remove duplicate entries from sys.path along with making them
+ absolute"""
+ # This ensures that the initial path provided by the interpreter contains
+ # only absolute pathnames, even if we're running from the build directory.
+ L = []
+ known_paths = set()
+ for dir in sys.path:
+ # Filter out duplicate paths (on case-insensitive file systems also
+ # if they only differ in case); turn relative paths into absolute
+ # paths.
+ dir, dircase = makepath(dir)
+ if dircase not in known_paths:
+ L.append(dir)
+ known_paths.add(dircase)
+ sys.path[:] = L
+ return known_paths
+
+
+def _init_pathinfo():
+ """Return a set containing all existing file system items from sys.path."""
+ d = set()
+ for item in sys.path:
+ try:
+ if os.path.exists(item):
+ _, itemcase = makepath(item)
+ d.add(itemcase)
+ except TypeError:
+ continue
+ return d
+
+
+def addpackage(sitedir, name, known_paths):
+ """Process a .pth file within the site-packages directory:
+ For each line in the file, either combine it with sitedir to a path
+ and add that to known_paths, or execute it if it starts with 'import '.
+ """
+ if known_paths is None:
+ known_paths = _init_pathinfo()
+ reset = True
+ else:
+ reset = False
+ fullname = os.path.join(sitedir, name)
+ try:
+ st = os.lstat(fullname)
+ except OSError:
+ return
+ if ((getattr(st, 'st_flags', 0) & stat.UF_HIDDEN) or
+ (getattr(st, 'st_file_attributes', 0) & stat.FILE_ATTRIBUTE_HIDDEN)):
+ _trace(f"Skipping hidden .pth file: {fullname!r}")
+ return
+ _trace(f"Processing .pth file: {fullname!r}")
+ try:
+ # locale encoding is not ideal especially on Windows. But we have used
+ # it for a long time. setuptools uses the locale encoding too.
+ f = io.TextIOWrapper(io.open_code(fullname), encoding="locale")
+ except OSError:
+ return
+ with f:
+ for n, line in enumerate(f):
+ if line.startswith("#"):
+ continue
+ if line.strip() == "":
+ continue
+ try:
+ if line.startswith(("import ", "import\t")):
+ exec(line)
+ continue
+ line = line.rstrip()
+ dir, dircase = makepath(sitedir, line)
+ if not dircase in known_paths and os.path.exists(dir):
+ sys.path.append(dir)
+ known_paths.add(dircase)
+ except Exception:
+ print("Error processing line {:d} of {}:\n".format(n+1, fullname),
+ file=sys.stderr)
+ import traceback
+ for record in traceback.format_exception(*sys.exc_info()):
+ for line in record.splitlines():
+ print(' '+line, file=sys.stderr)
+ print("\nRemainder of file ignored", file=sys.stderr)
+ break
+ if reset:
+ known_paths = None
+ return known_paths
+
+
+def addsitedir(sitedir, known_paths=None):
+ """Add 'sitedir' argument to sys.path if missing and handle .pth files in
+ 'sitedir'"""
+ _trace(f"Adding directory: {sitedir!r}")
+ if known_paths is None:
+ known_paths = _init_pathinfo()
+ reset = True
+ else:
+ reset = False
+ sitedir, sitedircase = makepath(sitedir)
+ if not sitedircase in known_paths:
+ sys.path.append(sitedir) # Add path component
+ known_paths.add(sitedircase)
+ try:
+ names = os.listdir(sitedir)
+ except OSError:
+ return
+ names = [name for name in names
+ if name.endswith(".pth") and not name.startswith(".")]
+ for name in sorted(names):
+ addpackage(sitedir, name, known_paths)
+ if reset:
+ known_paths = None
+ return known_paths
+
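+# A usage sketch (the directory and .pth file are hypothetical): the call
+# below appends /opt/myapp/lib to sys.path, and if that directory holds a
+# file extras.pth containing the line "extras", the existing subdirectory
+# /opt/myapp/lib/extras is appended as well:
+#
+#     >>> import site
+#     >>> site.addsitedir('/opt/myapp/lib')  # doctest: +SKIP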
+
+def check_enableusersite():
+ """Check if user site directory is safe for inclusion
+
+ The function tests for the command line flag (including environment var),
+ process uid/gid equal to effective uid/gid.
+
+ None: Disabled for security reasons
+ False: Disabled by user (command line option)
+ True: Safe and enabled
+ """
+ if sys.flags.no_user_site:
+ return False
+
+ if hasattr(os, "getuid") and hasattr(os, "geteuid"):
+ # check process uid == effective uid
+ if os.geteuid() != os.getuid():
+ return None
+ if hasattr(os, "getgid") and hasattr(os, "getegid"):
+ # check process gid == effective gid
+ if os.getegid() != os.getgid():
+ return None
+
+ return True
+
+
+# NOTE: sysconfig and its dependencies are relatively large, but the site
+# module needs only a very limited part of them.
+# To speed up startup time, we keep a copy of them here.
+#
+# See https://bugs.python.org/issue29585
+
+# Copy of sysconfig._getuserbase()
+def _getuserbase():
+ env_base = os.environ.get("PYTHONUSERBASE", None)
+ if env_base:
+ return env_base
+
+ # VxWorks has no home directories
+ if sys.platform == "vxworks":
+ return None
+
+ def joinuser(*args):
+ return os.path.expanduser(os.path.join(*args))
+
+ if os.name == "nt":
+ base = os.environ.get("APPDATA") or "~"
+ return joinuser(base, "Python")
+
+ if sys.platform == "darwin" and sys._framework:
+ return joinuser("~", "Library", sys._framework,
+ "%d.%d" % sys.version_info[:2])
+
+ return joinuser("~", ".local")
+
+
+# Same as sysconfig.get_path('purelib', os.name+'_user')
+def _get_path(userbase):
+ version = sys.version_info
+
+ if os.name == 'nt':
+ ver_nodot = sys.winver.replace('.', '')
+ return f'{userbase}\\Python{ver_nodot}\\site-packages'
+
+ if sys.platform == 'darwin' and sys._framework:
+ return f'{userbase}/lib/python/site-packages'
+
+ return f'{userbase}/lib/python{version[0]}.{version[1]}/site-packages'
+
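+# Illustrative results of _get_path() for Python 3.10 (the exact paths
+# depend on the platform and, on macOS, on the framework name):
+#
+#   POSIX:           <userbase>/lib/python3.10/site-packages
+#   Windows:         <userbase>\Python310\site-packages
+#   macOS framework: <userbase>/lib/python/site-packages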
+
+def getuserbase():
+ """Returns the `user base` directory path.
+
+ The `user base` directory can be used to store data. If the global
+ variable ``USER_BASE`` is not initialized yet, this function will also set
+ it.
+ """
+ global USER_BASE
+ if USER_BASE is None:
+ USER_BASE = _getuserbase()
+ return USER_BASE
+
+
+def getusersitepackages():
+ """Returns the user-specific site-packages directory path.
+
+ If the global variable ``USER_SITE`` is not initialized yet, this
+ function will also set it.
+ """
+ global USER_SITE, ENABLE_USER_SITE
+ userbase = getuserbase() # this will also set USER_BASE
+
+ if USER_SITE is None:
+ if userbase is None:
+ ENABLE_USER_SITE = False # disable user site and return None
+ else:
+ USER_SITE = _get_path(userbase)
+
+ return USER_SITE
+
+def addusersitepackages(known_paths):
+ """Add a per user site-package to sys.path
+
+ Each user has its own python directory with site-packages in the
+ home directory.
+ """
+ # get the per user site-package path
+ # this call will also make sure USER_BASE and USER_SITE are set
+ _trace("Processing user site-packages")
+ user_site = getusersitepackages()
+
+ if ENABLE_USER_SITE and os.path.isdir(user_site):
+ addsitedir(user_site, known_paths)
+ return known_paths
+
+def getsitepackages(prefixes=None):
+ """Returns a list containing all global site-packages directories.
+
+ For each directory present in ``prefixes`` (or the global ``PREFIXES``),
+ this function will find its `site-packages` subdirectory depending on the
+ system environment, and will return a list of full paths.
+ """
+ sitepackages = []
+ seen = set()
+
+ if prefixes is None:
+ prefixes = PREFIXES
+
+ for prefix in prefixes:
+ if not prefix or prefix in seen:
+ continue
+ seen.add(prefix)
+
+ libdirs = [sys.platlibdir]
+ if sys.platlibdir != "lib":
+ libdirs.append("lib")
+
+ if os.sep == '/':
+ for libdir in libdirs:
+ path = os.path.join(prefix, libdir,
+ "python%d.%d" % sys.version_info[:2],
+ "site-packages")
+ sitepackages.append(path)
+ else:
+ sitepackages.append(prefix)
+
+ for libdir in libdirs:
+ path = os.path.join(prefix, libdir, "site-packages")
+ sitepackages.append(path)
+ return sitepackages
+
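+# Example (actual paths depend on the installation prefix):
+#
+#     >>> import site
+#     >>> site.getsitepackages()  # doctest: +SKIP
+#     ['/usr/local/lib/python3.10/site-packages']
+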
+def addsitepackages(known_paths, prefixes=None):
+ """Add site-packages to sys.path"""
+ _trace("Processing global site-packages")
+ for sitedir in getsitepackages(prefixes):
+ if os.path.isdir(sitedir):
+ addsitedir(sitedir, known_paths)
+
+ return known_paths
+
+def setquit():
+ """Define new builtins 'quit' and 'exit'.
+
+ These are objects which make the interpreter exit when called.
+ The repr of each object contains a hint at how it works.
+
+ """
+ if os.sep == '\\':
+ eof = 'Ctrl-Z plus Return'
+ else:
+ eof = 'Ctrl-D (i.e. EOF)'
+
+ builtins.quit = _sitebuiltins.Quitter('quit', eof)
+ builtins.exit = _sitebuiltins.Quitter('exit', eof)
+
+
+def setcopyright():
+ """Set 'copyright' and 'credits' in builtins"""
+ builtins.copyright = _sitebuiltins._Printer("copyright", sys.copyright)
+ if sys.platform[:4] == 'java':
+ builtins.credits = _sitebuiltins._Printer(
+ "credits",
+ "Jython is maintained by the Jython developers (www.jython.org).")
+ else:
+ builtins.credits = _sitebuiltins._Printer("credits", """\
+ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
+ for supporting Python development. See www.python.org for more information.""")
+ files, dirs = [], []
+ # Not all modules are required to have a __file__ attribute. See
+ # PEP 420 for more details.
+ if hasattr(os, '__file__'):
+ here = os.path.dirname(os.__file__)
+ files.extend(["LICENSE.txt", "LICENSE"])
+ dirs.extend([os.path.join(here, os.pardir), here, os.curdir])
+ builtins.license = _sitebuiltins._Printer(
+ "license",
+ "See https://www.python.org/psf/license/",
+ files, dirs)
+
+
+def sethelper():
+ builtins.help = _sitebuiltins._Helper()
+
+def enablerlcompleter():
+ """Enable default readline configuration on interactive prompts, by
+ registering a sys.__interactivehook__.
+
+ If the readline module can be imported, the hook will set the Tab key
+ as completion key and register ~/.python_history as history file.
+ This can be overridden in the sitecustomize or usercustomize module,
+ or in a PYTHONSTARTUP file.
+ """
+ def register_readline():
+ import atexit
+ try:
+ import readline
+ import rlcompleter
+ except ImportError:
+ return
+
+ # Reading the initialization (config) file may not be enough to set a
+ # completion key, so we set one first and then read the file.
+ readline_doc = getattr(readline, '__doc__', '')
+ if readline_doc is not None and 'libedit' in readline_doc:
+ readline.parse_and_bind('bind ^I rl_complete')
+ else:
+ readline.parse_and_bind('tab: complete')
+
+ try:
+ readline.read_init_file()
+ except OSError:
+ # An OSError here could have many causes, but the most likely one
+ # is that there's no .inputrc file (or .editrc file in the case of
+ # Mac OS X + libedit) in the expected location. In that case, we
+ # want to ignore the exception.
+ pass
+
+ if readline.get_current_history_length() == 0:
+ # If no history was loaded, default to .python_history.
+ # The guard is necessary to avoid doubling history size at
+ # each interpreter exit when readline was already configured
+ # through a PYTHONSTARTUP hook, see:
+ # http://bugs.python.org/issue5845#msg198636
+ history = os.path.join(os.path.expanduser('~'),
+ '.python_history')
+ try:
+ readline.read_history_file(history)
+ except OSError:
+ pass
+
+ def write_history():
+ try:
+ readline.write_history_file(history)
+ except OSError:
+ # bpo-19891, bpo-41193: Home directory does not exist
+ # or is not writable, or the filesystem is read-only.
+ pass
+
+ atexit.register(write_history)
+
+ sys.__interactivehook__ = register_readline
+
+def venv(known_paths):
+ global PREFIXES, ENABLE_USER_SITE
+
+ env = os.environ
+ if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env:
+ executable = sys._base_executable = os.environ['__PYVENV_LAUNCHER__']
+ else:
+ executable = sys.executable
+ exe_dir, _ = os.path.split(os.path.abspath(executable))
+ site_prefix = os.path.dirname(exe_dir)
+ sys._home = None
+ conf_basename = 'pyvenv.cfg'
+ candidate_confs = [
+ conffile for conffile in (
+ os.path.join(exe_dir, conf_basename),
+ os.path.join(site_prefix, conf_basename)
+ )
+ if os.path.isfile(conffile)
+ ]
+
+ if candidate_confs:
+ virtual_conf = candidate_confs[0]
+ system_site = "true"
+ # Issue 25185: Use UTF-8, as that's what the venv module uses when
+ # writing the file.
+ with open(virtual_conf, encoding='utf-8') as f:
+ for line in f:
+ if '=' in line:
+ key, _, value = line.partition('=')
+ key = key.strip().lower()
+ value = value.strip()
+ if key == 'include-system-site-packages':
+ system_site = value.lower()
+ elif key == 'home':
+ sys._home = value
+
+ sys.prefix = sys.exec_prefix = site_prefix
+
+ # Doing this here ensures venv takes precedence over user-site
+ addsitepackages(known_paths, [sys.prefix])
+
+ # addsitepackages will process site_prefix again if it's in PREFIXES,
+ # but that's ok; known_paths will prevent anything being added twice
+ if system_site == "true":
+ PREFIXES.insert(0, sys.prefix)
+ else:
+ PREFIXES = [sys.prefix]
+ ENABLE_USER_SITE = False
+
+ return known_paths
+
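+# A sample pyvenv.cfg as written by the venv module (values are
+# illustrative):
+#
+#   home = /usr/local/bin
+#   include-system-site-packages = false
+#   version = 3.10.14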
+
+def execsitecustomize():
+ """Run custom site specific code, if available."""
+ try:
+ try:
+ import sitecustomize
+ except ImportError as exc:
+ if exc.name == 'sitecustomize':
+ pass
+ else:
+ raise
+ except Exception as err:
+ if sys.flags.verbose:
+ sys.excepthook(*sys.exc_info())
+ else:
+ sys.stderr.write(
+ "Error in sitecustomize; set PYTHONVERBOSE for traceback:\n"
+ "%s: %s\n" %
+ (err.__class__.__name__, err))
+
+
+def execusercustomize():
+ """Run custom user specific code, if available."""
+ try:
+ try:
+ import usercustomize
+ except ImportError as exc:
+ if exc.name == 'usercustomize':
+ pass
+ else:
+ raise
+ except Exception as err:
+ if sys.flags.verbose:
+ sys.excepthook(*sys.exc_info())
+ else:
+ sys.stderr.write(
+ "Error in usercustomize; set PYTHONVERBOSE for traceback:\n"
+ "%s: %s\n" %
+ (err.__class__.__name__, err))
+
+
+def main():
+ """Add standard site-specific directories to the module search path.
+
+ This function is called automatically when this module is imported,
+ unless the python interpreter was started with the -S flag.
+ """
+ global ENABLE_USER_SITE
+
+ orig_path = sys.path[:]
+ known_paths = removeduppaths()
+ if orig_path != sys.path:
+ # removeduppaths() might make sys.path absolute.
+ # fix __file__ and __cached__ of already imported modules too.
+ abs_paths()
+
+ known_paths = venv(known_paths)
+ if ENABLE_USER_SITE is None:
+ ENABLE_USER_SITE = check_enableusersite()
+ known_paths = addusersitepackages(known_paths)
+ known_paths = addsitepackages(known_paths)
+ setquit()
+ setcopyright()
+ sethelper()
+ if not sys.flags.isolated:
+ enablerlcompleter()
+ execsitecustomize()
+ if ENABLE_USER_SITE:
+ execusercustomize()
+
+# Prevent sys.path from being extended when Python was started with -S
+# and site is imported later.
+if not sys.flags.no_site:
+ main()
+
+def _script():
+ help = """\
+ %s [--user-base] [--user-site]
+
+ Without arguments, print some useful information.
+ With arguments, print the value of USER_BASE and/or USER_SITE separated
+ by '%s'.
+
+ Exit codes with --user-base or --user-site:
+ 0 - user site directory is enabled
+ 1 - user site directory is disabled by user
+ 2 - user site directory is disabled by super user
+ or for security reasons
+ >2 - unknown error
+ """
+ args = sys.argv[1:]
+ if not args:
+ user_base = getuserbase()
+ user_site = getusersitepackages()
+ print("sys.path = [")
+ for dir in sys.path:
+ print(" %r," % (dir,))
+ print("]")
+ def exists(path):
+ if path is not None and os.path.isdir(path):
+ return "exists"
+ else:
+ return "doesn't exist"
+ print(f"USER_BASE: {user_base!r} ({exists(user_base)})")
+ print(f"USER_SITE: {user_site!r} ({exists(user_site)})")
+ print(f"ENABLE_USER_SITE: {ENABLE_USER_SITE!r}")
+ sys.exit(0)
+
+ buffer = []
+ if '--user-base' in args:
+ buffer.append(USER_BASE)
+ if '--user-site' in args:
+ buffer.append(USER_SITE)
+
+ if buffer:
+ print(os.pathsep.join(buffer))
+ if ENABLE_USER_SITE:
+ sys.exit(0)
+ elif ENABLE_USER_SITE is False:
+ sys.exit(1)
+ elif ENABLE_USER_SITE is None:
+ sys.exit(2)
+ else:
+ sys.exit(3)
+ else:
+ import textwrap
+ print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
+ sys.exit(10)
+
+if __name__ == '__main__':
+ _script()
diff --git a/infer_4_37_2/lib/python3.10/smtpd.py b/infer_4_37_2/lib/python3.10/smtpd.py
new file mode 100644
index 0000000000000000000000000000000000000000..963e0a7689c269fe0998127946ec190cf8d1a1c1
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/smtpd.py
@@ -0,0 +1,979 @@
+#! /usr/bin/env python3
+"""An RFC 5321 smtp proxy with optional RFC 1870 and RFC 6531 extensions.
+
+Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
+
+Options:
+
+ --nosetuid
+ -n
+ This program generally tries to setuid `nobody', unless this flag is
+ set. The setuid call will fail if this program is not run as root (in
+ which case, use this flag).
+
+ --version
+ -V
+ Print the version number and exit.
+
+ --class classname
+ -c classname
+ Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
+ default.
+
+ --size limit
+ -s limit
+ Restrict the total size of the incoming message to "limit" number of
+ bytes via the RFC 1870 SIZE extension. Defaults to 33554432 bytes.
+
+ --smtputf8
+ -u
+ Enable the SMTPUTF8 extension and behave as an RFC 6531 smtp proxy.
+
+ --debug
+ -d
+ Turn on debugging prints.
+
+ --help
+ -h
+ Print this message and exit.
+
+Version: %(__version__)s
+
+If localhost is not given then `localhost' is used, and if localport is not
+given then 8025 is used. If remotehost is not given then `localhost' is used,
+and if remoteport is not given, then 25 is used.
+"""
+
+# Overview:
+#
+# This file implements the minimal SMTP protocol as defined in RFC 5321. It
+# has a hierarchy of classes which implement the backend functionality for the
+# smtpd. A number of classes are provided:
+#
+# SMTPServer - the base class for the backend. Raises NotImplementedError
+# if you try to use it.
+#
+# DebuggingServer - simply prints each message it receives on stdout.
+#
+# PureProxy - Proxies all messages to a real smtpd which does final
+# delivery. One known problem with this class is that it doesn't handle
+# SMTP errors from the backend server at all. This should be fixed
+# (contributions are welcome!).
+#
+# MailmanProxy - An experimental hack to work with GNU Mailman
+# <www.list.org>. Using this server as your real incoming smtpd, your
+# mailhost will automatically recognize and accept mail destined to Mailman
+# lists when those lists are created. Every message not destined for a list
+# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
+# are not handled correctly yet.
+#
+#
+# Author: Barry Warsaw
+#
+# TODO:
+#
+# - support mailbox delivery
+# - alias files
+# - Handle more ESMTP extensions
+# - handle error codes from the backend smtpd
+
+import sys
+import os
+import errno
+import getopt
+import time
+import socket
+import collections
+from warnings import warn
+from email._header_value_parser import get_addr_spec, get_angle_addr
+
+__all__ = [
+ "SMTPChannel", "SMTPServer", "DebuggingServer", "PureProxy",
+ "MailmanProxy",
+]
+
+warn(
+ 'The smtpd module is deprecated and unmaintained and will be removed '
+ 'in Python 3.12. Please see aiosmtpd '
+ '(https://aiosmtpd.readthedocs.io/) for the recommended replacement.',
+ DeprecationWarning,
+ stacklevel=2)
+
+
+# These are imported after the above warning so that users get the correct
+# deprecation warning.
+import asyncore
+import asynchat
+
+
+program = sys.argv[0]
+__version__ = 'Python SMTP proxy version 0.3'
+
+
+class Devnull:
+ def write(self, msg): pass
+ def flush(self): pass
+
+
+DEBUGSTREAM = Devnull()
+NEWLINE = '\n'
+COMMASPACE = ', '
+DATA_SIZE_DEFAULT = 33554432
+
+
+def usage(code, msg=''):
+ print(__doc__ % globals(), file=sys.stderr)
+ if msg:
+ print(msg, file=sys.stderr)
+ sys.exit(code)
+
+
+class SMTPChannel(asynchat.async_chat):
+ COMMAND = 0
+ DATA = 1
+
+ command_size_limit = 512
+ command_size_limits = collections.defaultdict(lambda x=command_size_limit: x)
+
+ @property
+ def max_command_size_limit(self):
+ try:
+ return max(self.command_size_limits.values())
+ except ValueError:
+ return self.command_size_limit
+
+ def __init__(self, server, conn, addr, data_size_limit=DATA_SIZE_DEFAULT,
+ map=None, enable_SMTPUTF8=False, decode_data=False):
+ asynchat.async_chat.__init__(self, conn, map=map)
+ self.smtp_server = server
+ self.conn = conn
+ self.addr = addr
+ self.data_size_limit = data_size_limit
+ self.enable_SMTPUTF8 = enable_SMTPUTF8
+ self._decode_data = decode_data
+ if enable_SMTPUTF8 and decode_data:
+ raise ValueError("decode_data and enable_SMTPUTF8 cannot"
+ " be set to True at the same time")
+ if decode_data:
+ self._emptystring = ''
+ self._linesep = '\r\n'
+ self._dotsep = '.'
+ self._newline = NEWLINE
+ else:
+ self._emptystring = b''
+ self._linesep = b'\r\n'
+ self._dotsep = ord(b'.')
+ self._newline = b'\n'
+ self._set_rset_state()
+ self.seen_greeting = ''
+ self.extended_smtp = False
+ self.command_size_limits.clear()
+ self.fqdn = socket.getfqdn()
+ try:
+ self.peer = conn.getpeername()
+ except OSError as err:
+ # a race condition may occur if the other end is closing
+ # before we can get the peername
+ self.close()
+ if err.errno != errno.ENOTCONN:
+ raise
+ return
+ print('Peer:', repr(self.peer), file=DEBUGSTREAM)
+ self.push('220 %s %s' % (self.fqdn, __version__))
+
+ def _set_post_data_state(self):
+ """Reset state variables to their post-DATA state."""
+ self.smtp_state = self.COMMAND
+ self.mailfrom = None
+ self.rcpttos = []
+ self.require_SMTPUTF8 = False
+ self.num_bytes = 0
+ self.set_terminator(b'\r\n')
+
+ def _set_rset_state(self):
+ """Reset all state variables except the greeting."""
+ self._set_post_data_state()
+ self.received_data = ''
+ self.received_lines = []
+
+
+ # properties for backwards-compatibility
+ @property
+ def __server(self):
+ warn("Access to __server attribute on SMTPChannel is deprecated, "
+ "use 'smtp_server' instead", DeprecationWarning, 2)
+ return self.smtp_server
+ @__server.setter
+ def __server(self, value):
+ warn("Setting __server attribute on SMTPChannel is deprecated, "
+ "set 'smtp_server' instead", DeprecationWarning, 2)
+ self.smtp_server = value
+
+ @property
+ def __line(self):
+ warn("Access to __line attribute on SMTPChannel is deprecated, "
+ "use 'received_lines' instead", DeprecationWarning, 2)
+ return self.received_lines
+ @__line.setter
+ def __line(self, value):
+ warn("Setting __line attribute on SMTPChannel is deprecated, "
+ "set 'received_lines' instead", DeprecationWarning, 2)
+ self.received_lines = value
+
+ @property
+ def __state(self):
+ warn("Access to __state attribute on SMTPChannel is deprecated, "
+ "use 'smtp_state' instead", DeprecationWarning, 2)
+ return self.smtp_state
+ @__state.setter
+ def __state(self, value):
+ warn("Setting __state attribute on SMTPChannel is deprecated, "
+ "set 'smtp_state' instead", DeprecationWarning, 2)
+ self.smtp_state = value
+
+ @property
+ def __greeting(self):
+ warn("Access to __greeting attribute on SMTPChannel is deprecated, "
+ "use 'seen_greeting' instead", DeprecationWarning, 2)
+ return self.seen_greeting
+ @__greeting.setter
+ def __greeting(self, value):
+ warn("Setting __greeting attribute on SMTPChannel is deprecated, "
+ "set 'seen_greeting' instead", DeprecationWarning, 2)
+ self.seen_greeting = value
+
+ @property
+ def __mailfrom(self):
+ warn("Access to __mailfrom attribute on SMTPChannel is deprecated, "
+ "use 'mailfrom' instead", DeprecationWarning, 2)
+ return self.mailfrom
+ @__mailfrom.setter
+ def __mailfrom(self, value):
+ warn("Setting __mailfrom attribute on SMTPChannel is deprecated, "
+ "set 'mailfrom' instead", DeprecationWarning, 2)
+ self.mailfrom = value
+
+ @property
+ def __rcpttos(self):
+ warn("Access to __rcpttos attribute on SMTPChannel is deprecated, "
+ "use 'rcpttos' instead", DeprecationWarning, 2)
+ return self.rcpttos
+ @__rcpttos.setter
+ def __rcpttos(self, value):
+ warn("Setting __rcpttos attribute on SMTPChannel is deprecated, "
+ "set 'rcpttos' instead", DeprecationWarning, 2)
+ self.rcpttos = value
+
+ @property
+ def __data(self):
+ warn("Access to __data attribute on SMTPChannel is deprecated, "
+ "use 'received_data' instead", DeprecationWarning, 2)
+ return self.received_data
+ @__data.setter
+ def __data(self, value):
+ warn("Setting __data attribute on SMTPChannel is deprecated, "
+ "set 'received_data' instead", DeprecationWarning, 2)
+ self.received_data = value
+
+ @property
+ def __fqdn(self):
+ warn("Access to __fqdn attribute on SMTPChannel is deprecated, "
+ "use 'fqdn' instead", DeprecationWarning, 2)
+ return self.fqdn
+ @__fqdn.setter
+ def __fqdn(self, value):
+ warn("Setting __fqdn attribute on SMTPChannel is deprecated, "
+ "set 'fqdn' instead", DeprecationWarning, 2)
+ self.fqdn = value
+
+ @property
+ def __peer(self):
+ warn("Access to __peer attribute on SMTPChannel is deprecated, "
+ "use 'peer' instead", DeprecationWarning, 2)
+ return self.peer
+ @__peer.setter
+ def __peer(self, value):
+ warn("Setting __peer attribute on SMTPChannel is deprecated, "
+ "set 'peer' instead", DeprecationWarning, 2)
+ self.peer = value
+
+ @property
+ def __conn(self):
+ warn("Access to __conn attribute on SMTPChannel is deprecated, "
+ "use 'conn' instead", DeprecationWarning, 2)
+ return self.conn
+ @__conn.setter
+ def __conn(self, value):
+ warn("Setting __conn attribute on SMTPChannel is deprecated, "
+ "set 'conn' instead", DeprecationWarning, 2)
+ self.conn = value
+
+ @property
+ def __addr(self):
+ warn("Access to __addr attribute on SMTPChannel is deprecated, "
+ "use 'addr' instead", DeprecationWarning, 2)
+ return self.addr
+ @__addr.setter
+ def __addr(self, value):
+ warn("Setting __addr attribute on SMTPChannel is deprecated, "
+ "set 'addr' instead", DeprecationWarning, 2)
+ self.addr = value
+
+ # Overrides base class for convenience.
+ def push(self, msg):
+ asynchat.async_chat.push(self, bytes(
+ msg + '\r\n', 'utf-8' if self.require_SMTPUTF8 else 'ascii'))
+
+ # Implementation of base class abstract method
+ def collect_incoming_data(self, data):
+ limit = None
+ if self.smtp_state == self.COMMAND:
+ limit = self.max_command_size_limit
+ elif self.smtp_state == self.DATA:
+ limit = self.data_size_limit
+ if limit and self.num_bytes > limit:
+ return
+ elif limit:
+ self.num_bytes += len(data)
+ if self._decode_data:
+ self.received_lines.append(str(data, 'utf-8'))
+ else:
+ self.received_lines.append(data)
+
+ # Implementation of base class abstract method
+ def found_terminator(self):
+ line = self._emptystring.join(self.received_lines)
+ print('Data:', repr(line), file=DEBUGSTREAM)
+ self.received_lines = []
+ if self.smtp_state == self.COMMAND:
+ sz, self.num_bytes = self.num_bytes, 0
+ if not line:
+ self.push('500 Error: bad syntax')
+ return
+ if not self._decode_data:
+ line = str(line, 'utf-8')
+ i = line.find(' ')
+ if i < 0:
+ command = line.upper()
+ arg = None
+ else:
+ command = line[:i].upper()
+ arg = line[i+1:].strip()
+ max_sz = (self.command_size_limits[command]
+ if self.extended_smtp else self.command_size_limit)
+ if sz > max_sz:
+ self.push('500 Error: line too long')
+ return
+ method = getattr(self, 'smtp_' + command, None)
+ if not method:
+ self.push('500 Error: command "%s" not recognized' % command)
+ return
+ method(arg)
+ return
+ else:
+ if self.smtp_state != self.DATA:
+ self.push('451 Internal confusion')
+ self.num_bytes = 0
+ return
+ if self.data_size_limit and self.num_bytes > self.data_size_limit:
+ self.push('552 Error: Too much mail data')
+ self.num_bytes = 0
+ return
+ # Remove extraneous carriage returns and de-transparency according
+ # to RFC 5321, Section 4.5.2.
+ data = []
+ for text in line.split(self._linesep):
+ if text and text[0] == self._dotsep:
+ data.append(text[1:])
+ else:
+ data.append(text)
+ self.received_data = self._newline.join(data)
+ args = (self.peer, self.mailfrom, self.rcpttos, self.received_data)
+ kwargs = {}
+ if not self._decode_data:
+ kwargs = {
+ 'mail_options': self.mail_options,
+ 'rcpt_options': self.rcpt_options,
+ }
+ status = self.smtp_server.process_message(*args, **kwargs)
+ self._set_post_data_state()
+ if not status:
+ self.push('250 OK')
+ else:
+ self.push(status)
+
+ # SMTP and ESMTP commands
+ def smtp_HELO(self, arg):
+ if not arg:
+ self.push('501 Syntax: HELO hostname')
+ return
+ # See issue #21783 for a discussion of this behavior.
+ if self.seen_greeting:
+ self.push('503 Duplicate HELO/EHLO')
+ return
+ self._set_rset_state()
+ self.seen_greeting = arg
+ self.push('250 %s' % self.fqdn)
+
+ def smtp_EHLO(self, arg):
+ if not arg:
+ self.push('501 Syntax: EHLO hostname')
+ return
+ # See issue #21783 for a discussion of this behavior.
+ if self.seen_greeting:
+ self.push('503 Duplicate HELO/EHLO')
+ return
+ self._set_rset_state()
+ self.seen_greeting = arg
+ self.extended_smtp = True
+ self.push('250-%s' % self.fqdn)
+ if self.data_size_limit:
+ self.push('250-SIZE %s' % self.data_size_limit)
+ self.command_size_limits['MAIL'] += 26
+ if not self._decode_data:
+ self.push('250-8BITMIME')
+ if self.enable_SMTPUTF8:
+ self.push('250-SMTPUTF8')
+ self.command_size_limits['MAIL'] += 10
+ self.push('250 HELP')
+
+ def smtp_NOOP(self, arg):
+ if arg:
+ self.push('501 Syntax: NOOP')
+ else:
+ self.push('250 OK')
+
+ def smtp_QUIT(self, arg):
+ # args is ignored
+ self.push('221 Bye')
+ self.close_when_done()
+
+ def _strip_command_keyword(self, keyword, arg):
+ keylen = len(keyword)
+ if arg[:keylen].upper() == keyword:
+ return arg[keylen:].strip()
+ return ''
+
+ def _getaddr(self, arg):
+ if not arg:
+ return '', ''
+ if arg.lstrip().startswith('<'):
+ address, rest = get_angle_addr(arg)
+ else:
+ address, rest = get_addr_spec(arg)
+ if not address:
+ return address, rest
+ return address.addr_spec, rest
+
+ def _getparams(self, params):
+ # Return params as dictionary. Return None if not all parameters
+ # appear to be syntactically valid according to RFC 1869.
+ result = {}
+ for param in params:
+ param, eq, value = param.partition('=')
+ if not param.isalnum() or eq and not value:
+ return None
+ result[param] = value if eq else True
+ return result
+
+ def smtp_HELP(self, arg):
+ if arg:
+ extended = ' [SP <mail-parameters>]'
+ lc_arg = arg.upper()
+ if lc_arg == 'EHLO':
+ self.push('250 Syntax: EHLO hostname')
+ elif lc_arg == 'HELO':
+ self.push('250 Syntax: HELO hostname')
+ elif lc_arg == 'MAIL':
+ msg = '250 Syntax: MAIL FROM: <address>'
+ if self.extended_smtp:
+ msg += extended
+ self.push(msg)
+ elif lc_arg == 'RCPT':
+ msg = '250 Syntax: RCPT TO: <address>'
+ if self.extended_smtp:
+ msg += extended
+ self.push(msg)
+ elif lc_arg == 'DATA':
+ self.push('250 Syntax: DATA')
+ elif lc_arg == 'RSET':
+ self.push('250 Syntax: RSET')
+ elif lc_arg == 'NOOP':
+ self.push('250 Syntax: NOOP')
+ elif lc_arg == 'QUIT':
+ self.push('250 Syntax: QUIT')
+ elif lc_arg == 'VRFY':
+ self.push('250 Syntax: VRFY <address>')
+ else:
+ self.push('501 Supported commands: EHLO HELO MAIL RCPT '
+ 'DATA RSET NOOP QUIT VRFY')
+ else:
+ self.push('250 Supported commands: EHLO HELO MAIL RCPT DATA '
+ 'RSET NOOP QUIT VRFY')
+
+ def smtp_VRFY(self, arg):
+ if arg:
+ address, params = self._getaddr(arg)
+ if address:
+ self.push('252 Cannot VRFY user, but will accept message '
+ 'and attempt delivery')
+ else:
+ self.push('502 Could not VRFY %s' % arg)
+ else:
+ self.push('501 Syntax: VRFY <address>')
+
+ def smtp_MAIL(self, arg):
+ if not self.seen_greeting:
+ self.push('503 Error: send HELO first')
+ return
+ print('===> MAIL', arg, file=DEBUGSTREAM)
+ syntaxerr = '501 Syntax: MAIL FROM: <address>'
+ if self.extended_smtp:
+ syntaxerr += ' [SP <mail-parameters>]'
+ if arg is None:
+ self.push(syntaxerr)
+ return
+ arg = self._strip_command_keyword('FROM:', arg)
+ address, params = self._getaddr(arg)
+ if not address:
+ self.push(syntaxerr)
+ return
+ if not self.extended_smtp and params:
+ self.push(syntaxerr)
+ return
+ if self.mailfrom:
+ self.push('503 Error: nested MAIL command')
+ return
+ self.mail_options = params.upper().split()
+ params = self._getparams(self.mail_options)
+ if params is None:
+ self.push(syntaxerr)
+ return
+ if not self._decode_data:
+ body = params.pop('BODY', '7BIT')
+ if body not in ['7BIT', '8BITMIME']:
+ self.push('501 Error: BODY can only be one of 7BIT, 8BITMIME')
+ return
+ if self.enable_SMTPUTF8:
+ smtputf8 = params.pop('SMTPUTF8', False)
+ if smtputf8 is True:
+ self.require_SMTPUTF8 = True
+ elif smtputf8 is not False:
+ self.push('501 Error: SMTPUTF8 takes no arguments')
+ return
+ size = params.pop('SIZE', None)
+ if size:
+ if not size.isdigit():
+ self.push(syntaxerr)
+ return
+ elif self.data_size_limit and int(size) > self.data_size_limit:
+ self.push('552 Error: message size exceeds fixed maximum message size')
+ return
+ if len(params.keys()) > 0:
+ self.push('555 MAIL FROM parameters not recognized or not implemented')
+ return
+ self.mailfrom = address
+ print('sender:', self.mailfrom, file=DEBUGSTREAM)
+ self.push('250 OK')
+
+ def smtp_RCPT(self, arg):
+ if not self.seen_greeting:
+ self.push('503 Error: send HELO first')
+ return
+ print('===> RCPT', arg, file=DEBUGSTREAM)
+ if not self.mailfrom:
+ self.push('503 Error: need MAIL command')
+ return
+ syntaxerr = '501 Syntax: RCPT TO: <address>'
+ if self.extended_smtp:
+ syntaxerr += ' [SP <rcpt-parameters>]'
+ if arg is None:
+ self.push(syntaxerr)
+ return
+ arg = self._strip_command_keyword('TO:', arg)
+ address, params = self._getaddr(arg)
+ if not address:
+ self.push(syntaxerr)
+ return
+ if not self.extended_smtp and params:
+ self.push(syntaxerr)
+ return
+ self.rcpt_options = params.upper().split()
+ params = self._getparams(self.rcpt_options)
+ if params is None:
+ self.push(syntaxerr)
+ return
+ # XXX currently there are no options we recognize.
+ if len(params.keys()) > 0:
+ self.push('555 RCPT TO parameters not recognized or not implemented')
+ return
+ self.rcpttos.append(address)
+ print('recips:', self.rcpttos, file=DEBUGSTREAM)
+ self.push('250 OK')
+
+ def smtp_RSET(self, arg):
+ if arg:
+ self.push('501 Syntax: RSET')
+ return
+ self._set_rset_state()
+ self.push('250 OK')
+
+ def smtp_DATA(self, arg):
+ if not self.seen_greeting:
+ self.push('503 Error: send HELO first')
+ return
+ if not self.rcpttos:
+ self.push('503 Error: need RCPT command')
+ return
+ if arg:
+ self.push('501 Syntax: DATA')
+ return
+ self.smtp_state = self.DATA
+ self.set_terminator(b'\r\n.\r\n')
+ self.push('354 End data with <CR><LF>.<CR><LF>')
+
+ # Commands that have not been implemented
+ def smtp_EXPN(self, arg):
+ self.push('502 EXPN not implemented')
+
+
+class SMTPServer(asyncore.dispatcher):
+ # SMTPChannel class to use for managing client connections
+ channel_class = SMTPChannel
+
+ def __init__(self, localaddr, remoteaddr,
+ data_size_limit=DATA_SIZE_DEFAULT, map=None,
+ enable_SMTPUTF8=False, decode_data=False):
+ self._localaddr = localaddr
+ self._remoteaddr = remoteaddr
+ self.data_size_limit = data_size_limit
+ self.enable_SMTPUTF8 = enable_SMTPUTF8
+ self._decode_data = decode_data
+ if enable_SMTPUTF8 and decode_data:
+ raise ValueError("decode_data and enable_SMTPUTF8 cannot"
+ " be set to True at the same time")
+ asyncore.dispatcher.__init__(self, map=map)
+ try:
+ gai_results = socket.getaddrinfo(*localaddr,
+ type=socket.SOCK_STREAM)
+ self.create_socket(gai_results[0][0], gai_results[0][1])
+ # try to re-use a server port if possible
+ self.set_reuse_addr()
+ self.bind(localaddr)
+ self.listen(5)
+ except:
+ self.close()
+ raise
+ else:
+ print('%s started at %s\n\tLocal addr: %s\n\tRemote addr: %s' % (
+ self.__class__.__name__, time.ctime(time.time()),
+ localaddr, remoteaddr), file=DEBUGSTREAM)
+
+ def handle_accepted(self, conn, addr):
+ print('Incoming connection from %s' % repr(addr), file=DEBUGSTREAM)
+ channel = self.channel_class(self,
+ conn,
+ addr,
+ self.data_size_limit,
+ self._map,
+ self.enable_SMTPUTF8,
+ self._decode_data)
+
+ # API for "doing something useful with the message"
+ def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
+ """Override this abstract method to handle messages from the client.
+
+ peer is a tuple containing (ipaddr, port) of the client that made the
+ socket connection to our smtp port.
+
+ mailfrom is the raw address the client claims the message is coming
+ from.
+
+ rcpttos is a list of raw addresses the client wishes to deliver the
+ message to.
+
+ data is a string containing the entire full text of the message,
+ headers (if supplied) and all. It has been `de-transparencied'
+ according to RFC 821, Section 4.5.2. In other words, a line
+ containing a `.' followed by other text has had the leading dot
+ removed.
+
+ kwargs is a dictionary containing additional information. It is
+ empty if decode_data=True was given as init parameter, otherwise
+ it will contain the following keys:
+ 'mail_options': list of parameters to the mail command. All
+ elements are uppercase strings. Example:
+ ['BODY=8BITMIME', 'SMTPUTF8'].
+ 'rcpt_options': same, for the rcpt command.
+
+ This function should return None for a normal `250 Ok' response;
+ otherwise, it should return the desired response string in RFC 821
+ format.
+
+ """
+ raise NotImplementedError
+
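+# A minimal subclass sketch (illustrative only); returning None sends the
+# normal '250 OK' reply:
+#
+#     class PrintingServer(SMTPServer):
+#         def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
+#             print('Message from %s to %s (%d bytes)'
+#                   % (mailfrom, COMMASPACE.join(rcpttos), len(data)))
+#             return None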
+
+class DebuggingServer(SMTPServer):
+
+ def _print_message_content(self, peer, data):
+ inheaders = 1
+ lines = data.splitlines()
+ for line in lines:
+ # headers first
+ if inheaders and not line:
+ peerheader = 'X-Peer: ' + peer[0]
+ if not isinstance(data, str):
+ # decode_data=False; make header match other binary output
+ peerheader = repr(peerheader.encode('utf-8'))
+ print(peerheader)
+ inheaders = 0
+ if not isinstance(data, str):
+ # Avoid spurious 'str on bytes instance' warning.
+ line = repr(line)
+ print(line)
+
+ def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
+ print('---------- MESSAGE FOLLOWS ----------')
+ if kwargs:
+ if kwargs.get('mail_options'):
+ print('mail options: %s' % kwargs['mail_options'])
+ if kwargs.get('rcpt_options'):
+ print('rcpt options: %s\n' % kwargs['rcpt_options'])
+ self._print_message_content(peer, data)
+ print('------------ END MESSAGE ------------')
+
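+# A sketch of running the debugging server (blocks until interrupted;
+# addresses are illustrative):
+#
+#     server = DebuggingServer(('localhost', 8025), None)
+#     try:
+#         asyncore.loop()
+#     except KeyboardInterrupt:
+#         pass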
+
+class PureProxy(SMTPServer):
+ def __init__(self, *args, **kwargs):
+ if 'enable_SMTPUTF8' in kwargs and kwargs['enable_SMTPUTF8']:
+ raise ValueError("PureProxy does not support SMTPUTF8.")
+ super(PureProxy, self).__init__(*args, **kwargs)
+
+ def process_message(self, peer, mailfrom, rcpttos, data):
+ lines = data.split('\n')
+ # Look for the last header
+ i = 0
+ for line in lines:
+ if not line:
+ break
+ i += 1
+ lines.insert(i, 'X-Peer: %s' % peer[0])
+ data = NEWLINE.join(lines)
+ refused = self._deliver(mailfrom, rcpttos, data)
+ # TBD: what to do with refused addresses?
+ print('we got some refusals:', refused, file=DEBUGSTREAM)
+
+ def _deliver(self, mailfrom, rcpttos, data):
+ import smtplib
+ refused = {}
+ try:
+ s = smtplib.SMTP()
+ s.connect(self._remoteaddr[0], self._remoteaddr[1])
+ try:
+ refused = s.sendmail(mailfrom, rcpttos, data)
+ finally:
+ s.quit()
+ except smtplib.SMTPRecipientsRefused as e:
+ print('got SMTPRecipientsRefused', file=DEBUGSTREAM)
+ refused = e.recipients
+ except (OSError, smtplib.SMTPException) as e:
+ print('got', e.__class__, file=DEBUGSTREAM)
+ # All recipients were refused. If the exception had an associated
+ # error code, use it. Otherwise, fake it with a non-triggering
+ # exception code.
+ errcode = getattr(e, 'smtp_code', -1)
+ errmsg = getattr(e, 'smtp_error', 'ignore')
+ for r in rcpttos:
+ refused[r] = (errcode, errmsg)
+ return refused
+
+
+class MailmanProxy(PureProxy):
+ def __init__(self, *args, **kwargs):
+ warn('MailmanProxy is deprecated and will be removed '
+ 'in future', DeprecationWarning, 2)
+ if 'enable_SMTPUTF8' in kwargs and kwargs['enable_SMTPUTF8']:
+ raise ValueError("MailmanProxy does not support SMTPUTF8.")
+ super(PureProxy, self).__init__(*args, **kwargs)
+
+ def process_message(self, peer, mailfrom, rcpttos, data):
+ from io import StringIO
+ from Mailman import Utils
+ from Mailman import Message
+ from Mailman import MailList
+ # If the message is to a Mailman mailing list, then we'll invoke the
+ # Mailman script directly, without going through the real smtpd.
+ # Otherwise we'll forward it to the local proxy for disposition.
+ listnames = []
+ for rcpt in rcpttos:
+ local = rcpt.lower().split('@')[0]
+ # We allow the following variations on the theme
+ # listname
+ # listname-admin
+ # listname-owner
+ # listname-request
+ # listname-join
+ # listname-leave
+ parts = local.split('-')
+ if len(parts) > 2:
+ continue
+ listname = parts[0]
+ if len(parts) == 2:
+ command = parts[1]
+ else:
+ command = ''
+ if not Utils.list_exists(listname) or command not in (
+ '', 'admin', 'owner', 'request', 'join', 'leave'):
+ continue
+ listnames.append((rcpt, listname, command))
+ # Remove all list recipients from rcpttos and forward what we're not
+ # going to take care of ourselves. Linear removal should be fine
+ # since we don't expect a large number of recipients.
+ for rcpt, listname, command in listnames:
+ rcpttos.remove(rcpt)
+        # If there are any non-list-destined recipients left, forward them.
+ print('forwarding recips:', ' '.join(rcpttos), file=DEBUGSTREAM)
+ if rcpttos:
+ refused = self._deliver(mailfrom, rcpttos, data)
+ # TBD: what to do with refused addresses?
+ print('we got refusals:', refused, file=DEBUGSTREAM)
+ # Now deliver directly to the list commands
+ mlists = {}
+ s = StringIO(data)
+ msg = Message.Message(s)
+ # These headers are required for the proper execution of Mailman. All
+ # MTAs in existence seem to add these if the original message doesn't
+ # have them.
+ if not msg.get('from'):
+ msg['From'] = mailfrom
+ if not msg.get('date'):
+ msg['Date'] = time.ctime(time.time())
+ for rcpt, listname, command in listnames:
+ print('sending message to', rcpt, file=DEBUGSTREAM)
+ mlist = mlists.get(listname)
+ if not mlist:
+ mlist = MailList.MailList(listname, lock=0)
+ mlists[listname] = mlist
+ # dispatch on the type of command
+ if command == '':
+ # post
+ msg.Enqueue(mlist, tolist=1)
+ elif command == 'admin':
+ msg.Enqueue(mlist, toadmin=1)
+ elif command == 'owner':
+ msg.Enqueue(mlist, toowner=1)
+ elif command == 'request':
+ msg.Enqueue(mlist, torequest=1)
+ elif command in ('join', 'leave'):
+ # TBD: this is a hack!
+ if command == 'join':
+ msg['Subject'] = 'subscribe'
+ else:
+ msg['Subject'] = 'unsubscribe'
+ msg.Enqueue(mlist, torequest=1)
+
+
+class Options:
+ setuid = True
+ classname = 'PureProxy'
+ size_limit = None
+ enable_SMTPUTF8 = False
+
+
+def parseargs():
+ global DEBUGSTREAM
+ try:
+ opts, args = getopt.getopt(
+ sys.argv[1:], 'nVhc:s:du',
+ ['class=', 'nosetuid', 'version', 'help', 'size=', 'debug',
+ 'smtputf8'])
+ except getopt.error as e:
+ usage(1, e)
+
+ options = Options()
+ for opt, arg in opts:
+ if opt in ('-h', '--help'):
+ usage(0)
+ elif opt in ('-V', '--version'):
+ print(__version__)
+ sys.exit(0)
+ elif opt in ('-n', '--nosetuid'):
+ options.setuid = False
+ elif opt in ('-c', '--class'):
+ options.classname = arg
+ elif opt in ('-d', '--debug'):
+ DEBUGSTREAM = sys.stderr
+ elif opt in ('-u', '--smtputf8'):
+ options.enable_SMTPUTF8 = True
+ elif opt in ('-s', '--size'):
+ try:
+ int_size = int(arg)
+ options.size_limit = int_size
+            except ValueError:
+ print('Invalid size: ' + arg, file=sys.stderr)
+ sys.exit(1)
+
+ # parse the rest of the arguments
+ if len(args) < 1:
+ localspec = 'localhost:8025'
+ remotespec = 'localhost:25'
+ elif len(args) < 2:
+ localspec = args[0]
+ remotespec = 'localhost:25'
+ elif len(args) < 3:
+ localspec = args[0]
+ remotespec = args[1]
+ else:
+ usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
+
+ # split into host/port pairs
+ i = localspec.find(':')
+ if i < 0:
+ usage(1, 'Bad local spec: %s' % localspec)
+ options.localhost = localspec[:i]
+ try:
+ options.localport = int(localspec[i+1:])
+ except ValueError:
+ usage(1, 'Bad local port: %s' % localspec)
+ i = remotespec.find(':')
+ if i < 0:
+ usage(1, 'Bad remote spec: %s' % remotespec)
+ options.remotehost = remotespec[:i]
+ try:
+ options.remoteport = int(remotespec[i+1:])
+ except ValueError:
+ usage(1, 'Bad remote port: %s' % remotespec)
+ return options
+
+
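+# Example invocation (illustrative): print every message delivered to
+# localhost:8025 without forwarding it anywhere, skipping the setuid step:
+#
+#     python -m smtpd -n -c DebuggingServer localhost:8025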
+if __name__ == '__main__':
+ options = parseargs()
+ # Become nobody
+ classname = options.classname
+ if "." in classname:
+ lastdot = classname.rfind(".")
+ mod = __import__(classname[:lastdot], globals(), locals(), [""])
+ classname = classname[lastdot+1:]
+ else:
+ import __main__ as mod
+ class_ = getattr(mod, classname)
+ proxy = class_((options.localhost, options.localport),
+ (options.remotehost, options.remoteport),
+ options.size_limit, enable_SMTPUTF8=options.enable_SMTPUTF8)
+ if options.setuid:
+ try:
+ import pwd
+ except ImportError:
+ print('Cannot import module "pwd"; try running with -n option.', file=sys.stderr)
+ sys.exit(1)
+ nobody = pwd.getpwnam('nobody')[2]
+ try:
+ os.setuid(nobody)
+ except PermissionError:
+ print('Cannot setuid "nobody"; try running with -n option.', file=sys.stderr)
+ sys.exit(1)
+ try:
+ asyncore.loop()
+ except KeyboardInterrupt:
+ pass
diff --git a/infer_4_37_2/lib/python3.10/socketserver.py b/infer_4_37_2/lib/python3.10/socketserver.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d9583d56a4d742aa04b075426cdb66b781ef0c5
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/socketserver.py
@@ -0,0 +1,844 @@
+"""Generic socket server classes.
+
+This module tries to capture the various aspects of defining a server:
+
+For socket-based servers:
+
+- address family:
+ - AF_INET{,6}: IP (Internet Protocol) sockets (default)
+ - AF_UNIX: Unix domain sockets
+        - others, e.g. AF_DECNET are conceivable (see <socket.h>)
+- socket type:
+ - SOCK_STREAM (reliable stream, e.g. TCP)
+ - SOCK_DGRAM (datagrams, e.g. UDP)
+
+For request-based servers (including socket-based):
+
+- client address verification before further looking at the request
+ (This is actually a hook for any processing that needs to look
+ at the request before anything else, e.g. logging)
+- how to handle multiple requests:
+ - synchronous (one request is handled at a time)
+ - forking (each request is handled by a new process)
+ - threading (each request is handled by a new thread)
+
+The classes in this module favor the server type that is simplest to
+write: a synchronous TCP/IP server. This is bad class design, but
+saves some typing. (There's also the issue that a deep class hierarchy
+slows down method lookups.)
+
+There are five classes in an inheritance diagram, four of which represent
+synchronous servers of four types:
+
+ +------------+
+ | BaseServer |
+ +------------+
+ |
+ v
+ +-----------+ +------------------+
+ | TCPServer |------->| UnixStreamServer |
+ +-----------+ +------------------+
+ |
+ v
+ +-----------+ +--------------------+
+ | UDPServer |------->| UnixDatagramServer |
+ +-----------+ +--------------------+
+
+Note that UnixDatagramServer derives from UDPServer, not from
+UnixStreamServer -- the only difference between an IP and a Unix
+stream server is the address family, which is simply repeated in both
+unix server classes.
+
+Forking and threading versions of each type of server can be created
+using the ForkingMixIn and ThreadingMixIn mix-in classes. For
+instance, a threading UDP server class is created as follows:
+
+ class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+
+The Mix-in class must come first, since it overrides a method defined
+in UDPServer! Setting the various member variables also changes
+the behavior of the underlying server mechanism.
+
+To implement a service, you must derive a class from
+BaseRequestHandler and redefine its handle() method. You can then run
+various versions of the service by combining one of the server classes
+with your request handler class.
+
+The request handler class must be different for datagram or stream
+services. This can be hidden by using the request handler
+subclasses StreamRequestHandler or DatagramRequestHandler.
+
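+For example (an illustrative sketch, not part of this module), a line-echo
+service can be built from StreamRequestHandler and a threading server:
+
+    import socketserver
+
+    class EchoHandler(socketserver.StreamRequestHandler):
+        def handle(self):
+            for line in self.rfile:
+                self.wfile.write(line)
+
+    with socketserver.ThreadingTCPServer(('localhost', 8000), EchoHandler) as srv:
+        srv.serve_forever()
+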
+Of course, you still have to use your head!
+
+For instance, it makes no sense to use a forking server if the service
+contains state in memory that can be modified by requests (since the
+modifications in the child process would never reach the initial state
+kept in the parent process and passed to each child). In this case,
+you can use a threading server, but you will probably have to use
+locks to prevent two nearly simultaneous requests from applying
+conflicting changes to the server state.
+
+On the other hand, if you are building e.g. an HTTP server, where all
+data is stored externally (e.g. in the file system), a synchronous
+class will essentially render the service "deaf" while one request is
+being handled -- which may be for a very long time if a client is slow
+to read all the data it has requested. Here a threading or forking
+server is appropriate.
+
+In some cases, it may be appropriate to process part of a request
+synchronously, but to finish processing in a forked child depending on
+the request data. This can be implemented by using a synchronous
+server and doing an explicit fork in the request handler class
+handle() method.
+
+Another approach to handling multiple simultaneous requests in an
+environment that supports neither threads nor fork (or where these are
+too expensive or inappropriate for the service) is to maintain an
+explicit table of partially finished requests and to use a selector to
+decide which request to work on next (or whether to handle a new
+incoming request). This is particularly important for stream services
+where each client can potentially be connected for a long time (if
+threads or subprocesses cannot be used).
+
+Future work:
+- Standard classes for Sun RPC (which uses either UDP or TCP)
+- Standard mix-in classes to implement various authentication
+ and encryption schemes
+
+XXX Open problems:
+- What to do with out-of-band data?
+
+BaseServer:
+- split generic "request" functionality out into BaseServer class.
+ Copyright (C) 2000 Luke Kenneth Casson Leighton
+
+ example: read entries from a SQL database (requires overriding
+ get_request() to return a table entry from the database).
+  Each entry is then processed by a RequestHandlerClass.
+
+"""
+
+# Author of the BaseServer patch: Luke Kenneth Casson Leighton
+
+__version__ = "0.4"
+
+
+import socket
+import selectors
+import os
+import sys
+import threading
+from io import BufferedIOBase
+from time import monotonic as time
+
+__all__ = ["BaseServer", "TCPServer", "UDPServer",
+ "ThreadingUDPServer", "ThreadingTCPServer",
+ "BaseRequestHandler", "StreamRequestHandler",
+ "DatagramRequestHandler", "ThreadingMixIn"]
+if hasattr(os, "fork"):
+ __all__.extend(["ForkingUDPServer","ForkingTCPServer", "ForkingMixIn"])
+if hasattr(socket, "AF_UNIX"):
+ __all__.extend(["UnixStreamServer","UnixDatagramServer",
+ "ThreadingUnixStreamServer",
+ "ThreadingUnixDatagramServer"])
+
+# poll/select have the advantage of not requiring any extra file descriptor,
+# unlike epoll/kqueue (poll/select also need just one syscall in total).
+if hasattr(selectors, 'PollSelector'):
+ _ServerSelector = selectors.PollSelector
+else:
+ _ServerSelector = selectors.SelectSelector
+
+
+class BaseServer:
+
+ """Base class for server classes.
+
+ Methods for the caller:
+
+ - __init__(server_address, RequestHandlerClass)
+ - serve_forever(poll_interval=0.5)
+ - shutdown()
+ - handle_request() # if you do not use serve_forever()
+ - fileno() -> int # for selector
+
+ Methods that may be overridden:
+
+ - server_bind()
+ - server_activate()
+ - get_request() -> request, client_address
+ - handle_timeout()
+ - verify_request(request, client_address)
+ - server_close()
+ - process_request(request, client_address)
+ - shutdown_request(request)
+ - close_request(request)
+ - service_actions()
+ - handle_error()
+
+ Methods for derived classes:
+
+ - finish_request(request, client_address)
+
+ Class variables that may be overridden by derived classes or
+ instances:
+
+ - timeout
+ - address_family
+ - socket_type
+ - allow_reuse_address
+
+ Instance variables:
+
+ - RequestHandlerClass
+ - socket
+
+ """
+
+ timeout = None
+
+ def __init__(self, server_address, RequestHandlerClass):
+ """Constructor. May be extended, do not override."""
+ self.server_address = server_address
+ self.RequestHandlerClass = RequestHandlerClass
+ self.__is_shut_down = threading.Event()
+ self.__shutdown_request = False
+
+ def server_activate(self):
+ """Called by constructor to activate the server.
+
+ May be overridden.
+
+ """
+ pass
+
+ def serve_forever(self, poll_interval=0.5):
+ """Handle one request at a time until shutdown.
+
+ Polls for shutdown every poll_interval seconds. Ignores
+ self.timeout. If you need to do periodic tasks, do them in
+ another thread.
+ """
+ self.__is_shut_down.clear()
+ try:
+ # XXX: Consider using another file descriptor or connecting to the
+ # socket to wake this up instead of polling. Polling reduces our
+ # responsiveness to a shutdown request and wastes cpu at all other
+ # times.
+ with _ServerSelector() as selector:
+ selector.register(self, selectors.EVENT_READ)
+
+ while not self.__shutdown_request:
+ ready = selector.select(poll_interval)
+ # bpo-35017: shutdown() called during select(), exit immediately.
+ if self.__shutdown_request:
+ break
+ if ready:
+ self._handle_request_noblock()
+
+ self.service_actions()
+ finally:
+ self.__shutdown_request = False
+ self.__is_shut_down.set()
+
+ def shutdown(self):
+ """Stops the serve_forever loop.
+
+ Blocks until the loop has finished. This must be called while
+ serve_forever() is running in another thread, or it will
+ deadlock.
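+
+        A typical pattern (illustrative; 'server' is any BaseServer instance):
+
+            t = threading.Thread(target=server.serve_forever)
+            t.start()
+            # later, from a different thread:
+            server.shutdown()
+            t.join()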
+ """
+ self.__shutdown_request = True
+ self.__is_shut_down.wait()
+
+ def service_actions(self):
+ """Called by the serve_forever() loop.
+
+ May be overridden by a subclass / Mixin to implement any code that
+ needs to be run during the loop.
+ """
+ pass
+
+ # The distinction between handling, getting, processing and finishing a
+ # request is fairly arbitrary. Remember:
+ #
+ # - handle_request() is the top-level call. It calls selector.select(),
+ # get_request(), verify_request() and process_request()
+ # - get_request() is different for stream or datagram sockets
+ # - process_request() is the place that may fork a new process or create a
+ # new thread to finish the request
+ # - finish_request() instantiates the request handler class; this
+ # constructor will handle the request all by itself
+
+ def handle_request(self):
+ """Handle one request, possibly blocking.
+
+ Respects self.timeout.
+ """
+ # Support people who used socket.settimeout() to escape
+ # handle_request before self.timeout was available.
+ timeout = self.socket.gettimeout()
+ if timeout is None:
+ timeout = self.timeout
+ elif self.timeout is not None:
+ timeout = min(timeout, self.timeout)
+ if timeout is not None:
+ deadline = time() + timeout
+
+ # Wait until a request arrives or the timeout expires - the loop is
+ # necessary to accommodate early wakeups due to EINTR.
+ with _ServerSelector() as selector:
+ selector.register(self, selectors.EVENT_READ)
+
+ while True:
+ ready = selector.select(timeout)
+ if ready:
+ return self._handle_request_noblock()
+ else:
+ if timeout is not None:
+ timeout = deadline - time()
+ if timeout < 0:
+ return self.handle_timeout()
+
+ def _handle_request_noblock(self):
+ """Handle one request, without blocking.
+
+ I assume that selector.select() has returned that the socket is
+ readable before this function was called, so there should be no risk of
+ blocking in get_request().
+ """
+ try:
+ request, client_address = self.get_request()
+ except OSError:
+ return
+ if self.verify_request(request, client_address):
+ try:
+ self.process_request(request, client_address)
+ except Exception:
+ self.handle_error(request, client_address)
+ self.shutdown_request(request)
+ except:
+ self.shutdown_request(request)
+ raise
+ else:
+ self.shutdown_request(request)
+
+ def handle_timeout(self):
+ """Called if no new request arrives within self.timeout.
+
+ Overridden by ForkingMixIn.
+ """
+ pass
+
+ def verify_request(self, request, client_address):
+ """Verify the request. May be overridden.
+
+ Return True if we should proceed with this request.
+
+ """
+ return True
+
+ def process_request(self, request, client_address):
+ """Call finish_request.
+
+ Overridden by ForkingMixIn and ThreadingMixIn.
+
+ """
+ self.finish_request(request, client_address)
+ self.shutdown_request(request)
+
+ def server_close(self):
+ """Called to clean-up the server.
+
+ May be overridden.
+
+ """
+ pass
+
+ def finish_request(self, request, client_address):
+ """Finish one request by instantiating RequestHandlerClass."""
+ self.RequestHandlerClass(request, client_address, self)
+
+ def shutdown_request(self, request):
+ """Called to shutdown and close an individual request."""
+ self.close_request(request)
+
+ def close_request(self, request):
+ """Called to clean up an individual request."""
+ pass
+
+ def handle_error(self, request, client_address):
+ """Handle an error gracefully. May be overridden.
+
+ The default is to print a traceback and continue.
+
+ """
+ print('-'*40, file=sys.stderr)
+ print('Exception occurred during processing of request from',
+ client_address, file=sys.stderr)
+ import traceback
+ traceback.print_exc()
+ print('-'*40, file=sys.stderr)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.server_close()
+
+
+class TCPServer(BaseServer):
+
+ """Base class for various socket-based server classes.
+
+ Defaults to synchronous IP stream (i.e., TCP).
+
+ Methods for the caller:
+
+ - __init__(server_address, RequestHandlerClass, bind_and_activate=True)
+ - serve_forever(poll_interval=0.5)
+ - shutdown()
+ - handle_request() # if you don't use serve_forever()
+ - fileno() -> int # for selector
+
+ Methods that may be overridden:
+
+ - server_bind()
+ - server_activate()
+ - get_request() -> request, client_address
+ - handle_timeout()
+ - verify_request(request, client_address)
+ - process_request(request, client_address)
+ - shutdown_request(request)
+ - close_request(request)
+ - handle_error()
+
+ Methods for derived classes:
+
+ - finish_request(request, client_address)
+
+ Class variables that may be overridden by derived classes or
+ instances:
+
+ - timeout
+ - address_family
+ - socket_type
+ - request_queue_size (only for stream sockets)
+ - allow_reuse_address
+
+ Instance variables:
+
+ - server_address
+ - RequestHandlerClass
+ - socket
+
+ """
+
+ address_family = socket.AF_INET
+
+ socket_type = socket.SOCK_STREAM
+
+ request_queue_size = 5
+
+ allow_reuse_address = False
+
+ def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
+ """Constructor. May be extended, do not override."""
+ BaseServer.__init__(self, server_address, RequestHandlerClass)
+ self.socket = socket.socket(self.address_family,
+ self.socket_type)
+ if bind_and_activate:
+ try:
+ self.server_bind()
+ self.server_activate()
+ except:
+ self.server_close()
+ raise
+
+ def server_bind(self):
+ """Called by constructor to bind the socket.
+
+ May be overridden.
+
+ """
+ if self.allow_reuse_address:
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.socket.bind(self.server_address)
+ self.server_address = self.socket.getsockname()
+
+ def server_activate(self):
+ """Called by constructor to activate the server.
+
+ May be overridden.
+
+ """
+ self.socket.listen(self.request_queue_size)
+
+ def server_close(self):
+ """Called to clean-up the server.
+
+ May be overridden.
+
+ """
+ self.socket.close()
+
+ def fileno(self):
+ """Return socket file number.
+
+ Interface required by selector.
+
+ """
+ return self.socket.fileno()
+
+ def get_request(self):
+ """Get the request and client address from the socket.
+
+ May be overridden.
+
+ """
+ return self.socket.accept()
+
+ def shutdown_request(self, request):
+ """Called to shutdown and close an individual request."""
+ try:
+            # Explicitly shut down the connection: socket.close() merely
+            # releases the socket and waits for GC to perform the actual close.
+ request.shutdown(socket.SHUT_WR)
+ except OSError:
+            pass  # some platforms may raise ENOTCONN here
+ self.close_request(request)
+
+ def close_request(self, request):
+ """Called to clean up an individual request."""
+ request.close()
+
+
+class UDPServer(TCPServer):
+
+ """UDP server class."""
+
+ allow_reuse_address = False
+
+ socket_type = socket.SOCK_DGRAM
+
+ max_packet_size = 8192
+
+ def get_request(self):
+ data, client_addr = self.socket.recvfrom(self.max_packet_size)
+ return (data, self.socket), client_addr
+
+ def server_activate(self):
+ # No need to call listen() for UDP.
+ pass
+
+ def shutdown_request(self, request):
+ # No need to shutdown anything.
+ self.close_request(request)
+
+ def close_request(self, request):
+ # No need to close anything.
+ pass
+
+if hasattr(os, "fork"):
+ class ForkingMixIn:
+ """Mix-in class to handle each request in a new process."""
+
+ timeout = 300
+ active_children = None
+ max_children = 40
+ # If true, server_close() waits until all child processes complete.
+ block_on_close = True
+
+ def collect_children(self, *, blocking=False):
+ """Internal routine to wait for children that have exited."""
+ if self.active_children is None:
+ return
+
+ # If we're above the max number of children, wait and reap them until
+ # we go back below threshold. Note that we use waitpid(-1) below to be
+            # able to collect children in size(<defunct children>) syscalls
+            # instead of size(<children>): the downside is that this might reap children
+ # which we didn't spawn, which is why we only resort to this when we're
+ # above max_children.
+ while len(self.active_children) >= self.max_children:
+ try:
+ pid, _ = os.waitpid(-1, 0)
+ self.active_children.discard(pid)
+ except ChildProcessError:
+ # we don't have any children, we're done
+ self.active_children.clear()
+ except OSError:
+ break
+
+ # Now reap all defunct children.
+ for pid in self.active_children.copy():
+ try:
+ flags = 0 if blocking else os.WNOHANG
+ pid, _ = os.waitpid(pid, flags)
+ # if the child hasn't exited yet, pid will be 0 and ignored by
+ # discard() below
+ self.active_children.discard(pid)
+ except ChildProcessError:
+ # someone else reaped it
+ self.active_children.discard(pid)
+ except OSError:
+ pass
+
+ def handle_timeout(self):
+ """Wait for zombies after self.timeout seconds of inactivity.
+
+ May be extended, do not override.
+ """
+ self.collect_children()
+
+ def service_actions(self):
+ """Collect the zombie child processes regularly in the ForkingMixIn.
+
+ service_actions is called in the BaseServer's serve_forever loop.
+ """
+ self.collect_children()
+
+ def process_request(self, request, client_address):
+ """Fork a new subprocess to process the request."""
+ pid = os.fork()
+ if pid:
+ # Parent process
+ if self.active_children is None:
+ self.active_children = set()
+ self.active_children.add(pid)
+ self.close_request(request)
+ return
+ else:
+ # Child process.
+ # This must never return, hence os._exit()!
+ status = 1
+ try:
+ self.finish_request(request, client_address)
+ status = 0
+ except Exception:
+ self.handle_error(request, client_address)
+ finally:
+ try:
+ self.shutdown_request(request)
+ finally:
+ os._exit(status)
+
+ def server_close(self):
+ super().server_close()
+ self.collect_children(blocking=self.block_on_close)
+
+
+class _Threads(list):
+ """
+ Joinable list of all non-daemon threads.
+ """
+ def append(self, thread):
+ self.reap()
+ if thread.daemon:
+ return
+ super().append(thread)
+
+ def pop_all(self):
+ self[:], result = [], self[:]
+ return result
+
+ def join(self):
+ for thread in self.pop_all():
+ thread.join()
+
+ def reap(self):
+ self[:] = (thread for thread in self if thread.is_alive())
+
+
+class _NoThreads:
+ """
+ Degenerate version of _Threads.
+ """
+ def append(self, thread):
+ pass
+
+ def join(self):
+ pass
+
+
+class ThreadingMixIn:
+ """Mix-in class to handle each request in a new thread."""
+
+ # Decides how threads will act upon termination of the
+ # main process
+ daemon_threads = False
+ # If true, server_close() waits until all non-daemonic threads terminate.
+ block_on_close = True
+ # Threads object
+    # used by server_close() to wait for all non-daemonic threads to complete.
+ _threads = _NoThreads()
+
+ def process_request_thread(self, request, client_address):
+ """Same as in BaseServer but as a thread.
+
+ In addition, exception handling is done here.
+
+ """
+ try:
+ self.finish_request(request, client_address)
+ except Exception:
+ self.handle_error(request, client_address)
+ finally:
+ self.shutdown_request(request)
+
+ def process_request(self, request, client_address):
+ """Start a new thread to process the request."""
+ if self.block_on_close:
+ vars(self).setdefault('_threads', _Threads())
+ t = threading.Thread(target = self.process_request_thread,
+ args = (request, client_address))
+ t.daemon = self.daemon_threads
+ self._threads.append(t)
+ t.start()
+
+ def server_close(self):
+ super().server_close()
+ self._threads.join()
+
+
+if hasattr(os, "fork"):
+ class ForkingUDPServer(ForkingMixIn, UDPServer): pass
+ class ForkingTCPServer(ForkingMixIn, TCPServer): pass
+
+class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
+
+if hasattr(socket, 'AF_UNIX'):
+
+ class UnixStreamServer(TCPServer):
+ address_family = socket.AF_UNIX
+
+ class UnixDatagramServer(UDPServer):
+ address_family = socket.AF_UNIX
+
+ class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
+
+ class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
+
+class BaseRequestHandler:
+
+ """Base class for request handler classes.
+
+ This class is instantiated for each request to be handled. The
+ constructor sets the instance variables request, client_address
+ and server, and then calls the handle() method. To implement a
+ specific service, all you need to do is to derive a class which
+ defines a handle() method.
+
+ The handle() method can find the request as self.request, the
+ client address as self.client_address, and the server (in case it
+ needs access to per-server information) as self.server. Since a
+ separate instance is created for each request, the handle() method
+ can define other arbitrary instance variables.
+
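+    A minimal sketch (illustrative; with a stream (TCP) server, self.request
+    is the connected socket):
+
+        class UpperCaseHandler(BaseRequestHandler):
+            def handle(self):
+                data = self.request.recv(1024)
+                self.request.sendall(data.upper())
+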
+ """
+
+ def __init__(self, request, client_address, server):
+ self.request = request
+ self.client_address = client_address
+ self.server = server
+ self.setup()
+ try:
+ self.handle()
+ finally:
+ self.finish()
+
+ def setup(self):
+ pass
+
+ def handle(self):
+ pass
+
+ def finish(self):
+ pass
+
+
+# The following two classes make it possible to use the same service
+# class for stream or datagram servers.
+# Each class sets up these instance variables:
+# - rfile: a file object from which the request is read
+# - wfile: a file object to which the reply is written
+# When the handle() method returns, wfile is flushed properly
+
+
+class StreamRequestHandler(BaseRequestHandler):
+
+ """Define self.rfile and self.wfile for stream sockets."""
+
+ # Default buffer sizes for rfile, wfile.
+ # We default rfile to buffered because otherwise it could be
+ # really slow for large data (a getc() call per byte); we make
+ # wfile unbuffered because (a) often after a write() we want to
+ # read and we need to flush the line; (b) big writes to unbuffered
+ # files are typically optimized by stdio even when big reads
+ # aren't.
+ rbufsize = -1
+ wbufsize = 0
+
+ # A timeout to apply to the request socket, if not None.
+ timeout = None
+
+ # Disable nagle algorithm for this socket, if True.
+ # Use only when wbufsize != 0, to avoid small packets.
+ disable_nagle_algorithm = False
+
+ def setup(self):
+ self.connection = self.request
+ if self.timeout is not None:
+ self.connection.settimeout(self.timeout)
+ if self.disable_nagle_algorithm:
+ self.connection.setsockopt(socket.IPPROTO_TCP,
+ socket.TCP_NODELAY, True)
+ self.rfile = self.connection.makefile('rb', self.rbufsize)
+ if self.wbufsize == 0:
+ self.wfile = _SocketWriter(self.connection)
+ else:
+ self.wfile = self.connection.makefile('wb', self.wbufsize)
+
+ def finish(self):
+ if not self.wfile.closed:
+ try:
+ self.wfile.flush()
+ except socket.error:
+ # A final socket error may have occurred here, such as
+ # the local error ECONNABORTED.
+ pass
+ self.wfile.close()
+ self.rfile.close()
+
+class _SocketWriter(BufferedIOBase):
+ """Simple writable BufferedIOBase implementation for a socket
+
+ Does not hold data in a buffer, avoiding any need to call flush()."""
+
+ def __init__(self, sock):
+ self._sock = sock
+
+ def writable(self):
+ return True
+
+ def write(self, b):
+ self._sock.sendall(b)
+ with memoryview(b) as view:
+ return view.nbytes
+
+ def fileno(self):
+ return self._sock.fileno()
+
+class DatagramRequestHandler(BaseRequestHandler):
+
+ """Define self.rfile and self.wfile for datagram sockets."""
+
+ def setup(self):
+ from io import BytesIO
+ self.packet, self.socket = self.request
+ self.rfile = BytesIO(self.packet)
+ self.wfile = BytesIO()
+
+ def finish(self):
+ self.socket.sendto(self.wfile.getvalue(), self.client_address)
diff --git a/infer_4_37_2/lib/python3.10/sre_constants.py b/infer_4_37_2/lib/python3.10/sre_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..db3ca51e8306addd4549e0767f396e0b50e35e8f
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/sre_constants.py
@@ -0,0 +1,261 @@
+#
+# Secret Labs' Regular Expression Engine
+#
+# various symbols used by the regular expression engine.
+# run this script to update the _sre include files!
+#
+# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
+#
+# See the sre.py file for information on usage and redistribution.
+#
+
+"""Internal support module for sre"""
+
+# update when constants are added or removed
+
+MAGIC = 20171005
+
+from _sre import MAXREPEAT, MAXGROUPS
+
+# SRE standard exception (access as sre.error)
+# should this really be here?
+
+class error(Exception):
+ """Exception raised for invalid regular expressions.
+
+ Attributes:
+
+ msg: The unformatted error message
+ pattern: The regular expression pattern
+ pos: The index in the pattern where compilation failed (may be None)
+ lineno: The line corresponding to pos (may be None)
+ colno: The column corresponding to pos (may be None)
+ """
+
+ __module__ = 're'
+
+ def __init__(self, msg, pattern=None, pos=None):
+ self.msg = msg
+ self.pattern = pattern
+ self.pos = pos
+ if pattern is not None and pos is not None:
+ msg = '%s at position %d' % (msg, pos)
+ if isinstance(pattern, str):
+ newline = '\n'
+ else:
+ newline = b'\n'
+ self.lineno = pattern.count(newline, 0, pos) + 1
+ self.colno = pos - pattern.rfind(newline, 0, pos)
+ if newline in pattern:
+ msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno)
+ else:
+ self.lineno = self.colno = None
+ super().__init__(msg)
+
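+# Illustrative: error('bad group', pattern='ab\ncd(', pos=5) gets lineno=2 and
+# colno=3, and str() of it is "bad group at position 5 (line 2, column 3)".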
+
+class _NamedIntConstant(int):
+ def __new__(cls, value, name):
+ self = super(_NamedIntConstant, cls).__new__(cls, value)
+ self.name = name
+ return self
+
+ def __repr__(self):
+ return self.name
+
+ __reduce__ = None
+
+MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT')
+
+def _makecodes(names):
+ names = names.strip().split()
+ items = [_NamedIntConstant(i, name) for i, name in enumerate(names)]
+ globals().update({item.name: item for item in items})
+ return items
+
+# operators
+# failure=0 success=1 (just because it looks better that way :-)
+OPCODES = _makecodes("""
+ FAILURE SUCCESS
+
+ ANY ANY_ALL
+ ASSERT ASSERT_NOT
+ AT
+ BRANCH
+ CALL
+ CATEGORY
+ CHARSET BIGCHARSET
+ GROUPREF GROUPREF_EXISTS
+ IN
+ INFO
+ JUMP
+ LITERAL
+ MARK
+ MAX_UNTIL
+ MIN_UNTIL
+ NOT_LITERAL
+ NEGATE
+ RANGE
+ REPEAT
+ REPEAT_ONE
+ SUBPATTERN
+ MIN_REPEAT_ONE
+
+ GROUPREF_IGNORE
+ IN_IGNORE
+ LITERAL_IGNORE
+ NOT_LITERAL_IGNORE
+
+ GROUPREF_LOC_IGNORE
+ IN_LOC_IGNORE
+ LITERAL_LOC_IGNORE
+ NOT_LITERAL_LOC_IGNORE
+
+ GROUPREF_UNI_IGNORE
+ IN_UNI_IGNORE
+ LITERAL_UNI_IGNORE
+ NOT_LITERAL_UNI_IGNORE
+ RANGE_UNI_IGNORE
+
+ MIN_REPEAT MAX_REPEAT
+""")
+del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT
+
+# positions
+ATCODES = _makecodes("""
+ AT_BEGINNING AT_BEGINNING_LINE AT_BEGINNING_STRING
+ AT_BOUNDARY AT_NON_BOUNDARY
+ AT_END AT_END_LINE AT_END_STRING
+
+ AT_LOC_BOUNDARY AT_LOC_NON_BOUNDARY
+
+ AT_UNI_BOUNDARY AT_UNI_NON_BOUNDARY
+""")
+
+# categories
+CHCODES = _makecodes("""
+ CATEGORY_DIGIT CATEGORY_NOT_DIGIT
+ CATEGORY_SPACE CATEGORY_NOT_SPACE
+ CATEGORY_WORD CATEGORY_NOT_WORD
+ CATEGORY_LINEBREAK CATEGORY_NOT_LINEBREAK
+
+ CATEGORY_LOC_WORD CATEGORY_LOC_NOT_WORD
+
+ CATEGORY_UNI_DIGIT CATEGORY_UNI_NOT_DIGIT
+ CATEGORY_UNI_SPACE CATEGORY_UNI_NOT_SPACE
+ CATEGORY_UNI_WORD CATEGORY_UNI_NOT_WORD
+ CATEGORY_UNI_LINEBREAK CATEGORY_UNI_NOT_LINEBREAK
+""")
+
+
+# replacement operations for "ignore case" mode
+OP_IGNORE = {
+ LITERAL: LITERAL_IGNORE,
+ NOT_LITERAL: NOT_LITERAL_IGNORE,
+}
+
+OP_LOCALE_IGNORE = {
+ LITERAL: LITERAL_LOC_IGNORE,
+ NOT_LITERAL: NOT_LITERAL_LOC_IGNORE,
+}
+
+OP_UNICODE_IGNORE = {
+ LITERAL: LITERAL_UNI_IGNORE,
+ NOT_LITERAL: NOT_LITERAL_UNI_IGNORE,
+}
+
+AT_MULTILINE = {
+ AT_BEGINNING: AT_BEGINNING_LINE,
+ AT_END: AT_END_LINE
+}
+
+AT_LOCALE = {
+ AT_BOUNDARY: AT_LOC_BOUNDARY,
+ AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
+}
+
+AT_UNICODE = {
+ AT_BOUNDARY: AT_UNI_BOUNDARY,
+ AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
+}
+
+CH_LOCALE = {
+ CATEGORY_DIGIT: CATEGORY_DIGIT,
+ CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
+ CATEGORY_SPACE: CATEGORY_SPACE,
+ CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
+ CATEGORY_WORD: CATEGORY_LOC_WORD,
+ CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
+ CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
+ CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
+}
+
+CH_UNICODE = {
+ CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
+ CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
+ CATEGORY_SPACE: CATEGORY_UNI_SPACE,
+ CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
+ CATEGORY_WORD: CATEGORY_UNI_WORD,
+ CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
+ CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
+ CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
+}
+
+# flags
+SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
+SRE_FLAG_IGNORECASE = 2 # case insensitive
+SRE_FLAG_LOCALE = 4 # honour system locale
+SRE_FLAG_MULTILINE = 8 # treat target as multiline string
+SRE_FLAG_DOTALL = 16 # treat target as a single string
+SRE_FLAG_UNICODE = 32 # use unicode "locale"
+SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
+SRE_FLAG_DEBUG = 128 # debugging
+SRE_FLAG_ASCII = 256 # use ascii "locale"
+
+# flags for INFO primitive
+SRE_INFO_PREFIX = 1 # has prefix
+SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
+SRE_INFO_CHARSET = 4 # pattern starts with character from given set
+
+if __name__ == "__main__":
+ def dump(f, d, prefix):
+ items = sorted(d)
+ for item in items:
+ f.write("#define %s_%s %d\n" % (prefix, item, item))
+ with open("sre_constants.h", "w") as f:
+ f.write("""\
+/*
+ * Secret Labs' Regular Expression Engine
+ *
+ * regular expression matching engine
+ *
+ * NOTE: This file is generated by sre_constants.py. If you need
+ * to change anything in here, edit sre_constants.py and run it.
+ *
+ * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
+ *
+ * See the _sre.c file for information on usage and redistribution.
+ */
+
+""")
+
+ f.write("#define SRE_MAGIC %d\n" % MAGIC)
+
+ dump(f, OPCODES, "SRE_OP")
+ dump(f, ATCODES, "SRE")
+ dump(f, CHCODES, "SRE")
+
+ f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
+ f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
+ f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
+ f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
+ f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
+ f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
+ f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
+ f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
+ f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
+
+ f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
+ f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
+ f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
+
+ print("done")
diff --git a/infer_4_37_2/lib/python3.10/stat.py b/infer_4_37_2/lib/python3.10/stat.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc024db3f4fbeeb903272363ee2bad19de0e635b
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/stat.py
@@ -0,0 +1,195 @@
+"""Constants/functions for interpreting results of os.stat() and os.lstat().
+
+Suggested usage: from stat import *
+"""
+
+# Indices for stat struct members in the tuple returned by os.stat()
+
+ST_MODE = 0
+ST_INO = 1
+ST_DEV = 2
+ST_NLINK = 3
+ST_UID = 4
+ST_GID = 5
+ST_SIZE = 6
+ST_ATIME = 7
+ST_MTIME = 8
+ST_CTIME = 9
+
+# Extract bits from the mode
+
+def S_IMODE(mode):
+ """Return the portion of the file's mode that can be set by
+ os.chmod().
+ """
+ return mode & 0o7777
+
+def S_IFMT(mode):
+ """Return the portion of the file's mode that describes the
+ file type.
+ """
+ return mode & 0o170000
+
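+# Illustrative: a regular file with permission bits 0o644 has st_mode 0o100644,
+# so S_IMODE(0o100644) == 0o644 and S_IFMT(0o100644) == S_IFREG (see below).
+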
+# Constants used as S_IFMT() for various file types
+# (not all are implemented on all systems)
+
+S_IFDIR = 0o040000 # directory
+S_IFCHR = 0o020000 # character device
+S_IFBLK = 0o060000 # block device
+S_IFREG = 0o100000 # regular file
+S_IFIFO = 0o010000 # fifo (named pipe)
+S_IFLNK = 0o120000 # symbolic link
+S_IFSOCK = 0o140000 # socket file
+# Fallbacks for uncommon platform-specific constants
+S_IFDOOR = 0
+S_IFPORT = 0
+S_IFWHT = 0
+
+# Functions to test for each file type
+
+def S_ISDIR(mode):
+ """Return True if mode is from a directory."""
+ return S_IFMT(mode) == S_IFDIR
+
+def S_ISCHR(mode):
+ """Return True if mode is from a character special device file."""
+ return S_IFMT(mode) == S_IFCHR
+
+def S_ISBLK(mode):
+ """Return True if mode is from a block special device file."""
+ return S_IFMT(mode) == S_IFBLK
+
+def S_ISREG(mode):
+ """Return True if mode is from a regular file."""
+ return S_IFMT(mode) == S_IFREG
+
+def S_ISFIFO(mode):
+ """Return True if mode is from a FIFO (named pipe)."""
+ return S_IFMT(mode) == S_IFIFO
+
+def S_ISLNK(mode):
+ """Return True if mode is from a symbolic link."""
+ return S_IFMT(mode) == S_IFLNK
+
+def S_ISSOCK(mode):
+ """Return True if mode is from a socket."""
+ return S_IFMT(mode) == S_IFSOCK
+
+def S_ISDOOR(mode):
+ """Return True if mode is from a door."""
+ return False
+
+def S_ISPORT(mode):
+ """Return True if mode is from an event port."""
+ return False
+
+def S_ISWHT(mode):
+ """Return True if mode is from a whiteout."""
+ return False
+
+# Names for permission bits
+
+S_ISUID = 0o4000 # set UID bit
+S_ISGID = 0o2000 # set GID bit
+S_ENFMT = S_ISGID # file locking enforcement
+S_ISVTX = 0o1000 # sticky bit
+S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR
+S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
+S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR
+S_IRWXU = 0o0700 # mask for owner permissions
+S_IRUSR = 0o0400 # read by owner
+S_IWUSR = 0o0200 # write by owner
+S_IXUSR = 0o0100 # execute by owner
+S_IRWXG = 0o0070 # mask for group permissions
+S_IRGRP = 0o0040 # read by group
+S_IWGRP = 0o0020 # write by group
+S_IXGRP = 0o0010 # execute by group
+S_IRWXO = 0o0007 # mask for others (not in group) permissions
+S_IROTH = 0o0004 # read by others
+S_IWOTH = 0o0002 # write by others
+S_IXOTH = 0o0001 # execute by others
+
+# Names for file flags
+
+UF_NODUMP = 0x00000001 # do not dump file
+UF_IMMUTABLE = 0x00000002 # file may not be changed
+UF_APPEND = 0x00000004 # file may only be appended to
+UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack
+UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted
+UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
+UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
+SF_ARCHIVED = 0x00010000 # file may be archived
+SF_IMMUTABLE = 0x00020000 # file may not be changed
+SF_APPEND = 0x00040000 # file may only be appended to
+SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted
+SF_SNAPSHOT = 0x00200000 # file is a snapshot file
+
+
+_filemode_table = (
+ ((S_IFLNK, "l"),
+ (S_IFSOCK, "s"), # Must appear before IFREG and IFDIR as IFSOCK == IFREG | IFDIR
+ (S_IFREG, "-"),
+ (S_IFBLK, "b"),
+ (S_IFDIR, "d"),
+ (S_IFCHR, "c"),
+ (S_IFIFO, "p")),
+
+ ((S_IRUSR, "r"),),
+ ((S_IWUSR, "w"),),
+ ((S_IXUSR|S_ISUID, "s"),
+ (S_ISUID, "S"),
+ (S_IXUSR, "x")),
+
+ ((S_IRGRP, "r"),),
+ ((S_IWGRP, "w"),),
+ ((S_IXGRP|S_ISGID, "s"),
+ (S_ISGID, "S"),
+ (S_IXGRP, "x")),
+
+ ((S_IROTH, "r"),),
+ ((S_IWOTH, "w"),),
+ ((S_IXOTH|S_ISVTX, "t"),
+ (S_ISVTX, "T"),
+ (S_IXOTH, "x"))
+)
+
+def filemode(mode):
+ """Convert a file's mode to a string of the form '-rwxrwxrwx'."""
+ perm = []
+ for table in _filemode_table:
+ for bit, char in table:
+ if mode & bit == bit:
+ perm.append(char)
+ break
+ else:
+ perm.append("-")
+ return "".join(perm)
+
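+# Illustrative: filemode(0o100644) == '-rw-r--r--' and
+# filemode(0o040755) == 'drwxr-xr-x'.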
+
+# Windows FILE_ATTRIBUTE constants for interpreting os.stat()'s
+# "st_file_attributes" member
+
+FILE_ATTRIBUTE_ARCHIVE = 32
+FILE_ATTRIBUTE_COMPRESSED = 2048
+FILE_ATTRIBUTE_DEVICE = 64
+FILE_ATTRIBUTE_DIRECTORY = 16
+FILE_ATTRIBUTE_ENCRYPTED = 16384
+FILE_ATTRIBUTE_HIDDEN = 2
+FILE_ATTRIBUTE_INTEGRITY_STREAM = 32768
+FILE_ATTRIBUTE_NORMAL = 128
+FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192
+FILE_ATTRIBUTE_NO_SCRUB_DATA = 131072
+FILE_ATTRIBUTE_OFFLINE = 4096
+FILE_ATTRIBUTE_READONLY = 1
+FILE_ATTRIBUTE_REPARSE_POINT = 1024
+FILE_ATTRIBUTE_SPARSE_FILE = 512
+FILE_ATTRIBUTE_SYSTEM = 4
+FILE_ATTRIBUTE_TEMPORARY = 256
+FILE_ATTRIBUTE_VIRTUAL = 65536
+
+
+# If available, use C implementation
+try:
+ from _stat import *
+except ImportError:
+ pass
diff --git a/infer_4_37_2/lib/python3.10/stringprep.py b/infer_4_37_2/lib/python3.10/stringprep.py
new file mode 100644
index 0000000000000000000000000000000000000000..44ecdb266ce8b95a0e5c9142ded9e43ac2e0235a
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/stringprep.py
@@ -0,0 +1,272 @@
+# This file is generated by mkstringprep.py. DO NOT EDIT.
+"""Library that exposes various tables found in the StringPrep RFC 3454.
+
+There are two kinds of tables: sets, for which a member test is provided,
+and mappings, for which a mapping function is provided.
+"""
+
+from unicodedata import ucd_3_2_0 as unicodedata
+
+assert unicodedata.unidata_version == '3.2.0'
+
+def in_table_a1(code):
+ if unicodedata.category(code) != 'Cn': return False
+ c = ord(code)
+ if 0xFDD0 <= c < 0xFDF0: return False
+ return (c & 0xFFFF) not in (0xFFFE, 0xFFFF)
+
+
+b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + list(range(65024,65040)))
+def in_table_b1(code):
+ return ord(code) in b1_set
+
+
+b3_exceptions = {
+0xb5:'\u03bc', 0xdf:'ss', 0x130:'i\u0307', 0x149:'\u02bcn',
+0x17f:'s', 0x1f0:'j\u030c', 0x345:'\u03b9', 0x37a:' \u03b9',
+0x390:'\u03b9\u0308\u0301', 0x3b0:'\u03c5\u0308\u0301', 0x3c2:'\u03c3', 0x3d0:'\u03b2',
+0x3d1:'\u03b8', 0x3d2:'\u03c5', 0x3d3:'\u03cd', 0x3d4:'\u03cb',
+0x3d5:'\u03c6', 0x3d6:'\u03c0', 0x3f0:'\u03ba', 0x3f1:'\u03c1',
+0x3f2:'\u03c3', 0x3f5:'\u03b5', 0x587:'\u0565\u0582', 0x1e96:'h\u0331',
+0x1e97:'t\u0308', 0x1e98:'w\u030a', 0x1e99:'y\u030a', 0x1e9a:'a\u02be',
+0x1e9b:'\u1e61', 0x1f50:'\u03c5\u0313', 0x1f52:'\u03c5\u0313\u0300', 0x1f54:'\u03c5\u0313\u0301',
+0x1f56:'\u03c5\u0313\u0342', 0x1f80:'\u1f00\u03b9', 0x1f81:'\u1f01\u03b9', 0x1f82:'\u1f02\u03b9',
+0x1f83:'\u1f03\u03b9', 0x1f84:'\u1f04\u03b9', 0x1f85:'\u1f05\u03b9', 0x1f86:'\u1f06\u03b9',
+0x1f87:'\u1f07\u03b9', 0x1f88:'\u1f00\u03b9', 0x1f89:'\u1f01\u03b9', 0x1f8a:'\u1f02\u03b9',
+0x1f8b:'\u1f03\u03b9', 0x1f8c:'\u1f04\u03b9', 0x1f8d:'\u1f05\u03b9', 0x1f8e:'\u1f06\u03b9',
+0x1f8f:'\u1f07\u03b9', 0x1f90:'\u1f20\u03b9', 0x1f91:'\u1f21\u03b9', 0x1f92:'\u1f22\u03b9',
+0x1f93:'\u1f23\u03b9', 0x1f94:'\u1f24\u03b9', 0x1f95:'\u1f25\u03b9', 0x1f96:'\u1f26\u03b9',
+0x1f97:'\u1f27\u03b9', 0x1f98:'\u1f20\u03b9', 0x1f99:'\u1f21\u03b9', 0x1f9a:'\u1f22\u03b9',
+0x1f9b:'\u1f23\u03b9', 0x1f9c:'\u1f24\u03b9', 0x1f9d:'\u1f25\u03b9', 0x1f9e:'\u1f26\u03b9',
+0x1f9f:'\u1f27\u03b9', 0x1fa0:'\u1f60\u03b9', 0x1fa1:'\u1f61\u03b9', 0x1fa2:'\u1f62\u03b9',
+0x1fa3:'\u1f63\u03b9', 0x1fa4:'\u1f64\u03b9', 0x1fa5:'\u1f65\u03b9', 0x1fa6:'\u1f66\u03b9',
+0x1fa7:'\u1f67\u03b9', 0x1fa8:'\u1f60\u03b9', 0x1fa9:'\u1f61\u03b9', 0x1faa:'\u1f62\u03b9',
+0x1fab:'\u1f63\u03b9', 0x1fac:'\u1f64\u03b9', 0x1fad:'\u1f65\u03b9', 0x1fae:'\u1f66\u03b9',
+0x1faf:'\u1f67\u03b9', 0x1fb2:'\u1f70\u03b9', 0x1fb3:'\u03b1\u03b9', 0x1fb4:'\u03ac\u03b9',
+0x1fb6:'\u03b1\u0342', 0x1fb7:'\u03b1\u0342\u03b9', 0x1fbc:'\u03b1\u03b9', 0x1fbe:'\u03b9',
+0x1fc2:'\u1f74\u03b9', 0x1fc3:'\u03b7\u03b9', 0x1fc4:'\u03ae\u03b9', 0x1fc6:'\u03b7\u0342',
+0x1fc7:'\u03b7\u0342\u03b9', 0x1fcc:'\u03b7\u03b9', 0x1fd2:'\u03b9\u0308\u0300', 0x1fd3:'\u03b9\u0308\u0301',
+0x1fd6:'\u03b9\u0342', 0x1fd7:'\u03b9\u0308\u0342', 0x1fe2:'\u03c5\u0308\u0300', 0x1fe3:'\u03c5\u0308\u0301',
+0x1fe4:'\u03c1\u0313', 0x1fe6:'\u03c5\u0342', 0x1fe7:'\u03c5\u0308\u0342', 0x1ff2:'\u1f7c\u03b9',
+0x1ff3:'\u03c9\u03b9', 0x1ff4:'\u03ce\u03b9', 0x1ff6:'\u03c9\u0342', 0x1ff7:'\u03c9\u0342\u03b9',
+0x1ffc:'\u03c9\u03b9', 0x20a8:'rs', 0x2102:'c', 0x2103:'\xb0c',
+0x2107:'\u025b', 0x2109:'\xb0f', 0x210b:'h', 0x210c:'h',
+0x210d:'h', 0x2110:'i', 0x2111:'i', 0x2112:'l',
+0x2115:'n', 0x2116:'no', 0x2119:'p', 0x211a:'q',
+0x211b:'r', 0x211c:'r', 0x211d:'r', 0x2120:'sm',
+0x2121:'tel', 0x2122:'tm', 0x2124:'z', 0x2128:'z',
+0x212c:'b', 0x212d:'c', 0x2130:'e', 0x2131:'f',
+0x2133:'m', 0x213e:'\u03b3', 0x213f:'\u03c0', 0x2145:'d',
+0x3371:'hpa', 0x3373:'au', 0x3375:'ov', 0x3380:'pa',
+0x3381:'na', 0x3382:'\u03bca', 0x3383:'ma', 0x3384:'ka',
+0x3385:'kb', 0x3386:'mb', 0x3387:'gb', 0x338a:'pf',
+0x338b:'nf', 0x338c:'\u03bcf', 0x3390:'hz', 0x3391:'khz',
+0x3392:'mhz', 0x3393:'ghz', 0x3394:'thz', 0x33a9:'pa',
+0x33aa:'kpa', 0x33ab:'mpa', 0x33ac:'gpa', 0x33b4:'pv',
+0x33b5:'nv', 0x33b6:'\u03bcv', 0x33b7:'mv', 0x33b8:'kv',
+0x33b9:'mv', 0x33ba:'pw', 0x33bb:'nw', 0x33bc:'\u03bcw',
+0x33bd:'mw', 0x33be:'kw', 0x33bf:'mw', 0x33c0:'k\u03c9',
+0x33c1:'m\u03c9', 0x33c3:'bq', 0x33c6:'c\u2215kg', 0x33c7:'co.',
+0x33c8:'db', 0x33c9:'gy', 0x33cb:'hp', 0x33cd:'kk',
+0x33ce:'km', 0x33d7:'ph', 0x33d9:'ppm', 0x33da:'pr',
+0x33dc:'sv', 0x33dd:'wb', 0xfb00:'ff', 0xfb01:'fi',
+0xfb02:'fl', 0xfb03:'ffi', 0xfb04:'ffl', 0xfb05:'st',
+0xfb06:'st', 0xfb13:'\u0574\u0576', 0xfb14:'\u0574\u0565', 0xfb15:'\u0574\u056b',
+0xfb16:'\u057e\u0576', 0xfb17:'\u0574\u056d', 0x1d400:'a', 0x1d401:'b',
+0x1d402:'c', 0x1d403:'d', 0x1d404:'e', 0x1d405:'f',
+0x1d406:'g', 0x1d407:'h', 0x1d408:'i', 0x1d409:'j',
+0x1d40a:'k', 0x1d40b:'l', 0x1d40c:'m', 0x1d40d:'n',
+0x1d40e:'o', 0x1d40f:'p', 0x1d410:'q', 0x1d411:'r',
+0x1d412:'s', 0x1d413:'t', 0x1d414:'u', 0x1d415:'v',
+0x1d416:'w', 0x1d417:'x', 0x1d418:'y', 0x1d419:'z',
+0x1d434:'a', 0x1d435:'b', 0x1d436:'c', 0x1d437:'d',
+0x1d438:'e', 0x1d439:'f', 0x1d43a:'g', 0x1d43b:'h',
+0x1d43c:'i', 0x1d43d:'j', 0x1d43e:'k', 0x1d43f:'l',
+0x1d440:'m', 0x1d441:'n', 0x1d442:'o', 0x1d443:'p',
+0x1d444:'q', 0x1d445:'r', 0x1d446:'s', 0x1d447:'t',
+0x1d448:'u', 0x1d449:'v', 0x1d44a:'w', 0x1d44b:'x',
+0x1d44c:'y', 0x1d44d:'z', 0x1d468:'a', 0x1d469:'b',
+0x1d46a:'c', 0x1d46b:'d', 0x1d46c:'e', 0x1d46d:'f',
+0x1d46e:'g', 0x1d46f:'h', 0x1d470:'i', 0x1d471:'j',
+0x1d472:'k', 0x1d473:'l', 0x1d474:'m', 0x1d475:'n',
+0x1d476:'o', 0x1d477:'p', 0x1d478:'q', 0x1d479:'r',
+0x1d47a:'s', 0x1d47b:'t', 0x1d47c:'u', 0x1d47d:'v',
+0x1d47e:'w', 0x1d47f:'x', 0x1d480:'y', 0x1d481:'z',
+0x1d49c:'a', 0x1d49e:'c', 0x1d49f:'d', 0x1d4a2:'g',
+0x1d4a5:'j', 0x1d4a6:'k', 0x1d4a9:'n', 0x1d4aa:'o',
+0x1d4ab:'p', 0x1d4ac:'q', 0x1d4ae:'s', 0x1d4af:'t',
+0x1d4b0:'u', 0x1d4b1:'v', 0x1d4b2:'w', 0x1d4b3:'x',
+0x1d4b4:'y', 0x1d4b5:'z', 0x1d4d0:'a', 0x1d4d1:'b',
+0x1d4d2:'c', 0x1d4d3:'d', 0x1d4d4:'e', 0x1d4d5:'f',
+0x1d4d6:'g', 0x1d4d7:'h', 0x1d4d8:'i', 0x1d4d9:'j',
+0x1d4da:'k', 0x1d4db:'l', 0x1d4dc:'m', 0x1d4dd:'n',
+0x1d4de:'o', 0x1d4df:'p', 0x1d4e0:'q', 0x1d4e1:'r',
+0x1d4e2:'s', 0x1d4e3:'t', 0x1d4e4:'u', 0x1d4e5:'v',
+0x1d4e6:'w', 0x1d4e7:'x', 0x1d4e8:'y', 0x1d4e9:'z',
+0x1d504:'a', 0x1d505:'b', 0x1d507:'d', 0x1d508:'e',
+0x1d509:'f', 0x1d50a:'g', 0x1d50d:'j', 0x1d50e:'k',
+0x1d50f:'l', 0x1d510:'m', 0x1d511:'n', 0x1d512:'o',
+0x1d513:'p', 0x1d514:'q', 0x1d516:'s', 0x1d517:'t',
+0x1d518:'u', 0x1d519:'v', 0x1d51a:'w', 0x1d51b:'x',
+0x1d51c:'y', 0x1d538:'a', 0x1d539:'b', 0x1d53b:'d',
+0x1d53c:'e', 0x1d53d:'f', 0x1d53e:'g', 0x1d540:'i',
+0x1d541:'j', 0x1d542:'k', 0x1d543:'l', 0x1d544:'m',
+0x1d546:'o', 0x1d54a:'s', 0x1d54b:'t', 0x1d54c:'u',
+0x1d54d:'v', 0x1d54e:'w', 0x1d54f:'x', 0x1d550:'y',
+0x1d56c:'a', 0x1d56d:'b', 0x1d56e:'c', 0x1d56f:'d',
+0x1d570:'e', 0x1d571:'f', 0x1d572:'g', 0x1d573:'h',
+0x1d574:'i', 0x1d575:'j', 0x1d576:'k', 0x1d577:'l',
+0x1d578:'m', 0x1d579:'n', 0x1d57a:'o', 0x1d57b:'p',
+0x1d57c:'q', 0x1d57d:'r', 0x1d57e:'s', 0x1d57f:'t',
+0x1d580:'u', 0x1d581:'v', 0x1d582:'w', 0x1d583:'x',
+0x1d584:'y', 0x1d585:'z', 0x1d5a0:'a', 0x1d5a1:'b',
+0x1d5a2:'c', 0x1d5a3:'d', 0x1d5a4:'e', 0x1d5a5:'f',
+0x1d5a6:'g', 0x1d5a7:'h', 0x1d5a8:'i', 0x1d5a9:'j',
+0x1d5aa:'k', 0x1d5ab:'l', 0x1d5ac:'m', 0x1d5ad:'n',
+0x1d5ae:'o', 0x1d5af:'p', 0x1d5b0:'q', 0x1d5b1:'r',
+0x1d5b2:'s', 0x1d5b3:'t', 0x1d5b4:'u', 0x1d5b5:'v',
+0x1d5b6:'w', 0x1d5b7:'x', 0x1d5b8:'y', 0x1d5b9:'z',
+0x1d5d4:'a', 0x1d5d5:'b', 0x1d5d6:'c', 0x1d5d7:'d',
+0x1d5d8:'e', 0x1d5d9:'f', 0x1d5da:'g', 0x1d5db:'h',
+0x1d5dc:'i', 0x1d5dd:'j', 0x1d5de:'k', 0x1d5df:'l',
+0x1d5e0:'m', 0x1d5e1:'n', 0x1d5e2:'o', 0x1d5e3:'p',
+0x1d5e4:'q', 0x1d5e5:'r', 0x1d5e6:'s', 0x1d5e7:'t',
+0x1d5e8:'u', 0x1d5e9:'v', 0x1d5ea:'w', 0x1d5eb:'x',
+0x1d5ec:'y', 0x1d5ed:'z', 0x1d608:'a', 0x1d609:'b',
+0x1d60a:'c', 0x1d60b:'d', 0x1d60c:'e', 0x1d60d:'f',
+0x1d60e:'g', 0x1d60f:'h', 0x1d610:'i', 0x1d611:'j',
+0x1d612:'k', 0x1d613:'l', 0x1d614:'m', 0x1d615:'n',
+0x1d616:'o', 0x1d617:'p', 0x1d618:'q', 0x1d619:'r',
+0x1d61a:'s', 0x1d61b:'t', 0x1d61c:'u', 0x1d61d:'v',
+0x1d61e:'w', 0x1d61f:'x', 0x1d620:'y', 0x1d621:'z',
+0x1d63c:'a', 0x1d63d:'b', 0x1d63e:'c', 0x1d63f:'d',
+0x1d640:'e', 0x1d641:'f', 0x1d642:'g', 0x1d643:'h',
+0x1d644:'i', 0x1d645:'j', 0x1d646:'k', 0x1d647:'l',
+0x1d648:'m', 0x1d649:'n', 0x1d64a:'o', 0x1d64b:'p',
+0x1d64c:'q', 0x1d64d:'r', 0x1d64e:'s', 0x1d64f:'t',
+0x1d650:'u', 0x1d651:'v', 0x1d652:'w', 0x1d653:'x',
+0x1d654:'y', 0x1d655:'z', 0x1d670:'a', 0x1d671:'b',
+0x1d672:'c', 0x1d673:'d', 0x1d674:'e', 0x1d675:'f',
+0x1d676:'g', 0x1d677:'h', 0x1d678:'i', 0x1d679:'j',
+0x1d67a:'k', 0x1d67b:'l', 0x1d67c:'m', 0x1d67d:'n',
+0x1d67e:'o', 0x1d67f:'p', 0x1d680:'q', 0x1d681:'r',
+0x1d682:'s', 0x1d683:'t', 0x1d684:'u', 0x1d685:'v',
+0x1d686:'w', 0x1d687:'x', 0x1d688:'y', 0x1d689:'z',
+0x1d6a8:'\u03b1', 0x1d6a9:'\u03b2', 0x1d6aa:'\u03b3', 0x1d6ab:'\u03b4',
+0x1d6ac:'\u03b5', 0x1d6ad:'\u03b6', 0x1d6ae:'\u03b7', 0x1d6af:'\u03b8',
+0x1d6b0:'\u03b9', 0x1d6b1:'\u03ba', 0x1d6b2:'\u03bb', 0x1d6b3:'\u03bc',
+0x1d6b4:'\u03bd', 0x1d6b5:'\u03be', 0x1d6b6:'\u03bf', 0x1d6b7:'\u03c0',
+0x1d6b8:'\u03c1', 0x1d6b9:'\u03b8', 0x1d6ba:'\u03c3', 0x1d6bb:'\u03c4',
+0x1d6bc:'\u03c5', 0x1d6bd:'\u03c6', 0x1d6be:'\u03c7', 0x1d6bf:'\u03c8',
+0x1d6c0:'\u03c9', 0x1d6d3:'\u03c3', 0x1d6e2:'\u03b1', 0x1d6e3:'\u03b2',
+0x1d6e4:'\u03b3', 0x1d6e5:'\u03b4', 0x1d6e6:'\u03b5', 0x1d6e7:'\u03b6',
+0x1d6e8:'\u03b7', 0x1d6e9:'\u03b8', 0x1d6ea:'\u03b9', 0x1d6eb:'\u03ba',
+0x1d6ec:'\u03bb', 0x1d6ed:'\u03bc', 0x1d6ee:'\u03bd', 0x1d6ef:'\u03be',
+0x1d6f0:'\u03bf', 0x1d6f1:'\u03c0', 0x1d6f2:'\u03c1', 0x1d6f3:'\u03b8',
+0x1d6f4:'\u03c3', 0x1d6f5:'\u03c4', 0x1d6f6:'\u03c5', 0x1d6f7:'\u03c6',
+0x1d6f8:'\u03c7', 0x1d6f9:'\u03c8', 0x1d6fa:'\u03c9', 0x1d70d:'\u03c3',
+0x1d71c:'\u03b1', 0x1d71d:'\u03b2', 0x1d71e:'\u03b3', 0x1d71f:'\u03b4',
+0x1d720:'\u03b5', 0x1d721:'\u03b6', 0x1d722:'\u03b7', 0x1d723:'\u03b8',
+0x1d724:'\u03b9', 0x1d725:'\u03ba', 0x1d726:'\u03bb', 0x1d727:'\u03bc',
+0x1d728:'\u03bd', 0x1d729:'\u03be', 0x1d72a:'\u03bf', 0x1d72b:'\u03c0',
+0x1d72c:'\u03c1', 0x1d72d:'\u03b8', 0x1d72e:'\u03c3', 0x1d72f:'\u03c4',
+0x1d730:'\u03c5', 0x1d731:'\u03c6', 0x1d732:'\u03c7', 0x1d733:'\u03c8',
+0x1d734:'\u03c9', 0x1d747:'\u03c3', 0x1d756:'\u03b1', 0x1d757:'\u03b2',
+0x1d758:'\u03b3', 0x1d759:'\u03b4', 0x1d75a:'\u03b5', 0x1d75b:'\u03b6',
+0x1d75c:'\u03b7', 0x1d75d:'\u03b8', 0x1d75e:'\u03b9', 0x1d75f:'\u03ba',
+0x1d760:'\u03bb', 0x1d761:'\u03bc', 0x1d762:'\u03bd', 0x1d763:'\u03be',
+0x1d764:'\u03bf', 0x1d765:'\u03c0', 0x1d766:'\u03c1', 0x1d767:'\u03b8',
+0x1d768:'\u03c3', 0x1d769:'\u03c4', 0x1d76a:'\u03c5', 0x1d76b:'\u03c6',
+0x1d76c:'\u03c7', 0x1d76d:'\u03c8', 0x1d76e:'\u03c9', 0x1d781:'\u03c3',
+0x1d790:'\u03b1', 0x1d791:'\u03b2', 0x1d792:'\u03b3', 0x1d793:'\u03b4',
+0x1d794:'\u03b5', 0x1d795:'\u03b6', 0x1d796:'\u03b7', 0x1d797:'\u03b8',
+0x1d798:'\u03b9', 0x1d799:'\u03ba', 0x1d79a:'\u03bb', 0x1d79b:'\u03bc',
+0x1d79c:'\u03bd', 0x1d79d:'\u03be', 0x1d79e:'\u03bf', 0x1d79f:'\u03c0',
+0x1d7a0:'\u03c1', 0x1d7a1:'\u03b8', 0x1d7a2:'\u03c3', 0x1d7a3:'\u03c4',
+0x1d7a4:'\u03c5', 0x1d7a5:'\u03c6', 0x1d7a6:'\u03c7', 0x1d7a7:'\u03c8',
+0x1d7a8:'\u03c9', 0x1d7bb:'\u03c3', }
+
+def map_table_b3(code):
+ r = b3_exceptions.get(ord(code))
+ if r is not None: return r
+ return code.lower()
+
+
+def map_table_b2(a):
+ al = map_table_b3(a)
+ b = unicodedata.normalize("NFKC", al)
+ bl = "".join([map_table_b3(ch) for ch in b])
+ c = unicodedata.normalize("NFKC", bl)
+ if b != c:
+ return c
+ else:
+ return al
+
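+# Illustrative: map_table_b2('\u00df') == 'ss' -- case-fold via table B.3,
+# NFKC-normalize, then fold/normalize once more and keep that result only if
+# it differs (RFC 3454, table B.2).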
+
+def in_table_c11(code):
+ return code == " "
+
+
+def in_table_c12(code):
+ return unicodedata.category(code) == "Zs" and code != " "
+
+def in_table_c11_c12(code):
+ return unicodedata.category(code) == "Zs"
+
+
+def in_table_c21(code):
+ return ord(code) < 128 and unicodedata.category(code) == "Cc"
+
+c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279] + list(range(8288,8292)) + list(range(8298,8304)) + list(range(65529,65533)) + list(range(119155,119163)))
+def in_table_c22(code):
+ c = ord(code)
+ if c < 128: return False
+ if unicodedata.category(code) == "Cc": return True
+ return c in c22_specials
+
+def in_table_c21_c22(code):
+ return unicodedata.category(code) == "Cc" or \
+ ord(code) in c22_specials
+
+
+def in_table_c3(code):
+ return unicodedata.category(code) == "Co"
+
+
+def in_table_c4(code):
+ c = ord(code)
+ if c < 0xFDD0: return False
+ if c < 0xFDF0: return True
+ return (ord(code) & 0xFFFF) in (0xFFFE, 0xFFFF)
+
+
+def in_table_c5(code):
+ return unicodedata.category(code) == "Cs"
+
+
+c6_set = set(range(65529,65534))
+def in_table_c6(code):
+ return ord(code) in c6_set
+
+
+c7_set = set(range(12272,12284))
+def in_table_c7(code):
+ return ord(code) in c7_set
+
+
+c8_set = set([832, 833, 8206, 8207] + list(range(8234,8239)) + list(range(8298,8304)))
+def in_table_c8(code):
+ return ord(code) in c8_set
+
+
+c9_set = set([917505] + list(range(917536,917632)))
+def in_table_c9(code):
+ return ord(code) in c9_set
+
+
+def in_table_d1(code):
+ return unicodedata.bidirectional(code) in ("R","AL")
+
+
+def in_table_d2(code):
+ return unicodedata.bidirectional(code) == "L"
diff --git a/infer_4_37_2/lib/python3.10/subprocess.py b/infer_4_37_2/lib/python3.10/subprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1e3d64dfe04192add61eec2dd522360b35be091
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/subprocess.py
@@ -0,0 +1,2122 @@
+# subprocess - Subprocesses with accessible I/O streams
+#
+# For more information about this module, see PEP 324.
+#
+# Copyright (c) 2003-2005 by Peter Astrand
+#
+# Licensed to PSF under a Contributor Agreement.
+
+r"""Subprocesses with accessible I/O streams
+
+This module allows you to spawn processes, connect to their
+input/output/error pipes, and obtain their return codes.
+
+For a complete description of this module see the Python documentation.
+
+Main API
+========
+run(...): Runs a command, waits for it to complete, then returns a
+ CompletedProcess instance.
+Popen(...): A class for flexibly executing a command in a new process
+
+Constants
+---------
+DEVNULL: Special value that indicates that os.devnull should be used
+PIPE: Special value that indicates a pipe should be created
+STDOUT: Special value that indicates that stderr should go to stdout
+
+
+Older API
+=========
+call(...): Runs a command, waits for it to complete, then returns
+ the return code.
+check_call(...): Same as call() but raises CalledProcessError()
+ if return code is not 0
+check_output(...): Same as check_call() but returns the contents of
+ stdout instead of a return code
+getoutput(...): Runs a command in the shell, waits for it to complete,
+ then returns the output
+getstatusoutput(...): Runs a command in the shell, waits for it to complete,
+ then returns a (exitcode, output) tuple
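+
+Example (illustrative):
+
+    result = run(["echo", "hello"], capture_output=True, text=True)
+    print(result.returncode, result.stdout)   # 0 and 'hello\n' on POSIX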
+"""
+
+import builtins
+import errno
+import io
+import os
+import time
+import signal
+import sys
+import threading
+import warnings
+import contextlib
+from time import monotonic as _time
+import types
+
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+
+
+__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
+ "getoutput", "check_output", "run", "CalledProcessError", "DEVNULL",
+ "SubprocessError", "TimeoutExpired", "CompletedProcess"]
+ # NOTE: We intentionally exclude list2cmdline as it is
+ # considered an internal implementation detail. issue10838.
+
+try:
+ import msvcrt
+ import _winapi
+ _mswindows = True
+except ModuleNotFoundError:
+ _mswindows = False
+ import _posixsubprocess
+ import select
+ import selectors
+else:
+ from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
+ STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
+ STD_ERROR_HANDLE, SW_HIDE,
+ STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW,
+ ABOVE_NORMAL_PRIORITY_CLASS, BELOW_NORMAL_PRIORITY_CLASS,
+ HIGH_PRIORITY_CLASS, IDLE_PRIORITY_CLASS,
+ NORMAL_PRIORITY_CLASS, REALTIME_PRIORITY_CLASS,
+ CREATE_NO_WINDOW, DETACHED_PROCESS,
+ CREATE_DEFAULT_ERROR_MODE, CREATE_BREAKAWAY_FROM_JOB)
+
+ __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
+ "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
+ "STD_ERROR_HANDLE", "SW_HIDE",
+ "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW",
+ "STARTUPINFO",
+ "ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
+ "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
+ "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS",
+ "CREATE_NO_WINDOW", "DETACHED_PROCESS",
+ "CREATE_DEFAULT_ERROR_MODE", "CREATE_BREAKAWAY_FROM_JOB"])
+
+
+# Exception classes used by this module.
+class SubprocessError(Exception): pass
+
+
+class CalledProcessError(SubprocessError):
+ """Raised when run() is called with check=True and the process
+ returns a non-zero exit status.
+
+ Attributes:
+ cmd, returncode, stdout, stderr, output
+ """
+ def __init__(self, returncode, cmd, output=None, stderr=None):
+ self.returncode = returncode
+ self.cmd = cmd
+ self.output = output
+ self.stderr = stderr
+
+ def __str__(self):
+ if self.returncode and self.returncode < 0:
+ try:
+ return "Command '%s' died with %r." % (
+ self.cmd, signal.Signals(-self.returncode))
+ except ValueError:
+ return "Command '%s' died with unknown signal %d." % (
+ self.cmd, -self.returncode)
+ else:
+ return "Command '%s' returned non-zero exit status %d." % (
+ self.cmd, self.returncode)
+
+ @property
+ def stdout(self):
+ """Alias for output attribute, to match stderr"""
+ return self.output
+
+ @stdout.setter
+ def stdout(self, value):
+ # There's no obvious reason to set this, but allow it anyway so
+ # .stdout is a transparent alias for .output
+ self.output = value
+
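+# Illustrative sketch (not part of the module; assumes a POSIX "false"
+# binary, which exits with status 1):
+#
+#     try:
+#         run(["false"], check=True)
+#     except CalledProcessError as exc:
+#         print(exc.returncode)   # 1; exc.stdout/exc.stderr are None here
+#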
+
+class TimeoutExpired(SubprocessError):
+ """This exception is raised when the timeout expires while waiting for a
+ child process.
+
+ Attributes:
+ cmd, output, stdout, stderr, timeout
+ """
+ def __init__(self, cmd, timeout, output=None, stderr=None):
+ self.cmd = cmd
+ self.timeout = timeout
+ self.output = output
+ self.stderr = stderr
+
+ def __str__(self):
+ return ("Command '%s' timed out after %s seconds" %
+ (self.cmd, self.timeout))
+
+ @property
+ def stdout(self):
+ return self.output
+
+ @stdout.setter
+ def stdout(self, value):
+ # There's no obvious reason to set this, but allow it anyway so
+ # .stdout is a transparent alias for .output
+ self.output = value
+
+
+if _mswindows:
+ class STARTUPINFO:
+ def __init__(self, *, dwFlags=0, hStdInput=None, hStdOutput=None,
+ hStdError=None, wShowWindow=0, lpAttributeList=None):
+ self.dwFlags = dwFlags
+ self.hStdInput = hStdInput
+ self.hStdOutput = hStdOutput
+ self.hStdError = hStdError
+ self.wShowWindow = wShowWindow
+ self.lpAttributeList = lpAttributeList or {"handle_list": []}
+
+ def copy(self):
+ attr_list = self.lpAttributeList.copy()
+ if 'handle_list' in attr_list:
+ attr_list['handle_list'] = list(attr_list['handle_list'])
+
+ return STARTUPINFO(dwFlags=self.dwFlags,
+ hStdInput=self.hStdInput,
+ hStdOutput=self.hStdOutput,
+ hStdError=self.hStdError,
+ wShowWindow=self.wShowWindow,
+ lpAttributeList=attr_list)
+
+
+ class Handle(int):
+ closed = False
+
+ def Close(self, CloseHandle=_winapi.CloseHandle):
+ if not self.closed:
+ self.closed = True
+ CloseHandle(self)
+
+ def Detach(self):
+ if not self.closed:
+ self.closed = True
+ return int(self)
+ raise ValueError("already closed")
+
+ def __repr__(self):
+ return "%s(%d)" % (self.__class__.__name__, int(self))
+
+ __del__ = Close
+else:
+ # When select or poll has indicated that the file is writable,
+ # we can write up to _PIPE_BUF bytes without risk of blocking.
+ # POSIX defines PIPE_BUF as >= 512.
+ _PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
+
+ # poll/select have the advantage of not requiring any extra file
+ # descriptor, unlike epoll/kqueue (also, they require only a single
+ # syscall).
+ if hasattr(selectors, 'PollSelector'):
+ _PopenSelector = selectors.PollSelector
+ else:
+ _PopenSelector = selectors.SelectSelector
+
+
+if _mswindows:
+ # On Windows we just need to close `Popen._handle` when we no longer need
+ # it, so that the kernel can free it. `Popen._handle` gets closed
+ # implicitly when the `Popen` instance is finalized (see `Handle.__del__`,
+ # which is calling `CloseHandle` as requested in [1]), so there is nothing
+ # for `_cleanup` to do.
+ #
+ # [1] https://docs.microsoft.com/en-us/windows/desktop/ProcThread/
+ # creating-processes
+ _active = None
+
+ def _cleanup():
+ pass
+else:
+ # This list holds Popen instances for which the underlying process had not
+ # exited at the time its __del__ method got called: those processes are
+ # wait()ed for synchronously from _cleanup() when a new Popen object is
+ # created, to avoid zombie processes.
+ _active = []
+
+ def _cleanup():
+ if _active is None:
+ return
+ for inst in _active[:]:
+ res = inst._internal_poll(_deadstate=sys.maxsize)
+ if res is not None:
+ try:
+ _active.remove(inst)
+ except ValueError:
+ # This can happen if two threads create a new Popen instance.
+ # It's harmless that it was already removed, so ignore.
+ pass
+
+PIPE = -1
+STDOUT = -2
+DEVNULL = -3
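+
+# Illustrative sketch (not part of the module): the sentinels above are passed
+# as the stdin/stdout/stderr arguments ("cmd" is any argument list):
+#
+#     run(cmd, stdout=PIPE, stderr=STDOUT)   # merge stderr into stdout
+#     run(cmd, stdin=DEVNULL)                # child reads from os.devnull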
+
+
+# XXX This function is only used by multiprocessing and the test suite,
+# but it's here so that it can be imported when Python is compiled without
+# threads.
+
+def _optim_args_from_interpreter_flags():
+ """Return a list of command-line arguments reproducing the current
+ optimization settings in sys.flags."""
+ args = []
+ value = sys.flags.optimize
+ if value > 0:
+ args.append('-' + 'O' * value)
+ return args
+
+
+def _args_from_interpreter_flags():
+ """Return a list of command-line arguments reproducing the current
+ settings in sys.flags, sys.warnoptions and sys._xoptions."""
+ flag_opt_map = {
+ 'debug': 'd',
+ # 'inspect': 'i',
+ # 'interactive': 'i',
+ 'dont_write_bytecode': 'B',
+ 'no_site': 'S',
+ 'verbose': 'v',
+ 'bytes_warning': 'b',
+ 'quiet': 'q',
+ # -O is handled in _optim_args_from_interpreter_flags()
+ }
+ args = _optim_args_from_interpreter_flags()
+ for flag, opt in flag_opt_map.items():
+ v = getattr(sys.flags, flag)
+ if v > 0:
+ args.append('-' + opt * v)
+
+ if sys.flags.isolated:
+ args.append('-I')
+ else:
+ if sys.flags.ignore_environment:
+ args.append('-E')
+ if sys.flags.no_user_site:
+ args.append('-s')
+
+ # -W options
+ warnopts = sys.warnoptions[:]
+ bytes_warning = sys.flags.bytes_warning
+ xoptions = getattr(sys, '_xoptions', {})
+ dev_mode = ('dev' in xoptions)
+
+ if bytes_warning > 1:
+ warnopts.remove("error::BytesWarning")
+ elif bytes_warning:
+ warnopts.remove("default::BytesWarning")
+ if dev_mode:
+ warnopts.remove('default')
+ for opt in warnopts:
+ args.append('-W' + opt)
+
+ # -X options
+ if dev_mode:
+ args.extend(('-X', 'dev'))
+ for opt in ('faulthandler', 'tracemalloc', 'importtime',
+ 'showrefcount', 'utf8'):
+ if opt in xoptions:
+ value = xoptions[opt]
+ if value is True:
+ arg = opt
+ else:
+ arg = '%s=%s' % (opt, value)
+ args.extend(('-X', arg))
+
+ return args
+
+
+def call(*popenargs, timeout=None, **kwargs):
+ """Run command with arguments. Wait for command to complete or
+ timeout, then return the returncode attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ retcode = call(["ls", "-l"])
+ """
+ with Popen(*popenargs, **kwargs) as p:
+ try:
+ return p.wait(timeout=timeout)
+ except: # Including KeyboardInterrupt, wait handled that.
+ p.kill()
+ # We don't call p.wait() again as p.__exit__ does that for us.
+ raise
+
+
+def check_call(*popenargs, **kwargs):
+ """Run command with arguments. Wait for command to complete. If
+ the exit code was zero then return, otherwise raise
+ CalledProcessError. The CalledProcessError object will have the
+ return code in the returncode attribute.
+
+ The arguments are the same as for the call function. Example:
+
+ check_call(["ls", "-l"])
+ """
+ retcode = call(*popenargs, **kwargs)
+ if retcode:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ raise CalledProcessError(retcode, cmd)
+ return 0
+
+
+def check_output(*popenargs, timeout=None, **kwargs):
+ r"""Run command with arguments and return its output.
+
+ If the exit code was non-zero it raises a CalledProcessError. The
+ CalledProcessError object will have the return code in the returncode
+ attribute and output in the output attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ >>> check_output(["ls", "-l", "/dev/null"])
+ b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
+
+ The stdout argument is not allowed as it is used internally.
+ To capture standard error in the result, use stderr=STDOUT.
+
+ >>> check_output(["/bin/sh", "-c",
+ ... "ls -l non_existent_file ; exit 0"],
+ ... stderr=STDOUT)
+ b'ls: non_existent_file: No such file or directory\n'
+
+ There is an additional optional argument, "input", allowing you to
+ pass a string to the subprocess's stdin. If you use this argument
+ you may not also use the Popen constructor's "stdin" argument, as
+ it too will be used internally. Example:
+
+ >>> check_output(["sed", "-e", "s/foo/bar/"],
+ ... input=b"when in the course of fooman events\n")
+ b'when in the course of barman events\n'
+
+ By default, all communication is in bytes, and therefore any "input"
+ should be bytes, and the return value will be bytes. If in text mode,
+ any "input" should be a string, and the return value will be a string
+ decoded according to locale encoding, or by "encoding" if set. Text mode
+ is triggered by setting any of text, encoding, errors or universal_newlines.
+ """
+ if 'stdout' in kwargs:
+ raise ValueError('stdout argument not allowed, it will be overridden.')
+
+ if 'input' in kwargs and kwargs['input'] is None:
+ # Explicitly passing input=None was previously equivalent to passing an
+ # empty string. That is maintained here for backwards compatibility.
+ if kwargs.get('universal_newlines') or kwargs.get('text') or kwargs.get('encoding') \
+ or kwargs.get('errors'):
+ empty = ''
+ else:
+ empty = b''
+ kwargs['input'] = empty
+
+ return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
+ **kwargs).stdout
+
+
+class CompletedProcess(object):
+ """A process that has finished running.
+
+ This is returned by run().
+
+ Attributes:
+ args: The list or str args passed to run().
+ returncode: The exit code of the process, negative for signals.
+ stdout: The standard output (None if not captured).
+ stderr: The standard error (None if not captured).
+ """
+ def __init__(self, args, returncode, stdout=None, stderr=None):
+ self.args = args
+ self.returncode = returncode
+ self.stdout = stdout
+ self.stderr = stderr
+
+ def __repr__(self):
+ args = ['args={!r}'.format(self.args),
+ 'returncode={!r}'.format(self.returncode)]
+ if self.stdout is not None:
+ args.append('stdout={!r}'.format(self.stdout))
+ if self.stderr is not None:
+ args.append('stderr={!r}'.format(self.stderr))
+ return "{}({})".format(type(self).__name__, ', '.join(args))
+
+ __class_getitem__ = classmethod(types.GenericAlias)
+
+
+ def check_returncode(self):
+ """Raise CalledProcessError if the exit code is non-zero."""
+ if self.returncode:
+ raise CalledProcessError(self.returncode, self.args, self.stdout,
+ self.stderr)
+
+
+def run(*popenargs,
+ input=None, capture_output=False, timeout=None, check=False, **kwargs):
+ """Run command with arguments and return a CompletedProcess instance.
+
+ The returned instance will have attributes args, returncode, stdout and
+ stderr. By default, stdout and stderr are not captured, and those attributes
+ will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them,
+ or pass capture_output=True to capture both.
+
+ If check is True and the exit code was non-zero, it raises a
+ CalledProcessError. The CalledProcessError object will have the return code
+ in the returncode attribute, and output & stderr attributes if those streams
+ were captured.
+
+ If timeout is given, and the process takes too long, a TimeoutExpired
+ exception will be raised.
+
+ There is an optional argument "input", allowing you to
+ pass bytes or a string to the subprocess's stdin. If you use this argument
+ you may not also use the Popen constructor's "stdin" argument, as
+ it will be used internally.
+
+ By default, all communication is in bytes, and therefore any "input" should
+ be bytes, and the stdout and stderr will be bytes. If in text mode, any
+ "input" should be a string, and stdout and stderr will be strings decoded
+ according to locale encoding, or by "encoding" if set. Text mode is
+ triggered by setting any of text, encoding, errors or universal_newlines.
+
+ The other arguments are the same as for the Popen constructor.
+ """
+ if input is not None:
+ if kwargs.get('stdin') is not None:
+ raise ValueError('stdin and input arguments may not both be used.')
+ kwargs['stdin'] = PIPE
+
+ if capture_output:
+ if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
+ raise ValueError('stdout and stderr arguments may not be used '
+ 'with capture_output.')
+ kwargs['stdout'] = PIPE
+ kwargs['stderr'] = PIPE
+
+ with Popen(*popenargs, **kwargs) as process:
+ try:
+ stdout, stderr = process.communicate(input, timeout=timeout)
+ except TimeoutExpired as exc:
+ process.kill()
+ if _mswindows:
+ # Windows accumulates the output in a single blocking
+ # read() call run on child threads, with the timeout
+ # being done in a join() on those threads. communicate()
+ # _after_ kill() is required to collect that and add it
+ # to the exception.
+ exc.stdout, exc.stderr = process.communicate()
+ else:
+ # POSIX _communicate already populated the output so
+ # far into the TimeoutExpired exception.
+ process.wait()
+ raise
+ except: # Including KeyboardInterrupt, communicate handled that.
+ process.kill()
+ # We don't call process.wait() as .__exit__ does that for us.
+ raise
+ retcode = process.poll()
+ if check and retcode:
+ raise CalledProcessError(retcode, process.args,
+ output=stdout, stderr=stderr)
+ return CompletedProcess(process.args, retcode, stdout, stderr)
+
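+# Illustrative sketch (not part of the module; assumes POSIX "echo" and
+# "sleep" binaries), using only the arguments documented above:
+#
+#     run(["echo", "hi"], capture_output=True, text=True).stdout   # 'hi\n'
+#
+#     try:
+#         run(["sleep", "10"], timeout=0.1)
+#     except TimeoutExpired:
+#         pass   # the child was killed and waited on by run() itself
+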
+
+def list2cmdline(seq):
+ """
+ Translate a sequence of arguments into a command line
+ string, using the same rules as the MS C runtime:
+
+ 1) Arguments are delimited by white space, which is either a
+ space or a tab.
+
+ 2) A string surrounded by double quotation marks is
+ interpreted as a single argument, regardless of white space
+ contained within. A quoted string can be embedded in an
+ argument.
+
+ 3) A double quotation mark preceded by a backslash is
+ interpreted as a literal double quotation mark.
+
+ 4) Backslashes are interpreted literally, unless they
+ immediately precede a double quotation mark.
+
+ 5) If backslashes immediately precede a double quotation mark,
+ every pair of backslashes is interpreted as a literal
+ backslash. If the number of backslashes is odd, the last
+ backslash escapes the next double quotation mark as
+ described in rule 3.
+ """
+
+ # See
+ # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
+ # or search http://msdn.microsoft.com for
+ # "Parsing C++ Command-Line Arguments"
+ result = []
+ needquote = False
+ for arg in map(os.fsdecode, seq):
+ bs_buf = []
+
+ # Add a space to separate this argument from the others
+ if result:
+ result.append(' ')
+
+ needquote = (" " in arg) or ("\t" in arg) or not arg
+ if needquote:
+ result.append('"')
+
+ for c in arg:
+ if c == '\\':
+ # Don't know if we need to double yet.
+ bs_buf.append(c)
+ elif c == '"':
+ # Double backslashes.
+ result.append('\\' * len(bs_buf)*2)
+ bs_buf = []
+ result.append('\\"')
+ else:
+ # Normal char
+ if bs_buf:
+ result.extend(bs_buf)
+ bs_buf = []
+ result.append(c)
+
+ # Add remaining backslashes, if any.
+ if bs_buf:
+ result.extend(bs_buf)
+
+ if needquote:
+ result.extend(bs_buf)
+ result.append('"')
+
+ return ''.join(result)
+
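+# Illustrative sketch (not part of the module), applying the rules above:
+# rule 2 quotes the argument containing whitespace and rule 3 escapes the
+# embedded double quotation mark:
+#
+#     >>> list2cmdline(['a b', 'c"d'])
+#     '"a b" c\\"d'
+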
+
+# Various tools for executing commands and looking at their output and status.
+#
+
+def getstatusoutput(cmd):
+ """Return (exitcode, output) of executing cmd in a shell.
+
+ Execute the string 'cmd' in a shell with 'check_output' and
+ return a 2-tuple (exitcode, output). The locale encoding is used
+ to decode the output and process newlines.
+
+ A trailing newline is stripped from the output.
+ The exit status for the command can be interpreted
+ according to the rules for the function 'wait'. Example:
+
+ >>> import subprocess
+ >>> subprocess.getstatusoutput('ls /bin/ls')
+ (0, '/bin/ls')
+ >>> subprocess.getstatusoutput('cat /bin/junk')
+ (1, 'cat: /bin/junk: No such file or directory')
+ >>> subprocess.getstatusoutput('/bin/junk')
+ (127, 'sh: /bin/junk: not found')
+ >>> subprocess.getstatusoutput('/bin/kill $$')
+ (-15, '')
+ """
+ try:
+ data = check_output(cmd, shell=True, text=True, stderr=STDOUT)
+ exitcode = 0
+ except CalledProcessError as ex:
+ data = ex.output
+ exitcode = ex.returncode
+ if data[-1:] == '\n':
+ data = data[:-1]
+ return exitcode, data
+
+def getoutput(cmd):
+ """Return output (stdout or stderr) of executing cmd in a shell.
+
+ Like getstatusoutput(), except the exit status is ignored and the return
+ value is a string containing the command's output. Example:
+
+ >>> import subprocess
+ >>> subprocess.getoutput('ls /bin/ls')
+ '/bin/ls'
+ """
+ return getstatusoutput(cmd)[1]
+
+
+def _use_posix_spawn():
+ """Check if posix_spawn() can be used for subprocess.
+
+ subprocess requires a posix_spawn() implementation that properly reports
+ errors to the parent process, and sets errno on the following failures:
+
+ * Process attribute actions failed.
+ * File actions failed.
+ * exec() failed.
+
+ Prefer an implementation which can use vfork() in some cases for best
+ performance.
+ """
+ if _mswindows or not hasattr(os, 'posix_spawn'):
+ # os.posix_spawn() is not available
+ return False
+
+ if sys.platform in ('darwin', 'sunos5'):
+ # posix_spawn() is a syscall on both macOS and Solaris,
+ # and properly reports errors
+ return True
+
+ # Check libc name and runtime libc version
+ try:
+ ver = os.confstr('CS_GNU_LIBC_VERSION')
+ # parse 'glibc 2.28' as ('glibc', (2, 28))
+ parts = ver.split(maxsplit=1)
+ if len(parts) != 2:
+ # reject unknown format
+ raise ValueError
+ libc = parts[0]
+ version = tuple(map(int, parts[1].split('.')))
+
+ if sys.platform == 'linux' and libc == 'glibc' and version >= (2, 24):
+ # glibc 2.24 has a new Linux posix_spawn implementation using vfork
+ # which properly reports errors to the parent process.
+ return True
+ # Note: Don't use the implementation in earlier glibc because it doesn't
+ # use vfork (even if glibc 2.26 added a pipe to properly report errors
+ # to the parent process).
+ except (AttributeError, ValueError, OSError):
+ # os.confstr() or CS_GNU_LIBC_VERSION value not available
+ pass
+
+ # By default, assume that posix_spawn() does not properly report errors.
+ return False
+
+
+# These are primarily fail-safe knobs for disabling the fast paths: a True
+# value does not guarantee that the given libc/syscall API will be used.
+_USE_POSIX_SPAWN = _use_posix_spawn()
+_USE_VFORK = True
+
+
+class Popen:
+ """ Execute a child program in a new process.
+
+ For a complete description of the arguments see the Python documentation.
+
+ Arguments:
+ args: A string, or a sequence of program arguments.
+
+ bufsize: supplied as the buffering argument to the open() function when
+ creating the stdin/stdout/stderr pipe file objects
+
+ executable: A replacement program to execute.
+
+ stdin, stdout and stderr: These specify the executed program's standard
+ input, standard output and standard error file handles, respectively.
+
+ preexec_fn: (POSIX only) An object to be called in the child process
+ just before the child is executed.
+
+ close_fds: Controls closing or inheriting of file descriptors.
+
+ shell: If true, the command will be executed through the shell.
+
+ cwd: Sets the current directory before the child is executed.
+
+ env: Defines the environment variables for the new process.
+
+ text: If true, decode stdin, stdout and stderr using the given encoding
+ (if set) or the system default otherwise.
+
+ universal_newlines: Alias of text, provided for backwards compatibility.
+
+ startupinfo and creationflags (Windows only)
+
+ restore_signals (POSIX only)
+
+ start_new_session (POSIX only)
+
+ group (POSIX only)
+
+ extra_groups (POSIX only)
+
+ user (POSIX only)
+
+ umask (POSIX only)
+
+ pass_fds (POSIX only)
+
+ encoding and errors: Text mode encoding and error handling to use for
+ file objects stdin, stdout and stderr.
+
+ Attributes:
+ stdin, stdout, stderr, pid, returncode
+ """
+ _child_created = False # Set here since __del__ checks it
+
+ def __init__(self, args, bufsize=-1, executable=None,
+ stdin=None, stdout=None, stderr=None,
+ preexec_fn=None, close_fds=True,
+ shell=False, cwd=None, env=None, universal_newlines=None,
+ startupinfo=None, creationflags=0,
+ restore_signals=True, start_new_session=False,
+ pass_fds=(), *, user=None, group=None, extra_groups=None,
+ encoding=None, errors=None, text=None, umask=-1, pipesize=-1):
+ """Create new Popen instance."""
+ _cleanup()
+ # Held while anything is calling waitpid before returncode has been
+ # updated to prevent clobbering returncode if wait() or poll() are
+ # called from multiple threads at once. After acquiring the lock,
+ # code must re-check self.returncode to see if another thread just
+ # finished a waitpid() call.
+ self._waitpid_lock = threading.Lock()
+
+ self._input = None
+ self._communication_started = False
+ if bufsize is None:
+ bufsize = -1 # Restore default
+ if not isinstance(bufsize, int):
+ raise TypeError("bufsize must be an integer")
+
+ if pipesize is None:
+ pipesize = -1 # Restore default
+ if not isinstance(pipesize, int):
+ raise TypeError("pipesize must be an integer")
+
+ if _mswindows:
+ if preexec_fn is not None:
+ raise ValueError("preexec_fn is not supported on Windows "
+ "platforms")
+ else:
+ # POSIX
+ if pass_fds and not close_fds:
+ warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
+ close_fds = True
+ if startupinfo is not None:
+ raise ValueError("startupinfo is only supported on Windows "
+ "platforms")
+ if creationflags != 0:
+ raise ValueError("creationflags is only supported on Windows "
+ "platforms")
+
+ self.args = args
+ self.stdin = None
+ self.stdout = None
+ self.stderr = None
+ self.pid = None
+ self.returncode = None
+ self.encoding = encoding
+ self.errors = errors
+ self.pipesize = pipesize
+
+ # Validate the combinations of text and universal_newlines
+ if (text is not None and universal_newlines is not None
+ and bool(universal_newlines) != bool(text)):
+ raise SubprocessError('Cannot disambiguate when both text '
+ 'and universal_newlines are supplied but '
+ 'different. Pass one or the other.')
+
+ # Input and output objects. The general principle is like
+ # this:
+ #
+ # Parent Child
+ # ------ -----
+ # p2cwrite ---stdin---> p2cread
+ # c2pread <--stdout--- c2pwrite
+ # errread <--stderr--- errwrite
+ #
+ # On POSIX, the child objects are file descriptors. On
+ # Windows, these are Windows file handles. The parent objects
+ # are file descriptors on both platforms. The parent objects
+ # are -1 when not using PIPEs. The child objects are -1
+ # when not redirecting.
+
+ (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite) = self._get_handles(stdin, stdout, stderr)
+
+ # We wrap OS handles *before* launching the child, otherwise a
+ # quickly terminating child could make our fds unwrappable
+ # (see #8458).
+
+ if _mswindows:
+ if p2cwrite != -1:
+ p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
+ if c2pread != -1:
+ c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
+ if errread != -1:
+ errread = msvcrt.open_osfhandle(errread.Detach(), 0)
+
+ self.text_mode = encoding or errors or text or universal_newlines
+
+ # PEP 597: We suppress the EncodingWarning in the subprocess module
+ # for now (at Python 3.10), because the initial focus is on files.
+ # This will be changed to encoding = io.text_encoding(encoding)
+ # in the future.
+ if self.text_mode and encoding is None:
+ self.encoding = encoding = "locale"
+
+ # How long to resume waiting on a child after the first ^C.
+ # There is no right value for this. The purpose is to be polite
+ # yet remain responsive for interactive users trying to exit a tool.
+ self._sigint_wait_secs = 0.25 # 1/xkcd221.getRandomNumber()
+
+ self._closed_child_pipe_fds = False
+
+ if self.text_mode:
+ if bufsize == 1:
+ line_buffering = True
+ # Use the default buffer size for the underlying binary streams
+ # since they don't support line buffering.
+ bufsize = -1
+ else:
+ line_buffering = False
+
+ gid = None
+ if group is not None:
+ if not hasattr(os, 'setregid'):
+ raise ValueError("The 'group' parameter is not supported on the "
+ "current platform")
+
+ elif isinstance(group, str):
+ try:
+ import grp
+ except ImportError:
+ raise ValueError("The group parameter cannot be a string "
+ "on systems without the grp module")
+
+ gid = grp.getgrnam(group).gr_gid
+ elif isinstance(group, int):
+ gid = group
+ else:
+ raise TypeError("Group must be a string or an integer, not {}"
+ .format(type(group)))
+
+ if gid < 0:
+ raise ValueError(f"Group ID cannot be negative, got {gid}")
+
+ gids = None
+ if extra_groups is not None:
+ if not hasattr(os, 'setgroups'):
+ raise ValueError("The 'extra_groups' parameter is not "
+ "supported on the current platform")
+
+ elif isinstance(extra_groups, str):
+ raise ValueError("Groups must be a list, not a string")
+
+ gids = []
+ for extra_group in extra_groups:
+ if isinstance(extra_group, str):
+ try:
+ import grp
+ except ImportError:
+ raise ValueError("Items in extra_groups cannot be "
+ "strings on systems without the "
+ "grp module")
+
+ gids.append(grp.getgrnam(extra_group).gr_gid)
+ elif isinstance(extra_group, int):
+ gids.append(extra_group)
+ else:
+ raise TypeError("Items in extra_groups must be a string "
+ "or integer, not {}"
+ .format(type(extra_group)))
+
+ # make sure that the gids are all positive here so we can do less
+ # checking in the C code
+ for gid_check in gids:
+ if gid_check < 0:
+ raise ValueError(f"Group ID cannot be negative, got {gid_check}")
+
+ uid = None
+ if user is not None:
+ if not hasattr(os, 'setreuid'):
+ raise ValueError("The 'user' parameter is not supported on "
+ "the current platform")
+
+ elif isinstance(user, str):
+ try:
+ import pwd
+ except ImportError:
+ raise ValueError("The user parameter cannot be a string "
+ "on systems without the pwd module")
+ uid = pwd.getpwnam(user).pw_uid
+ elif isinstance(user, int):
+ uid = user
+ else:
+ raise TypeError("User must be a string or an integer")
+
+ if uid < 0:
+ raise ValueError(f"User ID cannot be negative, got {uid}")
+
+ try:
+ if p2cwrite != -1:
+ self.stdin = io.open(p2cwrite, 'wb', bufsize)
+ if self.text_mode:
+ self.stdin = io.TextIOWrapper(self.stdin, write_through=True,
+ line_buffering=line_buffering,
+ encoding=encoding, errors=errors)
+ if c2pread != -1:
+ self.stdout = io.open(c2pread, 'rb', bufsize)
+ if self.text_mode:
+ self.stdout = io.TextIOWrapper(self.stdout,
+ encoding=encoding, errors=errors)
+ if errread != -1:
+ self.stderr = io.open(errread, 'rb', bufsize)
+ if self.text_mode:
+ self.stderr = io.TextIOWrapper(self.stderr,
+ encoding=encoding, errors=errors)
+
+ self._execute_child(args, executable, preexec_fn, close_fds,
+ pass_fds, cwd, env,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite,
+ restore_signals,
+ gid, gids, uid, umask,
+ start_new_session)
+ except:
+ # Cleanup if the child failed starting.
+ for f in filter(None, (self.stdin, self.stdout, self.stderr)):
+ try:
+ f.close()
+ except OSError:
+ pass # Ignore EBADF or other errors.
+
+ if not self._closed_child_pipe_fds:
+ to_close = []
+ if stdin == PIPE:
+ to_close.append(p2cread)
+ if stdout == PIPE:
+ to_close.append(c2pwrite)
+ if stderr == PIPE:
+ to_close.append(errwrite)
+ if hasattr(self, '_devnull'):
+ to_close.append(self._devnull)
+ for fd in to_close:
+ try:
+ if _mswindows and isinstance(fd, Handle):
+ fd.Close()
+ else:
+ os.close(fd)
+ except OSError:
+ pass
+
+ raise
+
+ def __repr__(self):
+ obj_repr = (
+ f"<{self.__class__.__name__}: "
+ f"returncode: {self.returncode} args: {self.args!r}>"
+ )
+ if len(obj_repr) > 80:
+ obj_repr = obj_repr[:76] + "...>"
+ return obj_repr
+
+ __class_getitem__ = classmethod(types.GenericAlias)
+
+ @property
+ def universal_newlines(self):
+ # universal_newlines is retained as an alias of text_mode for API
+ # compatibility. bpo-31756
+ return self.text_mode
+
+ @universal_newlines.setter
+ def universal_newlines(self, universal_newlines):
+ self.text_mode = bool(universal_newlines)
+
+ def _translate_newlines(self, data, encoding, errors):
+ data = data.decode(encoding, errors)
+ return data.replace("\r\n", "\n").replace("\r", "\n")
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, value, traceback):
+ if self.stdout:
+ self.stdout.close()
+ if self.stderr:
+ self.stderr.close()
+ try: # Flushing a BufferedWriter may raise an error
+ if self.stdin:
+ self.stdin.close()
+ finally:
+ if exc_type == KeyboardInterrupt:
+ # https://bugs.python.org/issue25942
+ # In the case of a KeyboardInterrupt we assume the SIGINT
+ # was also already sent to our child processes. We can't
+ # block indefinitely as that is not user friendly.
+ # If we have not already waited a brief amount of time in
+ # an interrupted .wait() or .communicate() call, do so here
+ # for consistency.
+ if self._sigint_wait_secs > 0:
+ try:
+ self._wait(timeout=self._sigint_wait_secs)
+ except TimeoutExpired:
+ pass
+ self._sigint_wait_secs = 0 # Note that this has been done.
+ return # resume the KeyboardInterrupt
+
+ # Wait for the process to terminate, to avoid zombies.
+ self.wait()
+
+ def __del__(self, _maxsize=sys.maxsize, _warn=warnings.warn):
+ if not self._child_created:
+ # We didn't get to successfully create a child process.
+ return
+ if self.returncode is None:
+ # Not reading subprocess exit status creates a zombie process which
+ # is only destroyed at the parent python process exit
+ _warn("subprocess %s is still running" % self.pid,
+ ResourceWarning, source=self)
+ # In case the child hasn't been waited on, check if it's done.
+ self._internal_poll(_deadstate=_maxsize)
+ if self.returncode is None and _active is not None:
+ # Child is still running, keep us alive until we can wait on it.
+ _active.append(self)
+
+ def _get_devnull(self):
+ if not hasattr(self, '_devnull'):
+ self._devnull = os.open(os.devnull, os.O_RDWR)
+ return self._devnull
+
+ def _stdin_write(self, input):
+ if input:
+ try:
+ self.stdin.write(input)
+ except BrokenPipeError:
+ pass # communicate() must ignore broken pipe errors.
+ except OSError as exc:
+ if exc.errno == errno.EINVAL:
+ # bpo-19612, bpo-30418: On Windows, stdin.write() fails
+ # with EINVAL if the child process exited or if the child
+ # process is still running but closed the pipe.
+ pass
+ else:
+ raise
+
+ try:
+ self.stdin.close()
+ except BrokenPipeError:
+ pass # communicate() must ignore broken pipe errors.
+ except OSError as exc:
+ if exc.errno == errno.EINVAL:
+ pass
+ else:
+ raise
+
+ def communicate(self, input=None, timeout=None):
+ """Interact with process: Send data to stdin and close it.
+ Read data from stdout and stderr, until end-of-file is
+ reached. Wait for process to terminate.
+
+ The optional "input" argument should be data to be sent to the
+ child process, or None, if no data should be sent to the child.
+ communicate() returns a tuple (stdout, stderr).
+
+ By default, all communication is in bytes, and therefore any
+ "input" should be bytes, and the (stdout, stderr) will be bytes.
+ If in text mode (indicated by self.text_mode), any "input" should
+ be a string, and (stdout, stderr) will be strings decoded
+ according to locale encoding, or by "encoding" if set. Text mode
+ is triggered by setting any of text, encoding, errors or
+ universal_newlines.
+ """
+
+ if self._communication_started and input:
+ raise ValueError("Cannot send input after starting communication")
+
+ # Optimization: If we are not worried about timeouts, we haven't
+ # started communicating, and we have one or zero pipes, using select()
+ # or threads is unnecessary.
+ if (timeout is None and not self._communication_started and
+ [self.stdin, self.stdout, self.stderr].count(None) >= 2):
+ stdout = None
+ stderr = None
+ if self.stdin:
+ self._stdin_write(input)
+ elif self.stdout:
+ stdout = self.stdout.read()
+ self.stdout.close()
+ elif self.stderr:
+ stderr = self.stderr.read()
+ self.stderr.close()
+ self.wait()
+ else:
+ if timeout is not None:
+ endtime = _time() + timeout
+ else:
+ endtime = None
+
+ try:
+ stdout, stderr = self._communicate(input, endtime, timeout)
+ except KeyboardInterrupt:
+ # https://bugs.python.org/issue25942
+ # See the detailed comment in .wait().
+ if timeout is not None:
+ sigint_timeout = min(self._sigint_wait_secs,
+ self._remaining_time(endtime))
+ else:
+ sigint_timeout = self._sigint_wait_secs
+ self._sigint_wait_secs = 0 # nothing else should wait.
+ try:
+ self._wait(timeout=sigint_timeout)
+ except TimeoutExpired:
+ pass
+ raise # resume the KeyboardInterrupt
+
+ finally:
+ self._communication_started = True
+
+ sts = self.wait(timeout=self._remaining_time(endtime))
+
+ return (stdout, stderr)
+
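+ # Illustrative sketch (not part of the class; assumes a POSIX "grep"):
+ # one-shot interaction with a child process through communicate().
+ #
+ #     p = Popen(["grep", "b"], stdin=PIPE, stdout=PIPE, text=True)
+ #     out, err = p.communicate(input="abc\nxyz\n", timeout=10)
+ #     # out == "abc\n"; err is None because stderr was not redirected
+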
+
+ def poll(self):
+ """Check if child process has terminated. Set and return returncode
+ attribute."""
+ return self._internal_poll()
+
+
+ def _remaining_time(self, endtime):
+ """Convenience for _communicate when computing timeouts."""
+ if endtime is None:
+ return None
+ else:
+ return endtime - _time()
+
+
+ def _check_timeout(self, endtime, orig_timeout, stdout_seq, stderr_seq,
+ skip_check_and_raise=False):
+ """Convenience for checking if a timeout has expired."""
+ if endtime is None:
+ return
+ if skip_check_and_raise or _time() > endtime:
+ raise TimeoutExpired(
+ self.args, orig_timeout,
+ output=b''.join(stdout_seq) if stdout_seq else None,
+ stderr=b''.join(stderr_seq) if stderr_seq else None)
+
+
+ def wait(self, timeout=None):
+ """Wait for child process to terminate; returns self.returncode."""
+ if timeout is not None:
+ endtime = _time() + timeout
+ try:
+ return self._wait(timeout=timeout)
+ except KeyboardInterrupt:
+ # https://bugs.python.org/issue25942
+ # The first keyboard interrupt waits briefly for the child to
+ # exit under the common assumption that it also received the ^C
+ # generated SIGINT and will exit rapidly.
+ if timeout is not None:
+ sigint_timeout = min(self._sigint_wait_secs,
+ self._remaining_time(endtime))
+ else:
+ sigint_timeout = self._sigint_wait_secs
+ self._sigint_wait_secs = 0 # nothing else should wait.
+ try:
+ self._wait(timeout=sigint_timeout)
+ except TimeoutExpired:
+ pass
+ raise # resume the KeyboardInterrupt
+
+ def _close_pipe_fds(self,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+ # self._devnull is not always defined.
+ devnull_fd = getattr(self, '_devnull', None)
+
+ with contextlib.ExitStack() as stack:
+ if _mswindows:
+ if p2cread != -1:
+ stack.callback(p2cread.Close)
+ if c2pwrite != -1:
+ stack.callback(c2pwrite.Close)
+ if errwrite != -1:
+ stack.callback(errwrite.Close)
+ else:
+ if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
+ stack.callback(os.close, p2cread)
+ if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
+ stack.callback(os.close, c2pwrite)
+ if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
+ stack.callback(os.close, errwrite)
+
+ if devnull_fd is not None:
+ stack.callback(os.close, devnull_fd)
+
+ # Prevent a double close of these handles/fds from __init__ on error.
+ self._closed_child_pipe_fds = True
+
+ if _mswindows:
+ #
+ # Windows methods
+ #
+ def _get_handles(self, stdin, stdout, stderr):
+ """Construct and return tuple with IO objects:
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
+ """
+ if stdin is None and stdout is None and stderr is None:
+ return (-1, -1, -1, -1, -1, -1)
+
+ p2cread, p2cwrite = -1, -1
+ c2pread, c2pwrite = -1, -1
+ errread, errwrite = -1, -1
+
+ if stdin is None:
+ p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
+ if p2cread is None:
+ p2cread, _ = _winapi.CreatePipe(None, 0)
+ p2cread = Handle(p2cread)
+ _winapi.CloseHandle(_)
+ elif stdin == PIPE:
+ p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
+ p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
+ elif stdin == DEVNULL:
+ p2cread = msvcrt.get_osfhandle(self._get_devnull())
+ elif isinstance(stdin, int):
+ p2cread = msvcrt.get_osfhandle(stdin)
+ else:
+ # Assuming file-like object
+ p2cread = msvcrt.get_osfhandle(stdin.fileno())
+ p2cread = self._make_inheritable(p2cread)
+
+ if stdout is None:
+ c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
+ if c2pwrite is None:
+ _, c2pwrite = _winapi.CreatePipe(None, 0)
+ c2pwrite = Handle(c2pwrite)
+ _winapi.CloseHandle(_)
+ elif stdout == PIPE:
+ c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
+ c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
+ elif stdout == DEVNULL:
+ c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
+ elif isinstance(stdout, int):
+ c2pwrite = msvcrt.get_osfhandle(stdout)
+ else:
+ # Assuming file-like object
+ c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
+ c2pwrite = self._make_inheritable(c2pwrite)
+
+ if stderr is None:
+ errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
+ if errwrite is None:
+ _, errwrite = _winapi.CreatePipe(None, 0)
+ errwrite = Handle(errwrite)
+ _winapi.CloseHandle(_)
+ elif stderr == PIPE:
+ errread, errwrite = _winapi.CreatePipe(None, 0)
+ errread, errwrite = Handle(errread), Handle(errwrite)
+ elif stderr == STDOUT:
+ errwrite = c2pwrite
+ elif stderr == DEVNULL:
+ errwrite = msvcrt.get_osfhandle(self._get_devnull())
+ elif isinstance(stderr, int):
+ errwrite = msvcrt.get_osfhandle(stderr)
+ else:
+ # Assuming file-like object
+ errwrite = msvcrt.get_osfhandle(stderr.fileno())
+ errwrite = self._make_inheritable(errwrite)
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+
+ def _make_inheritable(self, handle):
+ """Return a duplicate of handle, which is inheritable"""
+ h = _winapi.DuplicateHandle(
+ _winapi.GetCurrentProcess(), handle,
+ _winapi.GetCurrentProcess(), 0, 1,
+ _winapi.DUPLICATE_SAME_ACCESS)
+ return Handle(h)
+
+
+ def _filter_handle_list(self, handle_list):
+ """Filter out console handles that can't be used
+ in lpAttributeList["handle_list"] and make sure the list
+ isn't empty. This also removes duplicate handles."""
+ # A handle with its lowest two bits set might be a special console
+ # handle that, if passed in lpAttributeList["handle_list"], will
+ # cause the call to fail.
+ return list({handle for handle in handle_list
+ if handle & 0x3 != 0x3
+ or _winapi.GetFileType(handle) !=
+ _winapi.FILE_TYPE_CHAR})
+
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ pass_fds, cwd, env,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite,
+ unused_restore_signals,
+ unused_gid, unused_gids, unused_uid,
+ unused_umask,
+ unused_start_new_session):
+ """Execute program (MS Windows version)"""
+
+ assert not pass_fds, "pass_fds not supported on Windows."
+
+ if isinstance(args, str):
+ pass
+ elif isinstance(args, bytes):
+ if shell:
+ raise TypeError('bytes args is not allowed on Windows')
+ args = list2cmdline([args])
+ elif isinstance(args, os.PathLike):
+ if shell:
+ raise TypeError('path-like args is not allowed when '
+ 'shell is true')
+ args = list2cmdline([args])
+ else:
+ args = list2cmdline(args)
+
+ if executable is not None:
+ executable = os.fsdecode(executable)
+
+ # Process startup details
+ if startupinfo is None:
+ startupinfo = STARTUPINFO()
+ else:
+ # bpo-34044: Copy STARTUPINFO since it is modified below,
+ # so the caller can reuse it multiple times.
+ startupinfo = startupinfo.copy()
+
+ use_std_handles = -1 not in (p2cread, c2pwrite, errwrite)
+ if use_std_handles:
+ startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
+ startupinfo.hStdInput = p2cread
+ startupinfo.hStdOutput = c2pwrite
+ startupinfo.hStdError = errwrite
+
+ attribute_list = startupinfo.lpAttributeList
+ have_handle_list = bool(attribute_list and
+ "handle_list" in attribute_list and
+ attribute_list["handle_list"])
+
+ # If we were given a handle_list or need to create one
+ if have_handle_list or (use_std_handles and close_fds):
+ if attribute_list is None:
+ attribute_list = startupinfo.lpAttributeList = {}
+ handle_list = attribute_list["handle_list"] = \
+ list(attribute_list.get("handle_list", []))
+
+ if use_std_handles:
+ handle_list += [int(p2cread), int(c2pwrite), int(errwrite)]
+
+ handle_list[:] = self._filter_handle_list(handle_list)
+
+ if handle_list:
+ if not close_fds:
+ warnings.warn("startupinfo.lpAttributeList['handle_list'] "
+ "overriding close_fds", RuntimeWarning)
+
+ # When using the handle_list we always request to inherit
+ # handles but the only handles that will be inherited are
+ # the ones in the handle_list
+ close_fds = False
+
+ if shell:
+ startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
+ startupinfo.wShowWindow = _winapi.SW_HIDE
+ if not executable:
+ # gh-101283: without a fully-qualified path, before Windows
+ # checks the system directories, it first looks in the
+ # application directory, and also the current directory if
+ # NeedCurrentDirectoryForExePathW(ExeName) is true, so try
+ # to avoid executing unqualified "cmd.exe".
+ comspec = os.environ.get('ComSpec')
+ if not comspec:
+ system_root = os.environ.get('SystemRoot', '')
+ comspec = os.path.join(system_root, 'System32', 'cmd.exe')
+ if not os.path.isabs(comspec):
+ raise FileNotFoundError('shell not found: neither %ComSpec% nor %SystemRoot% is set')
+ if os.path.isabs(comspec):
+ executable = comspec
+ else:
+ comspec = executable
+
+ args = '{} /c "{}"'.format(comspec, args)
+
+ if cwd is not None:
+ cwd = os.fsdecode(cwd)
+
+ sys.audit("subprocess.Popen", executable, args, cwd, env)
+
+ # Start the process
+ try:
+ hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
+ # no special security
+ None, None,
+ int(not close_fds),
+ creationflags,
+ env,
+ cwd,
+ startupinfo)
+ finally:
+ # Child is launched. Close the parent's copy of those pipe
+ # handles that only the child should have open. You need
+ # to make sure that no handles to the write end of the
+ # output pipe are maintained in this process or else the
+ # pipe will not close when the child process exits and the
+ # ReadFile will hang.
+ self._close_pipe_fds(p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+ # Retain the process handle, but close the thread handle
+ self._child_created = True
+ self._handle = Handle(hp)
+ self.pid = pid
+ _winapi.CloseHandle(ht)
+
+ def _internal_poll(self, _deadstate=None,
+ _WaitForSingleObject=_winapi.WaitForSingleObject,
+ _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
+ _GetExitCodeProcess=_winapi.GetExitCodeProcess):
+ """Check if child process has terminated. Returns returncode
+ attribute.
+
+ This method is called by __del__, so it can only refer to objects
+ in its local scope.
+
+ """
+ if self.returncode is None:
+ if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
+ self.returncode = _GetExitCodeProcess(self._handle)
+ return self.returncode
+
+
+ def _wait(self, timeout):
+ """Internal implementation of wait() on Windows."""
+ if timeout is None:
+ timeout_millis = _winapi.INFINITE
+ else:
+ timeout_millis = int(timeout * 1000)
+ if self.returncode is None:
+ # API note: Returns immediately if timeout_millis == 0.
+ result = _winapi.WaitForSingleObject(self._handle,
+ timeout_millis)
+ if result == _winapi.WAIT_TIMEOUT:
+ raise TimeoutExpired(self.args, timeout)
+ self.returncode = _winapi.GetExitCodeProcess(self._handle)
+ return self.returncode
+
+
+ def _readerthread(self, fh, buffer):
+ buffer.append(fh.read())
+ fh.close()
+
+
+ def _communicate(self, input, endtime, orig_timeout):
+ # Start reader threads feeding into a list hanging off of this
+ # object, unless they've already been started.
+ if self.stdout and not hasattr(self, "_stdout_buff"):
+ self._stdout_buff = []
+ self.stdout_thread = \
+ threading.Thread(target=self._readerthread,
+ args=(self.stdout, self._stdout_buff))
+ self.stdout_thread.daemon = True
+ self.stdout_thread.start()
+ if self.stderr and not hasattr(self, "_stderr_buff"):
+ self._stderr_buff = []
+ self.stderr_thread = \
+ threading.Thread(target=self._readerthread,
+ args=(self.stderr, self._stderr_buff))
+ self.stderr_thread.daemon = True
+ self.stderr_thread.start()
+
+ if self.stdin:
+ self._stdin_write(input)
+
+ # Wait for the reader threads, or time out. If we time out, the
+ # threads remain reading and the fds are left open in case the user
+ # calls communicate again.
+ if self.stdout is not None:
+ self.stdout_thread.join(self._remaining_time(endtime))
+ if self.stdout_thread.is_alive():
+ raise TimeoutExpired(self.args, orig_timeout)
+ if self.stderr is not None:
+ self.stderr_thread.join(self._remaining_time(endtime))
+ if self.stderr_thread.is_alive():
+ raise TimeoutExpired(self.args, orig_timeout)
+
+ # Collect the output from and close both pipes, now that we know
+ # both have been read successfully.
+ stdout = None
+ stderr = None
+ if self.stdout:
+ stdout = self._stdout_buff
+ self.stdout.close()
+ if self.stderr:
+ stderr = self._stderr_buff
+ self.stderr.close()
+
+ # All data exchanged. Translate lists into strings.
+ stdout = stdout[0] if stdout else None
+ stderr = stderr[0] if stderr else None
+
+ return (stdout, stderr)
+
+ def send_signal(self, sig):
+ """Send a signal to the process."""
+ # Don't signal a process that we know has already died.
+ if self.returncode is not None:
+ return
+ if sig == signal.SIGTERM:
+ self.terminate()
+ elif sig == signal.CTRL_C_EVENT:
+ os.kill(self.pid, signal.CTRL_C_EVENT)
+ elif sig == signal.CTRL_BREAK_EVENT:
+ os.kill(self.pid, signal.CTRL_BREAK_EVENT)
+ else:
+ raise ValueError("Unsupported signal: {}".format(sig))
+
+ def terminate(self):
+ """Terminates the process."""
+ # Don't terminate a process that we know has already died.
+ if self.returncode is not None:
+ return
+ try:
+ _winapi.TerminateProcess(self._handle, 1)
+ except PermissionError:
+ # ERROR_ACCESS_DENIED (winerror 5) is received when the
+ # process already died.
+ rc = _winapi.GetExitCodeProcess(self._handle)
+ if rc == _winapi.STILL_ACTIVE:
+ raise
+ self.returncode = rc
+
+ kill = terminate
+
+ else:
+ #
+ # POSIX methods
+ #
+ def _get_handles(self, stdin, stdout, stderr):
+ """Construct and return tuple with IO objects:
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
+ """
+ p2cread, p2cwrite = -1, -1
+ c2pread, c2pwrite = -1, -1
+ errread, errwrite = -1, -1
+
+ if stdin is None:
+ pass
+ elif stdin == PIPE:
+ p2cread, p2cwrite = os.pipe()
+ if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"):
+ fcntl.fcntl(p2cwrite, fcntl.F_SETPIPE_SZ, self.pipesize)
+ elif stdin == DEVNULL:
+ p2cread = self._get_devnull()
+ elif isinstance(stdin, int):
+ p2cread = stdin
+ else:
+ # Assuming file-like object
+ p2cread = stdin.fileno()
+
+ if stdout is None:
+ pass
+ elif stdout == PIPE:
+ c2pread, c2pwrite = os.pipe()
+ if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"):
+ fcntl.fcntl(c2pwrite, fcntl.F_SETPIPE_SZ, self.pipesize)
+ elif stdout == DEVNULL:
+ c2pwrite = self._get_devnull()
+ elif isinstance(stdout, int):
+ c2pwrite = stdout
+ else:
+ # Assuming file-like object
+ c2pwrite = stdout.fileno()
+
+ if stderr is None:
+ pass
+ elif stderr == PIPE:
+ errread, errwrite = os.pipe()
+ if self.pipesize > 0 and hasattr(fcntl, "F_SETPIPE_SZ"):
+ fcntl.fcntl(errwrite, fcntl.F_SETPIPE_SZ, self.pipesize)
+ elif stderr == STDOUT:
+ if c2pwrite != -1:
+ errwrite = c2pwrite
+ else: # child's stdout is not set, use parent's stdout
+ errwrite = sys.__stdout__.fileno()
+ elif stderr == DEVNULL:
+ errwrite = self._get_devnull()
+ elif isinstance(stderr, int):
+ errwrite = stderr
+ else:
+ # Assuming file-like object
+ errwrite = stderr.fileno()
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+
+ def _posix_spawn(self, args, executable, env, restore_signals,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+ """Execute program using os.posix_spawn()."""
+ if env is None:
+ env = os.environ
+
+ kwargs = {}
+ if restore_signals:
+ # See _Py_RestoreSignals() in Python/pylifecycle.c
+ sigset = []
+ for signame in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
+ signum = getattr(signal, signame, None)
+ if signum is not None:
+ sigset.append(signum)
+ kwargs['setsigdef'] = sigset
+
+ file_actions = []
+ for fd in (p2cwrite, c2pread, errread):
+ if fd != -1:
+ file_actions.append((os.POSIX_SPAWN_CLOSE, fd))
+ for fd, fd2 in (
+ (p2cread, 0),
+ (c2pwrite, 1),
+ (errwrite, 2),
+ ):
+ if fd != -1:
+ file_actions.append((os.POSIX_SPAWN_DUP2, fd, fd2))
+ if file_actions:
+ kwargs['file_actions'] = file_actions
+
+ self.pid = os.posix_spawn(executable, args, env, **kwargs)
+ self._child_created = True
+
+ self._close_pipe_fds(p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ pass_fds, cwd, env,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite,
+ restore_signals,
+ gid, gids, uid, umask,
+ start_new_session):
+ """Execute program (POSIX version)"""
+
+ if isinstance(args, (str, bytes)):
+ args = [args]
+ elif isinstance(args, os.PathLike):
+ if shell:
+ raise TypeError('path-like args is not allowed when '
+ 'shell is true')
+ args = [args]
+ else:
+ args = list(args)
+
+ if shell:
+ # On Android the default shell is at '/system/bin/sh'.
+ unix_shell = ('/system/bin/sh' if
+ hasattr(sys, 'getandroidapilevel') else '/bin/sh')
+ args = [unix_shell, "-c"] + args
+ if executable:
+ args[0] = executable
+
+ if executable is None:
+ executable = args[0]
+
+ sys.audit("subprocess.Popen", executable, args, cwd, env)
+
+ if (_USE_POSIX_SPAWN
+ and os.path.dirname(executable)
+ and preexec_fn is None
+ and not close_fds
+ and not pass_fds
+ and cwd is None
+ and (p2cread == -1 or p2cread > 2)
+ and (c2pwrite == -1 or c2pwrite > 2)
+ and (errwrite == -1 or errwrite > 2)
+ and not start_new_session
+ and gid is None
+ and gids is None
+ and uid is None
+ and umask < 0):
+ self._posix_spawn(args, executable, env, restore_signals,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+ return
+
+ orig_executable = executable
+
+ # For transferring possible exec failure from child to parent.
+ # Data format: "exception name:hex errno:description"
+ # Pickle is not used; it is complex and involves memory allocation.
+ errpipe_read, errpipe_write = os.pipe()
+ # errpipe_write must not be in the standard io 0, 1, or 2 fd range.
+ low_fds_to_close = []
+ while errpipe_write < 3:
+ low_fds_to_close.append(errpipe_write)
+ errpipe_write = os.dup(errpipe_write)
+ for low_fd in low_fds_to_close:
+ os.close(low_fd)
+ try:
+ try:
+ # We must avoid complex work that could involve
+ # malloc or free in the child process to avoid
+ # potential deadlocks, thus we do all this here
+ # and pass it to fork_exec().
+
+ if env is not None:
+ env_list = []
+ for k, v in env.items():
+ k = os.fsencode(k)
+ if b'=' in k:
+ raise ValueError("illegal environment variable name")
+ env_list.append(k + b'=' + os.fsencode(v))
+ else:
+ env_list = None # Use execv instead of execve.
+ executable = os.fsencode(executable)
+ if os.path.dirname(executable):
+ executable_list = (executable,)
+ else:
+ # This matches the behavior of os._execvpe().
+ executable_list = tuple(
+ os.path.join(os.fsencode(dir), executable)
+ for dir in os.get_exec_path(env))
+ fds_to_keep = set(pass_fds)
+ fds_to_keep.add(errpipe_write)
+ self.pid = _posixsubprocess.fork_exec(
+ args, executable_list,
+ close_fds, tuple(sorted(map(int, fds_to_keep))),
+ cwd, env_list,
+ p2cread, p2cwrite, c2pread, c2pwrite,
+ errread, errwrite,
+ errpipe_read, errpipe_write,
+ restore_signals, start_new_session,
+ gid, gids, uid, umask,
+ preexec_fn)
+ self._child_created = True
+ finally:
+ # be sure the FD is closed no matter what
+ os.close(errpipe_write)
+
+ self._close_pipe_fds(p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+ # Wait for exec to fail or succeed; possibly raising an
+ # exception (limited in size)
+ errpipe_data = bytearray()
+ while True:
+ part = os.read(errpipe_read, 50000)
+ errpipe_data += part
+ if not part or len(errpipe_data) > 50000:
+ break
+ finally:
+ # be sure the FD is closed no matter what
+ os.close(errpipe_read)
+
+ if errpipe_data:
+ try:
+ pid, sts = os.waitpid(self.pid, 0)
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ else:
+ self.returncode = sys.maxsize
+ except ChildProcessError:
+ pass
+
+ try:
+ exception_name, hex_errno, err_msg = (
+ errpipe_data.split(b':', 2))
+ # The encoding here should match the encoding
+ # written in by the subprocess implementations
+ # like _posixsubprocess
+ err_msg = err_msg.decode()
+ except ValueError:
+ exception_name = b'SubprocessError'
+ hex_errno = b'0'
+ err_msg = 'Bad exception data from child: {!r}'.format(
+ bytes(errpipe_data))
+ child_exception_type = getattr(
+ builtins, exception_name.decode('ascii'),
+ SubprocessError)
+ if issubclass(child_exception_type, OSError) and hex_errno:
+ errno_num = int(hex_errno, 16)
+ child_exec_never_called = (err_msg == "noexec")
+ if child_exec_never_called:
+ err_msg = ""
+ # The error must be from chdir(cwd).
+ err_filename = cwd
+ else:
+ err_filename = orig_executable
+ if errno_num != 0:
+ err_msg = os.strerror(errno_num)
+ raise child_exception_type(errno_num, err_msg, err_filename)
+ raise child_exception_type(err_msg)
+
+
+ def _handle_exitstatus(self, sts,
+ waitstatus_to_exitcode=os.waitstatus_to_exitcode,
+ _WIFSTOPPED=os.WIFSTOPPED,
+ _WSTOPSIG=os.WSTOPSIG):
+ """All callers to this function MUST hold self._waitpid_lock."""
+ # This method is called (indirectly) by __del__, so it cannot
+ # refer to anything outside of its local scope.
+ if _WIFSTOPPED(sts):
+ self.returncode = -_WSTOPSIG(sts)
+ else:
+ self.returncode = waitstatus_to_exitcode(sts)
+
+ def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
+ _WNOHANG=os.WNOHANG, _ECHILD=errno.ECHILD):
+ """Check if child process has terminated. Returns returncode
+ attribute.
+
+ This method is called by __del__, so it cannot reference anything
+ outside of the local scope (nor can any methods it calls).
+
+ """
+ if self.returncode is None:
+ if not self._waitpid_lock.acquire(False):
+ # Something else is busy calling waitpid. Don't allow two
+ # at once. We know nothing yet.
+ return None
+ try:
+ if self.returncode is not None:
+ return self.returncode # Another thread waited.
+ pid, sts = _waitpid(self.pid, _WNOHANG)
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ except OSError as e:
+ if _deadstate is not None:
+ self.returncode = _deadstate
+ elif e.errno == _ECHILD:
+ # This happens if SIGCLD is set to be ignored or
+ # waiting for child processes has otherwise been
+ # disabled for our process. This child is dead, we
+ # can't get the status.
+ # http://bugs.python.org/issue15756
+ self.returncode = 0
+ finally:
+ self._waitpid_lock.release()
+ return self.returncode
+
+
+ def _try_wait(self, wait_flags):
+ """All callers to this function MUST hold self._waitpid_lock."""
+ try:
+ (pid, sts) = os.waitpid(self.pid, wait_flags)
+ except ChildProcessError:
+ # This happens if SIGCLD is set to be ignored or waiting
+ # for child processes has otherwise been disabled for our
+ # process. This child is dead, we can't get the status.
+ pid = self.pid
+ sts = 0
+ return (pid, sts)
+
+
+ def _wait(self, timeout):
+ """Internal implementation of wait() on POSIX."""
+ if self.returncode is not None:
+ return self.returncode
+
+ if timeout is not None:
+ endtime = _time() + timeout
+ # Enter a busy loop if we have a timeout. This busy loop was
+ # cribbed from Lib/threading.py in Thread.wait() at r71065.
+ delay = 0.0005 # 500 us -> initial delay of 1 ms
+ while True:
+ if self._waitpid_lock.acquire(False):
+ try:
+ if self.returncode is not None:
+ break # Another thread waited.
+ (pid, sts) = self._try_wait(os.WNOHANG)
+ assert pid == self.pid or pid == 0
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ break
+ finally:
+ self._waitpid_lock.release()
+ remaining = self._remaining_time(endtime)
+ if remaining <= 0:
+ raise TimeoutExpired(self.args, timeout)
+ delay = min(delay * 2, remaining, .05)
+ time.sleep(delay)
+ else:
+ while self.returncode is None:
+ with self._waitpid_lock:
+ if self.returncode is not None:
+ break # Another thread waited.
+ (pid, sts) = self._try_wait(0)
+ # Check the pid and loop as waitpid has been known to
+ # return 0 even without WNOHANG in odd situations.
+ # http://bugs.python.org/issue14396.
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ return self.returncode
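+
+ # Backoff sketch for the timed path above: sleeps of 1 ms, 2 ms,
+ # 4 ms, ... capped at 50 ms and at the remaining timeout, so a
+ # short-lived child is reaped quickly without spinning.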
+
+
+ def _communicate(self, input, endtime, orig_timeout):
+ if self.stdin and not self._communication_started:
+ # Flush stdio buffer. This might block if the user has
+ # been writing to .stdin in an uncontrolled fashion.
+ try:
+ self.stdin.flush()
+ except BrokenPipeError:
+ pass # communicate() must ignore BrokenPipeError.
+ if not input:
+ try:
+ self.stdin.close()
+ except BrokenPipeError:
+ pass # communicate() must ignore BrokenPipeError.
+
+ stdout = None
+ stderr = None
+
+ # Only create this mapping if we haven't already.
+ if not self._communication_started:
+ self._fileobj2output = {}
+ if self.stdout:
+ self._fileobj2output[self.stdout] = []
+ if self.stderr:
+ self._fileobj2output[self.stderr] = []
+
+ if self.stdout:
+ stdout = self._fileobj2output[self.stdout]
+ if self.stderr:
+ stderr = self._fileobj2output[self.stderr]
+
+ self._save_input(input)
+
+ if self._input:
+ input_view = memoryview(self._input)
+
+ with _PopenSelector() as selector:
+ if self.stdin and input:
+ selector.register(self.stdin, selectors.EVENT_WRITE)
+ if self.stdout and not self.stdout.closed:
+ selector.register(self.stdout, selectors.EVENT_READ)
+ if self.stderr and not self.stderr.closed:
+ selector.register(self.stderr, selectors.EVENT_READ)
+
+ while selector.get_map():
+ timeout = self._remaining_time(endtime)
+ if timeout is not None and timeout < 0:
+ self._check_timeout(endtime, orig_timeout,
+ stdout, stderr,
+ skip_check_and_raise=True)
+ raise RuntimeError( # Impossible :)
+ '_check_timeout(..., skip_check_and_raise=True) '
+ 'failed to raise TimeoutExpired.')
+
+ ready = selector.select(timeout)
+ self._check_timeout(endtime, orig_timeout, stdout, stderr)
+
+ # XXX Rewrite these to use non-blocking I/O on the file
+ # objects; they are no longer using C stdio!
+
+ for key, events in ready:
+ if key.fileobj is self.stdin:
+ chunk = input_view[self._input_offset :
+ self._input_offset + _PIPE_BUF]
+ try:
+ self._input_offset += os.write(key.fd, chunk)
+ except BrokenPipeError:
+ selector.unregister(key.fileobj)
+ key.fileobj.close()
+ else:
+ if self._input_offset >= len(self._input):
+ selector.unregister(key.fileobj)
+ key.fileobj.close()
+ elif key.fileobj in (self.stdout, self.stderr):
+ data = os.read(key.fd, 32768)
+ if not data:
+ selector.unregister(key.fileobj)
+ key.fileobj.close()
+ self._fileobj2output[key.fileobj].append(data)
+
+ self.wait(timeout=self._remaining_time(endtime))
+
+ # All data exchanged. Translate lists into strings.
+ if stdout is not None:
+ stdout = b''.join(stdout)
+ if stderr is not None:
+ stderr = b''.join(stderr)
+
+ # Translate newlines, if requested.
+ # This also turns bytes into strings.
+ if self.text_mode:
+ if stdout is not None:
+ stdout = self._translate_newlines(stdout,
+ self.stdout.encoding,
+ self.stdout.errors)
+ if stderr is not None:
+ stderr = self._translate_newlines(stderr,
+ self.stderr.encoding,
+ self.stderr.errors)
+
+ return (stdout, stderr)
+
+
+ def _save_input(self, input):
+ # This method is called from the _communicate_with_*() methods
+ # so that if we time out while communicating, we can continue
+ # sending input if we retry.
+ if self.stdin and self._input is None:
+ self._input_offset = 0
+ self._input = input
+ if input is not None and self.text_mode:
+ self._input = self._input.encode(self.stdin.encoding,
+ self.stdin.errors)
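+
+ # Net effect (illustrative): in text mode the str payload is
+ # encoded exactly once here, so a communicate() call that times out
+ # and is retried resumes writing at self._input_offset instead of
+ # re-encoding the input.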
+
+
+ def send_signal(self, sig):
+ """Send a signal to the process."""
+ # bpo-38630: Polling reduces the risk of sending a signal to the
+ # wrong process if the process completed, the Popen.returncode
+ # attribute is still None, and the pid has been reassigned
+ # (recycled) to a different process. This race condition can
+ # happen in two cases.
+ #
+ # Case 1. Thread A calls Popen.poll(), thread B calls
+ # Popen.send_signal(). In thread A, waitpid() succeeds and returns
+ # the exit status. Thread B calls kill() because poll() in thread A
+ # did not set returncode yet. Calling poll() in thread B prevents
+ # the race condition thanks to Popen._waitpid_lock.
+ #
+ # Case 2. waitpid(pid, 0) has been called directly, without
+ # using Popen methods: returncode is still None in this case.
+ # Calling Popen.poll() will set returncode to a default value,
+ # since waitpid() fails with ProcessLookupError.
+ self.poll()
+ if self.returncode is not None:
+ # Skip signalling a process that we know has already died.
+ return
+
+ # The race condition can still happen if the race condition
+ # described above happens between the returncode test
+ # and the kill() call.
+ try:
+ os.kill(self.pid, sig)
+ except ProcessLookupError:
+ # Suppress the race condition error; bpo-40550.
+ pass
+
+ def terminate(self):
+ """Terminate the process with SIGTERM
+ """
+ self.send_signal(signal.SIGTERM)
+
+ def kill(self):
+ """Kill the process with SIGKILL
+ """
+ self.send_signal(signal.SIGKILL)
diff --git a/infer_4_37_2/lib/python3.10/tarfile.py b/infer_4_37_2/lib/python3.10/tarfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ab6811d63335b2d060505924e59aca070dc2d11
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/tarfile.py
@@ -0,0 +1,2921 @@
+#!/usr/bin/env python3
+#-------------------------------------------------------------------
+# tarfile.py
+#-------------------------------------------------------------------
+# Copyright (C) 2002 Lars Gustaebel
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use,
+# copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following
+# conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+"""Read from and write to tar format archives.
+"""
+
+version = "0.9.0"
+__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
+__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
+
+#---------
+# Imports
+#---------
+from builtins import open as bltn_open
+import sys
+import os
+import io
+import shutil
+import stat
+import time
+import struct
+import copy
+import re
+import warnings
+
+try:
+ import pwd
+except ImportError:
+ pwd = None
+try:
+ import grp
+except ImportError:
+ grp = None
+
+# os.symlink on Windows prior to 6.0 raises NotImplementedError
+symlink_exception = (AttributeError, NotImplementedError)
+try:
+ # OSError (winerror=1314) will be raised if the caller does not hold the
+ # SeCreateSymbolicLinkPrivilege privilege
+ symlink_exception += (OSError,)
+except NameError:
+ pass
+
+# from tarfile import *
+__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError",
+ "CompressionError", "StreamError", "ExtractError", "HeaderError",
+ "ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT",
+ "DEFAULT_FORMAT", "open"]
+
+
+#---------------------------------------------------------
+# tar constants
+#---------------------------------------------------------
+NUL = b"\0" # the null character
+BLOCKSIZE = 512 # length of processing blocks
+RECORDSIZE = BLOCKSIZE * 20 # length of records
+GNU_MAGIC = b"ustar \0" # magic gnu tar string
+POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
+
+LENGTH_NAME = 100 # maximum length of a filename
+LENGTH_LINK = 100 # maximum length of a linkname
+LENGTH_PREFIX = 155 # maximum length of the prefix field
+
+REGTYPE = b"0" # regular file
+AREGTYPE = b"\0" # regular file
+LNKTYPE = b"1" # link (inside tarfile)
+SYMTYPE = b"2" # symbolic link
+CHRTYPE = b"3" # character special device
+BLKTYPE = b"4" # block special device
+DIRTYPE = b"5" # directory
+FIFOTYPE = b"6" # fifo special device
+CONTTYPE = b"7" # contiguous file
+
+GNUTYPE_LONGNAME = b"L" # GNU tar longname
+GNUTYPE_LONGLINK = b"K" # GNU tar longlink
+GNUTYPE_SPARSE = b"S" # GNU tar sparse file
+
+XHDTYPE = b"x" # POSIX.1-2001 extended header
+XGLTYPE = b"g" # POSIX.1-2001 global header
+SOLARIS_XHDTYPE = b"X" # Solaris extended header
+
+USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
+GNU_FORMAT = 1 # GNU tar format
+PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
+DEFAULT_FORMAT = PAX_FORMAT
+
+#---------------------------------------------------------
+# tarfile constants
+#---------------------------------------------------------
+# File types that tarfile supports:
+SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
+ SYMTYPE, DIRTYPE, FIFOTYPE,
+ CONTTYPE, CHRTYPE, BLKTYPE,
+ GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
+ GNUTYPE_SPARSE)
+
+# File types that will be treated as a regular file.
+REGULAR_TYPES = (REGTYPE, AREGTYPE,
+ CONTTYPE, GNUTYPE_SPARSE)
+
+# File types that are part of the GNU tar format.
+GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
+ GNUTYPE_SPARSE)
+
+# Fields from a pax header that override a TarInfo attribute.
+PAX_FIELDS = ("path", "linkpath", "size", "mtime",
+ "uid", "gid", "uname", "gname")
+
+# Fields from a pax header that are affected by hdrcharset.
+PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"}
+
+# Fields in a pax header that are numbers, all other fields
+# are treated as strings.
+PAX_NUMBER_FIELDS = {
+ "atime": float,
+ "ctime": float,
+ "mtime": float,
+ "uid": int,
+ "gid": int,
+ "size": int
+}
+
+#---------------------------------------------------------
+# initialization
+#---------------------------------------------------------
+if os.name == "nt":
+ ENCODING = "utf-8"
+else:
+ ENCODING = sys.getfilesystemencoding()
+
+#---------------------------------------------------------
+# Some useful functions
+#---------------------------------------------------------
+
+def stn(s, length, encoding, errors):
+ """Convert a string to a null-terminated bytes object.
+ """
+ if s is None:
+ raise ValueError("metadata cannot contain None")
+ s = s.encode(encoding, errors)
+ return s[:length] + (length - len(s)) * NUL
+
+def nts(s, encoding, errors):
+ """Convert a null-terminated bytes object to a string.
+ """
+ p = s.find(b"\0")
+ if p != -1:
+ s = s[:p]
+ return s.decode(encoding, errors)
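+
+ # Round-trip example (illustrative):
+ # stn("foo", 8, "utf-8", "strict") == b"foo\x00\x00\x00\x00\x00"
+ # nts(b"foo\x00\x00\x00\x00\x00", "utf-8", "strict") == "foo"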
+
+def nti(s):
+ """Convert a number field to a python number.
+ """
+ # There are two possible encodings for a number field, see
+ # itn() below.
+ if s[0] in (0o200, 0o377):
+ n = 0
+ for i in range(len(s) - 1):
+ n <<= 8
+ n += s[i + 1]
+ if s[0] == 0o377:
+ n = -(256 ** (len(s) - 1) - n)
+ else:
+ try:
+ s = nts(s, "ascii", "strict")
+ n = int(s.strip() or "0", 8)
+ except ValueError:
+ raise InvalidHeaderError("invalid header")
+ return n
+
+def itn(n, digits=8, format=DEFAULT_FORMAT):
+ """Convert a python number to a number field.
+ """
+ # POSIX 1003.1-1988 requires numbers to be encoded as a string of
+ # octal digits followed by a null-byte, this allows values up to
+ # (8**(digits-1))-1. GNU tar allows storing numbers greater than
+ # that if necessary. A leading 0o200 or 0o377 byte indicate this
+ # particular encoding, the following digits-1 bytes are a big-endian
+ # base-256 representation. This allows values up to (256**(digits-1))-1.
+ # A 0o200 byte indicates a positive number, a 0o377 byte a negative
+ # number.
+ original_n = n
+ n = int(n)
+ if 0 <= n < 8 ** (digits - 1):
+ s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL
+ elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
+ if n >= 0:
+ s = bytearray([0o200])
+ else:
+ s = bytearray([0o377])
+ n = 256 ** digits + n
+
+ for i in range(digits - 1):
+ s.insert(1, n & 0o377)
+ n >>= 8
+ else:
+ raise ValueError("overflow in number field")
+
+ return s
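+
+ # Example of the two encodings (illustrative): itn(511) fits in
+ # octal and yields b"0000777\x00"; itn(8**7, format=GNU_FORMAT)
+ # does not, so it falls back to base-256: a 0o200 byte followed by
+ # seven big-endian digits.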
+
+def calc_chksums(buf):
+ """Calculate the checksum for a member's header by summing up all
+ characters except for the chksum field which is treated as if
+ it was filled with spaces. According to the GNU tar sources,
+ some tars (Sun and NeXT) calculate chksum with signed char,
+ which will be different if there are chars in the buffer with
+ the high bit set. So we calculate two checksums, unsigned and
+ signed.
+ """
+ unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf))
+ signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf))
+ return unsigned_chksum, signed_chksum
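+
+ # Note: the 256 added above is the contribution of the blanked-out
+ # chksum field itself, eight spaces at 0x20 each (8 * 32 == 256).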
+
+def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None):
+ """Copy length bytes from fileobj src to fileobj dst.
+ If length is None, copy the entire content.
+ """
+ bufsize = bufsize or 16 * 1024
+ if length == 0:
+ return
+ if length is None:
+ shutil.copyfileobj(src, dst, bufsize)
+ return
+
+ blocks, remainder = divmod(length, bufsize)
+ for b in range(blocks):
+ buf = src.read(bufsize)
+ if len(buf) < bufsize:
+ raise exception("unexpected end of data")
+ dst.write(buf)
+
+ if remainder != 0:
+ buf = src.read(remainder)
+ if len(buf) < remainder:
+ raise exception("unexpected end of data")
+ dst.write(buf)
+ return
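+
+ # Usage sketch: copy exactly one 512-byte header block and raise a
+ # tar-specific error on a short read:
+ # copyfileobj(src, dst, length=BLOCKSIZE, exception=ReadError)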
+
+def _safe_print(s):
+ encoding = getattr(sys.stdout, 'encoding', None)
+ if encoding is not None:
+ s = s.encode(encoding, 'backslashreplace').decode(encoding)
+ print(s, end=' ')
+
+
+class TarError(Exception):
+ """Base exception."""
+ pass
+class ExtractError(TarError):
+ """General exception for extract errors."""
+ pass
+class ReadError(TarError):
+ """Exception for unreadable tar archives."""
+ pass
+class CompressionError(TarError):
+ """Exception for unavailable compression methods."""
+ pass
+class StreamError(TarError):
+ """Exception for unsupported operations on stream-like TarFiles."""
+ pass
+class HeaderError(TarError):
+ """Base exception for header errors."""
+ pass
+class EmptyHeaderError(HeaderError):
+ """Exception for empty headers."""
+ pass
+class TruncatedHeaderError(HeaderError):
+ """Exception for truncated headers."""
+ pass
+class EOFHeaderError(HeaderError):
+ """Exception for end of file headers."""
+ pass
+class InvalidHeaderError(HeaderError):
+ """Exception for invalid headers."""
+ pass
+class SubsequentHeaderError(HeaderError):
+ """Exception for missing and invalid extended headers."""
+ pass
+
+#---------------------------
+# internal stream interface
+#---------------------------
+class _LowLevelFile:
+ """Low-level file object. Supports reading and writing.
+ It is used instead of a regular file object for streaming
+ access.
+ """
+
+ def __init__(self, name, mode):
+ mode = {
+ "r": os.O_RDONLY,
+ "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
+ }[mode]
+ if hasattr(os, "O_BINARY"):
+ mode |= os.O_BINARY
+ self.fd = os.open(name, mode, 0o666)
+
+ def close(self):
+ os.close(self.fd)
+
+ def read(self, size):
+ return os.read(self.fd, size)
+
+ def write(self, s):
+ os.write(self.fd, s)
+
+class _Stream:
+ """Class that serves as an adapter between TarFile and
+ a stream-like object. The stream-like object only
+ needs to have a read() or write() method and is accessed
+ blockwise. Use of gzip or bzip2 compression is possible.
+ A stream-like object could be for example: sys.stdin,
+ sys.stdout, a socket, a tape device etc.
+
+ _Stream is intended to be used only internally.
+ """
+
+ def __init__(self, name, mode, comptype, fileobj, bufsize):
+ """Construct a _Stream object.
+ """
+ self._extfileobj = True
+ if fileobj is None:
+ fileobj = _LowLevelFile(name, mode)
+ self._extfileobj = False
+
+ if comptype == '*':
+ # Enable transparent compression detection for the
+ # stream interface
+ fileobj = _StreamProxy(fileobj)
+ comptype = fileobj.getcomptype()
+
+ self.name = name or ""
+ self.mode = mode
+ self.comptype = comptype
+ self.fileobj = fileobj
+ self.bufsize = bufsize
+ self.buf = b""
+ self.pos = 0
+ self.closed = False
+
+ try:
+ if comptype == "gz":
+ try:
+ import zlib
+ except ImportError:
+ raise CompressionError("zlib module is not available") from None
+ self.zlib = zlib
+ self.crc = zlib.crc32(b"")
+ if mode == "r":
+ self._init_read_gz()
+ self.exception = zlib.error
+ else:
+ self._init_write_gz()
+
+ elif comptype == "bz2":
+ try:
+ import bz2
+ except ImportError:
+ raise CompressionError("bz2 module is not available") from None
+ if mode == "r":
+ self.dbuf = b""
+ self.cmp = bz2.BZ2Decompressor()
+ self.exception = OSError
+ else:
+ self.cmp = bz2.BZ2Compressor()
+
+ elif comptype == "xz":
+ try:
+ import lzma
+ except ImportError:
+ raise CompressionError("lzma module is not available") from None
+ if mode == "r":
+ self.dbuf = b""
+ self.cmp = lzma.LZMADecompressor()
+ self.exception = lzma.LZMAError
+ else:
+ self.cmp = lzma.LZMACompressor()
+
+ elif comptype != "tar":
+ raise CompressionError("unknown compression type %r" % comptype)
+
+ except:
+ if not self._extfileobj:
+ self.fileobj.close()
+ self.closed = True
+ raise
+
+ def __del__(self):
+ if hasattr(self, "closed") and not self.closed:
+ self.close()
+
+ def _init_write_gz(self):
+ """Initialize for writing with gzip compression.
+ """
+ self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
+ -self.zlib.MAX_WBITS,
+ self.zlib.DEF_MEM_LEVEL,
+ 0)
+ timestamp = struct.pack("<L", int(time.time()))
+ self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+ if self.name.endswith(".gz"):
+ self.name = self.name[:-3]
+ # Honor "directory components removed" from RFC 1952
+ self.name = os.path.basename(self.name)
+ # No filename blocks if the pathname is empty
+ if self.name:
+ self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+ def write(self, s):
+ """Write string s to the stream.
+ """
+ if self.comptype == "gz":
+ self.crc = self.zlib.crc32(s, self.crc)
+ self.pos += len(s)
+ if self.comptype != "tar":
+ s = self.cmp.compress(s)
+ self.__write(s)
+
+ def __write(self, s):
+ """Write string s to the stream if a whole new block
+ is ready to be written.
+ """
+ self.buf += s
+ while len(self.buf) > self.bufsize:
+ self.fileobj.write(self.buf[:self.bufsize])
+ self.buf = self.buf[self.bufsize:]
+
+ def close(self):
+ """Close the _Stream object. No operation should be
+ done on it afterwards.
+ """
+ if self.closed:
+ return
+
+ self.closed = True
+ try:
+ if self.mode == "w" and self.comptype != "tar":
+ self.buf += self.cmp.flush()
+
+ if self.mode == "w" and self.buf:
+ self.fileobj.write(self.buf)
+ self.buf = b""
+ if self.comptype == "gz":
+ self.fileobj.write(struct.pack("<L", self.crc))
+ self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
+ finally:
+ if not self._extfileobj:
+ self.fileobj.close()
+
+ def _init_read_gz(self):
+ """Initialize for reading a gzip compressed fileobj.
+ """
+ self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
+ self.dbuf = b""
+
+ # taken from gzip.GzipFile with some alterations
+ if self.__read(2) != b"\037\213":
+ raise ReadError("not a gzip file")
+ if self.__read(1) != b"\010":
+ raise CompressionError("unsupported compression method")
+
+ flag = ord(self.__read(1))
+ self.__read(6)
+
+ if flag & 4:
+ xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
+ self.read(xlen)
+ if flag & 8:
+ while True:
+ s = self.__read(1)
+ if not s or s == NUL:
+ break
+ if flag & 16:
+ while True:
+ s = self.__read(1)
+ if not s or s == NUL:
+ break
+ if flag & 2:
+ self.__read(2)
+
+ def tell(self):
+ """Return the stream's file pointer position.
+ """
+ return self.pos
+
+ def seek(self, pos=0):
+ """Set the stream's file pointer to pos. Negative seeking
+ is forbidden.
+ """
+ if pos - self.pos >= 0:
+ blocks, remainder = divmod(pos - self.pos, self.bufsize)
+ for i in range(blocks):
+ self.read(self.bufsize)
+ self.read(remainder)
+ else:
+ raise StreamError("seeking backwards is not allowed")
+ return self.pos
+
+ def read(self, size):
+ """Return the next size number of bytes from the stream."""
+ assert size is not None
+ buf = self._read(size)
+ self.pos += len(buf)
+ return buf
+
+ def _read(self, size):
+ """Return size bytes from the stream.
+ """
+ if self.comptype == "tar":
+ return self.__read(size)
+
+ c = len(self.dbuf)
+ t = [self.dbuf]
+ while c < size:
+ # Skip underlying buffer to avoid unaligned double buffering.
+ if self.buf:
+ buf = self.buf
+ self.buf = b""
+ else:
+ buf = self.fileobj.read(self.bufsize)
+ if not buf:
+ break
+ try:
+ buf = self.cmp.decompress(buf)
+ except self.exception as e:
+ raise ReadError("invalid compressed data") from e
+ t.append(buf)
+ c += len(buf)
+ t = b"".join(t)
+ self.dbuf = t[size:]
+ return t[:size]
+
+ def __read(self, size):
+ """Return size bytes from stream. If internal buffer is empty,
+ read another block from the stream.
+ """
+ c = len(self.buf)
+ t = [self.buf]
+ while c < size:
+ buf = self.fileobj.read(self.bufsize)
+ if not buf:
+ break
+ t.append(buf)
+ c += len(buf)
+ t = b"".join(t)
+ self.buf = t[size:]
+ return t[:size]
+# class _Stream
+
+class _StreamProxy(object):
+ """Small proxy class that enables transparent compression
+ detection for the Stream interface (mode 'r|*').
+ """
+
+ def __init__(self, fileobj):
+ self.fileobj = fileobj
+ self.buf = self.fileobj.read(BLOCKSIZE)
+
+ def read(self, size):
+ self.read = self.fileobj.read
+ return self.buf
+
+ def getcomptype(self):
+ if self.buf.startswith(b"\x1f\x8b\x08"):
+ return "gz"
+ elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY":
+ return "bz2"
+ elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")):
+ return "xz"
+ else:
+ return "tar"
+
+ def close(self):
+ self.fileobj.close()
+# class StreamProxy
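+
+ # Detection sketch: _StreamProxy(open("archive.tar.gz", "rb"))
+ # buffers the first BLOCKSIZE bytes, and getcomptype() returns "gz"
+ # on the b"\x1f\x8b\x08" magic, "bz2" or "xz" on theirs, and "tar"
+ # as the fallback.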
+
+#------------------------
+# Extraction file object
+#------------------------
+class _FileInFile(object):
+ """A thin wrapper around an existing file object that
+ provides a part of its data as an individual file
+ object.
+ """
+
+ def __init__(self, fileobj, offset, size, blockinfo=None):
+ self.fileobj = fileobj
+ self.offset = offset
+ self.size = size
+ self.position = 0
+ self.name = getattr(fileobj, "name", None)
+ self.closed = False
+
+ if blockinfo is None:
+ blockinfo = [(0, size)]
+
+ # Construct a map with data and zero blocks.
+ self.map_index = 0
+ self.map = []
+ lastpos = 0
+ realpos = self.offset
+ for offset, size in blockinfo:
+ if offset > lastpos:
+ self.map.append((False, lastpos, offset, None))
+ self.map.append((True, offset, offset + size, realpos))
+ realpos += size
+ lastpos = offset + size
+ if lastpos < self.size:
+ self.map.append((False, lastpos, self.size, None))
+
+ def flush(self):
+ pass
+
+ def readable(self):
+ return True
+
+ def writable(self):
+ return False
+
+ def seekable(self):
+ return self.fileobj.seekable()
+
+ def tell(self):
+ """Return the current file position.
+ """
+ return self.position
+
+ def seek(self, position, whence=io.SEEK_SET):
+ """Seek to a position in the file.
+ """
+ if whence == io.SEEK_SET:
+ self.position = min(max(position, 0), self.size)
+ elif whence == io.SEEK_CUR:
+ if position < 0:
+ self.position = max(self.position + position, 0)
+ else:
+ self.position = min(self.position + position, self.size)
+ elif whence == io.SEEK_END:
+ self.position = max(min(self.size + position, self.size), 0)
+ else:
+ raise ValueError("Invalid argument")
+ return self.position
+
+ def read(self, size=None):
+ """Read data from the file.
+ """
+ if size is None:
+ size = self.size - self.position
+ else:
+ size = min(size, self.size - self.position)
+
+ buf = b""
+ while size > 0:
+ while True:
+ data, start, stop, offset = self.map[self.map_index]
+ if start <= self.position < stop:
+ break
+ else:
+ self.map_index += 1
+ if self.map_index == len(self.map):
+ self.map_index = 0
+ length = min(size, stop - self.position)
+ if data:
+ self.fileobj.seek(offset + (self.position - start))
+ b = self.fileobj.read(length)
+ if len(b) != length:
+ raise ReadError("unexpected end of data")
+ buf += b
+ else:
+ buf += NUL * length
+ size -= length
+ self.position += length
+ return buf
+
+ def readinto(self, b):
+ buf = self.read(len(b))
+ b[:len(buf)] = buf
+ return len(buf)
+
+ def close(self):
+ self.closed = True
+#class _FileInFile
+
+class ExFileObject(io.BufferedReader):
+
+ def __init__(self, tarfile, tarinfo):
+ fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data,
+ tarinfo.size, tarinfo.sparse)
+ super().__init__(fileobj)
+#class ExFileObject
+
+
+#-----------------------------
+# extraction filters (PEP 706)
+#-----------------------------
+
+class FilterError(TarError):
+ pass
+
+class AbsolutePathError(FilterError):
+ def __init__(self, tarinfo):
+ self.tarinfo = tarinfo
+ super().__init__(f'member {tarinfo.name!r} has an absolute path')
+
+class OutsideDestinationError(FilterError):
+ def __init__(self, tarinfo, path):
+ self.tarinfo = tarinfo
+ self._path = path
+ super().__init__(f'{tarinfo.name!r} would be extracted to {path!r}, '
+ + 'which is outside the destination')
+
+class SpecialFileError(FilterError):
+ def __init__(self, tarinfo):
+ self.tarinfo = tarinfo
+ super().__init__(f'{tarinfo.name!r} is a special file')
+
+class AbsoluteLinkError(FilterError):
+ def __init__(self, tarinfo):
+ self.tarinfo = tarinfo
+ super().__init__(f'{tarinfo.name!r} is a link to an absolute path')
+
+class LinkOutsideDestinationError(FilterError):
+ def __init__(self, tarinfo, path):
+ self.tarinfo = tarinfo
+ self._path = path
+ super().__init__(f'{tarinfo.name!r} would link to {path!r}, '
+ + 'which is outside the destination')
+
+def _get_filtered_attrs(member, dest_path, for_data=True):
+ new_attrs = {}
+ name = member.name
+ dest_path = os.path.realpath(dest_path)
+ # Strip leading / (tar's directory separator) from filenames.
+ # Include os.sep (target OS directory separator) as well.
+ if name.startswith(('/', os.sep)):
+ name = new_attrs['name'] = member.path.lstrip('/' + os.sep)
+ if os.path.isabs(name):
+ # Path is absolute even after stripping.
+ # For example, 'C:/foo' on Windows.
+ raise AbsolutePathError(member)
+ # Ensure we stay in the destination
+ target_path = os.path.realpath(os.path.join(dest_path, name))
+ if os.path.commonpath([target_path, dest_path]) != dest_path:
+ raise OutsideDestinationError(member, target_path)
+ # Limit permissions (no high bits, and go-w)
+ mode = member.mode
+ if mode is not None:
+ # Strip high bits & group/other write bits
+ mode = mode & 0o755
+ if for_data:
+ # For data, handle permissions & file types
+ if member.isreg() or member.islnk():
+ if not mode & 0o100:
+ # Clear executable bits if not executable by user
+ mode &= ~0o111
+ # Ensure owner can read & write
+ mode |= 0o600
+ elif member.isdir() or member.issym():
+ # Ignore mode for directories & symlinks
+ mode = None
+ else:
+ # Reject special files
+ raise SpecialFileError(member)
+ if mode != member.mode:
+ new_attrs['mode'] = mode
+ if for_data:
+ # Ignore ownership for 'data'
+ if member.uid is not None:
+ new_attrs['uid'] = None
+ if member.gid is not None:
+ new_attrs['gid'] = None
+ if member.uname is not None:
+ new_attrs['uname'] = None
+ if member.gname is not None:
+ new_attrs['gname'] = None
+ # Check link destination for 'data'
+ if member.islnk() or member.issym():
+ if os.path.isabs(member.linkname):
+ raise AbsoluteLinkError(member)
+ if member.issym():
+ target_path = os.path.join(dest_path,
+ os.path.dirname(name),
+ member.linkname)
+ else:
+ target_path = os.path.join(dest_path,
+ member.linkname)
+ target_path = os.path.realpath(target_path)
+ if os.path.commonpath([target_path, dest_path]) != dest_path:
+ raise LinkOutsideDestinationError(member, target_path)
+ return new_attrs
+
+def fully_trusted_filter(member, dest_path):
+ return member
+
+def tar_filter(member, dest_path):
+ new_attrs = _get_filtered_attrs(member, dest_path, False)
+ if new_attrs:
+ return member.replace(**new_attrs, deep=False)
+ return member
+
+def data_filter(member, dest_path):
+ new_attrs = _get_filtered_attrs(member, dest_path, True)
+ if new_attrs:
+ return member.replace(**new_attrs, deep=False)
+ return member
+
+_NAMED_FILTERS = {
+ "fully_trusted": fully_trusted_filter,
+ "tar": tar_filter,
+ "data": data_filter,
+}
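+
+ # Typical use (through the extraction machinery later in this
+ # module): extractall(path, filter="data") resolves "data" via
+ # _NAMED_FILTERS and rejects absolute paths, members escaping the
+ # destination, and special files before anything is written.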
+
+#------------------
+# Exported Classes
+#------------------
+
+# Sentinel for replace() defaults, meaning "don't change the attribute"
+_KEEP = object()
+
+# Header length is digits followed by a space.
+_header_length_prefix_re = re.compile(br"([0-9]{1,20}) ")
+
+class TarInfo(object):
+ """Informational class which holds the details about an
+ archive member given by a tar header block.
+ TarInfo objects are returned by TarFile.getmember(),
+ TarFile.getmembers() and TarFile.gettarinfo() and are
+ usually created internally.
+ """
+
+ __slots__ = dict(
+ name = 'Name of the archive member.',
+ mode = 'Permission bits.',
+ uid = 'User ID of the user who originally stored this member.',
+ gid = 'Group ID of the user who originally stored this member.',
+ size = 'Size in bytes.',
+ mtime = 'Time of last modification.',
+ chksum = 'Header checksum.',
+ type = ('File type. type is usually one of these constants: '
+ 'REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, '
+ 'CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'),
+ linkname = ('Name of the target file name, which is only present '
+ 'in TarInfo objects of type LNKTYPE and SYMTYPE.'),
+ uname = 'User name.',
+ gname = 'Group name.',
+ devmajor = 'Device major number.',
+ devminor = 'Device minor number.',
+ offset = 'The tar header starts here.',
+ offset_data = "The file's data starts here.",
+ pax_headers = ('A dictionary containing key-value pairs of an '
+ 'associated pax extended header.'),
+ sparse = 'Sparse member information.',
+ tarfile = None,
+ _sparse_structs = None,
+ _link_target = None,
+ )
+
+ def __init__(self, name=""):
+ """Construct a TarInfo object. name is the optional name
+ of the member.
+ """
+ self.name = name # member name
+ self.mode = 0o644 # file permissions
+ self.uid = 0 # user id
+ self.gid = 0 # group id
+ self.size = 0 # file size
+ self.mtime = 0 # modification time
+ self.chksum = 0 # header checksum
+ self.type = REGTYPE # member type
+ self.linkname = "" # link name
+ self.uname = "" # user name
+ self.gname = "" # group name
+ self.devmajor = 0 # device major number
+ self.devminor = 0 # device minor number
+
+ self.offset = 0 # the tar header starts here
+ self.offset_data = 0 # the file's data starts here
+
+ self.sparse = None # sparse member information
+ self.pax_headers = {} # pax header information
+
+ @property
+ def path(self):
+ 'In pax headers, "name" is called "path".'
+ return self.name
+
+ @path.setter
+ def path(self, name):
+ self.name = name
+
+ @property
+ def linkpath(self):
+ 'In pax headers, "linkname" is called "linkpath".'
+ return self.linkname
+
+ @linkpath.setter
+ def linkpath(self, linkname):
+ self.linkname = linkname
+
+ def __repr__(self):
+ return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
+
+ def replace(self, *,
+ name=_KEEP, mtime=_KEEP, mode=_KEEP, linkname=_KEEP,
+ uid=_KEEP, gid=_KEEP, uname=_KEEP, gname=_KEEP,
+ deep=True, _KEEP=_KEEP):
+ """Return a deep copy of self with the given attributes replaced.
+ """
+ if deep:
+ result = copy.deepcopy(self)
+ else:
+ result = copy.copy(self)
+ if name is not _KEEP:
+ result.name = name
+ if mtime is not _KEEP:
+ result.mtime = mtime
+ if mode is not _KEEP:
+ result.mode = mode
+ if linkname is not _KEEP:
+ result.linkname = linkname
+ if uid is not _KEEP:
+ result.uid = uid
+ if gid is not _KEEP:
+ result.gid = gid
+ if uname is not _KEEP:
+ result.uname = uname
+ if gname is not _KEEP:
+ result.gname = gname
+ return result
+
+ def get_info(self):
+ """Return the TarInfo's attributes as a dictionary.
+ """
+ if self.mode is None:
+ mode = None
+ else:
+ mode = self.mode & 0o7777
+ info = {
+ "name": self.name,
+ "mode": mode,
+ "uid": self.uid,
+ "gid": self.gid,
+ "size": self.size,
+ "mtime": self.mtime,
+ "chksum": self.chksum,
+ "type": self.type,
+ "linkname": self.linkname,
+ "uname": self.uname,
+ "gname": self.gname,
+ "devmajor": self.devmajor,
+ "devminor": self.devminor
+ }
+
+ if info["type"] == DIRTYPE and not info["name"].endswith("/"):
+ info["name"] += "/"
+
+ return info
+
+ def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
+ """Return a tar header as a string of 512 byte blocks.
+ """
+ info = self.get_info()
+ for name, value in info.items():
+ if value is None:
+ raise ValueError("%s may not be None" % name)
+
+ if format == USTAR_FORMAT:
+ return self.create_ustar_header(info, encoding, errors)
+ elif format == GNU_FORMAT:
+ return self.create_gnu_header(info, encoding, errors)
+ elif format == PAX_FORMAT:
+ return self.create_pax_header(info, encoding)
+ else:
+ raise ValueError("invalid format")
+
+ def create_ustar_header(self, info, encoding, errors):
+ """Return the object as a ustar header block.
+ """
+ info["magic"] = POSIX_MAGIC
+
+ if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK:
+ raise ValueError("linkname is too long")
+
+ if len(info["name"].encode(encoding, errors)) > LENGTH_NAME:
+ info["prefix"], info["name"] = self._posix_split_name(info["name"], encoding, errors)
+
+ return self._create_header(info, USTAR_FORMAT, encoding, errors)
+
+ def create_gnu_header(self, info, encoding, errors):
+ """Return the object as a GNU header block sequence.
+ """
+ info["magic"] = GNU_MAGIC
+
+ buf = b""
+ if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK:
+ buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
+
+ if len(info["name"].encode(encoding, errors)) > LENGTH_NAME:
+ buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
+
+ return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
+
+ def create_pax_header(self, info, encoding):
+ """Return the object as a ustar header block. If it cannot be
+ represented this way, prepend a pax extended header sequence
+ with supplemental information.
+ """
+ info["magic"] = POSIX_MAGIC
+ pax_headers = self.pax_headers.copy()
+
+ # Test string fields for values that exceed the field length or cannot
+ # be represented in ASCII encoding.
+ for name, hname, length in (
+ ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
+ ("uname", "uname", 32), ("gname", "gname", 32)):
+
+ if hname in pax_headers:
+ # The pax header has priority.
+ continue
+
+ # Try to encode the string as ASCII.
+ try:
+ info[name].encode("ascii", "strict")
+ except UnicodeEncodeError:
+ pax_headers[hname] = info[name]
+ continue
+
+ if len(info[name]) > length:
+ pax_headers[hname] = info[name]
+
+ # Test number fields for values that exceed the field limit or that
+ # need to be stored as float.
+ for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
+ needs_pax = False
+
+ val = info[name]
+ val_is_float = isinstance(val, float)
+ val_int = round(val) if val_is_float else val
+ if not 0 <= val_int < 8 ** (digits - 1):
+ # Avoid overflow.
+ info[name] = 0
+ needs_pax = True
+ elif val_is_float:
+ # Put rounded value in ustar header, and full
+ # precision value in pax header.
+ info[name] = val_int
+ needs_pax = True
+
+ # The existing pax header has priority.
+ if needs_pax and name not in pax_headers:
+ pax_headers[name] = str(val)
+
+ # Create a pax extended header if necessary.
+ if pax_headers:
+ buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
+ else:
+ buf = b""
+
+ return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
+
+ @classmethod
+ def create_pax_global_header(cls, pax_headers):
+ """Return the object as a pax global header block sequence.
+ """
+ return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8")
+
+ def _posix_split_name(self, name, encoding, errors):
+ """Split a name longer than 100 chars into a prefix
+ and a name part.
+ """
+ components = name.split("/")
+ for i in range(1, len(components)):
+ prefix = "/".join(components[:i])
+ name = "/".join(components[i:])
+ if len(prefix.encode(encoding, errors)) <= LENGTH_PREFIX and \
+ len(name.encode(encoding, errors)) <= LENGTH_NAME:
+ break
+ else:
+ raise ValueError("name is too long")
+
+ return prefix, name
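+
+ # E.g. a 130-character path is split at the first "/" that leaves
+ # a name part of at most 100 bytes (the prefix must stay within 155
+ # bytes); if no component boundary satisfies both limits,
+ # ValueError is raised.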
+
+ @staticmethod
+ def _create_header(info, format, encoding, errors):
+ """Return a header block. info is a dictionary with file
+ information, format must be one of the *_FORMAT constants.
+ """
+ has_device_fields = info.get("type") in (CHRTYPE, BLKTYPE)
+ if has_device_fields:
+ devmajor = itn(info.get("devmajor", 0), 8, format)
+ devminor = itn(info.get("devminor", 0), 8, format)
+ else:
+ devmajor = stn("", 8, encoding, errors)
+ devminor = stn("", 8, encoding, errors)
+
+ # None values in metadata should cause ValueError.
+ # itn()/stn() do this for all fields except type.
+ filetype = info.get("type", REGTYPE)
+ if filetype is None:
+ raise ValueError("TarInfo.type must not be None")
+
+ parts = [
+ stn(info.get("name", ""), 100, encoding, errors),
+ itn(info.get("mode", 0) & 0o7777, 8, format),
+ itn(info.get("uid", 0), 8, format),
+ itn(info.get("gid", 0), 8, format),
+ itn(info.get("size", 0), 12, format),
+ itn(info.get("mtime", 0), 12, format),
+ b" ", # checksum field
+ filetype,
+ stn(info.get("linkname", ""), 100, encoding, errors),
+ info.get("magic", POSIX_MAGIC),
+ stn(info.get("uname", ""), 32, encoding, errors),
+ stn(info.get("gname", ""), 32, encoding, errors),
+ devmajor,
+ devminor,
+ stn(info.get("prefix", ""), 155, encoding, errors)
+ ]
+
+ buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
+ chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
+ buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
+ return buf
+
+ @staticmethod
+ def _create_payload(payload):
+ """Return the string payload filled with zero bytes
+ up to the next 512 byte border.
+ """
+ blocks, remainder = divmod(len(payload), BLOCKSIZE)
+ if remainder > 0:
+ payload += (BLOCKSIZE - remainder) * NUL
+ return payload
+
+ @classmethod
+ def _create_gnu_long_header(cls, name, type, encoding, errors):
+ """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
+ for name.
+ """
+ name = name.encode(encoding, errors) + NUL
+
+ info = {}
+ info["name"] = "././@LongLink"
+ info["type"] = type
+ info["size"] = len(name)
+ info["magic"] = GNU_MAGIC
+
+ # create extended header + name blocks.
+ return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
+ cls._create_payload(name)
+
+ @classmethod
+ def _create_pax_generic_header(cls, pax_headers, type, encoding):
+ """Return a POSIX.1-2008 extended or global header sequence
+ that contains a list of keyword, value pairs. The values
+ must be strings.
+ """
+ # Check if one of the fields contains surrogate characters and thereby
+ # forces hdrcharset=BINARY, see _proc_pax() for more information.
+ binary = False
+ for keyword, value in pax_headers.items():
+ try:
+ value.encode("utf-8", "strict")
+ except UnicodeEncodeError:
+ binary = True
+ break
+
+ records = b""
+ if binary:
+ # Put the hdrcharset field at the beginning of the header.
+ records += b"21 hdrcharset=BINARY\n"
+
+ for keyword, value in pax_headers.items():
+ keyword = keyword.encode("utf-8")
+ if binary:
+ # Try to restore the original byte representation of `value'.
+ # Needless to say, the encoding must match the string.
+ value = value.encode(encoding, "surrogateescape")
+ else:
+ value = value.encode("utf-8")
+
+ l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
+ n = p = 0
+ while True:
+ n = l + len(str(p))
+ if n == p:
+ break
+ p = n
+ records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
+
+ # We use a hardcoded "././@PaxHeader" name like star does
+ # instead of the one that POSIX recommends.
+ info = {}
+ info["name"] = "././@PaxHeader"
+ info["type"] = type
+ info["size"] = len(records)
+ info["magic"] = POSIX_MAGIC
+
+ # Create pax header + record blocks.
+ return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
+ cls._create_payload(records)
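+
+ # Worked example of the fixed-point loop above: for
+ # "hdrcharset=BINARY", l = 10 + 6 + 3 = 19, so p converges to 21
+ # and the record "21 hdrcharset=BINARY\n" is exactly 21 bytes.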
+
+ @classmethod
+ def frombuf(cls, buf, encoding, errors):
+ """Construct a TarInfo object from a 512 byte bytes object.
+ """
+ if len(buf) == 0:
+ raise EmptyHeaderError("empty header")
+ if len(buf) != BLOCKSIZE:
+ raise TruncatedHeaderError("truncated header")
+ if buf.count(NUL) == BLOCKSIZE:
+ raise EOFHeaderError("end of file header")
+
+ chksum = nti(buf[148:156])
+ if chksum not in calc_chksums(buf):
+ raise InvalidHeaderError("bad checksum")
+
+ obj = cls()
+ obj.name = nts(buf[0:100], encoding, errors)
+ obj.mode = nti(buf[100:108])
+ obj.uid = nti(buf[108:116])
+ obj.gid = nti(buf[116:124])
+ obj.size = nti(buf[124:136])
+ obj.mtime = nti(buf[136:148])
+ obj.chksum = chksum
+ obj.type = buf[156:157]
+ obj.linkname = nts(buf[157:257], encoding, errors)
+ obj.uname = nts(buf[265:297], encoding, errors)
+ obj.gname = nts(buf[297:329], encoding, errors)
+ obj.devmajor = nti(buf[329:337])
+ obj.devminor = nti(buf[337:345])
+ prefix = nts(buf[345:500], encoding, errors)
+
+ # Old V7 tar format represents a directory as a regular
+ # file with a trailing slash.
+ if obj.type == AREGTYPE and obj.name.endswith("/"):
+ obj.type = DIRTYPE
+
+ # The old GNU sparse format occupies some of the unused
+ # space in the buffer for up to 4 sparse structures.
+ # Save them for later processing in _proc_sparse().
+ if obj.type == GNUTYPE_SPARSE:
+ pos = 386
+ structs = []
+ for i in range(4):
+ try:
+ offset = nti(buf[pos:pos + 12])
+ numbytes = nti(buf[pos + 12:pos + 24])
+ except ValueError:
+ break
+ structs.append((offset, numbytes))
+ pos += 24
+ isextended = bool(buf[482])
+ origsize = nti(buf[483:495])
+ obj._sparse_structs = (structs, isextended, origsize)
+
+ # Remove redundant slashes from directories.
+ if obj.isdir():
+ obj.name = obj.name.rstrip("/")
+
+ # Reconstruct a ustar longname.
+ if prefix and obj.type not in GNU_TYPES:
+ obj.name = prefix + "/" + obj.name
+ return obj
+
+ @classmethod
+ def fromtarfile(cls, tarfile):
+ """Return the next TarInfo object from TarFile object
+ tarfile.
+ """
+ buf = tarfile.fileobj.read(BLOCKSIZE)
+ obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
+ obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
+ return obj._proc_member(tarfile)
+
+ #--------------------------------------------------------------------------
+ # The following are methods that are called depending on the type of a
+ # member. The entry point is _proc_member() which can be overridden in a
+ # subclass to add custom _proc_*() methods. A _proc_*() method MUST
+ # implement the following
+ # operations:
+ # 1. Set self.offset_data to the position where the data blocks begin,
+ # if there is data that follows.
+ # 2. Set tarfile.offset to the position where the next member's header will
+ # begin.
+ # 3. Return self or another valid TarInfo object.
+ def _proc_member(self, tarfile):
+ """Choose the right processing method depending on
+ the type and call it.
+ """
+ if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
+ return self._proc_gnulong(tarfile)
+ elif self.type == GNUTYPE_SPARSE:
+ return self._proc_sparse(tarfile)
+ elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
+ return self._proc_pax(tarfile)
+ else:
+ return self._proc_builtin(tarfile)
+
+ def _proc_builtin(self, tarfile):
+ """Process a builtin type or an unknown type which
+ will be treated as a regular file.
+ """
+ self.offset_data = tarfile.fileobj.tell()
+ offset = self.offset_data
+ if self.isreg() or self.type not in SUPPORTED_TYPES:
+ # Skip the following data blocks.
+ offset += self._block(self.size)
+ tarfile.offset = offset
+
+ # Patch the TarInfo object with saved global
+ # header information.
+ self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
+
+ # Remove redundant slashes from directories. This is to be consistent
+ # with frombuf().
+ if self.isdir():
+ self.name = self.name.rstrip("/")
+
+ return self
+
+ def _proc_gnulong(self, tarfile):
+ """Process the blocks that hold a GNU longname
+ or longlink member.
+ """
+ buf = tarfile.fileobj.read(self._block(self.size))
+
+ # Fetch the next header and process it.
+ try:
+ next = self.fromtarfile(tarfile)
+ except HeaderError as e:
+ raise SubsequentHeaderError(str(e)) from None
+
+ # Patch the TarInfo object from the next header with
+ # the longname information.
+ next.offset = self.offset
+ if self.type == GNUTYPE_LONGNAME:
+ next.name = nts(buf, tarfile.encoding, tarfile.errors)
+ elif self.type == GNUTYPE_LONGLINK:
+ next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
+
+ # Remove redundant slashes from directories. This is to be consistent
+ # with frombuf().
+ if next.isdir():
+ next.name = next.name.removesuffix("/")
+
+ return next
+
+ def _proc_sparse(self, tarfile):
+ """Process a GNU sparse header plus extra headers.
+ """
+ # We already collected some sparse structures in frombuf().
+ structs, isextended, origsize = self._sparse_structs
+ del self._sparse_structs
+
+ # Collect sparse structures from extended header blocks.
+ while isextended:
+ buf = tarfile.fileobj.read(BLOCKSIZE)
+ pos = 0
+ for i in range(21):
+ try:
+ offset = nti(buf[pos:pos + 12])
+ numbytes = nti(buf[pos + 12:pos + 24])
+ except ValueError:
+ break
+ if offset and numbytes:
+ structs.append((offset, numbytes))
+ pos += 24
+ isextended = bool(buf[504])
+ self.sparse = structs
+
+ self.offset_data = tarfile.fileobj.tell()
+ tarfile.offset = self.offset_data + self._block(self.size)
+ self.size = origsize
+ return self
+
+ def _proc_pax(self, tarfile):
+ """Process an extended or global header as described in
+ POSIX.1-2008.
+ """
+ # Read the header information.
+ buf = tarfile.fileobj.read(self._block(self.size))
+
+ # A pax header stores supplemental information for either
+ # the following file (extended) or all following files
+ # (global).
+ if self.type == XGLTYPE:
+ pax_headers = tarfile.pax_headers
+ else:
+ pax_headers = tarfile.pax_headers.copy()
+
+ # Parse pax header information. A record looks like that:
+ # "%d %s=%s\n" % (length, keyword, value). length is the size
+ # of the complete record including the length field itself and
+ # the newline.
+ pos = 0
+ encoding = None
+ raw_headers = []
+ while len(buf) > pos and buf[pos] != 0x00:
+ if not (match := _header_length_prefix_re.match(buf, pos)):
+ raise InvalidHeaderError("invalid header")
+ try:
+ length = int(match.group(1))
+ except ValueError:
+ raise InvalidHeaderError("invalid header")
+ # Headers must be at least 5 bytes, shortest being '5 x=\n'.
+ # Value is allowed to be empty.
+ if length < 5:
+ raise InvalidHeaderError("invalid header")
+ if pos + length > len(buf):
+ raise InvalidHeaderError("invalid header")
+
+ header_value_end_offset = match.start(1) + length - 1 # Last byte of the header
+ keyword_and_value = buf[match.end(1) + 1:header_value_end_offset]
+ raw_keyword, equals, raw_value = keyword_and_value.partition(b"=")
+
+ # Check the framing of the header. The last character must be '\n' (0x0A)
+ if not raw_keyword or equals != b"=" or buf[header_value_end_offset] != 0x0A:
+ raise InvalidHeaderError("invalid header")
+ raw_headers.append((length, raw_keyword, raw_value))
+
+ # Check if the pax header contains a hdrcharset field. This tells us
+ # the encoding of the path, linkpath, uname and gname fields. Normally,
+ # these fields are UTF-8 encoded, but since POSIX.1-2008, tar
+ # implementations have been allowed to store them as raw binary
+ # strings when the translation to UTF-8 fails. For the time being, we
+ # don't care about anything other than "BINARY". The only other value
+ # currently allowed by the standard is "ISO-IR 10646 2000 UTF-8", in
+ # other words UTF-8.
+ # Note that we only follow the initial 'hdrcharset' setting to preserve
+ # the initial behavior of the 'tarfile' module.
+ if raw_keyword == b"hdrcharset" and encoding is None:
+ if raw_value == b"BINARY":
+ encoding = tarfile.encoding
+ else: # This branch ensures only the first 'hdrcharset' header is used.
+ encoding = "utf-8"
+
+ pos += length
+
+ # If no explicit hdrcharset is set, we use UTF-8 as a default.
+ if encoding is None:
+ encoding = "utf-8"
+
+ # After parsing the raw headers we can decode them to text.
+ for length, raw_keyword, raw_value in raw_headers:
+ # Normally, we could just use "utf-8" as the encoding and "strict"
+ # as the error handler, but we better not take the risk. For
+ # example, GNU tar <= 1.23 is known to store filenames it cannot
+ # translate to UTF-8 as raw strings (unfortunately without a
+ # hdrcharset=BINARY header).
+ # We first try the strict standard encoding, and if that fails we
+ # fall back on the user's encoding and error handler.
+ keyword = self._decode_pax_field(raw_keyword, "utf-8", "utf-8",
+ tarfile.errors)
+ if keyword in PAX_NAME_FIELDS:
+ value = self._decode_pax_field(raw_value, encoding, tarfile.encoding,
+ tarfile.errors)
+ else:
+ value = self._decode_pax_field(raw_value, "utf-8", "utf-8",
+ tarfile.errors)
+
+ pax_headers[keyword] = value
+
+ # Fetch the next header.
+ try:
+ next = self.fromtarfile(tarfile)
+ except HeaderError as e:
+ raise SubsequentHeaderError(str(e)) from None
+
+ # Process GNU sparse information.
+ if "GNU.sparse.map" in pax_headers:
+ # GNU extended sparse format version 0.1.
+ self._proc_gnusparse_01(next, pax_headers)
+
+ elif "GNU.sparse.size" in pax_headers:
+ # GNU extended sparse format version 0.0.
+ self._proc_gnusparse_00(next, raw_headers)
+
+ elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
+ # GNU extended sparse format version 1.0.
+ self._proc_gnusparse_10(next, pax_headers, tarfile)
+
+ if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
+ # Patch the TarInfo object with the extended header info.
+ next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
+ next.offset = self.offset
+
+ if "size" in pax_headers:
+ # If the extended header replaces the size field,
+ # we need to recalculate the offset where the next
+ # header starts.
+ offset = next.offset_data
+ if next.isreg() or next.type not in SUPPORTED_TYPES:
+ offset += next._block(next.size)
+ tarfile.offset = offset
+
+ return next
+
+ def _proc_gnusparse_00(self, next, raw_headers):
+ """Process a GNU tar extended sparse header, version 0.0.
+ """
+ offsets = []
+ numbytes = []
+ for _, keyword, value in raw_headers:
+ if keyword == b"GNU.sparse.offset":
+ try:
+ offsets.append(int(value.decode()))
+ except ValueError:
+ raise InvalidHeaderError("invalid header")
+
+ elif keyword == b"GNU.sparse.numbytes":
+ try:
+ numbytes.append(int(value.decode()))
+ except ValueError:
+ raise InvalidHeaderError("invalid header")
+
+ next.sparse = list(zip(offsets, numbytes))
+
+ def _proc_gnusparse_01(self, next, pax_headers):
+ """Process a GNU tar extended sparse header, version 0.1.
+ """
+ sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
+ next.sparse = list(zip(sparse[::2], sparse[1::2]))
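+
+ # E.g. a "GNU.sparse.map" value of "0,512,10240,512" becomes
+ # [(0, 512), (10240, 512)]: two 512-byte data blocks at offsets 0
+ # and 10240.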
+
+ def _proc_gnusparse_10(self, next, pax_headers, tarfile):
+ """Process a GNU tar extended sparse header, version 1.0.
+ """
+ fields = None
+ sparse = []
+ buf = tarfile.fileobj.read(BLOCKSIZE)
+ fields, buf = buf.split(b"\n", 1)
+ fields = int(fields)
+ while len(sparse) < fields * 2:
+ if b"\n" not in buf:
+ buf += tarfile.fileobj.read(BLOCKSIZE)
+ number, buf = buf.split(b"\n", 1)
+ sparse.append(int(number))
+ next.offset_data = tarfile.fileobj.tell()
+ next.sparse = list(zip(sparse[::2], sparse[1::2]))
+
+ def _apply_pax_info(self, pax_headers, encoding, errors):
+ """Replace fields with supplemental information from a previous
+ pax extended or global header.
+ """
+ for keyword, value in pax_headers.items():
+ if keyword == "GNU.sparse.name":
+ setattr(self, "path", value)
+ elif keyword == "GNU.sparse.size":
+ setattr(self, "size", int(value))
+ elif keyword == "GNU.sparse.realsize":
+ setattr(self, "size", int(value))
+ elif keyword in PAX_FIELDS:
+ if keyword in PAX_NUMBER_FIELDS:
+ try:
+ value = PAX_NUMBER_FIELDS[keyword](value)
+ except ValueError:
+ value = 0
+ if keyword == "path":
+ value = value.rstrip("/")
+ setattr(self, keyword, value)
+
+ self.pax_headers = pax_headers.copy()
+
+ def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
+ """Decode a single field from a pax record.
+ """
+ try:
+ return value.decode(encoding, "strict")
+ except UnicodeDecodeError:
+ return value.decode(fallback_encoding, fallback_errors)
+
+ def _block(self, count):
+ """Round up a byte count by BLOCKSIZE and return it,
+ e.g. _block(834) => 1024.
+ """
+ blocks, remainder = divmod(count, BLOCKSIZE)
+ if remainder:
+ blocks += 1
+ return blocks * BLOCKSIZE
+
+ def isreg(self):
+ 'Return True if the Tarinfo object is a regular file.'
+ return self.type in REGULAR_TYPES
+
+ def isfile(self):
+ 'Return True if the Tarinfo object is a regular file.'
+ return self.isreg()
+
+ def isdir(self):
+ 'Return True if it is a directory.'
+ return self.type == DIRTYPE
+
+ def issym(self):
+ 'Return True if it is a symbolic link.'
+ return self.type == SYMTYPE
+
+ def islnk(self):
+ 'Return True if it is a hard link.'
+ return self.type == LNKTYPE
+
+ def ischr(self):
+ 'Return True if it is a character device.'
+ return self.type == CHRTYPE
+
+ def isblk(self):
+ 'Return True if it is a block device.'
+ return self.type == BLKTYPE
+
+ def isfifo(self):
+ 'Return True if it is a FIFO.'
+ return self.type == FIFOTYPE
+
+ def issparse(self):
+ return self.sparse is not None
+
+ def isdev(self):
+ 'Return True if it is one of character device, block device or FIFO.'
+ return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
+# class TarInfo
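+
+ # Minimal usage sketch: t = TarInfo("data/blob.bin"); t.size = 1024;
+ # t.tobuf() then returns the member's header as one or more
+ # 512-byte blocks in the default pax format.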
+
+class TarFile(object):
+ """The TarFile Class provides an interface to tar archives.
+ """
+
+ debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
+
+ dereference = False # If true, add content of linked file to the
+ # tar file, else the link.
+
+ ignore_zeros = False # If true, skips empty or invalid blocks and
+ # continues processing.
+
+ errorlevel = 1 # If 0, fatal errors only appear in debug
+ # messages (if debug >= 0). If > 0, errors
+ # are passed to the caller as exceptions.
+
+ format = DEFAULT_FORMAT # The format to use when creating an archive.
+
+ encoding = ENCODING # Encoding for 8-bit character strings.
+
+ errors = None # Error handler for unicode conversion.
+
+ tarinfo = TarInfo # The default TarInfo class to use.
+
+ fileobject = ExFileObject # The file-object for extractfile().
+
+ extraction_filter = None # The default filter for extraction.
+
+ def __init__(self, name=None, mode="r", fileobj=None, format=None,
+ tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
+ errors="surrogateescape", pax_headers=None, debug=None,
+ errorlevel=None, copybufsize=None):
+ """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
+ read from an existing archive, 'a' to append data to an existing
+ file or 'w' to create a new file overwriting an existing one. `mode'
+ defaults to 'r'.
+ If `fileobj' is given, it is used for reading or writing data. If it
+ can be determined, `mode' is overridden by `fileobj's mode.
+ `fileobj' is not closed, when TarFile is closed.
+ """
+ modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"}
+ if mode not in modes:
+ raise ValueError("mode must be 'r', 'a', 'w' or 'x'")
+ self.mode = mode
+ self._mode = modes[mode]
+
+ if not fileobj:
+ if self.mode == "a" and not os.path.exists(name):
+ # Create nonexistent files in append mode.
+ self.mode = "w"
+ self._mode = "wb"
+ fileobj = bltn_open(name, self._mode)
+ self._extfileobj = False
+ else:
+ if (name is None and hasattr(fileobj, "name") and
+ isinstance(fileobj.name, (str, bytes))):
+ name = fileobj.name
+ if hasattr(fileobj, "mode"):
+ self._mode = fileobj.mode
+ self._extfileobj = True
+ self.name = os.path.abspath(name) if name else None
+ self.fileobj = fileobj
+
+ # Init attributes.
+ if format is not None:
+ self.format = format
+ if tarinfo is not None:
+ self.tarinfo = tarinfo
+ if dereference is not None:
+ self.dereference = dereference
+ if ignore_zeros is not None:
+ self.ignore_zeros = ignore_zeros
+ if encoding is not None:
+ self.encoding = encoding
+ self.errors = errors
+
+ if pax_headers is not None and self.format == PAX_FORMAT:
+ self.pax_headers = pax_headers
+ else:
+ self.pax_headers = {}
+
+ if debug is not None:
+ self.debug = debug
+ if errorlevel is not None:
+ self.errorlevel = errorlevel
+
+ # Init datastructures.
+ self.copybufsize = copybufsize
+ self.closed = False
+ self.members = [] # list of members as TarInfo objects
+ self._loaded = False # flag if all members have been read
+ self.offset = self.fileobj.tell()
+ # current position in the archive file
+ self.inodes = {} # dictionary caching the inodes of
+ # archive members already added
+
+ try:
+ if self.mode == "r":
+ self.firstmember = None
+ self.firstmember = self.next()
+
+ if self.mode == "a":
+ # Move to the end of the archive,
+ # before the first empty block.
+ while True:
+ self.fileobj.seek(self.offset)
+ try:
+ tarinfo = self.tarinfo.fromtarfile(self)
+ self.members.append(tarinfo)
+ except EOFHeaderError:
+ self.fileobj.seek(self.offset)
+ break
+ except HeaderError as e:
+ raise ReadError(str(e)) from None
+
+ if self.mode in ("a", "w", "x"):
+ self._loaded = True
+
+ if self.pax_headers:
+ buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
+ self.fileobj.write(buf)
+ self.offset += len(buf)
+ except:
+ if not self._extfileobj:
+ self.fileobj.close()
+ self.closed = True
+ raise
+
+ #--------------------------------------------------------------------------
+ # Below are the classmethods which act as alternate constructors to the
+ # TarFile class. The open() method is the only one that is needed for
+ # public use; it is the "super"-constructor and is able to select an
+ # adequate "sub"-constructor for a particular compression using the mapping
+ # from OPEN_METH.
+ #
+ # This concept allows one to subclass TarFile without losing the comfort of
+ # the super-constructor. A sub-constructor is registered and made available
+ # by adding it to the mapping in OPEN_METH.
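+ #
+ # Illustrative sketch (not part of the stdlib source): a subclass could
+ # register a hypothetical zstd opener like this:
+ #
+ #     class ZstdTarFile(TarFile):
+ #         @classmethod
+ #         def zstopen(cls, name, mode="r", fileobj=None, **kwargs):
+ #             ...  # wrap fileobj in a zstd stream, then call cls.taropen()
+ #         OPEN_METH = {**TarFile.OPEN_METH, "zst": "zstopen"}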
+
+ @classmethod
+ def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
+ """Open a tar archive for reading, writing or appending. Return
+ an appropriate TarFile class.
+
+ mode:
+ 'r' or 'r:*' open for reading with transparent compression
+ 'r:' open for reading exclusively uncompressed
+ 'r:gz' open for reading with gzip compression
+ 'r:bz2' open for reading with bzip2 compression
+ 'r:xz' open for reading with lzma compression
+ 'a' or 'a:' open for appending, creating the file if necessary
+ 'w' or 'w:' open for writing without compression
+ 'w:gz' open for writing with gzip compression
+ 'w:bz2' open for writing with bzip2 compression
+ 'w:xz' open for writing with lzma compression
+
+ 'x' or 'x:' create a tarfile exclusively without compression, raise
+ an exception if the file is already created
+ 'x:gz' create a gzip compressed tarfile, raise an exception
+ if the file is already created
+ 'x:bz2' create a bzip2 compressed tarfile, raise an exception
+ if the file is already created
+ 'x:xz' create an lzma compressed tarfile, raise an exception
+ if the file is already created
+
+ 'r|*' open a stream of tar blocks with transparent compression
+ 'r|' open an uncompressed stream of tar blocks for reading
+ 'r|gz' open a gzip compressed stream of tar blocks
+ 'r|bz2' open a bzip2 compressed stream of tar blocks
+ 'r|xz' open an lzma compressed stream of tar blocks
+ 'w|' open an uncompressed stream for writing
+ 'w|gz' open a gzip compressed stream for writing
+ 'w|bz2' open a bzip2 compressed stream for writing
+ 'w|xz' open an lzma compressed stream for writing
+ """
+
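+ # Illustrative examples (not part of the stdlib source):
+ #
+ #     TarFile.open("data.tar.gz", "r:gz")      # seekable gzip archive
+ #     TarFile.open(fileobj=pipe, mode="r|gz")  # same format from a
+ #                                              # non-seekable stream ('pipe'
+ #                                              # is any readable binary
+ #                                              # file object)
+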
+ if not name and not fileobj:
+ raise ValueError("nothing to open")
+
+ if mode in ("r", "r:*"):
+ # Find out which *open() is appropriate for opening the file.
+ def not_compressed(comptype):
+ return cls.OPEN_METH[comptype] == 'taropen'
+ error_msgs = []
+ for comptype in sorted(cls.OPEN_METH, key=not_compressed):
+ func = getattr(cls, cls.OPEN_METH[comptype])
+ if fileobj is not None:
+ saved_pos = fileobj.tell()
+ try:
+ return func(name, "r", fileobj, **kwargs)
+ except (ReadError, CompressionError) as e:
+ error_msgs.append(f'- method {comptype}: {e!r}')
+ if fileobj is not None:
+ fileobj.seek(saved_pos)
+ continue
+ error_msgs_summary = '\n'.join(error_msgs)
+ raise ReadError(f"file could not be opened successfully:\n{error_msgs_summary}")
+
+ elif ":" in mode:
+ filemode, comptype = mode.split(":", 1)
+ filemode = filemode or "r"
+ comptype = comptype or "tar"
+
+ # Select the *open() function according to
+ # given compression.
+ if comptype in cls.OPEN_METH:
+ func = getattr(cls, cls.OPEN_METH[comptype])
+ else:
+ raise CompressionError("unknown compression type %r" % comptype)
+ return func(name, filemode, fileobj, **kwargs)
+
+ elif "|" in mode:
+ filemode, comptype = mode.split("|", 1)
+ filemode = filemode or "r"
+ comptype = comptype or "tar"
+
+ if filemode not in ("r", "w"):
+ raise ValueError("mode must be 'r' or 'w'")
+
+ stream = _Stream(name, filemode, comptype, fileobj, bufsize)
+ try:
+ t = cls(name, filemode, stream, **kwargs)
+ except:
+ stream.close()
+ raise
+ t._extfileobj = False
+ return t
+
+ elif mode in ("a", "w", "x"):
+ return cls.taropen(name, mode, fileobj, **kwargs)
+
+ raise ValueError("undiscernible mode")
+
+ @classmethod
+ def taropen(cls, name, mode="r", fileobj=None, **kwargs):
+ """Open uncompressed tar archive name for reading or writing.
+ """
+ if mode not in ("r", "a", "w", "x"):
+ raise ValueError("mode must be 'r', 'a', 'w' or 'x'")
+ return cls(name, mode, fileobj, **kwargs)
+
+ @classmethod
+ def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
+ """Open gzip compressed tar archive name for reading or writing.
+ Appending is not allowed.
+ """
+ if mode not in ("r", "w", "x"):
+ raise ValueError("mode must be 'r', 'w' or 'x'")
+
+ try:
+ from gzip import GzipFile
+ except ImportError:
+ raise CompressionError("gzip module is not available") from None
+
+ try:
+ fileobj = GzipFile(name, mode + "b", compresslevel, fileobj)
+ except OSError as e:
+ if fileobj is not None and mode == 'r':
+ raise ReadError("not a gzip file") from e
+ raise
+
+ try:
+ t = cls.taropen(name, mode, fileobj, **kwargs)
+ except OSError as e:
+ fileobj.close()
+ if mode == 'r':
+ raise ReadError("not a gzip file") from e
+ raise
+ except:
+ fileobj.close()
+ raise
+ t._extfileobj = False
+ return t
+
+ @classmethod
+ def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
+ """Open bzip2 compressed tar archive name for reading or writing.
+ Appending is not allowed.
+ """
+ if mode not in ("r", "w", "x"):
+ raise ValueError("mode must be 'r', 'w' or 'x'")
+
+ try:
+ from bz2 import BZ2File
+ except ImportError:
+ raise CompressionError("bz2 module is not available") from None
+
+ fileobj = BZ2File(fileobj or name, mode, compresslevel=compresslevel)
+
+ try:
+ t = cls.taropen(name, mode, fileobj, **kwargs)
+ except (OSError, EOFError) as e:
+ fileobj.close()
+ if mode == 'r':
+ raise ReadError("not a bzip2 file") from e
+ raise
+ except:
+ fileobj.close()
+ raise
+ t._extfileobj = False
+ return t
+
+ @classmethod
+ def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs):
+ """Open lzma compressed tar archive name for reading or writing.
+ Appending is not allowed.
+ """
+ if mode not in ("r", "w", "x"):
+ raise ValueError("mode must be 'r', 'w' or 'x'")
+
+ try:
+ from lzma import LZMAFile, LZMAError
+ except ImportError:
+ raise CompressionError("lzma module is not available") from None
+
+ fileobj = LZMAFile(fileobj or name, mode, preset=preset)
+
+ try:
+ t = cls.taropen(name, mode, fileobj, **kwargs)
+ except (LZMAError, EOFError) as e:
+ fileobj.close()
+ if mode == 'r':
+ raise ReadError("not an lzma file") from e
+ raise
+ except:
+ fileobj.close()
+ raise
+ t._extfileobj = False
+ return t
+
+ # All *open() methods are registered here.
+ OPEN_METH = {
+ "tar": "taropen", # uncompressed tar
+ "gz": "gzopen", # gzip compressed tar
+ "bz2": "bz2open", # bzip2 compressed tar
+ "xz": "xzopen" # lzma compressed tar
+ }
+
+ #--------------------------------------------------------------------------
+ # The public methods which TarFile provides:
+
+ def close(self):
+ """Close the TarFile. In write-mode, two finishing zero blocks are
+ appended to the archive.
+ """
+ if self.closed:
+ return
+
+ self.closed = True
+ try:
+ if self.mode in ("a", "w", "x"):
+ self.fileobj.write(NUL * (BLOCKSIZE * 2))
+ self.offset += (BLOCKSIZE * 2)
+ # fill up the end with zero-blocks
+ # (like option -b20 for tar does)
+ blocks, remainder = divmod(self.offset, RECORDSIZE)
+ if remainder > 0:
+ self.fileobj.write(NUL * (RECORDSIZE - remainder))
+ finally:
+ if not self._extfileobj:
+ self.fileobj.close()
+
+ def getmember(self, name):
+ """Return a TarInfo object for member `name'. If `name' can not be
+ found in the archive, KeyError is raised. If a member occurs more
+ than once in the archive, its last occurrence is assumed to be the
+ most up-to-date version.
+ """
+ tarinfo = self._getmember(name.rstrip('/'))
+ if tarinfo is None:
+ raise KeyError("filename %r not found" % name)
+ return tarinfo
+
+ def getmembers(self):
+ """Return the members of the archive as a list of TarInfo objects. The
+ list has the same order as the members in the archive.
+ """
+ self._check()
+ if not self._loaded: # if we want to obtain a list of
+ self._load() # all members, we first have to
+ # scan the whole archive.
+ return self.members
+
+ def getnames(self):
+ """Return the members of the archive as a list of their names. It has
+ the same order as the list returned by getmembers().
+ """
+ return [tarinfo.name for tarinfo in self.getmembers()]
+
+ def gettarinfo(self, name=None, arcname=None, fileobj=None):
+ """Create a TarInfo object from the result of os.stat or equivalent
+ on an existing file. The file is either named by `name', or
+ specified as a file object `fileobj' with a file descriptor. If
+ given, `arcname' specifies an alternative name for the file in the
+ archive, otherwise, the name is taken from the 'name' attribute of
+ 'fileobj', or the 'name' argument. The name should be a text
+ string.
+ """
+ self._check("awx")
+
+ # When fileobj is given, replace name by
+ # fileobj's real name.
+ if fileobj is not None:
+ name = fileobj.name
+
+ # Build the name of the member in the archive.
+ # Backward slashes are converted to forward slashes;
+ # absolute paths are turned into relative paths.
+ if arcname is None:
+ arcname = name
+ drv, arcname = os.path.splitdrive(arcname)
+ arcname = arcname.replace(os.sep, "/")
+ arcname = arcname.lstrip("/")
+
+ # Now, fill the TarInfo object with
+ # information specific for the file.
+ tarinfo = self.tarinfo()
+ tarinfo.tarfile = self # Not needed
+
+ # Use os.stat or os.lstat, depending on if symlinks shall be resolved.
+ if fileobj is None:
+ if not self.dereference:
+ statres = os.lstat(name)
+ else:
+ statres = os.stat(name)
+ else:
+ statres = os.fstat(fileobj.fileno())
+ linkname = ""
+
+ stmd = statres.st_mode
+ if stat.S_ISREG(stmd):
+ inode = (statres.st_ino, statres.st_dev)
+ if not self.dereference and statres.st_nlink > 1 and \
+ inode in self.inodes and arcname != self.inodes[inode]:
+ # Is it a hardlink to an already
+ # archived file?
+ type = LNKTYPE
+ linkname = self.inodes[inode]
+ else:
+ # The inode is added only if it's valid.
+ # For win32 it is always 0.
+ type = REGTYPE
+ if inode[0]:
+ self.inodes[inode] = arcname
+ elif stat.S_ISDIR(stmd):
+ type = DIRTYPE
+ elif stat.S_ISFIFO(stmd):
+ type = FIFOTYPE
+ elif stat.S_ISLNK(stmd):
+ type = SYMTYPE
+ linkname = os.readlink(name)
+ elif stat.S_ISCHR(stmd):
+ type = CHRTYPE
+ elif stat.S_ISBLK(stmd):
+ type = BLKTYPE
+ else:
+ return None
+
+ # Fill the TarInfo object with all
+ # information we can get.
+ tarinfo.name = arcname
+ tarinfo.mode = stmd
+ tarinfo.uid = statres.st_uid
+ tarinfo.gid = statres.st_gid
+ if type == REGTYPE:
+ tarinfo.size = statres.st_size
+ else:
+ tarinfo.size = 0
+ tarinfo.mtime = statres.st_mtime
+ tarinfo.type = type
+ tarinfo.linkname = linkname
+ if pwd:
+ try:
+ tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
+ except KeyError:
+ pass
+ if grp:
+ try:
+ tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
+ except KeyError:
+ pass
+
+ if type in (CHRTYPE, BLKTYPE):
+ if hasattr(os, "major") and hasattr(os, "minor"):
+ tarinfo.devmajor = os.major(statres.st_rdev)
+ tarinfo.devminor = os.minor(statres.st_rdev)
+ return tarinfo
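+
+ # Illustrative sketch (not part of the stdlib source): store a file under
+ # a different name in the archive ('tf' is a TarFile open for writing):
+ #
+ #     info = tf.gettarinfo("report.txt", arcname="docs/report.txt")
+ #     with bltn_open("report.txt", "rb") as f:  # bltn_open is builtins.open
+ #         tf.addfile(info, f)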
+
+ def list(self, verbose=True, *, members=None):
+ """Print a table of contents to sys.stdout. If `verbose' is False, only
+ the names of the members are printed. If it is True, an `ls -l'-like
+ output is produced. `members' is optional and must be a subset of the
+ list returned by getmembers().
+ """
+ self._check()
+
+ if members is None:
+ members = self
+ for tarinfo in members:
+ if verbose:
+ if tarinfo.mode is None:
+ _safe_print("??????????")
+ else:
+ _safe_print(stat.filemode(tarinfo.mode))
+ _safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid,
+ tarinfo.gname or tarinfo.gid))
+ if tarinfo.ischr() or tarinfo.isblk():
+ _safe_print("%10s" %
+ ("%d,%d" % (tarinfo.devmajor, tarinfo.devminor)))
+ else:
+ _safe_print("%10d" % tarinfo.size)
+ if tarinfo.mtime is None:
+ _safe_print("????-??-?? ??:??:??")
+ else:
+ _safe_print("%d-%02d-%02d %02d:%02d:%02d" \
+ % time.localtime(tarinfo.mtime)[:6])
+
+ _safe_print(tarinfo.name + ("/" if tarinfo.isdir() else ""))
+
+ if verbose:
+ if tarinfo.issym():
+ _safe_print("-> " + tarinfo.linkname)
+ if tarinfo.islnk():
+ _safe_print("link to " + tarinfo.linkname)
+ print()
+
+ def add(self, name, arcname=None, recursive=True, *, filter=None):
+ """Add the file `name' to the archive. `name' may be any type of file
+ (directory, fifo, symbolic link, etc.). If given, `arcname'
+ specifies an alternative name for the file in the archive.
+ Directories are added recursively by default. This can be avoided by
+ setting `recursive' to False. `filter' is a function
+ that expects a TarInfo object argument and returns the changed
+ TarInfo object, if it returns None the TarInfo object will be
+ excluded from the archive.
+ """
+ self._check("awx")
+
+ if arcname is None:
+ arcname = name
+
+ # Skip if somebody tries to archive the archive...
+ if self.name is not None and os.path.abspath(name) == self.name:
+ self._dbg(2, "tarfile: Skipped %r" % name)
+ return
+
+ self._dbg(1, name)
+
+ # Create a TarInfo object from the file.
+ tarinfo = self.gettarinfo(name, arcname)
+
+ if tarinfo is None:
+ self._dbg(1, "tarfile: Unsupported type %r" % name)
+ return
+
+ # Change or exclude the TarInfo object.
+ if filter is not None:
+ tarinfo = filter(tarinfo)
+ if tarinfo is None:
+ self._dbg(2, "tarfile: Excluded %r" % name)
+ return
+
+ # Append the tar header and data to the archive.
+ if tarinfo.isreg():
+ with bltn_open(name, "rb") as f:
+ self.addfile(tarinfo, f)
+
+ elif tarinfo.isdir():
+ self.addfile(tarinfo)
+ if recursive:
+ for f in sorted(os.listdir(name)):
+ self.add(os.path.join(name, f), os.path.join(arcname, f),
+ recursive, filter=filter)
+
+ else:
+ self.addfile(tarinfo)
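+
+ # Illustrative sketch (not part of the stdlib source): use the `filter'
+ # callback to skip unwanted files while adding a tree:
+ #
+ #     def skip_pyc(ti):
+ #         return None if ti.name.endswith(".pyc") else ti
+ #
+ #     tf.add("src", filter=skip_pyc)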
+
+ def addfile(self, tarinfo, fileobj=None):
+ """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
+ given, it should be a binary file, and tarinfo.size bytes are read
+ from it and added to the archive. You can create TarInfo objects
+ directly, or by using gettarinfo().
+ """
+ self._check("awx")
+
+ tarinfo = copy.copy(tarinfo)
+
+ buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
+ self.fileobj.write(buf)
+ self.offset += len(buf)
+ bufsize = self.copybufsize
+ # If there's data to follow, append it.
+ if fileobj is not None:
+ copyfileobj(fileobj, self.fileobj, tarinfo.size, bufsize=bufsize)
+ blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
+ if remainder > 0:
+ self.fileobj.write(NUL * (BLOCKSIZE - remainder))
+ blocks += 1
+ self.offset += blocks * BLOCKSIZE
+
+ self.members.append(tarinfo)
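+
+ # Illustrative sketch (not part of the stdlib source): addfile() with a
+ # hand-built TarInfo archives in-memory data (requires `import io'):
+ #
+ #     data = b"hello"
+ #     info = TarInfo("notes.txt")
+ #     info.size = len(data)
+ #     tf.addfile(info, io.BytesIO(data))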
+
+ def _get_filter_function(self, filter):
+ if filter is None:
+ filter = self.extraction_filter
+ if filter is None:
+ return fully_trusted_filter
+ if isinstance(filter, str):
+ raise TypeError(
+ 'String names are not supported for '
+ + 'TarFile.extraction_filter. Use a function such as '
+ + 'tarfile.data_filter directly.')
+ return filter
+ if callable(filter):
+ return filter
+ try:
+ return _NAMED_FILTERS[filter]
+ except KeyError:
+ raise ValueError(f"filter {filter!r} not found") from None
+
+ def extractall(self, path=".", members=None, *, numeric_owner=False,
+ filter=None):
+ """Extract all members from the archive to the current working
+ directory and set owner, modification time and permissions on
+ directories afterwards. `path' specifies a different directory
+ to extract to. `members' is optional and must be a subset of the
+ list returned by getmembers(). If `numeric_owner` is True, only
+ the numbers for user/group names are used and not the names.
+
+ The `filter` function will be called on each member just
+ before extraction.
+ It can return a changed TarInfo or None to skip the member.
+ String names of common filters are accepted.
+ """
+ directories = []
+
+ filter_function = self._get_filter_function(filter)
+ if members is None:
+ members = self
+
+ for member in members:
+ tarinfo = self._get_extract_tarinfo(member, filter_function, path)
+ if tarinfo is None:
+ continue
+ if tarinfo.isdir():
+ # For directories, delay setting attributes until later,
+ # since permissions can interfere with extraction and
+ # extracting contents can reset mtime.
+ directories.append(tarinfo)
+ self._extract_one(tarinfo, path, set_attrs=not tarinfo.isdir(),
+ numeric_owner=numeric_owner)
+
+ # Reverse sort directories.
+ directories.sort(key=lambda a: a.name, reverse=True)
+
+ # Set correct owner, mtime and filemode on directories.
+ for tarinfo in directories:
+ dirpath = os.path.join(path, tarinfo.name)
+ try:
+ self.chown(tarinfo, dirpath, numeric_owner=numeric_owner)
+ self.utime(tarinfo, dirpath)
+ self.chmod(tarinfo, dirpath)
+ except ExtractError as e:
+ self._handle_nonfatal_error(e)
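+
+ # Illustrative sketch (not part of the stdlib source): extract with the
+ # named "data" filter, which rejects absolute paths, parent-directory
+ # traversal and special files:
+ #
+ #     with TarFile.open("untrusted.tar") as tf:
+ #         tf.extractall("out/", filter="data")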
+
+ def extract(self, member, path="", set_attrs=True, *, numeric_owner=False,
+ filter=None):
+ """Extract a member from the archive to the current working directory,
+ using its full name. Its file information is extracted as accurately
+ as possible. `member' may be a filename or a TarInfo object. You can
+ specify a different directory using `path'. File attributes (owner,
+ mtime, mode) are set unless `set_attrs' is False. If `numeric_owner`
+ is True, only the numbers for user/group names are used and not
+ the names.
+
+ The `filter` function will be called before extraction.
+ It can return a changed TarInfo or None to skip the member.
+ String names of common filters are accepted.
+ """
+ filter_function = self._get_filter_function(filter)
+ tarinfo = self._get_extract_tarinfo(member, filter_function, path)
+ if tarinfo is not None:
+ self._extract_one(tarinfo, path, set_attrs, numeric_owner)
+
+ def _get_extract_tarinfo(self, member, filter_function, path):
+ """Get filtered TarInfo (or None) from member, which might be a str"""
+ if isinstance(member, str):
+ tarinfo = self.getmember(member)
+ else:
+ tarinfo = member
+
+ unfiltered = tarinfo
+ try:
+ tarinfo = filter_function(tarinfo, path)
+ except (OSError, FilterError) as e:
+ self._handle_fatal_error(e)
+ except ExtractError as e:
+ self._handle_nonfatal_error(e)
+ if tarinfo is None:
+ self._dbg(2, "tarfile: Excluded %r" % unfiltered.name)
+ return None
+ # Prepare the link target for makelink().
+ if tarinfo.islnk():
+ tarinfo = copy.copy(tarinfo)
+ tarinfo._link_target = os.path.join(path, tarinfo.linkname)
+ return tarinfo
+
+ def _extract_one(self, tarinfo, path, set_attrs, numeric_owner):
+ """Extract from filtered tarinfo to disk"""
+ self._check("r")
+
+ try:
+ self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
+ set_attrs=set_attrs,
+ numeric_owner=numeric_owner)
+ except OSError as e:
+ self._handle_fatal_error(e)
+ except ExtractError as e:
+ self._handle_nonfatal_error(e)
+
+ def _handle_nonfatal_error(self, e):
+ """Handle non-fatal error (ExtractError) according to errorlevel"""
+ if self.errorlevel > 1:
+ raise
+ else:
+ self._dbg(1, "tarfile: %s" % e)
+
+ def _handle_fatal_error(self, e):
+ """Handle "fatal" error according to self.errorlevel"""
+ if self.errorlevel > 0:
+ raise
+ elif isinstance(e, OSError):
+ if e.filename is None:
+ self._dbg(1, "tarfile: %s" % e.strerror)
+ else:
+ self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
+ else:
+ self._dbg(1, "tarfile: %s %s" % (type(e).__name__, e))
+
+ def extractfile(self, member):
+ """Extract a member from the archive as a file object. `member' may be
+ a filename or a TarInfo object. If `member' is a regular file or
+ a link, an io.BufferedReader object is returned. For all other
+ existing members, None is returned. If `member' does not appear
+ in the archive, KeyError is raised.
+ """
+ self._check("r")
+
+ if isinstance(member, str):
+ tarinfo = self.getmember(member)
+ else:
+ tarinfo = member
+
+ if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
+ # Members with unknown types are treated as regular files.
+ return self.fileobject(self, tarinfo)
+
+ elif tarinfo.islnk() or tarinfo.issym():
+ if isinstance(self.fileobj, _Stream):
+ # A small but ugly workaround for the case that someone tries
+ # to extract a (sym)link as a file-object from a non-seekable
+ # stream of tar blocks.
+ raise StreamError("cannot extract (sym)link as file object")
+ else:
+ # A (sym)link's file object is its target's file object.
+ return self.extractfile(self._find_link_target(tarinfo))
+ else:
+ # If there's no data associated with the member (directory, chrdev,
+ # blkdev, etc.), return None instead of a file object.
+ return None
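+
+ # Illustrative sketch (not part of the stdlib source): read one member
+ # without extracting it to disk:
+ #
+ #     with TarFile.open("logs.tar") as tf:
+ #         f = tf.extractfile("app.log")
+ #         if f is not None:  # None for directories, devices, etc.
+ #             first_line = f.readline()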
+
+ def _extract_member(self, tarinfo, targetpath, set_attrs=True,
+ numeric_owner=False):
+ """Extract the TarInfo object tarinfo to a physical
+ file called targetpath.
+ """
+ # Fetch the TarInfo object for the given name
+ # and build the destination pathname, replacing
+ # forward slashes to platform specific separators.
+ targetpath = targetpath.rstrip("/")
+ targetpath = targetpath.replace("/", os.sep)
+
+ # Create all upper directories.
+ upperdirs = os.path.dirname(targetpath)
+ if upperdirs and not os.path.exists(upperdirs):
+ # Create directories that are not part of the archive with
+ # default permissions.
+ os.makedirs(upperdirs)
+
+ if tarinfo.islnk() or tarinfo.issym():
+ self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
+ else:
+ self._dbg(1, tarinfo.name)
+
+ if tarinfo.isreg():
+ self.makefile(tarinfo, targetpath)
+ elif tarinfo.isdir():
+ self.makedir(tarinfo, targetpath)
+ elif tarinfo.isfifo():
+ self.makefifo(tarinfo, targetpath)
+ elif tarinfo.ischr() or tarinfo.isblk():
+ self.makedev(tarinfo, targetpath)
+ elif tarinfo.islnk() or tarinfo.issym():
+ self.makelink(tarinfo, targetpath)
+ elif tarinfo.type not in SUPPORTED_TYPES:
+ self.makeunknown(tarinfo, targetpath)
+ else:
+ self.makefile(tarinfo, targetpath)
+
+ if set_attrs:
+ self.chown(tarinfo, targetpath, numeric_owner)
+ if not tarinfo.issym():
+ self.chmod(tarinfo, targetpath)
+ self.utime(tarinfo, targetpath)
+
+ #--------------------------------------------------------------------------
+ # Below are the different file methods. They are called via
+ # _extract_member() when extract() is called. They can be replaced in a
+ # subclass to implement other functionality.
+
+ def makedir(self, tarinfo, targetpath):
+ """Make a directory called targetpath.
+ """
+ try:
+ if tarinfo.mode is None:
+ # Use the system's default mode
+ os.mkdir(targetpath)
+ else:
+ # Use a safe mode for the directory, the real mode is set
+ # later in _extract_member().
+ os.mkdir(targetpath, 0o700)
+ except FileExistsError:
+ pass
+
+ def makefile(self, tarinfo, targetpath):
+ """Make a file called targetpath.
+ """
+ source = self.fileobj
+ source.seek(tarinfo.offset_data)
+ bufsize = self.copybufsize
+ with bltn_open(targetpath, "wb") as target:
+ if tarinfo.sparse is not None:
+ for offset, size in tarinfo.sparse:
+ target.seek(offset)
+ copyfileobj(source, target, size, ReadError, bufsize)
+ target.seek(tarinfo.size)
+ target.truncate()
+ else:
+ copyfileobj(source, target, tarinfo.size, ReadError, bufsize)
+
+ def makeunknown(self, tarinfo, targetpath):
+ """Make a file from a TarInfo object with an unknown type
+ at targetpath.
+ """
+ self.makefile(tarinfo, targetpath)
+ self._dbg(1, "tarfile: Unknown file type %r, " \
+ "extracted as regular file." % tarinfo.type)
+
+ def makefifo(self, tarinfo, targetpath):
+ """Make a fifo called targetpath.
+ """
+ if hasattr(os, "mkfifo"):
+ os.mkfifo(targetpath)
+ else:
+ raise ExtractError("fifo not supported by system")
+
+ def makedev(self, tarinfo, targetpath):
+ """Make a character or block device called targetpath.
+ """
+ if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
+ raise ExtractError("special devices not supported by system")
+
+ mode = tarinfo.mode
+ if mode is None:
+ # Use mknod's default
+ mode = 0o600
+ if tarinfo.isblk():
+ mode |= stat.S_IFBLK
+ else:
+ mode |= stat.S_IFCHR
+
+ os.mknod(targetpath, mode,
+ os.makedev(tarinfo.devmajor, tarinfo.devminor))
+
+ def makelink(self, tarinfo, targetpath):
+ """Make a (symbolic) link called targetpath. If it cannot be created
+ (platform limitation), we try to make a copy of the referenced file
+ instead of a link.
+ """
+ try:
+ # For systems that support symbolic and hard links.
+ if tarinfo.issym():
+ if os.path.lexists(targetpath):
+ # Avoid FileExistsError on following os.symlink.
+ os.unlink(targetpath)
+ os.symlink(tarinfo.linkname, targetpath)
+ else:
+ if os.path.exists(tarinfo._link_target):
+ os.link(tarinfo._link_target, targetpath)
+ else:
+ self._extract_member(self._find_link_target(tarinfo),
+ targetpath)
+ except symlink_exception:
+ try:
+ self._extract_member(self._find_link_target(tarinfo),
+ targetpath)
+ except KeyError:
+ raise ExtractError("unable to resolve link inside archive") from None
+
+ def chown(self, tarinfo, targetpath, numeric_owner):
+ """Set owner of targetpath according to tarinfo. If numeric_owner
+ is True, use .gid/.uid instead of .gname/.uname. If numeric_owner
+ is False, fall back to .gid/.uid when the search based on name
+ fails.
+ """
+ if hasattr(os, "geteuid") and os.geteuid() == 0:
+ # We have to be root to do so.
+ g = tarinfo.gid
+ u = tarinfo.uid
+ if not numeric_owner:
+ try:
+ if grp and tarinfo.gname:
+ g = grp.getgrnam(tarinfo.gname)[2]
+ except KeyError:
+ pass
+ try:
+ if pwd and tarinfo.uname:
+ u = pwd.getpwnam(tarinfo.uname)[2]
+ except KeyError:
+ pass
+ if g is None:
+ g = -1
+ if u is None:
+ u = -1
+ try:
+ if tarinfo.issym() and hasattr(os, "lchown"):
+ os.lchown(targetpath, u, g)
+ else:
+ os.chown(targetpath, u, g)
+ except OSError as e:
+ raise ExtractError("could not change owner") from e
+
+ def chmod(self, tarinfo, targetpath):
+ """Set file permissions of targetpath according to tarinfo.
+ """
+ if tarinfo.mode is None:
+ return
+ try:
+ os.chmod(targetpath, tarinfo.mode)
+ except OSError as e:
+ raise ExtractError("could not change mode") from e
+
+ def utime(self, tarinfo, targetpath):
+ """Set modification time of targetpath according to tarinfo.
+ """
+ mtime = tarinfo.mtime
+ if mtime is None:
+ return
+ if not hasattr(os, 'utime'):
+ return
+ try:
+ os.utime(targetpath, (mtime, mtime))
+ except OSError as e:
+ raise ExtractError("could not change modification time") from e
+
+ #--------------------------------------------------------------------------
+ def next(self):
+ """Return the next member of the archive as a TarInfo object, when
+ TarFile is opened for reading. Return None if there is no more
+ available.
+ """
+ self._check("ra")
+ if self.firstmember is not None:
+ m = self.firstmember
+ self.firstmember = None
+ return m
+
+ # Advance the file pointer.
+ if self.offset != self.fileobj.tell():
+ self.fileobj.seek(self.offset - 1)
+ if not self.fileobj.read(1):
+ raise ReadError("unexpected end of data")
+
+ # Read the next block.
+ tarinfo = None
+ while True:
+ try:
+ tarinfo = self.tarinfo.fromtarfile(self)
+ except EOFHeaderError as e:
+ if self.ignore_zeros:
+ self._dbg(2, "0x%X: %s" % (self.offset, e))
+ self.offset += BLOCKSIZE
+ continue
+ except InvalidHeaderError as e:
+ if self.ignore_zeros:
+ self._dbg(2, "0x%X: %s" % (self.offset, e))
+ self.offset += BLOCKSIZE
+ continue
+ elif self.offset == 0:
+ raise ReadError(str(e)) from None
+ except EmptyHeaderError:
+ if self.offset == 0:
+ raise ReadError("empty file") from None
+ except TruncatedHeaderError as e:
+ if self.offset == 0:
+ raise ReadError(str(e)) from None
+ except SubsequentHeaderError as e:
+ raise ReadError(str(e)) from None
+ except Exception as e:
+ try:
+ import zlib
+ if isinstance(e, zlib.error):
+ raise ReadError(f'zlib error: {e}') from None
+ else:
+ raise e
+ except ImportError:
+ raise e
+ break
+
+ if tarinfo is not None:
+ self.members.append(tarinfo)
+ else:
+ self._loaded = True
+
+ return tarinfo
+
+ #--------------------------------------------------------------------------
+ # Little helper methods:
+
+ def _getmember(self, name, tarinfo=None, normalize=False):
+ """Find an archive member by name from bottom to top.
+ If tarinfo is given, it is used as the starting point.
+ """
+ # Ensure that all members have been loaded.
+ members = self.getmembers()
+
+ # Limit the member search list up to tarinfo.
+ skipping = False
+ if tarinfo is not None:
+ try:
+ index = members.index(tarinfo)
+ except ValueError:
+ # The given starting point might be a (modified) copy.
+ # We'll later skip members until we find an equivalent.
+ skipping = True
+ else:
+ # Happy fast path
+ members = members[:index]
+
+ if normalize:
+ name = os.path.normpath(name)
+
+ for member in reversed(members):
+ if skipping:
+ if tarinfo.offset == member.offset:
+ skipping = False
+ continue
+ if normalize:
+ member_name = os.path.normpath(member.name)
+ else:
+ member_name = member.name
+
+ if name == member_name:
+ return member
+
+ if skipping:
+ # Starting point was not found
+ raise ValueError(tarinfo)
+
+ def _load(self):
+ """Read through the entire archive file and look for readable
+ members.
+ """
+ while True:
+ tarinfo = self.next()
+ if tarinfo is None:
+ break
+ self._loaded = True
+
+ def _check(self, mode=None):
+ """Check if TarFile is still open, and if the operation's mode
+ corresponds to TarFile's mode.
+ """
+ if self.closed:
+ raise OSError("%s is closed" % self.__class__.__name__)
+ if mode is not None and self.mode not in mode:
+ raise OSError("bad operation for mode %r" % self.mode)
+
+ def _find_link_target(self, tarinfo):
+ """Find the target member of a symlink or hardlink member in the
+ archive.
+ """
+ if tarinfo.issym():
+ # Always search the entire archive.
+ linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
+ limit = None
+ else:
+ # Search the archive before the link, because a hard link is
+ # just a reference to an already archived file.
+ linkname = tarinfo.linkname
+ limit = tarinfo
+
+ member = self._getmember(linkname, tarinfo=limit, normalize=True)
+ if member is None:
+ raise KeyError("linkname %r not found" % linkname)
+ return member
+
+ def __iter__(self):
+ """Provide an iterator object.
+ """
+ if self._loaded:
+ yield from self.members
+ return
+
+ # Yield items using TarFile's next() method.
+ # When all members have been read, set TarFile as _loaded.
+ index = 0
+ # Fix for SF #1100429: Under rare circumstances it can
+ # happen that getmembers() is called during iteration,
+ # which will have already exhausted the next() method.
+ if self.firstmember is not None:
+ tarinfo = self.next()
+ index += 1
+ yield tarinfo
+
+ while True:
+ if index < len(self.members):
+ tarinfo = self.members[index]
+ elif not self._loaded:
+ tarinfo = self.next()
+ if not tarinfo:
+ self._loaded = True
+ return
+ else:
+ return
+ index += 1
+ yield tarinfo
+
+ def _dbg(self, level, msg):
+ """Write debugging output to sys.stderr.
+ """
+ if level <= self.debug:
+ print(msg, file=sys.stderr)
+
+ def __enter__(self):
+ self._check()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if type is None:
+ self.close()
+ else:
+ # An exception occurred. We must not call close() because
+ # it would try to write end-of-archive blocks and padding.
+ if not self._extfileobj:
+ self.fileobj.close()
+ self.closed = True
+
+#--------------------
+# exported functions
+#--------------------
+
+def is_tarfile(name):
+ """Return True if name points to a tar archive that we
+ are able to handle, else return False.
+
+ 'name' should be a string, file, or file-like object.
+ """
+ try:
+ if hasattr(name, "read"):
+ t = open(fileobj=name)
+ else:
+ t = open(name)
+ t.close()
+ return True
+ except TarError:
+ return False
+
+open = TarFile.open
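+
+# Illustrative sketch (not part of the stdlib source): the module-level
+# open() is just TarFile.open, so compression is chosen by the mode string:
+#
+#     with open("src.tar.xz", "r:xz") as tf:
+#         tf.list(verbose=False)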
+
+
+def main():
+ import argparse
+
+ description = 'A simple command-line interface for tarfile module.'
+ parser = argparse.ArgumentParser(description=description)
+ parser.add_argument('-v', '--verbose', action='store_true', default=False,
+ help='Verbose output')
+ parser.add_argument('--filter', metavar='<filtername>',
+ choices=_NAMED_FILTERS,
+ help='Filter for extraction')
+
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('-l', '--list', metavar='<tarfile>',
+ help='Show listing of a tarfile')
+ group.add_argument('-e', '--extract', nargs='+',
+ metavar=('<tarfile>', '<output_dir>'),
+ help='Extract tarfile into target dir')
+ group.add_argument('-c', '--create', nargs='+',
+ metavar=('<name>', '<file>'),
+ help='Create tarfile from sources')
+ group.add_argument('-t', '--test', metavar='<tarfile>',
+ help='Test if a tarfile is valid')
+
+ args = parser.parse_args()
+
+ if args.filter and args.extract is None:
+ parser.exit(1, '--filter is only valid for extraction\n')
+
+ if args.test is not None:
+ src = args.test
+ if is_tarfile(src):
+ with open(src, 'r') as tar:
+ tar.getmembers()
+ print(tar.getmembers(), file=sys.stderr)
+ if args.verbose:
+ print('{!r} is a tar archive.'.format(src))
+ else:
+ parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
+
+ elif args.list is not None:
+ src = args.list
+ if is_tarfile(src):
+ with TarFile.open(src, 'r:*') as tf:
+ tf.list(verbose=args.verbose)
+ else:
+ parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
+
+ elif args.extract is not None:
+ if len(args.extract) == 1:
+ src = args.extract[0]
+ curdir = os.curdir
+ elif len(args.extract) == 2:
+ src, curdir = args.extract
+ else:
+ parser.exit(1, parser.format_help())
+
+ if is_tarfile(src):
+ with TarFile.open(src, 'r:*') as tf:
+ tf.extractall(path=curdir, filter=args.filter)
+ if args.verbose:
+ if curdir == '.':
+ msg = '{!r} file is extracted.'.format(src)
+ else:
+ msg = ('{!r} file is extracted '
+ 'into {!r} directory.').format(src, curdir)
+ print(msg)
+ else:
+ parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
+
+ elif args.create is not None:
+ tar_name = args.create.pop(0)
+ _, ext = os.path.splitext(tar_name)
+ compressions = {
+ # gz
+ '.gz': 'gz',
+ '.tgz': 'gz',
+ # xz
+ '.xz': 'xz',
+ '.txz': 'xz',
+ # bz2
+ '.bz2': 'bz2',
+ '.tbz': 'bz2',
+ '.tbz2': 'bz2',
+ '.tb2': 'bz2',
+ }
+ tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w'
+ tar_files = args.create
+
+ with TarFile.open(tar_name, tar_mode) as tf:
+ for file_name in tar_files:
+ tf.add(file_name)
+
+ if args.verbose:
+ print('{!r} file created.'.format(tar_name))
+
+if __name__ == '__main__':
+ main()
diff --git a/infer_4_37_2/lib/python3.10/tempfile.py b/infer_4_37_2/lib/python3.10/tempfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd78998df9fd85415e15e66cff2dbcdb5768f745
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/tempfile.py
@@ -0,0 +1,884 @@
+"""Temporary files.
+
+This module provides generic, low- and high-level interfaces for
+creating temporary files and directories. All of the interfaces
+provided by this module can be used without fear of race conditions
+except for 'mktemp'. 'mktemp' is subject to race conditions and
+should not be used; it is provided for backward compatibility only.
+
+The default path names are returned as str. If you supply bytes as
+input, all return values will be in bytes. Ex:
+
+ >>> tempfile.mkstemp()
+ (4, '/tmp/tmptpu9nin8')
+ >>> tempfile.mkdtemp(suffix=b'')
+ b'/tmp/tmppbi8f0hy'
+
+This module also provides some data items to the user:
+
+ TMP_MAX - maximum number of names that will be tried before
+ giving up.
+ tempdir - If this is set to a string before the first use of
+ any routine from this module, it will be considered as
+ another candidate location to store temporary files.
+"""
+
+__all__ = [
+ "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
+ "SpooledTemporaryFile", "TemporaryDirectory",
+ "mkstemp", "mkdtemp", # low level safe interfaces
+ "mktemp", # deprecated unsafe interface
+ "TMP_MAX", "gettempprefix", # constants
+ "tempdir", "gettempdir",
+ "gettempprefixb", "gettempdirb",
+ ]
+
+
+# Imports.
+
+import functools as _functools
+import warnings as _warnings
+import io as _io
+import os as _os
+import shutil as _shutil
+import errno as _errno
+from random import Random as _Random
+import sys as _sys
+import types as _types
+import weakref as _weakref
+import _thread
+_allocate_lock = _thread.allocate_lock
+
+_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
+if hasattr(_os, 'O_NOFOLLOW'):
+ _text_openflags |= _os.O_NOFOLLOW
+
+_bin_openflags = _text_openflags
+if hasattr(_os, 'O_BINARY'):
+ _bin_openflags |= _os.O_BINARY
+
+if hasattr(_os, 'TMP_MAX'):
+ TMP_MAX = _os.TMP_MAX
+else:
+ TMP_MAX = 10000
+
+# This variable _was_ unused for legacy reasons, see issue 10354.
+# But as of 3.5 we actually use it at runtime so changing it would
+# have a possibly desirable side effect... But we do not want to support
+# that as an API. It is undocumented on purpose. Do not depend on this.
+template = "tmp"
+
+# Internal routines.
+
+_once_lock = _allocate_lock()
+
+
+def _exists(fn):
+ try:
+ _os.lstat(fn)
+ except OSError:
+ return False
+ else:
+ return True
+
+
+def _infer_return_type(*args):
+ """Look at the type of all args and divine their implied return type."""
+ return_type = None
+ for arg in args:
+ if arg is None:
+ continue
+
+ if isinstance(arg, _os.PathLike):
+ arg = _os.fspath(arg)
+
+ if isinstance(arg, bytes):
+ if return_type is str:
+ raise TypeError("Can't mix bytes and non-bytes in "
+ "path components.")
+ return_type = bytes
+ else:
+ if return_type is bytes:
+ raise TypeError("Can't mix bytes and non-bytes in "
+ "path components.")
+ return_type = str
+ if return_type is None:
+ if tempdir is None or isinstance(tempdir, str):
+ return str # tempfile APIs return a str by default.
+ else:
+ # we could check for bytes but it'll fail later on anyway
+ return bytes
+ return return_type
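+
+# Illustrative examples (not part of the stdlib source):
+#
+#     _infer_return_type("tmp", None)   # -> str
+#     _infer_return_type(b"tmp", None)  # -> bytes
+#     _infer_return_type("a", b"b")     # -> TypeError (mixed str and bytes)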
+
+
+def _sanitize_params(prefix, suffix, dir):
+ """Common parameter processing for most APIs in this module."""
+ output_type = _infer_return_type(prefix, suffix, dir)
+ if suffix is None:
+ suffix = output_type()
+ if prefix is None:
+ if output_type is str:
+ prefix = template
+ else:
+ prefix = _os.fsencode(template)
+ if dir is None:
+ if output_type is str:
+ dir = gettempdir()
+ else:
+ dir = gettempdirb()
+ return prefix, suffix, dir, output_type
+
+
+class _RandomNameSequence:
+ """An instance of _RandomNameSequence generates an endless
+ sequence of unpredictable strings which can safely be incorporated
+ into file names. Each string is eight characters long. Multiple
+ threads can safely use the same instance at the same time.
+
+ _RandomNameSequence is an iterator."""
+
+ characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
+
+ @property
+ def rng(self):
+ cur_pid = _os.getpid()
+ if cur_pid != getattr(self, '_rng_pid', None):
+ self._rng = _Random()
+ self._rng_pid = cur_pid
+ return self._rng
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return ''.join(self.rng.choices(self.characters, k=8))
+
+def _candidate_tempdir_list():
+ """Generate a list of candidate temporary directories which
+ _get_default_tempdir will try."""
+
+ dirlist = []
+
+ # First, try the environment.
+ for envname in 'TMPDIR', 'TEMP', 'TMP':
+ dirname = _os.getenv(envname)
+ if dirname: dirlist.append(dirname)
+
+ # Failing that, try OS-specific locations.
+ if _os.name == 'nt':
+ dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'),
+ _os.path.expandvars(r'%SYSTEMROOT%\Temp'),
+ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
+ else:
+ dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
+
+ # As a last resort, the current directory.
+ try:
+ dirlist.append(_os.getcwd())
+ except (AttributeError, OSError):
+ dirlist.append(_os.curdir)
+
+ return dirlist
+
+def _get_default_tempdir():
+ """Calculate the default directory to use for temporary files.
+ This routine should be called exactly once.
+
+ We determine whether or not a candidate temp dir is usable by
+ trying to create and write to a file in that directory. If this
+ is successful, the test file is deleted. To prevent denial of
+ service, the name of the test file must be randomized."""
+
+ namer = _RandomNameSequence()
+ dirlist = _candidate_tempdir_list()
+
+ for dir in dirlist:
+ if dir != _os.curdir:
+ dir = _os.path.abspath(dir)
+ # Try only a few names per directory.
+ for seq in range(100):
+ name = next(namer)
+ filename = _os.path.join(dir, name)
+ try:
+ fd = _os.open(filename, _bin_openflags, 0o600)
+ try:
+ try:
+ _os.write(fd, b'blat')
+ finally:
+ _os.close(fd)
+ finally:
+ _os.unlink(filename)
+ return dir
+ except FileExistsError:
+ pass
+ except PermissionError:
+ # This exception is thrown when a directory with the chosen name
+ # already exists on windows.
+ if (_os.name == 'nt' and _os.path.isdir(dir) and
+ _os.access(dir, _os.W_OK)):
+ continue
+ break # no point trying more names in this directory
+ except OSError:
+ break # no point trying more names in this directory
+ raise FileNotFoundError(_errno.ENOENT,
+ "No usable temporary directory found in %s" %
+ dirlist)
+
+_name_sequence = None
+
+def _get_candidate_names():
+ """Common setup sequence for all user-callable interfaces."""
+
+ global _name_sequence
+ if _name_sequence is None:
+ _once_lock.acquire()
+ try:
+ if _name_sequence is None:
+ _name_sequence = _RandomNameSequence()
+ finally:
+ _once_lock.release()
+ return _name_sequence
+
+
+def _mkstemp_inner(dir, pre, suf, flags, output_type):
+ """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
+
+ dir = _os.path.abspath(dir)
+ names = _get_candidate_names()
+ if output_type is bytes:
+ names = map(_os.fsencode, names)
+
+ for seq in range(TMP_MAX):
+ name = next(names)
+ file = _os.path.join(dir, pre + name + suf)
+ _sys.audit("tempfile.mkstemp", file)
+ try:
+ fd = _os.open(file, flags, 0o600)
+ except FileExistsError:
+ continue # try again
+ except PermissionError:
+ # This exception is thrown when a directory with the chosen name
+ # already exists on windows.
+ if (_os.name == 'nt' and _os.path.isdir(dir) and
+ _os.access(dir, _os.W_OK)):
+ continue
+ else:
+ raise
+ return fd, file
+
+ raise FileExistsError(_errno.EEXIST,
+ "No usable temporary file name found")
+
+def _dont_follow_symlinks(func, path, *args):
+ # Pass follow_symlinks=False, unless not supported on this platform.
+ if func in _os.supports_follow_symlinks:
+ func(path, *args, follow_symlinks=False)
+ elif _os.name == 'nt' or not _os.path.islink(path):
+ func(path, *args)
+
+def _resetperms(path):
+ try:
+ chflags = _os.chflags
+ except AttributeError:
+ pass
+ else:
+ _dont_follow_symlinks(chflags, path, 0)
+ _dont_follow_symlinks(_os.chmod, path, 0o700)
+
+
+# User visible interfaces.
+
+def gettempprefix():
+ """The default prefix for temporary directories as string."""
+ return _os.fsdecode(template)
+
+def gettempprefixb():
+ """The default prefix for temporary directories as bytes."""
+ return _os.fsencode(template)
+
+tempdir = None
+
+def _gettempdir():
+ """Private accessor for tempfile.tempdir."""
+ global tempdir
+ if tempdir is None:
+ _once_lock.acquire()
+ try:
+ if tempdir is None:
+ tempdir = _get_default_tempdir()
+ finally:
+ _once_lock.release()
+ return tempdir
+
+def gettempdir():
+ """Returns tempfile.tempdir as str."""
+ return _os.fsdecode(_gettempdir())
+
+def gettempdirb():
+ """Returns tempfile.tempdir as bytes."""
+ return _os.fsencode(_gettempdir())
+
+def mkstemp(suffix=None, prefix=None, dir=None, text=False):
+ """User-callable function to create and return a unique temporary
+ file. The return value is a pair (fd, name) where fd is the
+ file descriptor returned by os.open, and name is the filename.
+
+ If 'suffix' is not None, the file name will end with that suffix,
+ otherwise there will be no suffix.
+
+ If 'prefix' is not None, the file name will begin with that prefix,
+ otherwise a default prefix is used.
+
+ If 'dir' is not None, the file will be created in that directory,
+ otherwise a default directory is used.
+
+ If 'text' is specified and true, the file is opened in text
+ mode. Else (the default) the file is opened in binary mode.
+
+ If any of 'suffix', 'prefix' and 'dir' are not None, they must be the
+ same type. If they are bytes, the returned name will be bytes; str
+ otherwise.
+
+ The file is readable and writable only by the creating user ID.
+ If the operating system uses permission bits to indicate whether a
+ file is executable, the file is executable by no one. The file
+ descriptor is not inherited by children of this process.
+
+ Caller is responsible for deleting the file when done with it.
+ """
+
+ prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
+
+ if text:
+ flags = _text_openflags
+ else:
+ flags = _bin_openflags
+
+ return _mkstemp_inner(dir, prefix, suffix, flags, output_type)
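+
+# Illustrative sketch (not part of the stdlib source): the caller owns both
+# the descriptor and the file:
+#
+#     fd, path = mkstemp(suffix=".json")
+#     try:
+#         with _os.fdopen(fd, "w") as f:
+#             f.write("{}")
+#     finally:
+#         _os.unlink(path)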
+
+
+def mkdtemp(suffix=None, prefix=None, dir=None):
+ """User-callable function to create and return a unique temporary
+ directory. The return value is the pathname of the directory.
+
+ Arguments are as for mkstemp, except that the 'text' argument is
+ not accepted.
+
+ The directory is readable, writable, and searchable only by the
+ creating user.
+
+ Caller is responsible for deleting the directory when done with it.
+ """
+
+ prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
+
+ names = _get_candidate_names()
+ if output_type is bytes:
+ names = map(_os.fsencode, names)
+
+ for seq in range(TMP_MAX):
+ name = next(names)
+ file = _os.path.join(dir, prefix + name + suffix)
+ _sys.audit("tempfile.mkdtemp", file)
+ try:
+ _os.mkdir(file, 0o700)
+ except FileExistsError:
+ continue # try again
+ except PermissionError:
+ # This exception is thrown when a directory with the chosen name
+ # already exists on windows.
+ if (_os.name == 'nt' and _os.path.isdir(dir) and
+ _os.access(dir, _os.W_OK)):
+ continue
+ else:
+ raise
+ return file
+
+ raise FileExistsError(_errno.EEXIST,
+ "No usable temporary directory name found")
+
+def mktemp(suffix="", prefix=template, dir=None):
+ """User-callable function to return a unique temporary file name. The
+ file is not created.
+
+ Arguments are similar to mkstemp, except that the 'text' argument is
+ not accepted, and suffix=None, prefix=None and bytes file names are not
+ supported.
+
+ THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may
+ refer to a file that did not exist at some point, but by the time
+ you get around to creating it, someone else may have beaten you to
+ the punch.
+ """
+
+## from warnings import warn as _warn
+## _warn("mktemp is a potential security risk to your program",
+## RuntimeWarning, stacklevel=2)
+
+ if dir is None:
+ dir = gettempdir()
+
+ names = _get_candidate_names()
+ for seq in range(TMP_MAX):
+ name = next(names)
+ file = _os.path.join(dir, prefix + name + suffix)
+ if not _exists(file):
+ return file
+
+ raise FileExistsError(_errno.EEXIST,
+ "No usable temporary filename found")
+
+
+class _TemporaryFileCloser:
+ """A separate object allowing proper closing of a temporary file's
+ underlying file object, without adding a __del__ method to the
+ temporary file."""
+
+ file = None # Set here since __del__ checks it
+ close_called = False
+
+ def __init__(self, file, name, delete=True):
+ self.file = file
+ self.name = name
+ self.delete = delete
+
+ # NT provides delete-on-close as a primitive, so we don't need
+ # the wrapper to do anything special. We still use it so that
+ # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
+ if _os.name != 'nt':
+ # Cache the unlinker so we don't get spurious errors at
+ # shutdown when the module-level "os" is None'd out. Note
+ # that this must be referenced as self.unlink, because the
+ # name TemporaryFileWrapper may also get None'd out before
+ # __del__ is called.
+
+ def close(self, unlink=_os.unlink):
+ if not self.close_called and self.file is not None:
+ self.close_called = True
+ try:
+ self.file.close()
+ finally:
+ if self.delete:
+ unlink(self.name)
+
+ # Need to ensure the file is deleted on __del__
+ def __del__(self):
+ self.close()
+
+ else:
+ def close(self):
+ if not self.close_called:
+ self.close_called = True
+ self.file.close()
+
+
+class _TemporaryFileWrapper:
+ """Temporary file wrapper
+
+ This class provides a wrapper around files opened for
+ temporary use. In particular, it seeks to automatically
+ remove the file when it is no longer needed.
+ """
+
+ def __init__(self, file, name, delete=True):
+ self.file = file
+ self.name = name
+ self.delete = delete
+ self._closer = _TemporaryFileCloser(file, name, delete)
+
+ def __getattr__(self, name):
+ # Attribute lookups are delegated to the underlying file
+ # and cached for non-numeric results
+ # (i.e. methods are cached, closed and friends are not)
+ file = self.__dict__['file']
+ a = getattr(file, name)
+ if hasattr(a, '__call__'):
+ func = a
+ @_functools.wraps(func)
+ def func_wrapper(*args, **kwargs):
+ return func(*args, **kwargs)
+ # Avoid closing the file as long as the wrapper is alive,
+ # see issue #18879.
+ func_wrapper._closer = self._closer
+ a = func_wrapper
+ if not isinstance(a, int):
+ setattr(self, name, a)
+ return a
+
+ # The underlying __enter__ method returns the wrong object
+ # (self.file) so override it to return the wrapper
+ def __enter__(self):
+ self.file.__enter__()
+ return self
+
+ # Need to trap __exit__ as well to ensure the file gets
+ # deleted when used in a with statement
+ def __exit__(self, exc, value, tb):
+ result = self.file.__exit__(exc, value, tb)
+ self.close()
+ return result
+
+ def close(self):
+ """
+ Close the temporary file, possibly deleting it.
+ """
+ self._closer.close()
+
+ # iter() doesn't use __getattr__ to find the __iter__ method
+ def __iter__(self):
+ # Don't return iter(self.file), but yield from it to avoid closing
+ # file as long as it's being used as iterator (see issue #23700). We
+ # can't use 'yield from' here because iter(file) returns the file
+ # object itself, which has a close method, and thus the file would get
+ # closed when the generator is finalized, due to PEP380 semantics.
+ for line in self.file:
+ yield line
+
+
+def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
+ newline=None, suffix=None, prefix=None,
+ dir=None, delete=True, *, errors=None):
+ """Create and return a temporary file.
+ Arguments:
+ 'prefix', 'suffix', 'dir' -- as for mkstemp.
+ 'mode' -- the mode argument to io.open (default "w+b").
+ 'buffering' -- the buffer size argument to io.open (default -1).
+ 'encoding' -- the encoding argument to io.open (default None)
+ 'newline' -- the newline argument to io.open (default None)
+ 'delete' -- whether the file is deleted on close (default True).
+ 'errors' -- the errors argument to io.open (default None)
+ The file is created as mkstemp() would do it.
+
+ Returns an object with a file-like interface; the name of the file
+ is accessible as its 'name' attribute. The file will be automatically
+ deleted when it is closed unless the 'delete' argument is set to False.
+ """
+
+ prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
+
+ flags = _bin_openflags
+
+ # Setting O_TEMPORARY in the flags causes the OS to delete
+ # the file when it is closed. This is only supported by Windows.
+ if _os.name == 'nt' and delete:
+ flags |= _os.O_TEMPORARY
+
+ if "b" not in mode:
+ encoding = _io.text_encoding(encoding)
+
+ name = None
+ def opener(*args):
+ nonlocal name
+ fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
+ return fd
+ try:
+ file = _io.open(dir, mode, buffering=buffering,
+ newline=newline, encoding=encoding, errors=errors,
+ opener=opener)
+ try:
+ raw = getattr(file, 'buffer', file)
+ raw = getattr(raw, 'raw', raw)
+ raw.name = name
+ return _TemporaryFileWrapper(file, name, delete)
+ except:
+ file.close()
+ raise
+ except:
+ if name is not None and not (_os.name == 'nt' and delete):
+ _os.unlink(name)
+ raise
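+
+# Illustrative sketch (not part of the stdlib source): delete=False keeps the
+# file after close() so it can be reopened by name:
+#
+#     f = NamedTemporaryFile(mode="w", suffix=".txt", delete=False)
+#     f.write("hello")
+#     f.close()
+#     ...  # hand f.name to another consumer, then _os.unlink(f.name)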
+
+if _os.name != 'posix' or _sys.platform == 'cygwin':
+ # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
+ # while it is open.
+ TemporaryFile = NamedTemporaryFile
+
+else:
+ # Is the O_TMPFILE flag available and does it work?
+ # The flag is set to False if os.open(dir, os.O_TMPFILE) raises an
+ # IsADirectoryError exception
+ _O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE')
+
+ def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
+ newline=None, suffix=None, prefix=None,
+ dir=None, *, errors=None):
+ """Create and return a temporary file.
+ Arguments:
+ 'prefix', 'suffix', 'dir' -- as for mkstemp.
+ 'mode' -- the mode argument to io.open (default "w+b").
+ 'buffering' -- the buffer size argument to io.open (default -1).
+ 'encoding' -- the encoding argument to io.open (default None)
+ 'newline' -- the newline argument to io.open (default None)
+ 'errors' -- the errors argument to io.open (default None)
+ The file is created as mkstemp() would do it.
+
+ Returns an object with a file-like interface. The file has no
+ name, and will cease to exist when it is closed.
+ """
+ global _O_TMPFILE_WORKS
+
+ if "b" not in mode:
+ encoding = _io.text_encoding(encoding)
+
+ prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
+
+ flags = _bin_openflags
+ if _O_TMPFILE_WORKS:
+ fd = None
+ def opener(*args):
+ nonlocal fd
+ flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT
+ fd = _os.open(dir, flags2, 0o600)
+ return fd
+ try:
+ file = _io.open(dir, mode, buffering=buffering,
+ newline=newline, encoding=encoding,
+ errors=errors, opener=opener)
+ raw = getattr(file, 'buffer', file)
+ raw = getattr(raw, 'raw', raw)
+ raw.name = fd
+ return file
+ except IsADirectoryError:
+ # Linux kernel older than 3.11 ignores the O_TMPFILE flag:
+ # O_TMPFILE is read as O_DIRECTORY. Trying to open a directory
+ # with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a
+ # directory cannot be open to write. Set flag to False to not
+ # try again.
+ _O_TMPFILE_WORKS = False
+ except OSError:
+ # The filesystem of the directory does not support O_TMPFILE.
+ # For example, OSError(95, 'Operation not supported').
+ #
+ # On Linux kernel older than 3.11, trying to open a regular
+ # file (or a symbolic link to a regular file) with O_TMPFILE
+ # fails with NotADirectoryError, because O_TMPFILE is read as
+ # O_DIRECTORY.
+ pass
+ # Fallback to _mkstemp_inner().
+
+ fd = None
+ def opener(*args):
+ nonlocal fd
+ fd, name = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
+ try:
+ _os.unlink(name)
+ except BaseException as e:
+ _os.close(fd)
+ raise
+ return fd
+ file = _io.open(dir, mode, buffering=buffering,
+ newline=newline, encoding=encoding, errors=errors,
+ opener=opener)
+ raw = getattr(file, 'buffer', file)
+ raw = getattr(raw, 'raw', raw)
+ raw.name = fd
+ return file
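+
+# Illustrative sketch (not part of the stdlib source): TemporaryFile gives an
+# anonymous scratch file that disappears on close:
+#
+#     with TemporaryFile() as f:
+#         f.write(b"scratch")
+#         f.seek(0)
+#         data = f.read()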
+
+class SpooledTemporaryFile:
+ """Temporary file wrapper, specialized to switch from BytesIO
+ or StringIO to a real file when it exceeds a certain size or
+ when a fileno is needed.
+ """
+ _rolled = False
+
+ def __init__(self, max_size=0, mode='w+b', buffering=-1,
+ encoding=None, newline=None,
+ suffix=None, prefix=None, dir=None, *, errors=None):
+ if 'b' in mode:
+ self._file = _io.BytesIO()
+ else:
+ encoding = _io.text_encoding(encoding)
+ self._file = _io.TextIOWrapper(_io.BytesIO(),
+ encoding=encoding, errors=errors,
+ newline=newline)
+ self._max_size = max_size
+ self._rolled = False
+ self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
+ 'suffix': suffix, 'prefix': prefix,
+ 'encoding': encoding, 'newline': newline,
+ 'dir': dir, 'errors': errors}
+
+ __class_getitem__ = classmethod(_types.GenericAlias)
+
+ def _check(self, file):
+ if self._rolled: return
+ max_size = self._max_size
+ if max_size and file.tell() > max_size:
+ self.rollover()
+
+ def rollover(self):
+ if self._rolled: return
+ file = self._file
+ newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
+ del self._TemporaryFileArgs
+
+ pos = file.tell()
+ if hasattr(newfile, 'buffer'):
+ newfile.buffer.write(file.detach().getvalue())
+ else:
+ newfile.write(file.getvalue())
+ newfile.seek(pos, 0)
+
+ self._rolled = True
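+
+ # Illustrative sketch (not part of the stdlib source): data stays in
+ # memory until max_size is exceeded, at which point it is rolled over
+ # to a real temporary file:
+ #
+ #     with SpooledTemporaryFile(max_size=1024) as f:
+ #         f.write(b"x" * 2048)  # exceeds max_size -> rollover to disk
+ #         f.fileno()            # asking for a descriptor also forces it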
+
+ # The method caching trick from NamedTemporaryFile
+ # won't work here, because _file may change from a
+ # BytesIO/StringIO instance to a real file. So we list
+ # all the methods directly.
+
+ # Context management protocol
+ def __enter__(self):
+ if self._file.closed:
+ raise ValueError("Cannot enter context with closed file")
+ return self
+
+ def __exit__(self, exc, value, tb):
+ self._file.close()
+
+ # file protocol
+ def __iter__(self):
+ return self._file.__iter__()
+
+ def close(self):
+ self._file.close()
+
+ @property
+ def closed(self):
+ return self._file.closed
+
+ @property
+ def encoding(self):
+ return self._file.encoding
+
+ @property
+ def errors(self):
+ return self._file.errors
+
+ def fileno(self):
+ self.rollover()
+ return self._file.fileno()
+
+ def flush(self):
+ self._file.flush()
+
+ def isatty(self):
+ return self._file.isatty()
+
+ @property
+ def mode(self):
+ try:
+ return self._file.mode
+ except AttributeError:
+ return self._TemporaryFileArgs['mode']
+
+ @property
+ def name(self):
+ try:
+ return self._file.name
+ except AttributeError:
+ return None
+
+ @property
+ def newlines(self):
+ return self._file.newlines
+
+ def read(self, *args):
+ return self._file.read(*args)
+
+ def readline(self, *args):
+ return self._file.readline(*args)
+
+ def readlines(self, *args):
+ return self._file.readlines(*args)
+
+ def seek(self, *args):
+ return self._file.seek(*args)
+
+ def tell(self):
+ return self._file.tell()
+
+ def truncate(self, size=None):
+ if size is None:
+ self._file.truncate()
+ else:
+ if size > self._max_size:
+ self.rollover()
+ self._file.truncate(size)
+
+ def write(self, s):
+ file = self._file
+ rv = file.write(s)
+ self._check(file)
+ return rv
+
+ def writelines(self, iterable):
+ file = self._file
+ rv = file.writelines(iterable)
+ self._check(file)
+ return rv
+
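+# A minimal usage sketch (illustrative only, not part of the stdlib source;
+# the 1 KiB threshold below is an arbitrary example value). Data lives in an
+# in-memory buffer until it grows past max_size, then rollover() moves it
+# into a real temporary file behind the same interface:
+#
+#     with SpooledTemporaryFile(max_size=1024, mode='w+b') as f:
+#         f.write(b'a' * 512)        # still backed by BytesIO
+#         f.write(b'b' * 1024)       # crosses max_size -> rolls over to disk
+#         f.seek(0)
+#         data = f.read()            # reads transparently from the real file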
+
+class TemporaryDirectory:
+ """Create and return a temporary directory. This has the same
+ behavior as mkdtemp but can be used as a context manager. For
+ example:
+
+ with TemporaryDirectory() as tmpdir:
+ ...
+
+ Upon exiting the context, the directory and everything contained
+ in it are removed.
+ """
+
+ def __init__(self, suffix=None, prefix=None, dir=None,
+ ignore_cleanup_errors=False):
+ self.name = mkdtemp(suffix, prefix, dir)
+ self._ignore_cleanup_errors = ignore_cleanup_errors
+ self._finalizer = _weakref.finalize(
+ self, self._cleanup, self.name,
+ warn_message="Implicitly cleaning up {!r}".format(self),
+ ignore_errors=self._ignore_cleanup_errors)
+
+ @classmethod
+ def _rmtree(cls, name, ignore_errors=False):
+ def onerror(func, path, exc_info):
+ if issubclass(exc_info[0], PermissionError):
+ try:
+ if path != name:
+ _resetperms(_os.path.dirname(path))
+ _resetperms(path)
+
+ try:
+ _os.unlink(path)
+ # PermissionError is raised on FreeBSD for directories
+ except (IsADirectoryError, PermissionError):
+ cls._rmtree(path, ignore_errors=ignore_errors)
+ except FileNotFoundError:
+ pass
+ elif issubclass(exc_info[0], FileNotFoundError):
+ pass
+ else:
+ if not ignore_errors:
+ raise
+
+ _shutil.rmtree(name, onerror=onerror)
+
+ @classmethod
+ def _cleanup(cls, name, warn_message, ignore_errors=False):
+ cls._rmtree(name, ignore_errors=ignore_errors)
+ _warnings.warn(warn_message, ResourceWarning)
+
+ def __repr__(self):
+ return "<{} {!r}>".format(self.__class__.__name__, self.name)
+
+ def __enter__(self):
+ return self.name
+
+ def __exit__(self, exc, value, tb):
+ self.cleanup()
+
+ def cleanup(self):
+ if self._finalizer.detach() or _os.path.exists(self.name):
+ self._rmtree(self.name, ignore_errors=self._ignore_cleanup_errors)
+
+ __class_getitem__ = classmethod(_types.GenericAlias)
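+
+# A minimal usage sketch (illustrative only; the file name is hypothetical).
+# The directory exists only inside the with-block and is removed together
+# with its contents on exit:
+#
+#     import os
+#     with TemporaryDirectory(prefix='demo-') as tmpdir:
+#         path = os.path.join(tmpdir, 'scratch.txt')
+#         with open(path, 'w') as f:
+#             f.write('scratch data')
+#     # tmpdir and everything in it are gone here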
diff --git a/infer_4_37_2/lib/python3.10/threading.py b/infer_4_37_2/lib/python3.10/threading.py
new file mode 100644
index 0000000000000000000000000000000000000000..62f49c05cdc1904ccdb34a68c21b738fb2d22b7f
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/threading.py
@@ -0,0 +1,1645 @@
+"""Thread module emulating a subset of Java's threading model."""
+
+import os as _os
+import sys as _sys
+import _thread
+import functools
+
+from time import monotonic as _time
+from _weakrefset import WeakSet
+from itertools import islice as _islice, count as _count
+try:
+ from _collections import deque as _deque
+except ImportError:
+ from collections import deque as _deque
+
+# Note regarding PEP 8 compliant names
+# This threading model was originally inspired by Java, and inherited
+# the convention of camelCase function and method names from that
+# language. Those original names are not in any imminent danger of
+# being deprecated (even for Py3k), so this module provides them as
+# aliases for the PEP 8 compliant names.
+# Note that using the new PEP 8 compliant names facilitates substitution
+# with the multiprocessing module, which doesn't provide the old
+# Java inspired names.
+
+__all__ = ['get_ident', 'active_count', 'Condition', 'current_thread',
+ 'enumerate', 'main_thread', 'TIMEOUT_MAX',
+ 'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
+ 'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError',
+ 'setprofile', 'settrace', 'local', 'stack_size',
+ 'excepthook', 'ExceptHookArgs', 'gettrace', 'getprofile']
+
+# Rename some stuff so "from threading import *" is safe
+_start_new_thread = _thread.start_new_thread
+_allocate_lock = _thread.allocate_lock
+_set_sentinel = _thread._set_sentinel
+get_ident = _thread.get_ident
+try:
+ get_native_id = _thread.get_native_id
+ _HAVE_THREAD_NATIVE_ID = True
+ __all__.append('get_native_id')
+except AttributeError:
+ _HAVE_THREAD_NATIVE_ID = False
+ThreadError = _thread.error
+try:
+ _CRLock = _thread.RLock
+except AttributeError:
+ _CRLock = None
+TIMEOUT_MAX = _thread.TIMEOUT_MAX
+del _thread
+
+
+# Support for profile and trace hooks
+
+_profile_hook = None
+_trace_hook = None
+
+def setprofile(func):
+ """Set a profile function for all threads started from the threading module.
+
+ The func will be passed to sys.setprofile() for each thread, before its
+ run() method is called.
+
+ """
+ global _profile_hook
+ _profile_hook = func
+
+def getprofile():
+ """Get the profiler function as set by threading.setprofile()."""
+ return _profile_hook
+
+def settrace(func):
+ """Set a trace function for all threads started from the threading module.
+
+ The func will be passed to sys.settrace() for each thread, before its run()
+ method is called.
+
+ """
+ global _trace_hook
+ _trace_hook = func
+
+def gettrace():
+ """Get the trace function as set by threading.settrace()."""
+ return _trace_hook
+
+# Synchronization classes
+
+Lock = _allocate_lock
+
+def RLock(*args, **kwargs):
+ """Factory function that returns a new reentrant lock.
+
+ A reentrant lock must be released by the thread that acquired it. Once a
+ thread has acquired a reentrant lock, the same thread may acquire it again
+ without blocking; the thread must release it once for each time it has
+ acquired it.
+
+ """
+ if _CRLock is None:
+ return _PyRLock(*args, **kwargs)
+ return _CRLock(*args, **kwargs)
+
+class _RLock:
+ """This class implements reentrant lock objects.
+
+ A reentrant lock must be released by the thread that acquired it. Once a
+ thread has acquired a reentrant lock, the same thread may acquire it
+ again without blocking; the thread must release it once for each time it
+ has acquired it.
+
+ """
+
+ def __init__(self):
+ self._block = _allocate_lock()
+ self._owner = None
+ self._count = 0
+
+ def __repr__(self):
+ owner = self._owner
+ try:
+ owner = _active[owner].name
+ except KeyError:
+ pass
+ return "<%s %s.%s object owner=%r count=%d at %s>" % (
+ "locked" if self._block.locked() else "unlocked",
+ self.__class__.__module__,
+ self.__class__.__qualname__,
+ owner,
+ self._count,
+ hex(id(self))
+ )
+
+ def _at_fork_reinit(self):
+ self._block._at_fork_reinit()
+ self._owner = None
+ self._count = 0
+
+ def acquire(self, blocking=True, timeout=-1):
+ """Acquire a lock, blocking or non-blocking.
+
+ When invoked without arguments: if this thread already owns the lock,
+ increment the recursion level by one, and return immediately. Otherwise,
+ if another thread owns the lock, block until the lock is unlocked. Once
+ the lock is unlocked (not owned by any thread), then grab ownership, set
+ the recursion level to one, and return. If more than one thread is
+ blocked waiting until the lock is unlocked, only one at a time will be
+ able to grab ownership of the lock. There is no return value in this
+ case.
+
+ When invoked with the blocking argument set to true, do the same thing
+ as when called without arguments, and return true.
+
+ When invoked with the blocking argument set to false, do not block. If a
+ call without an argument would block, return false immediately;
+ otherwise, do the same thing as when called without arguments, and
+ return true.
+
+ When invoked with the floating-point timeout argument set to a positive
+ value, block for at most the number of seconds specified by timeout
+ and as long as the lock cannot be acquired. Return true if the lock has
+ been acquired, false if the timeout has elapsed.
+
+ """
+ me = get_ident()
+ if self._owner == me:
+ self._count += 1
+ return 1
+ rc = self._block.acquire(blocking, timeout)
+ if rc:
+ self._owner = me
+ self._count = 1
+ return rc
+
+ __enter__ = acquire
+
+ def release(self):
+ """Release a lock, decrementing the recursion level.
+
+ If after the decrement it is zero, reset the lock to unlocked (not owned
+ by any thread), and if any other threads are blocked waiting for the
+ lock to become unlocked, allow exactly one of them to proceed. If after
+ the decrement the recursion level is still nonzero, the lock remains
+ locked and owned by the calling thread.
+
+ Only call this method when the calling thread owns the lock. A
+ RuntimeError is raised if this method is called when the lock is
+ unlocked.
+
+ There is no return value.
+
+ """
+ if self._owner != get_ident():
+ raise RuntimeError("cannot release un-acquired lock")
+ self._count = count = self._count - 1
+ if not count:
+ self._owner = None
+ self._block.release()
+
+ def __exit__(self, t, v, tb):
+ self.release()
+
+ # Internal methods used by condition variables
+
+ def _acquire_restore(self, state):
+ self._block.acquire()
+ self._count, self._owner = state
+
+ def _release_save(self):
+ if self._count == 0:
+ raise RuntimeError("cannot release un-acquired lock")
+ count = self._count
+ self._count = 0
+ owner = self._owner
+ self._owner = None
+ self._block.release()
+ return (count, owner)
+
+ def _is_owned(self):
+ return self._owner == get_ident()
+
+_PyRLock = _RLock
+
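+# A minimal sketch of why re-entrancy matters (illustrative only): the same
+# thread may re-acquire the lock, e.g. when one synchronized method calls
+# another, and it is fully released only when the count drops back to zero:
+#
+#     lock = RLock()
+#     with lock:           # count == 1
+#         with lock:       # same thread, count == 2, no deadlock
+#             pass
+#     # fully released here; other threads may now acquire it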
+
+class Condition:
+ """Class that implements a condition variable.
+
+ A condition variable allows one or more threads to wait until they are
+ notified by another thread.
+
+ If the lock argument is given and not None, it must be a Lock or RLock
+ object, and it is used as the underlying lock. Otherwise, a new RLock object
+ is created and used as the underlying lock.
+
+ """
+
+ def __init__(self, lock=None):
+ if lock is None:
+ lock = RLock()
+ self._lock = lock
+ # Export the lock's acquire() and release() methods
+ self.acquire = lock.acquire
+ self.release = lock.release
+ # If the lock defines _release_save() and/or _acquire_restore(),
+ # these override the default implementations (which just call
+ # release() and acquire() on the lock). Ditto for _is_owned().
+ try:
+ self._release_save = lock._release_save
+ except AttributeError:
+ pass
+ try:
+ self._acquire_restore = lock._acquire_restore
+ except AttributeError:
+ pass
+ try:
+ self._is_owned = lock._is_owned
+ except AttributeError:
+ pass
+ self._waiters = _deque()
+
+ def _at_fork_reinit(self):
+ self._lock._at_fork_reinit()
+ self._waiters.clear()
+
+ def __enter__(self):
+ return self._lock.__enter__()
+
+ def __exit__(self, *args):
+ return self._lock.__exit__(*args)
+
+ def __repr__(self):
+ return "" % (self._lock, len(self._waiters))
+
+ def _release_save(self):
+ self._lock.release() # No state to save
+
+ def _acquire_restore(self, x):
+ self._lock.acquire() # Ignore saved state
+
+ def _is_owned(self):
+ # Return True if lock is owned by current_thread.
+ # This method is called only if _lock doesn't have _is_owned().
+ if self._lock.acquire(False):
+ self._lock.release()
+ return False
+ else:
+ return True
+
+ def wait(self, timeout=None):
+ """Wait until notified or until a timeout occurs.
+
+ If the calling thread has not acquired the lock when this method is
+ called, a RuntimeError is raised.
+
+ This method releases the underlying lock, and then blocks until it is
+ awakened by a notify() or notify_all() call for the same condition
+ variable in another thread, or until the optional timeout occurs. Once
+ awakened or timed out, it re-acquires the lock and returns.
+
+ When the timeout argument is present and not None, it should be a
+ floating point number specifying a timeout for the operation in seconds
+ (or fractions thereof).
+
+ When the underlying lock is an RLock, it is not released using its
+ release() method, since this may not actually unlock the lock when it
+ was acquired multiple times recursively. Instead, an internal interface
+ of the RLock class is used, which really unlocks it even when it has
+ been recursively acquired several times. Another internal interface is
+ then used to restore the recursion level when the lock is reacquired.
+
+ """
+ if not self._is_owned():
+ raise RuntimeError("cannot wait on un-acquired lock")
+ waiter = _allocate_lock()
+ waiter.acquire()
+ self._waiters.append(waiter)
+ saved_state = self._release_save()
+ gotit = False
+ try: # restore state no matter what (e.g., KeyboardInterrupt)
+ if timeout is None:
+ waiter.acquire()
+ gotit = True
+ else:
+ if timeout > 0:
+ gotit = waiter.acquire(True, timeout)
+ else:
+ gotit = waiter.acquire(False)
+ return gotit
+ finally:
+ self._acquire_restore(saved_state)
+ if not gotit:
+ try:
+ self._waiters.remove(waiter)
+ except ValueError:
+ pass
+
+ def wait_for(self, predicate, timeout=None):
+ """Wait until a condition evaluates to True.
+
+        predicate should be a callable whose result will be interpreted as a
+ boolean value. A timeout may be provided giving the maximum time to
+ wait.
+
+ """
+ endtime = None
+ waittime = timeout
+ result = predicate()
+ while not result:
+ if waittime is not None:
+ if endtime is None:
+ endtime = _time() + waittime
+ else:
+ waittime = endtime - _time()
+ if waittime <= 0:
+ break
+ self.wait(waittime)
+ result = predicate()
+ return result
+
+ def notify(self, n=1):
+ """Wake up one or more threads waiting on this condition, if any.
+
+ If the calling thread has not acquired the lock when this method is
+ called, a RuntimeError is raised.
+
+ This method wakes up at most n of the threads waiting for the condition
+ variable; it is a no-op if no threads are waiting.
+
+ """
+ if not self._is_owned():
+ raise RuntimeError("cannot notify on un-acquired lock")
+ waiters = self._waiters
+ while waiters and n > 0:
+ waiter = waiters[0]
+ try:
+ waiter.release()
+ except RuntimeError:
+ # gh-92530: The previous call of notify() released the lock,
+ # but was interrupted before removing it from the queue.
+ # It can happen if a signal handler raises an exception,
+ # like CTRL+C which raises KeyboardInterrupt.
+ pass
+ else:
+ n -= 1
+ try:
+ waiters.remove(waiter)
+ except ValueError:
+ pass
+
+ def notify_all(self):
+ """Wake up all threads waiting on this condition.
+
+ If the calling thread has not acquired the lock when this method
+ is called, a RuntimeError is raised.
+
+ """
+ self.notify(len(self._waiters))
+
+ def notifyAll(self):
+ """Wake up all threads waiting on this condition.
+
+ This method is deprecated, use notify_all() instead.
+
+ """
+ import warnings
+ warnings.warn('notifyAll() is deprecated, use notify_all() instead',
+ DeprecationWarning, stacklevel=2)
+ self.notify_all()
+
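+# A minimal producer/consumer sketch built on wait_for() and notify()
+# (illustrative only; the function names are hypothetical):
+#
+#     import collections, threading
+#
+#     cond = threading.Condition()
+#     items = collections.deque()
+#
+#     def consumer():
+#         with cond:
+#             cond.wait_for(lambda: items)   # lock is released while waiting
+#             item = items.popleft()
+#
+#     def producer(item):
+#         with cond:
+#             items.append(item)
+#             cond.notify()                  # wake one waiting consumer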
+
+class Semaphore:
+ """This class implements semaphore objects.
+
+ Semaphores manage a counter representing the number of release() calls minus
+ the number of acquire() calls, plus an initial value. The acquire() method
+ blocks if necessary until it can return without making the counter
+ negative. If not given, value defaults to 1.
+
+ """
+
+ # After Tim Peters' semaphore class, but not quite the same (no maximum)
+
+ def __init__(self, value=1):
+ if value < 0:
+ raise ValueError("semaphore initial value must be >= 0")
+ self._cond = Condition(Lock())
+ self._value = value
+
+ def acquire(self, blocking=True, timeout=None):
+ """Acquire a semaphore, decrementing the internal counter by one.
+
+ When invoked without arguments: if the internal counter is larger than
+ zero on entry, decrement it by one and return immediately. If it is zero
+ on entry, block, waiting until some other thread has called release() to
+ make it larger than zero. This is done with proper interlocking so that
+ if multiple acquire() calls are blocked, release() will wake exactly one
+ of them up. The implementation may pick one at random, so the order in
+ which blocked threads are awakened should not be relied on. There is no
+ return value in this case.
+
+ When invoked with blocking set to true, do the same thing as when called
+ without arguments, and return true.
+
+ When invoked with blocking set to false, do not block. If a call without
+ an argument would block, return false immediately; otherwise, do the
+ same thing as when called without arguments, and return true.
+
+ When invoked with a timeout other than None, it will block for at
+ most timeout seconds. If acquire does not complete successfully in
+ that interval, return false. Return true otherwise.
+
+ """
+ if not blocking and timeout is not None:
+ raise ValueError("can't specify timeout for non-blocking acquire")
+ rc = False
+ endtime = None
+ with self._cond:
+ while self._value == 0:
+ if not blocking:
+ break
+ if timeout is not None:
+ if endtime is None:
+ endtime = _time() + timeout
+ else:
+ timeout = endtime - _time()
+ if timeout <= 0:
+ break
+ self._cond.wait(timeout)
+ else:
+ self._value -= 1
+ rc = True
+ return rc
+
+ __enter__ = acquire
+
+ def release(self, n=1):
+ """Release a semaphore, incrementing the internal counter by one or more.
+
+ When the counter is zero on entry and another thread is waiting for it
+ to become larger than zero again, wake up that thread.
+
+ """
+ if n < 1:
+ raise ValueError('n must be one or more')
+ with self._cond:
+ self._value += n
+ for i in range(n):
+ self._cond.notify()
+
+ def __exit__(self, t, v, tb):
+ self.release()
+
+
+class BoundedSemaphore(Semaphore):
+ """Implements a bounded semaphore.
+
+ A bounded semaphore checks to make sure its current value doesn't exceed its
+ initial value. If it does, ValueError is raised. In most situations
+ semaphores are used to guard resources with limited capacity.
+
+ If the semaphore is released too many times it's a sign of a bug. If not
+ given, value defaults to 1.
+
+ Like regular semaphores, bounded semaphores manage a counter representing
+ the number of release() calls minus the number of acquire() calls, plus an
+ initial value. The acquire() method blocks if necessary until it can return
+ without making the counter negative. If not given, value defaults to 1.
+
+ """
+
+ def __init__(self, value=1):
+ Semaphore.__init__(self, value)
+ self._initial_value = value
+
+ def release(self, n=1):
+ """Release a semaphore, incrementing the internal counter by one or more.
+
+ When the counter is zero on entry and another thread is waiting for it
+ to become larger than zero again, wake up that thread.
+
+ If the number of releases exceeds the number of acquires,
+ raise a ValueError.
+
+ """
+ if n < 1:
+ raise ValueError('n must be one or more')
+ with self._cond:
+ if self._value + n > self._initial_value:
+ raise ValueError("Semaphore released too many times")
+ self._value += n
+ for i in range(n):
+ self._cond.notify()
+
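+# A minimal sketch of a BoundedSemaphore guarding a fixed-size resource pool
+# (illustrative only; the pool size of 3 is arbitrary):
+#
+#     pool = BoundedSemaphore(3)
+#
+#     def worker():
+#         with pool:       # blocks while three workers already hold it
+#             ...          # use the pooled resource
+#     # releasing more times than acquired would raise ValueError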
+
+class Event:
+ """Class implementing event objects.
+
+ Events manage a flag that can be set to true with the set() method and reset
+ to false with the clear() method. The wait() method blocks until the flag is
+ true. The flag is initially false.
+
+ """
+
+ # After Tim Peters' event class (without is_posted())
+
+ def __init__(self):
+ self._cond = Condition(Lock())
+ self._flag = False
+
+ def _at_fork_reinit(self):
+ # Private method called by Thread._reset_internal_locks()
+ self._cond._at_fork_reinit()
+
+ def is_set(self):
+ """Return true if and only if the internal flag is true."""
+ return self._flag
+
+ def isSet(self):
+ """Return true if and only if the internal flag is true.
+
+ This method is deprecated, use is_set() instead.
+
+ """
+ import warnings
+ warnings.warn('isSet() is deprecated, use is_set() instead',
+ DeprecationWarning, stacklevel=2)
+ return self.is_set()
+
+ def set(self):
+ """Set the internal flag to true.
+
+ All threads waiting for it to become true are awakened. Threads
+ that call wait() once the flag is true will not block at all.
+
+ """
+ with self._cond:
+ self._flag = True
+ self._cond.notify_all()
+
+ def clear(self):
+ """Reset the internal flag to false.
+
+ Subsequently, threads calling wait() will block until set() is called to
+ set the internal flag to true again.
+
+ """
+ with self._cond:
+ self._flag = False
+
+ def wait(self, timeout=None):
+ """Block until the internal flag is true.
+
+ If the internal flag is true on entry, return immediately. Otherwise,
+ block until another thread calls set() to set the flag to true, or until
+ the optional timeout occurs.
+
+ When the timeout argument is present and not None, it should be a
+ floating point number specifying a timeout for the operation in seconds
+ (or fractions thereof).
+
+ This method returns the internal flag on exit, so it will always return
+ True except if a timeout is given and the operation times out.
+
+ """
+ with self._cond:
+ signaled = self._flag
+ if not signaled:
+ signaled = self._cond.wait(timeout)
+ return signaled
+
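+# A minimal Event sketch (illustrative only): one thread blocks in wait()
+# until another flips the shared flag with set():
+#
+#     ready = Event()
+#
+#     def waiter():
+#         if ready.wait(timeout=5.0):   # True once set() is called
+#             ...                       # proceed
+#         else:
+#             ...                       # timed out, flag still False
+#
+#     def starter():
+#         ready.set()                   # wakes every waiter at once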
+
+# A barrier class. Inspired in part by the pthread_barrier_* API and
+# the CyclicBarrier class from Java. See
+# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
+# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/CyclicBarrier.html
+# for more information.
+# We maintain two main states, 'filling' and 'draining' enabling the barrier
+# to be cyclic. Threads are not allowed into it until it has fully drained
+# since the previous cycle. In addition, a 'resetting' state exists which is
+# similar to 'draining' except that threads leave with a BrokenBarrierError,
+# and a 'broken' state in which all threads get the exception.
+class Barrier:
+ """Implements a Barrier.
+
+ Useful for synchronizing a fixed number of threads at known synchronization
+ points. Threads block on 'wait()' and are simultaneously awoken once they
+ have all made that call.
+
+ """
+
+ def __init__(self, parties, action=None, timeout=None):
+ """Create a barrier, initialised to 'parties' threads.
+
+ 'action' is a callable which, when supplied, will be called by one of
+ the threads after they have all entered the barrier and just prior to
+ releasing them all. If a 'timeout' is provided, it is used as the
+ default for all subsequent 'wait()' calls.
+
+ """
+ self._cond = Condition(Lock())
+ self._action = action
+ self._timeout = timeout
+ self._parties = parties
+ self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken
+ self._count = 0
+
+ def wait(self, timeout=None):
+ """Wait for the barrier.
+
+ When the specified number of threads have started waiting, they are all
+ simultaneously awoken. If an 'action' was provided for the barrier, one
+ of the threads will have executed that callback prior to returning.
+ Returns an individual index number from 0 to 'parties-1'.
+
+ """
+ if timeout is None:
+ timeout = self._timeout
+ with self._cond:
+ self._enter() # Block while the barrier drains.
+ index = self._count
+ self._count += 1
+ try:
+ if index + 1 == self._parties:
+ # We release the barrier
+ self._release()
+ else:
+ # We wait until someone releases us
+ self._wait(timeout)
+ return index
+ finally:
+ self._count -= 1
+ # Wake up any threads waiting for barrier to drain.
+ self._exit()
+
+ # Block until the barrier is ready for us, or raise an exception
+ # if it is broken.
+ def _enter(self):
+ while self._state in (-1, 1):
+ # It is draining or resetting, wait until done
+ self._cond.wait()
+        # See if the barrier is in a broken state.
+ if self._state < 0:
+ raise BrokenBarrierError
+ assert self._state == 0
+
+ # Optionally run the 'action' and release the threads waiting
+ # in the barrier.
+ def _release(self):
+ try:
+ if self._action:
+ self._action()
+ # enter draining state
+ self._state = 1
+ self._cond.notify_all()
+ except:
+            # An exception during the _action handler. Break the barrier and reraise.
+ self._break()
+ raise
+
+ # Wait in the barrier until we are released. Raise an exception
+ # if the barrier is reset or broken.
+ def _wait(self, timeout):
+ if not self._cond.wait_for(lambda : self._state != 0, timeout):
+            # Timed out. Break the barrier.
+ self._break()
+ raise BrokenBarrierError
+ if self._state < 0:
+ raise BrokenBarrierError
+ assert self._state == 1
+
+ # If we are the last thread to exit the barrier, signal any threads
+ # waiting for the barrier to drain.
+ def _exit(self):
+ if self._count == 0:
+ if self._state in (-1, 1):
+                # Resetting or draining.
+ self._state = 0
+ self._cond.notify_all()
+
+ def reset(self):
+ """Reset the barrier to the initial state.
+
+ Any threads currently waiting will get the BrokenBarrier exception
+ raised.
+
+ """
+ with self._cond:
+ if self._count > 0:
+ if self._state == 0:
+                    # Reset the barrier, waking up threads.
+ self._state = -1
+ elif self._state == -2:
+                    # Was broken; set it to the resetting state,
+                    # which clears when the last thread exits.
+ self._state = -1
+ else:
+ self._state = 0
+ self._cond.notify_all()
+
+ def abort(self):
+ """Place the barrier into a 'broken' state.
+
+ Useful in case of error. Any currently waiting threads and threads
+ attempting to 'wait()' will have BrokenBarrierError raised.
+
+ """
+ with self._cond:
+ self._break()
+
+ def _break(self):
+        # An internal error was detected. The barrier is set to
+        # a broken state and all parties are awakened.
+ self._state = -2
+ self._cond.notify_all()
+
+ @property
+ def parties(self):
+ """Return the number of threads required to trip the barrier."""
+ return self._parties
+
+ @property
+ def n_waiting(self):
+ """Return the number of threads currently waiting at the barrier."""
+ # We don't need synchronization here since this is an ephemeral result
+ # anyway. It returns the correct value in the steady state.
+ if self._state == 0:
+ return self._count
+ return 0
+
+ @property
+ def broken(self):
+ """Return True if the barrier is in a broken state."""
+ return self._state == -2
+
+# exception raised by the Barrier class
+class BrokenBarrierError(RuntimeError):
+ pass
+
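+# A minimal Barrier sketch (illustrative only; three parties is arbitrary):
+# no thread proceeds past wait() until all three have arrived, and each
+# gets back a distinct index in range(parties):
+#
+#     barrier = Barrier(3)
+#
+#     def worker():
+#         ...                          # per-thread setup
+#         index = barrier.wait()       # blocks until all 3 parties arrive
+#         if index == 0:               # exactly one thread sees index 0
+#             ...                      # e.g. do a once-per-cycle task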
+
+# Helper to generate new thread names
+_counter = _count(1).__next__
+def _newname(name_template):
+ return name_template % _counter()
+
+# Active thread administration.
+#
+# bpo-44422: Use a reentrant lock to allow reentrant calls to functions like
+# threading.enumerate().
+_active_limbo_lock = RLock()
+_active = {} # maps thread id to Thread object
+_limbo = {}
+_dangling = WeakSet()
+
+# Set of Thread._tstate_lock locks of non-daemon threads used by _shutdown()
+# to wait until all Python thread states get deleted:
+# see Thread._set_tstate_lock().
+_shutdown_locks_lock = _allocate_lock()
+_shutdown_locks = set()
+
+def _maintain_shutdown_locks():
+ """
+ Drop any shutdown locks that don't correspond to running threads anymore.
+
+ Calling this from time to time avoids an ever-growing _shutdown_locks
+ set when Thread objects are not joined explicitly. See bpo-37788.
+
+ This must be called with _shutdown_locks_lock acquired.
+ """
+ # If a lock was released, the corresponding thread has exited
+ to_remove = [lock for lock in _shutdown_locks if not lock.locked()]
+ _shutdown_locks.difference_update(to_remove)
+
+
+# Main class for threads
+
+class Thread:
+ """A class that represents a thread of control.
+
+ This class can be safely subclassed in a limited fashion. There are two ways
+ to specify the activity: by passing a callable object to the constructor, or
+ by overriding the run() method in a subclass.
+
+ """
+
+ _initialized = False
+
+ def __init__(self, group=None, target=None, name=None,
+ args=(), kwargs=None, *, daemon=None):
+ """This constructor should always be called with keyword arguments. Arguments are:
+
+ *group* should be None; reserved for future extension when a ThreadGroup
+ class is implemented.
+
+ *target* is the callable object to be invoked by the run()
+ method. Defaults to None, meaning nothing is called.
+
+ *name* is the thread name. By default, a unique name is constructed of
+ the form "Thread-N" where N is a small decimal number.
+
+ *args* is the argument tuple for the target invocation. Defaults to ().
+
+ *kwargs* is a dictionary of keyword arguments for the target
+ invocation. Defaults to {}.
+
+ If a subclass overrides the constructor, it must make sure to invoke
+ the base class constructor (Thread.__init__()) before doing anything
+ else to the thread.
+
+ """
+ assert group is None, "group argument must be None for now"
+ if kwargs is None:
+ kwargs = {}
+ if name:
+ name = str(name)
+ else:
+ name = _newname("Thread-%d")
+ if target is not None:
+ try:
+ target_name = target.__name__
+ name += f" ({target_name})"
+ except AttributeError:
+ pass
+
+ self._target = target
+ self._name = name
+ self._args = args
+ self._kwargs = kwargs
+ if daemon is not None:
+ self._daemonic = daemon
+ else:
+ self._daemonic = current_thread().daemon
+ self._ident = None
+ if _HAVE_THREAD_NATIVE_ID:
+ self._native_id = None
+ self._tstate_lock = None
+ self._started = Event()
+ self._is_stopped = False
+ self._initialized = True
+ # Copy of sys.stderr used by self._invoke_excepthook()
+ self._stderr = _sys.stderr
+ self._invoke_excepthook = _make_invoke_excepthook()
+ # For debugging and _after_fork()
+ _dangling.add(self)
+
+ def _reset_internal_locks(self, is_alive):
+ # private! Called by _after_fork() to reset our internal locks as
+ # they may be in an invalid state leading to a deadlock or crash.
+ self._started._at_fork_reinit()
+ if is_alive:
+ # bpo-42350: If the fork happens when the thread is already stopped
+ # (ex: after threading._shutdown() has been called), _tstate_lock
+ # is None. Do nothing in this case.
+ if self._tstate_lock is not None:
+ self._tstate_lock._at_fork_reinit()
+ self._tstate_lock.acquire()
+ else:
+ # The thread isn't alive after fork: it doesn't have a tstate
+ # anymore.
+ self._is_stopped = True
+ self._tstate_lock = None
+
+ def __repr__(self):
+ assert self._initialized, "Thread.__init__() was not called"
+ status = "initial"
+ if self._started.is_set():
+ status = "started"
+ self.is_alive() # easy way to get ._is_stopped set when appropriate
+ if self._is_stopped:
+ status = "stopped"
+ if self._daemonic:
+ status += " daemon"
+ if self._ident is not None:
+ status += " %s" % self._ident
+ return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
+
+ def start(self):
+ """Start the thread's activity.
+
+ It must be called at most once per thread object. It arranges for the
+ object's run() method to be invoked in a separate thread of control.
+
+ This method will raise a RuntimeError if called more than once on the
+ same thread object.
+
+ """
+ if not self._initialized:
+ raise RuntimeError("thread.__init__() not called")
+
+ if self._started.is_set():
+ raise RuntimeError("threads can only be started once")
+
+ with _active_limbo_lock:
+ _limbo[self] = self
+ try:
+ _start_new_thread(self._bootstrap, ())
+ except Exception:
+ with _active_limbo_lock:
+ del _limbo[self]
+ raise
+ self._started.wait()
+
+ def run(self):
+ """Method representing the thread's activity.
+
+ You may override this method in a subclass. The standard run() method
+ invokes the callable object passed to the object's constructor as the
+ target argument, if any, with sequential and keyword arguments taken
+ from the args and kwargs arguments, respectively.
+
+ """
+ try:
+ if self._target is not None:
+ self._target(*self._args, **self._kwargs)
+ finally:
+ # Avoid a refcycle if the thread is running a function with
+ # an argument that has a member that points to the thread.
+ del self._target, self._args, self._kwargs
+
+ def _bootstrap(self):
+ # Wrapper around the real bootstrap code that ignores
+ # exceptions during interpreter cleanup. Those typically
+ # happen when a daemon thread wakes up at an unfortunate
+ # moment, finds the world around it destroyed, and raises some
+ # random exception *** while trying to report the exception in
+ # _bootstrap_inner() below ***. Those random exceptions
+ # don't help anybody, and they confuse users, so we suppress
+ # them. We suppress them only when it appears that the world
+ # indeed has already been destroyed, so that exceptions in
+ # _bootstrap_inner() during normal business hours are properly
+ # reported. Also, we only suppress them for daemonic threads;
+        # if a non-daemonic thread encounters this, something else is wrong.
+ try:
+ self._bootstrap_inner()
+ except:
+ if self._daemonic and _sys is None:
+ return
+ raise
+
+ def _set_ident(self):
+ self._ident = get_ident()
+
+ if _HAVE_THREAD_NATIVE_ID:
+ def _set_native_id(self):
+ self._native_id = get_native_id()
+
+ def _set_tstate_lock(self):
+ """
+ Set a lock object which will be released by the interpreter when
+ the underlying thread state (see pystate.h) gets deleted.
+ """
+ self._tstate_lock = _set_sentinel()
+ self._tstate_lock.acquire()
+
+ if not self.daemon:
+ with _shutdown_locks_lock:
+ _maintain_shutdown_locks()
+ _shutdown_locks.add(self._tstate_lock)
+
+ def _bootstrap_inner(self):
+ try:
+ self._set_ident()
+ self._set_tstate_lock()
+ if _HAVE_THREAD_NATIVE_ID:
+ self._set_native_id()
+ self._started.set()
+ with _active_limbo_lock:
+ _active[self._ident] = self
+ del _limbo[self]
+
+ if _trace_hook:
+ _sys.settrace(_trace_hook)
+ if _profile_hook:
+ _sys.setprofile(_profile_hook)
+
+ try:
+ self.run()
+ except:
+ self._invoke_excepthook(self)
+ finally:
+ with _active_limbo_lock:
+ try:
+ # We don't call self._delete() because it also
+ # grabs _active_limbo_lock.
+ del _active[get_ident()]
+ except:
+ pass
+
+ def _stop(self):
+ # After calling ._stop(), .is_alive() returns False and .join() returns
+ # immediately. ._tstate_lock must be released before calling ._stop().
+ #
+ # Normal case: C code at the end of the thread's life
+ # (release_sentinel in _threadmodule.c) releases ._tstate_lock, and
+ # that's detected by our ._wait_for_tstate_lock(), called by .join()
+ # and .is_alive(). Any number of threads _may_ call ._stop()
+ # simultaneously (for example, if multiple threads are blocked in
+ # .join() calls), and they're not serialized. That's harmless -
+ # they'll just make redundant rebindings of ._is_stopped and
+ # ._tstate_lock. Obscure: we rebind ._tstate_lock last so that the
+ # "assert self._is_stopped" in ._wait_for_tstate_lock() always works
+ # (the assert is executed only if ._tstate_lock is None).
+ #
+ # Special case: _main_thread releases ._tstate_lock via this
+ # module's _shutdown() function.
+ lock = self._tstate_lock
+ if lock is not None:
+ assert not lock.locked()
+ self._is_stopped = True
+ self._tstate_lock = None
+ if not self.daemon:
+ with _shutdown_locks_lock:
+ # Remove our lock and other released locks from _shutdown_locks
+ _maintain_shutdown_locks()
+
+ def _delete(self):
+ "Remove current thread from the dict of currently running threads."
+ with _active_limbo_lock:
+ del _active[get_ident()]
+ # There must not be any python code between the previous line
+ # and after the lock is released. Otherwise a tracing function
+ # could try to acquire the lock again in the same thread, (in
+ # current_thread()), and would block.
+
+ def join(self, timeout=None):
+ """Wait until the thread terminates.
+
+ This blocks the calling thread until the thread whose join() method is
+ called terminates -- either normally or through an unhandled exception
+ or until the optional timeout occurs.
+
+ When the timeout argument is present and not None, it should be a
+ floating point number specifying a timeout for the operation in seconds
+ (or fractions thereof). As join() always returns None, you must call
+ is_alive() after join() to decide whether a timeout happened -- if the
+ thread is still alive, the join() call timed out.
+
+ When the timeout argument is not present or None, the operation will
+ block until the thread terminates.
+
+ A thread can be join()ed many times.
+
+ join() raises a RuntimeError if an attempt is made to join the current
+ thread as that would cause a deadlock. It is also an error to join() a
+        thread before it has been started, and attempts to do so raise the same
+ exception.
+
+ """
+ if not self._initialized:
+ raise RuntimeError("Thread.__init__() not called")
+ if not self._started.is_set():
+ raise RuntimeError("cannot join thread before it is started")
+ if self is current_thread():
+ raise RuntimeError("cannot join current thread")
+
+ if timeout is None:
+ self._wait_for_tstate_lock()
+ else:
+ # the behavior of a negative timeout isn't documented, but
+ # historically .join(timeout=x) for x<0 has acted as if timeout=0
+ self._wait_for_tstate_lock(timeout=max(timeout, 0))
+
+ def _wait_for_tstate_lock(self, block=True, timeout=-1):
+ # Issue #18808: wait for the thread state to be gone.
+ # At the end of the thread's life, after all knowledge of the thread
+ # is removed from C data structures, C code releases our _tstate_lock.
+ # This method passes its arguments to _tstate_lock.acquire().
+ # If the lock is acquired, the C code is done, and self._stop() is
+ # called. That sets ._is_stopped to True, and ._tstate_lock to None.
+ lock = self._tstate_lock
+ if lock is None:
+ # already determined that the C code is done
+ assert self._is_stopped
+ return
+
+ try:
+ if lock.acquire(block, timeout):
+ lock.release()
+ self._stop()
+ except:
+ if lock.locked():
+ # bpo-45274: lock.acquire() acquired the lock, but the function
+ # was interrupted with an exception before reaching the
+ # lock.release(). It can happen if a signal handler raises an
+ # exception, like CTRL+C which raises KeyboardInterrupt.
+ lock.release()
+ self._stop()
+ raise
+
+ @property
+ def name(self):
+ """A string used for identification purposes only.
+
+ It has no semantics. Multiple threads may be given the same name. The
+ initial name is set by the constructor.
+
+ """
+ assert self._initialized, "Thread.__init__() not called"
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ assert self._initialized, "Thread.__init__() not called"
+ self._name = str(name)
+
+ @property
+ def ident(self):
+ """Thread identifier of this thread or None if it has not been started.
+
+ This is a nonzero integer. See the get_ident() function. Thread
+ identifiers may be recycled when a thread exits and another thread is
+ created. The identifier is available even after the thread has exited.
+
+ """
+ assert self._initialized, "Thread.__init__() not called"
+ return self._ident
+
+ if _HAVE_THREAD_NATIVE_ID:
+ @property
+ def native_id(self):
+ """Native integral thread ID of this thread, or None if it has not been started.
+
+ This is a non-negative integer. See the get_native_id() function.
+ This represents the Thread ID as reported by the kernel.
+
+ """
+ assert self._initialized, "Thread.__init__() not called"
+ return self._native_id
+
+ def is_alive(self):
+ """Return whether the thread is alive.
+
+ This method returns True just before the run() method starts until just
+ after the run() method terminates. See also the module function
+ enumerate().
+
+ """
+ assert self._initialized, "Thread.__init__() not called"
+ if self._is_stopped or not self._started.is_set():
+ return False
+ self._wait_for_tstate_lock(False)
+ return not self._is_stopped
+
+ @property
+ def daemon(self):
+ """A boolean value indicating whether this thread is a daemon thread.
+
+ This must be set before start() is called, otherwise RuntimeError is
+ raised. Its initial value is inherited from the creating thread; the
+ main thread is not a daemon thread and therefore all threads created in
+ the main thread default to daemon = False.
+
+ The entire Python program exits when only daemon threads are left.
+
+ """
+ assert self._initialized, "Thread.__init__() not called"
+ return self._daemonic
+
+ @daemon.setter
+ def daemon(self, daemonic):
+ if not self._initialized:
+ raise RuntimeError("Thread.__init__() not called")
+ if self._started.is_set():
+ raise RuntimeError("cannot set daemon status of active thread")
+ self._daemonic = daemonic
+
+ def isDaemon(self):
+ """Return whether this thread is a daemon.
+
+ This method is deprecated, use the daemon attribute instead.
+
+ """
+ import warnings
+ warnings.warn('isDaemon() is deprecated, get the daemon attribute instead',
+ DeprecationWarning, stacklevel=2)
+ return self.daemon
+
+ def setDaemon(self, daemonic):
+ """Set whether this thread is a daemon.
+
+ This method is deprecated, use the .daemon property instead.
+
+ """
+ import warnings
+ warnings.warn('setDaemon() is deprecated, set the daemon attribute instead',
+ DeprecationWarning, stacklevel=2)
+ self.daemon = daemonic
+
+ def getName(self):
+ """Return a string used for identification purposes only.
+
+ This method is deprecated, use the name attribute instead.
+
+ """
+ import warnings
+ warnings.warn('getName() is deprecated, get the name attribute instead',
+ DeprecationWarning, stacklevel=2)
+ return self.name
+
+ def setName(self, name):
+ """Set the name string for this thread.
+
+ This method is deprecated, use the name attribute instead.
+
+ """
+ import warnings
+ warnings.warn('setName() is deprecated, set the name attribute instead',
+ DeprecationWarning, stacklevel=2)
+ self.name = name
+
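+# A minimal sketch of the two documented ways to give a Thread its activity
+# (illustrative only; task() is a hypothetical callable):
+#
+#     def task(n):
+#         ...
+#
+#     t = Thread(target=task, args=(42,), name='worker')
+#     t.start()
+#     t.join(timeout=1.0)              # join() returns None either way,
+#     if t.is_alive():                 # so check is_alive() for a timeout
+#         ...
+#
+#     class MyThread(Thread):          # alternative: override run()
+#         def run(self):
+#             ...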
+
+try:
+ from _thread import (_excepthook as excepthook,
+ _ExceptHookArgs as ExceptHookArgs)
+except ImportError:
+ # Simple Python implementation if _thread._excepthook() is not available
+ from traceback import print_exception as _print_exception
+ from collections import namedtuple
+
+ _ExceptHookArgs = namedtuple(
+ 'ExceptHookArgs',
+ 'exc_type exc_value exc_traceback thread')
+
+ def ExceptHookArgs(args):
+ return _ExceptHookArgs(*args)
+
+ def excepthook(args, /):
+ """
+ Handle uncaught Thread.run() exception.
+ """
+ if args.exc_type == SystemExit:
+ # silently ignore SystemExit
+ return
+
+ if _sys is not None and _sys.stderr is not None:
+ stderr = _sys.stderr
+ elif args.thread is not None:
+ stderr = args.thread._stderr
+ if stderr is None:
+ # do nothing if sys.stderr is None and sys.stderr was None
+ # when the thread was created
+ return
+ else:
+ # do nothing if sys.stderr is None and args.thread is None
+ return
+
+ if args.thread is not None:
+ name = args.thread.name
+ else:
+ name = get_ident()
+ print(f"Exception in thread {name}:",
+ file=stderr, flush=True)
+ _print_exception(args.exc_type, args.exc_value, args.exc_traceback,
+ file=stderr)
+ stderr.flush()
+
+
+# Original value of threading.excepthook
+__excepthook__ = excepthook
+
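+# A minimal sketch of installing a custom hook for exceptions that escape
+# Thread.run() (illustrative only; log_hook is a hypothetical name). The
+# original hook stays available as threading.__excepthook__:
+#
+#     import threading
+#
+#     def log_hook(args):
+#         print(f"{args.thread.name} died: {args.exc_value!r}")
+#
+#     threading.excepthook = log_hook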
+
+def _make_invoke_excepthook():
+ # Create a local namespace to ensure that variables remain alive
+ # when _invoke_excepthook() is called, even if it is called late during
+ # Python shutdown. It is mostly needed for daemon threads.
+
+ old_excepthook = excepthook
+ old_sys_excepthook = _sys.excepthook
+ if old_excepthook is None:
+ raise RuntimeError("threading.excepthook is None")
+ if old_sys_excepthook is None:
+ raise RuntimeError("sys.excepthook is None")
+
+ sys_exc_info = _sys.exc_info
+ local_print = print
+ local_sys = _sys
+
+ def invoke_excepthook(thread):
+ global excepthook
+ try:
+ hook = excepthook
+ if hook is None:
+ hook = old_excepthook
+
+ args = ExceptHookArgs([*sys_exc_info(), thread])
+
+ hook(args)
+ except Exception as exc:
+ exc.__suppress_context__ = True
+ del exc
+
+ if local_sys is not None and local_sys.stderr is not None:
+ stderr = local_sys.stderr
+ else:
+ stderr = thread._stderr
+
+ local_print("Exception in threading.excepthook:",
+ file=stderr, flush=True)
+
+ if local_sys is not None and local_sys.excepthook is not None:
+ sys_excepthook = local_sys.excepthook
+ else:
+ sys_excepthook = old_sys_excepthook
+
+ sys_excepthook(*sys_exc_info())
+ finally:
+ # Break reference cycle (exception stored in a variable)
+ args = None
+
+ return invoke_excepthook
+
+
+# The timer class was contributed by Itamar Shtull-Trauring
+
+class Timer(Thread):
+ """Call a function after a specified number of seconds:
+
+ t = Timer(30.0, f, args=None, kwargs=None)
+ t.start()
+ t.cancel() # stop the timer's action if it's still waiting
+
+ """
+
+ def __init__(self, interval, function, args=None, kwargs=None):
+ Thread.__init__(self)
+ self.interval = interval
+ self.function = function
+ self.args = args if args is not None else []
+ self.kwargs = kwargs if kwargs is not None else {}
+ self.finished = Event()
+
+ def cancel(self):
+ """Stop the timer if it hasn't finished yet."""
+ self.finished.set()
+
+ def run(self):
+ self.finished.wait(self.interval)
+ if not self.finished.is_set():
+ self.function(*self.args, **self.kwargs)
+ self.finished.set()
+
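+# A minimal Timer sketch (illustrative only): a one-shot delayed call that
+# can still be cancelled while it is waiting:
+#
+#     def beep():
+#         print('beep')
+#
+#     t = Timer(30.0, beep)
+#     t.start()        # beep() runs after 30 seconds...
+#     t.cancel()       # ...unless cancel() wins the race first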
+
+# Special thread class to represent the main thread
+
+class _MainThread(Thread):
+
+ def __init__(self):
+ Thread.__init__(self, name="MainThread", daemon=False)
+ self._set_tstate_lock()
+ self._started.set()
+ self._set_ident()
+ if _HAVE_THREAD_NATIVE_ID:
+ self._set_native_id()
+ with _active_limbo_lock:
+ _active[self._ident] = self
+
+
+# Dummy thread class to represent threads not started here.
+# These aren't garbage collected when they die, nor can they be waited for.
+# If they invoke anything in threading.py that calls current_thread(), they
+# leave an entry in the _active dict forever after.
+# Their purpose is to return *something* from current_thread().
+# They are marked as daemon threads so we won't wait for them
+# when we exit (conforming to the previous semantics).
+
+class _DummyThread(Thread):
+
+ def __init__(self):
+ Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
+
+ self._started.set()
+ self._set_ident()
+ if _HAVE_THREAD_NATIVE_ID:
+ self._set_native_id()
+ with _active_limbo_lock:
+ _active[self._ident] = self
+
+ def _stop(self):
+ pass
+
+ def is_alive(self):
+ assert not self._is_stopped and self._started.is_set()
+ return True
+
+ def join(self, timeout=None):
+ assert False, "cannot join a dummy thread"
+
+
+# Global API functions
+
+def current_thread():
+ """Return the current Thread object, corresponding to the caller's thread of control.
+
+ If the caller's thread of control was not created through the threading
+ module, a dummy thread object with limited functionality is returned.
+
+ """
+ try:
+ return _active[get_ident()]
+ except KeyError:
+ return _DummyThread()
+
+def currentThread():
+ """Return the current Thread object, corresponding to the caller's thread of control.
+
+ This function is deprecated, use current_thread() instead.
+
+ """
+ import warnings
+ warnings.warn('currentThread() is deprecated, use current_thread() instead',
+ DeprecationWarning, stacklevel=2)
+ return current_thread()
+
+def active_count():
+ """Return the number of Thread objects currently alive.
+
+ The returned count is equal to the length of the list returned by
+ enumerate().
+
+ """
+ with _active_limbo_lock:
+ return len(_active) + len(_limbo)
+
+def activeCount():
+ """Return the number of Thread objects currently alive.
+
+ This function is deprecated, use active_count() instead.
+
+ """
+ import warnings
+ warnings.warn('activeCount() is deprecated, use active_count() instead',
+ DeprecationWarning, stacklevel=2)
+ return active_count()
+
+def _enumerate():
+ # Same as enumerate(), but without the lock. Internal use only.
+ return list(_active.values()) + list(_limbo.values())
+
+def enumerate():
+ """Return a list of all Thread objects currently alive.
+
+ The list includes daemonic threads, dummy thread objects created by
+ current_thread(), and the main thread. It excludes terminated threads and
+ threads that have not yet been started.
+
+ """
+ with _active_limbo_lock:
+ return list(_active.values()) + list(_limbo.values())
+
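+# A minimal sketch of the introspection API (illustrative only):
+#
+#     import threading
+#
+#     for t in threading.enumerate():
+#         print(t.name, t.ident, t.daemon)
+#     assert threading.active_count() == len(threading.enumerate())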
+
+_threading_atexits = []
+_SHUTTING_DOWN = False
+
+def _register_atexit(func, *arg, **kwargs):
+ """CPython internal: register *func* to be called before joining threads.
+
+ The registered *func* is called with its arguments just before all
+ non-daemon threads are joined in `_shutdown()`. It provides a similar
+ purpose to `atexit.register()`, but its functions are called prior to
+ threading shutdown instead of interpreter shutdown.
+
+ For similarity to atexit, the registered functions are called in reverse.
+ """
+ if _SHUTTING_DOWN:
+ raise RuntimeError("can't register atexit after shutdown")
+
+ call = functools.partial(func, *arg, **kwargs)
+ _threading_atexits.append(call)
+
+
+from _thread import stack_size
+
+# Create the main thread object,
+# and make it available for the interpreter
+# (Py_Main) as threading._shutdown.
+
+_main_thread = _MainThread()
+
+def _shutdown():
+ """
+    Wait until the Python thread states of all non-daemon threads get deleted.
+ """
+ # Obscure: other threads may be waiting to join _main_thread. That's
+ # dubious, but some code does it. We can't wait for C code to release
+ # the main thread's tstate_lock - that won't happen until the interpreter
+ # is nearly dead. So we release it here. Note that just calling _stop()
+ # isn't enough: other threads may already be waiting on _tstate_lock.
+ if _main_thread._is_stopped:
+ # _shutdown() was already called
+ return
+
+ global _SHUTTING_DOWN
+ _SHUTTING_DOWN = True
+
+ # Call registered threading atexit functions before threads are joined.
+ # Order is reversed, similar to atexit.
+ for atexit_call in reversed(_threading_atexits):
+ atexit_call()
+
+ # Main thread
+ if _main_thread.ident == get_ident():
+ tlock = _main_thread._tstate_lock
+ # The main thread isn't finished yet, so its thread state lock can't
+ # have been released.
+ assert tlock is not None
+ assert tlock.locked()
+ tlock.release()
+ _main_thread._stop()
+ else:
+ # bpo-1596321: _shutdown() must be called in the main thread.
+ # If the threading module was not imported by the main thread,
+ # _main_thread is the thread which imported the threading module.
+        # In this case, ignore _main_thread, with the same behavior as for threads
+ # spawned by C libraries or using _thread.start_new_thread().
+ pass
+
+    # Join all non-daemon threads.
+ while True:
+ with _shutdown_locks_lock:
+ locks = list(_shutdown_locks)
+ _shutdown_locks.clear()
+
+ if not locks:
+ break
+
+ for lock in locks:
+ # mimic Thread.join()
+ lock.acquire()
+ lock.release()
+
+        # New threads can be spawned while we are waiting for the other
+        # threads to complete.
+
+
+def main_thread():
+ """Return the main thread object.
+
+ In normal conditions, the main thread is the thread from which the
+ Python interpreter was started.
+ """
+ return _main_thread
+
+# get thread-local implementation, either from the thread
+# module, or from the python fallback
+
+try:
+ from _thread import _local as local
+except ImportError:
+ from _threading_local import local
+
+
+def _after_fork():
+ """
+    Clean up threading module state that should not exist after a fork.
+ """
+ # Reset _active_limbo_lock, in case we forked while the lock was held
+ # by another (non-forked) thread. http://bugs.python.org/issue874900
+ global _active_limbo_lock, _main_thread
+ global _shutdown_locks_lock, _shutdown_locks
+ _active_limbo_lock = RLock()
+
+ # fork() only copied the current thread; clear references to others.
+ new_active = {}
+
+ try:
+ current = _active[get_ident()]
+ except KeyError:
+ # fork() was called in a thread which was not spawned
+ # by threading.Thread. For example, a thread spawned
+ # by thread.start_new_thread().
+ current = _MainThread()
+
+ _main_thread = current
+
+ # reset _shutdown() locks: threads re-register their _tstate_lock below
+ _shutdown_locks_lock = _allocate_lock()
+ _shutdown_locks = set()
+
+ with _active_limbo_lock:
+ # Dangling thread instances must still have their locks reset,
+ # because someone may join() them.
+ threads = set(_enumerate())
+ threads.update(_dangling)
+ for thread in threads:
+ # Any lock/condition variable may be currently locked or in an
+ # invalid state, so we reinitialize them.
+ if thread is current:
+ # There is only one active thread. We reset the ident to
+ # its new value since it can have changed.
+ thread._reset_internal_locks(True)
+ ident = get_ident()
+ thread._ident = ident
+ new_active[ident] = thread
+ else:
+ # All the others are already stopped.
+ thread._reset_internal_locks(False)
+ thread._stop()
+
+ _limbo.clear()
+ _active.clear()
+ _active.update(new_active)
+ assert len(_active) == 1
+
+
+if hasattr(_os, "register_at_fork"):
+ _os.register_at_fork(after_in_child=_after_fork)
diff --git a/infer_4_37_2/lib/python3.10/timeit.py b/infer_4_37_2/lib/python3.10/timeit.py
new file mode 100644
index 0000000000000000000000000000000000000000..9dfd454936e6b8c790297f2020d1295fc447dde6
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/timeit.py
@@ -0,0 +1,376 @@
+#! /usr/bin/env python3
+
+"""Tool for measuring execution time of small code snippets.
+
+This module avoids a number of common traps for measuring execution
+times. See also Tim Peters' introduction to the Algorithms chapter in
+the Python Cookbook, published by O'Reilly.
+
+Library usage: see the Timer class.
+
+Command line usage:
+ python timeit.py [-n N] [-r N] [-s S] [-p] [-h] [--] [statement]
+
+Options:
+ -n/--number N: how many times to execute 'statement' (default: see below)
+ -r/--repeat N: how many times to repeat the timer (default 5)
+ -s/--setup S: statement to be executed once initially (default 'pass').
+ Execution time of this setup statement is NOT timed.
+ -p/--process: use time.process_time() (default is time.perf_counter())
+ -v/--verbose: print raw timing results; repeat for more digits precision
+ -u/--unit: set the output time unit (nsec, usec, msec, or sec)
+ -h/--help: print this usage message and exit
+ --: separate options from statement, use when statement starts with -
+ statement: statement to be timed (default 'pass')
+
+A multi-line statement may be given by specifying each line as a
+separate argument; indented lines are possible by enclosing an
+argument in quotes and using leading spaces. Multiple -s options are
+treated similarly.
+
+If -n is not given, a suitable number of loops is calculated by trying
+increasing numbers from the sequence 1, 2, 5, 10, 20, 50, ... until the
+total time is at least 0.2 seconds.
+
+Note: there is a certain baseline overhead associated with executing a
+pass statement. It differs between versions. The code here doesn't try
+to hide it, but you should be aware of it. The baseline overhead can be
+measured by invoking the program without arguments.
+
+Classes:
+
+ Timer
+
+Functions:
+
+ timeit(string, string) -> float
+ repeat(string, string) -> list
+ default_timer() -> float
+
+"""
+
+import gc
+import sys
+import time
+import itertools
+
+__all__ = ["Timer", "timeit", "repeat", "default_timer"]
+
+dummy_src_name = "<timeit-src>"
+default_number = 1000000
+default_repeat = 5
+default_timer = time.perf_counter
+
+_globals = globals
+
+# Don't change the indentation of the template; the reindent() calls
+# in Timer.__init__() depend on setup being indented 4 spaces and stmt
+# being indented 8 spaces.
+template = """
+def inner(_it, _timer{init}):
+ {setup}
+ _t0 = _timer()
+ for _i in _it:
+ {stmt}
+ pass
+ _t1 = _timer()
+ return _t1 - _t0
+"""
+
+def reindent(src, indent):
+ """Helper to reindent a multi-line statement."""
+ return src.replace("\n", "\n" + " "*indent)
+
+class Timer:
+ """Class for timing execution speed of small code snippets.
+
+ The constructor takes a statement to be timed, an additional
+ statement used for setup, and a timer function. Both statements
+ default to 'pass'; the timer function is platform-dependent (see
+ module doc string). If 'globals' is specified, the code will be
+ executed within that namespace (as opposed to inside timeit's
+ namespace).
+
+ To measure the execution time of the first statement, use the
+ timeit() method. The repeat() method is a convenience to call
+ timeit() multiple times and return a list of results.
+
+ The statements may contain newlines, as long as they don't contain
+ multi-line string literals.
+ """
+
+ def __init__(self, stmt="pass", setup="pass", timer=default_timer,
+ globals=None):
+ """Constructor. See class doc string."""
+ self.timer = timer
+ local_ns = {}
+ global_ns = _globals() if globals is None else globals
+ init = ''
+ if isinstance(setup, str):
+ # Check that the code can be compiled outside a function
+ compile(setup, dummy_src_name, "exec")
+ stmtprefix = setup + '\n'
+ setup = reindent(setup, 4)
+ elif callable(setup):
+ local_ns['_setup'] = setup
+ init += ', _setup=_setup'
+ stmtprefix = ''
+ setup = '_setup()'
+ else:
+ raise ValueError("setup is neither a string nor callable")
+ if isinstance(stmt, str):
+ # Check that the code can be compiled outside a function
+ compile(stmtprefix + stmt, dummy_src_name, "exec")
+ stmt = reindent(stmt, 8)
+ elif callable(stmt):
+ local_ns['_stmt'] = stmt
+ init += ', _stmt=_stmt'
+ stmt = '_stmt()'
+ else:
+ raise ValueError("stmt is neither a string nor callable")
+ src = template.format(stmt=stmt, setup=setup, init=init)
+ self.src = src # Save for traceback display
+ code = compile(src, dummy_src_name, "exec")
+ exec(code, global_ns, local_ns)
+ self.inner = local_ns["inner"]
+
+ def print_exc(self, file=None):
+ """Helper to print a traceback from the timed code.
+
+ Typical use:
+
+ t = Timer(...) # outside the try/except
+ try:
+ t.timeit(...) # or t.repeat(...)
+ except:
+ t.print_exc()
+
+ The advantage over the standard traceback is that source lines
+ in the compiled template will be displayed.
+
+ The optional file argument directs where the traceback is
+ sent; it defaults to sys.stderr.
+ """
+ import linecache, traceback
+ if self.src is not None:
+ linecache.cache[dummy_src_name] = (len(self.src),
+ None,
+ self.src.split("\n"),
+ dummy_src_name)
+ # else the source is already stored somewhere else
+
+ traceback.print_exc(file=file)
+
+ def timeit(self, number=default_number):
+ """Time 'number' executions of the main statement.
+
+ To be precise, this executes the setup statement once, and
+ then returns the time it takes to execute the main statement
+ a number of times, as a float measured in seconds. The
+ argument is the number of times through the loop, defaulting
+ to one million. The main statement, the setup statement and
+ the timer function to be used are passed to the constructor.
+ """
+ it = itertools.repeat(None, number)
+ gcold = gc.isenabled()
+ gc.disable()
+ try:
+ timing = self.inner(it, self.timer)
+ finally:
+ if gcold:
+ gc.enable()
+ return timing
+
+ def repeat(self, repeat=default_repeat, number=default_number):
+ """Call timeit() a few times.
+
+        This is a convenience function that calls timeit()
+        repeatedly, returning a list of results. The first argument
+        specifies how many times to call timeit(), defaulting to 5;
+        the second argument specifies the 'number' argument of
+        timeit(), defaulting to one million.
+
+ Note: it's tempting to calculate mean and standard deviation
+ from the result vector and report these. However, this is not
+ very useful. In a typical case, the lowest value gives a
+ lower bound for how fast your machine can run the given code
+ snippet; higher values in the result vector are typically not
+ caused by variability in Python's speed, but by other
+ processes interfering with your timing accuracy. So the min()
+ of the result is probably the only number you should be
+ interested in. After that, you should look at the entire
+ vector and apply common sense rather than statistics.
+ """
+ r = []
+ for i in range(repeat):
+ t = self.timeit(number)
+ r.append(t)
+ return r
+
+ def autorange(self, callback=None):
+ """Return the number of loops and time taken so that total time >= 0.2.
+
+ Calls the timeit method with increasing numbers from the sequence
+ 1, 2, 5, 10, 20, 50, ... until the time taken is at least 0.2
+ second. Returns (number, time_taken).
+
+ If *callback* is given and is not None, it will be called after
+ each trial with two arguments: ``callback(number, time_taken)``.
+ """
+ i = 1
+ while True:
+ for j in 1, 2, 5:
+ number = i * j
+ time_taken = self.timeit(number)
+ if callback:
+ callback(number, time_taken)
+ if time_taken >= 0.2:
+ return (number, time_taken)
+ i *= 10
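+
+    # Illustrative: t.autorange(lambda n, secs: print(n, secs)) prints each
+    # trial and returns the first (number, time_taken) with time_taken >= 0.2.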
+
+def timeit(stmt="pass", setup="pass", timer=default_timer,
+ number=default_number, globals=None):
+ """Convenience function to create Timer object and call timeit method."""
+ return Timer(stmt, setup, timer, globals).timeit(number)
+
+def repeat(stmt="pass", setup="pass", timer=default_timer,
+ repeat=default_repeat, number=default_number, globals=None):
+ """Convenience function to create Timer object and call repeat method."""
+ return Timer(stmt, setup, timer, globals).repeat(repeat, number)
+
+def main(args=None, *, _wrap_timer=None):
+ """Main program, used when run as a script.
+
+ The optional 'args' argument specifies the command line to be parsed,
+ defaulting to sys.argv[1:].
+
+ The return value is an exit code to be passed to sys.exit(); it
+ may be None to indicate success.
+
+ When an exception happens during timing, a traceback is printed to
+ stderr and the return value is 1. Exceptions at other times
+ (including the template compilation) are not caught.
+
+ '_wrap_timer' is an internal interface used for unit testing. If it
+ is not None, it must be a callable that accepts a timer function
+ and returns another timer function (used for unit testing).
+ """
+ if args is None:
+ args = sys.argv[1:]
+ import getopt
+ try:
+ opts, args = getopt.getopt(args, "n:u:s:r:tcpvh",
+ ["number=", "setup=", "repeat=",
+ "time", "clock", "process",
+ "verbose", "unit=", "help"])
+ except getopt.error as err:
+ print(err)
+ print("use -h/--help for command line help")
+ return 2
+
+ timer = default_timer
+ stmt = "\n".join(args) or "pass"
+ number = 0 # auto-determine
+ setup = []
+ repeat = default_repeat
+ verbose = 0
+ time_unit = None
+ units = {"nsec": 1e-9, "usec": 1e-6, "msec": 1e-3, "sec": 1.0}
+ precision = 3
+ for o, a in opts:
+ if o in ("-n", "--number"):
+ number = int(a)
+ if o in ("-s", "--setup"):
+ setup.append(a)
+ if o in ("-u", "--unit"):
+ if a in units:
+ time_unit = a
+ else:
+ print("Unrecognized unit. Please select nsec, usec, msec, or sec.",
+ file=sys.stderr)
+ return 2
+ if o in ("-r", "--repeat"):
+ repeat = int(a)
+ if repeat <= 0:
+ repeat = 1
+ if o in ("-p", "--process"):
+ timer = time.process_time
+ if o in ("-v", "--verbose"):
+ if verbose:
+ precision += 1
+ verbose += 1
+ if o in ("-h", "--help"):
+ print(__doc__, end=' ')
+ return 0
+ setup = "\n".join(setup) or "pass"
+
+ # Include the current directory, so that local imports work (sys.path
+ # contains the directory of this script, rather than the current
+ # directory)
+ import os
+ sys.path.insert(0, os.curdir)
+ if _wrap_timer is not None:
+ timer = _wrap_timer(timer)
+
+ t = Timer(stmt, setup, timer)
+ if number == 0:
+ # determine number so that 0.2 <= total time < 2.0
+ callback = None
+ if verbose:
+ def callback(number, time_taken):
+ msg = "{num} loop{s} -> {secs:.{prec}g} secs"
+ plural = (number != 1)
+ print(msg.format(num=number, s='s' if plural else '',
+ secs=time_taken, prec=precision))
+ try:
+ number, _ = t.autorange(callback)
+ except:
+ t.print_exc()
+ return 1
+
+ if verbose:
+ print()
+
+ try:
+ raw_timings = t.repeat(repeat, number)
+ except:
+ t.print_exc()
+ return 1
+
+ def format_time(dt):
+ unit = time_unit
+
+ if unit is not None:
+ scale = units[unit]
+ else:
+ scales = [(scale, unit) for unit, scale in units.items()]
+ scales.sort(reverse=True)
+ for scale, unit in scales:
+ if dt >= scale:
+ break
+
+ return "%.*g %s" % (precision, dt / scale, unit)
+
+ if verbose:
+ print("raw times: %s" % ", ".join(map(format_time, raw_timings)))
+ print()
+ timings = [dt / number for dt in raw_timings]
+
+ best = min(timings)
+ print("%d loop%s, best of %d: %s per loop"
+ % (number, 's' if number != 1 else '',
+ repeat, format_time(best)))
+
+    worst = max(timings)
+ if worst >= best * 4:
+ import warnings
+ warnings.warn_explicit("The test results are likely unreliable. "
+ "The worst time (%s) was more than four times "
+ "slower than the best time (%s)."
+ % (format_time(worst), format_time(best)),
+ UserWarning, '', 0)
+ return None
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/infer_4_37_2/lib/python3.10/tokenize.py b/infer_4_37_2/lib/python3.10/tokenize.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d7736fe985981febeda85733b7252e8bca85d18
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/tokenize.py
@@ -0,0 +1,684 @@
+"""Tokenization help for Python programs.
+
+tokenize(readline) is a generator that breaks a stream of bytes into
+Python tokens. It decodes the bytes according to PEP-0263 for
+determining source file encoding.
+
+It accepts a readline-like method which is called repeatedly to get the
+next line of input (or b"" for EOF). It generates 5-tuples with these
+members:
+
+ the token type (see token.py)
+ the token (a string)
+ the starting (row, column) indices of the token (a 2-tuple of ints)
+ the ending (row, column) indices of the token (a 2-tuple of ints)
+ the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators. Additionally, all token lists start with an ENCODING token
+which tells you which encoding was used to decode the bytes stream.
+"""
+
+__author__ = 'Ka-Ping Yee <ping@lfw.org>'
+__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
+ 'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
+ 'Michael Foord')
+from builtins import open as _builtin_open
+from codecs import lookup, BOM_UTF8
+import collections
+import functools
+from io import TextIOWrapper
+import itertools as _itertools
+import re
+import sys
+from token import *
+from token import EXACT_TOKEN_TYPES
+
+cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
+blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
+
+import token
+__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
+ "untokenize", "TokenInfo"]
+del token
+
+class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
+ def __repr__(self):
+ annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
+ return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
+ self._replace(type=annotated_type))
+
+ @property
+ def exact_type(self):
+ if self.type == OP and self.string in EXACT_TOKEN_TYPES:
+ return EXACT_TOKEN_TYPES[self.string]
+ else:
+ return self.type
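+
+    # Illustrative: tokenizing "(" yields a TokenInfo whose .type is the
+    # generic OP but whose .exact_type is LPAR, via EXACT_TOKEN_TYPES.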
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
+
+# Note: we use unicode matching for names ("\w") but ascii matching for
+# number literals.
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'\w+'
+
+Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
+Binnumber = r'0[bB](?:_?[01])+'
+Octnumber = r'0[oO](?:_?[0-7])+'
+Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
+Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
+Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
+ r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
+Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+# Return the empty string, plus all of the valid string prefixes.
+def _all_string_prefixes():
+ # The valid string prefixes. Only contain the lower case versions,
+ # and don't contain any permutations (include 'fr', but not
+ # 'rf'). The various permutations will be generated.
+ _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
+ # if we add binary f-strings, add: ['fb', 'fbr']
+ result = {''}
+ for prefix in _valid_string_prefixes:
+ for t in _itertools.permutations(prefix):
+ # create a list with upper and lower versions of each
+ # character
+ for u in _itertools.product(*[(c, c.upper()) for c in t]):
+ result.add(''.join(u))
+ return result
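+
+# Illustrative: the single prefix 'fr' alone expands to 'fr', 'fR', 'Fr',
+# 'FR', 'rf', 'rF', 'Rf' and 'RF'; the empty string makes prefixes optional.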
+
+@functools.lru_cache
+def _compile(expr):
+ return re.compile(expr, re.UNICODE)
+
+# Note that since _all_string_prefixes includes the empty string,
+# StringPrefix can be the empty string (making it optional).
+StringPrefix = group(*_all_string_prefixes())
+
+# Tail end of ' string.
+Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+# Tail end of " string.
+Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+# Tail end of ''' string.
+Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+# Tail end of """ string.
+Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+Triple = group(StringPrefix + "'''", StringPrefix + '"""')
+# Single-line ' or " string.
+String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+ StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+
+# Sorting in reverse order puts the long operators before their prefixes.
+# Otherwise if = came before ==, == would get recognized as two instances
+# of =.
+Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
+Funny = group(r'\r?\n', Special)
+
+PlainToken = group(Number, Funny, String, Name)
+Token = Ignore + PlainToken
+
+# First (or only) line of ' or " string.
+ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ group("'", r'\\\r?\n'),
+ StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+ group('"', r'\\\r?\n'))
+PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
+PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+
+# For a given string prefix plus quotes, endpats maps it to a regex
+# to match the remainder of that string. _prefix can be empty, for
+# a normal single or triple quoted string (with no prefix).
+endpats = {}
+for _prefix in _all_string_prefixes():
+ endpats[_prefix + "'"] = Single
+ endpats[_prefix + '"'] = Double
+ endpats[_prefix + "'''"] = Single3
+ endpats[_prefix + '"""'] = Double3
+
+# A set of all of the single and triple quoted string prefixes,
+# including the opening quotes.
+single_quoted = set()
+triple_quoted = set()
+for t in _all_string_prefixes():
+ for u in (t + '"', t + "'"):
+ single_quoted.add(u)
+ for u in (t + '"""', t + "'''"):
+ triple_quoted.add(u)
+
+tabsize = 8
+
+class TokenError(Exception): pass
+
+class StopTokenizing(Exception): pass
+
+
+class Untokenizer:
+
+ def __init__(self):
+ self.tokens = []
+ self.prev_row = 1
+ self.prev_col = 0
+ self.encoding = None
+
+ def add_whitespace(self, start):
+ row, col = start
+ if row < self.prev_row or row == self.prev_row and col < self.prev_col:
+ raise ValueError("start ({},{}) precedes previous end ({},{})"
+ .format(row, col, self.prev_row, self.prev_col))
+ row_offset = row - self.prev_row
+ if row_offset:
+ self.tokens.append("\\\n" * row_offset)
+ self.prev_col = 0
+ col_offset = col - self.prev_col
+ if col_offset:
+ self.tokens.append(" " * col_offset)
+
+ def untokenize(self, iterable):
+ it = iter(iterable)
+ indents = []
+ startline = False
+ for t in it:
+ if len(t) == 2:
+ self.compat(t, it)
+ break
+ tok_type, token, start, end, line = t
+ if tok_type == ENCODING:
+ self.encoding = token
+ continue
+ if tok_type == ENDMARKER:
+ break
+ if tok_type == INDENT:
+ indents.append(token)
+ continue
+ elif tok_type == DEDENT:
+ indents.pop()
+ self.prev_row, self.prev_col = end
+ continue
+ elif tok_type in (NEWLINE, NL):
+ startline = True
+ elif startline and indents:
+ indent = indents[-1]
+ if start[1] >= len(indent):
+ self.tokens.append(indent)
+ self.prev_col = len(indent)
+ startline = False
+ self.add_whitespace(start)
+ self.tokens.append(token)
+ self.prev_row, self.prev_col = end
+ if tok_type in (NEWLINE, NL):
+ self.prev_row += 1
+ self.prev_col = 0
+ return "".join(self.tokens)
+
+ def compat(self, token, iterable):
+ indents = []
+ toks_append = self.tokens.append
+ startline = token[0] in (NEWLINE, NL)
+ prevstring = False
+
+ for tok in _itertools.chain([token], iterable):
+ toknum, tokval = tok[:2]
+ if toknum == ENCODING:
+ self.encoding = tokval
+ continue
+
+ if toknum in (NAME, NUMBER):
+ tokval += ' '
+
+ # Insert a space between two consecutive strings
+ if toknum == STRING:
+ if prevstring:
+ tokval = ' ' + tokval
+ prevstring = True
+ else:
+ prevstring = False
+
+ if toknum == INDENT:
+ indents.append(tokval)
+ continue
+ elif toknum == DEDENT:
+ indents.pop()
+ continue
+ elif toknum in (NEWLINE, NL):
+ startline = True
+ elif startline and indents:
+ toks_append(indents[-1])
+ startline = False
+ toks_append(tokval)
+
+
+def untokenize(iterable):
+ """Transform tokens back into Python source code.
+ It returns a bytes object, encoded using the ENCODING
+ token, which is the first token sequence output by tokenize.
+
+ Each element returned by the iterable must be a token sequence
+ with at least two elements, a token number and token value. If
+ only two tokens are passed, the resulting output is poor.
+
+ Round-trip invariant for full input:
+ Untokenized source will match input source exactly
+
+ Round-trip invariant for limited input:
+ # Output bytes will tokenize back to the input
+ t1 = [tok[:2] for tok in tokenize(f.readline)]
+ newcode = untokenize(t1)
+ readline = BytesIO(newcode).readline
+ t2 = [tok[:2] for tok in tokenize(readline)]
+ assert t1 == t2
+ """
+ ut = Untokenizer()
+ out = ut.untokenize(iterable)
+ if ut.encoding is not None:
+ out = out.encode(ut.encoding)
+ return out
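+
+# Illustrative round trip (limited input), assuming `source` holds bytes of
+# Python code:
+#
+#     from io import BytesIO
+#     t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
+#     t2 = [tok[:2] for tok in tokenize(BytesIO(untokenize(t1)).readline)]
+#     assert t1 == t2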
+
+
+def _get_normal_name(orig_enc):
+ """Imitates get_normal_name in tokenizer.c."""
+ # Only care about the first 12 characters.
+ enc = orig_enc[:12].lower().replace("_", "-")
+ if enc == "utf-8" or enc.startswith("utf-8-"):
+ return "utf-8"
+ if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+ return "iso-8859-1"
+ return orig_enc
+
+def detect_encoding(readline):
+ """
+ The detect_encoding() function is used to detect the encoding that should
+ be used to decode a Python source file. It requires one argument, readline,
+ in the same way as the tokenize() generator.
+
+ It will call readline a maximum of twice, and return the encoding used
+ (as a string) and a list of any lines (left as bytes) it has read in.
+
+ It detects the encoding from the presence of a utf-8 bom or an encoding
+ cookie as specified in pep-0263. If both a bom and a cookie are present,
+ but disagree, a SyntaxError will be raised. If the encoding cookie is an
+ invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+ 'utf-8-sig' is returned.
+
+ If no encoding is specified, then the default of 'utf-8' will be returned.
+ """
+ try:
+ filename = readline.__self__.name
+ except AttributeError:
+ filename = None
+ bom_found = False
+ encoding = None
+ default = 'utf-8'
+ def read_or_stop():
+ try:
+ return readline()
+ except StopIteration:
+ return b''
+
+ def find_cookie(line):
+ try:
+ # Decode as UTF-8. Either the line is an encoding declaration,
+ # in which case it should be pure ASCII, or it must be UTF-8
+ # per default encoding.
+ line_string = line.decode('utf-8')
+ except UnicodeDecodeError:
+ msg = "invalid or missing encoding declaration"
+ if filename is not None:
+ msg = '{} for {!r}'.format(msg, filename)
+ raise SyntaxError(msg)
+
+ match = cookie_re.match(line_string)
+ if not match:
+ return None
+ encoding = _get_normal_name(match.group(1))
+ try:
+ codec = lookup(encoding)
+ except LookupError:
+ # This behaviour mimics the Python interpreter
+ if filename is None:
+ msg = "unknown encoding: " + encoding
+ else:
+ msg = "unknown encoding for {!r}: {}".format(filename,
+ encoding)
+ raise SyntaxError(msg)
+
+ if bom_found:
+ if encoding != 'utf-8':
+ # This behaviour mimics the Python interpreter
+ if filename is None:
+ msg = 'encoding problem: utf-8'
+ else:
+ msg = 'encoding problem for {!r}: utf-8'.format(filename)
+ raise SyntaxError(msg)
+ encoding += '-sig'
+ return encoding
+
+ first = read_or_stop()
+ if first.startswith(BOM_UTF8):
+ bom_found = True
+ first = first[3:]
+ default = 'utf-8-sig'
+ if not first:
+ return default, []
+
+ encoding = find_cookie(first)
+ if encoding:
+ return encoding, [first]
+ if not blank_re.match(first):
+ return default, [first]
+
+ second = read_or_stop()
+ if not second:
+ return default, [first]
+
+ encoding = find_cookie(second)
+ if encoding:
+ return encoding, [first, second]
+
+ return default, [first, second]
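+
+# Illustrative: detecting an encoding cookie on an in-memory stream:
+#
+#     from io import BytesIO
+#     enc, lines = detect_encoding(
+#         BytesIO(b'# -*- coding: latin-1 -*-\nx = 1\n').readline)
+#     # enc == 'iso-8859-1'; lines holds the one line already consumed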
+
+
+def open(filename):
+ """Open a file in read only mode using the encoding detected by
+ detect_encoding().
+ """
+ buffer = _builtin_open(filename, 'rb')
+ try:
+ encoding, lines = detect_encoding(buffer.readline)
+ buffer.seek(0)
+ text = TextIOWrapper(buffer, encoding, line_buffering=True)
+ text.mode = 'r'
+ return text
+ except:
+ buffer.close()
+ raise
+
+
+def tokenize(readline):
+ """
+ The tokenize() generator requires one argument, readline, which
+ must be a callable object which provides the same interface as the
+ readline() method of built-in file objects. Each call to the function
+ should return one line of input as bytes. Alternatively, readline
+ can be a callable function terminating with StopIteration:
+ readline = open(myfile, 'rb').__next__ # Example of alternate readline
+
+ The generator produces 5-tuples with these members: the token type; the
+ token string; a 2-tuple (srow, scol) of ints specifying the row and
+ column where the token begins in the source; a 2-tuple (erow, ecol) of
+ ints specifying the row and column where the token ends in the source;
+ and the line on which the token was found. The line passed is the
+ physical line.
+
+ The first token sequence will always be an ENCODING token
+ which tells you which encoding was used to decode the bytes stream.
+ """
+ encoding, consumed = detect_encoding(readline)
+ empty = _itertools.repeat(b"")
+ rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
+ return _tokenize(rl_gen.__next__, encoding)
+
+
+def _tokenize(readline, encoding):
+ lnum = parenlev = continued = 0
+ numchars = '0123456789'
+ contstr, needcont = '', 0
+ contline = None
+ indents = [0]
+
+ if encoding is not None:
+ if encoding == "utf-8-sig":
+ # BOM will already have been stripped.
+ encoding = "utf-8"
+ yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
+ last_line = b''
+ line = b''
+ while True: # loop over lines in stream
+ try:
+ # We capture the value of the line variable here because
+ # readline uses the empty string '' to signal end of input,
+ # hence `line` itself will always be overwritten at the end
+ # of this loop.
+ last_line = line
+ line = readline()
+ except StopIteration:
+ line = b''
+
+ if encoding is not None:
+ line = line.decode(encoding)
+ lnum += 1
+ pos, max = 0, len(line)
+
+ if contstr: # continued string
+ if not line:
+ raise TokenError("EOF in multi-line string", strstart)
+ endmatch = endprog.match(line)
+ if endmatch:
+ pos = end = endmatch.end(0)
+ yield TokenInfo(STRING, contstr + line[:end],
+ strstart, (lnum, end), contline + line)
+ contstr, needcont = '', 0
+ contline = None
+ elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+ yield TokenInfo(ERRORTOKEN, contstr + line,
+ strstart, (lnum, len(line)), contline)
+ contstr = ''
+ contline = None
+ continue
+ else:
+ contstr = contstr + line
+ contline = contline + line
+ continue
+
+ elif parenlev == 0 and not continued: # new statement
+ if not line: break
+ column = 0
+ while pos < max: # measure leading whitespace
+ if line[pos] == ' ':
+ column += 1
+ elif line[pos] == '\t':
+ column = (column//tabsize + 1)*tabsize
+ elif line[pos] == '\f':
+ column = 0
+ else:
+ break
+ pos += 1
+ if pos == max:
+ break
+
+ if line[pos] in '#\r\n': # skip comments or blank lines
+ if line[pos] == '#':
+ comment_token = line[pos:].rstrip('\r\n')
+ yield TokenInfo(COMMENT, comment_token,
+ (lnum, pos), (lnum, pos + len(comment_token)), line)
+ pos += len(comment_token)
+
+ yield TokenInfo(NL, line[pos:],
+ (lnum, pos), (lnum, len(line)), line)
+ continue
+
+ if column > indents[-1]: # count indents or dedents
+ indents.append(column)
+ yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+ while column < indents[-1]:
+ if column not in indents:
+ raise IndentationError(
+ "unindent does not match any outer indentation level",
+ ("", lnum, pos, line))
+ indents = indents[:-1]
+
+ yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
+
+ else: # continued statement
+ if not line:
+ raise TokenError("EOF in multi-line statement", (lnum, 0))
+ continued = 0
+
+ while pos < max:
+ pseudomatch = _compile(PseudoToken).match(line, pos)
+ if pseudomatch: # scan for tokens
+ start, end = pseudomatch.span(1)
+ spos, epos, pos = (lnum, start), (lnum, end), end
+ if start == end:
+ continue
+ token, initial = line[start:end], line[start]
+
+ if (initial in numchars or # ordinary number
+ (initial == '.' and token != '.' and token != '...')):
+ yield TokenInfo(NUMBER, token, spos, epos, line)
+ elif initial in '\r\n':
+ if parenlev > 0:
+ yield TokenInfo(NL, token, spos, epos, line)
+ else:
+ yield TokenInfo(NEWLINE, token, spos, epos, line)
+
+ elif initial == '#':
+ assert not token.endswith("\n")
+ yield TokenInfo(COMMENT, token, spos, epos, line)
+
+ elif token in triple_quoted:
+ endprog = _compile(endpats[token])
+ endmatch = endprog.match(line, pos)
+ if endmatch: # all on one line
+ pos = endmatch.end(0)
+ token = line[start:pos]
+ yield TokenInfo(STRING, token, spos, (lnum, pos), line)
+ else:
+ strstart = (lnum, start) # multiple lines
+ contstr = line[start:]
+ contline = line
+ break
+
+ # Check up to the first 3 chars of the token to see if
+ # they're in the single_quoted set. If so, they start
+ # a string.
+ # We're using the first 3, because we're looking for
+ # "rb'" (for example) at the start of the token. If
+ # we switch to longer prefixes, this needs to be
+ # adjusted.
+ # Note that initial == token[:1].
+ # Also note that single quote checking must come after
+ # triple quote checking (above).
+ elif (initial in single_quoted or
+ token[:2] in single_quoted or
+ token[:3] in single_quoted):
+ if token[-1] == '\n': # continued string
+ strstart = (lnum, start)
+ # Again, using the first 3 chars of the
+ # token. This is looking for the matching end
+ # regex for the correct type of quote
+ # character. So it's really looking for
+ # endpats["'"] or endpats['"'], by trying to
+ # skip string prefix characters, if any.
+ endprog = _compile(endpats.get(initial) or
+ endpats.get(token[1]) or
+ endpats.get(token[2]))
+ contstr, needcont = line[start:], 1
+ contline = line
+ break
+ else: # ordinary string
+ yield TokenInfo(STRING, token, spos, epos, line)
+
+ elif initial.isidentifier(): # ordinary name
+ yield TokenInfo(NAME, token, spos, epos, line)
+ elif initial == '\\': # continued stmt
+ continued = 1
+ else:
+ if initial in '([{':
+ parenlev += 1
+ elif initial in ')]}':
+ parenlev -= 1
+ yield TokenInfo(OP, token, spos, epos, line)
+ else:
+ yield TokenInfo(ERRORTOKEN, line[pos],
+ (lnum, pos), (lnum, pos+1), line)
+ pos += 1
+
+ # Add an implicit NEWLINE if the input doesn't end in one
+ if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
+ yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
+ for indent in indents[1:]: # pop remaining indent levels
+ yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
+ yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+
+
+def generate_tokens(readline):
+ """Tokenize a source reading Python code as unicode strings.
+
+ This has the same API as tokenize(), except that it expects the *readline*
+ callable to return str objects instead of bytes.
+ """
+ return _tokenize(readline, None)
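+
+# Illustrative: tokenizing an already-decoded str source:
+#
+#     import io
+#     for tok in generate_tokens(io.StringIO("x = 1\n").readline):
+#         print(tok_name[tok.type], repr(tok.string))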
+
+def main():
+ import argparse
+
+ # Helper error handling routines
+ def perror(message):
+ sys.stderr.write(message)
+ sys.stderr.write('\n')
+
+ def error(message, filename=None, location=None):
+ if location:
+ args = (filename,) + location + (message,)
+ perror("%s:%d:%d: error: %s" % args)
+ elif filename:
+ perror("%s: error: %s" % (filename, message))
+ else:
+ perror("error: %s" % message)
+ sys.exit(1)
+
+ # Parse the arguments and options
+ parser = argparse.ArgumentParser(prog='python -m tokenize')
+ parser.add_argument(dest='filename', nargs='?',
+ metavar='filename.py',
+ help='the file to tokenize; defaults to stdin')
+ parser.add_argument('-e', '--exact', dest='exact', action='store_true',
+ help='display token names using the exact type')
+ args = parser.parse_args()
+
+ try:
+ # Tokenize the input
+ if args.filename:
+ filename = args.filename
+ with _builtin_open(filename, 'rb') as f:
+ tokens = list(tokenize(f.readline))
+ else:
+ filename = ""
+ tokens = _tokenize(sys.stdin.readline, None)
+
+ # Output the tokenization
+ for token in tokens:
+ token_type = token.type
+ if args.exact:
+ token_type = token.exact_type
+ token_range = "%d,%d-%d,%d:" % (token.start + token.end)
+ print("%-20s%-15s%-15r" %
+ (token_range, tok_name[token_type], token.string))
+ except IndentationError as err:
+ line, column = err.args[1][1:3]
+ error(err.args[0], filename, (line, column))
+ except TokenError as err:
+ line, column = err.args[1]
+ error(err.args[0], filename, (line, column))
+ except SyntaxError as err:
+ error(err, filename)
+ except OSError as err:
+ error(err)
+ except KeyboardInterrupt:
+ print("interrupted\n")
+ except Exception as err:
+ perror("unexpected error: %s" % err)
+ raise
+
+if __name__ == "__main__":
+ main()
diff --git a/infer_4_37_2/lib/python3.10/traceback.py b/infer_4_37_2/lib/python3.10/traceback.py
new file mode 100644
index 0000000000000000000000000000000000000000..51446f3f26fed4d151c479af2a8d93d45fb7fb4a
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/traceback.py
@@ -0,0 +1,692 @@
+"""Extract, format and print information about Python stack traces."""
+
+import collections
+import itertools
+import linecache
+import sys
+
+__all__ = ['extract_stack', 'extract_tb', 'format_exception',
+ 'format_exception_only', 'format_list', 'format_stack',
+ 'format_tb', 'print_exc', 'format_exc', 'print_exception',
+ 'print_last', 'print_stack', 'print_tb', 'clear_frames',
+ 'FrameSummary', 'StackSummary', 'TracebackException',
+ 'walk_stack', 'walk_tb']
+
+#
+# Formatting and printing lists of traceback lines.
+#
+
+def print_list(extracted_list, file=None):
+ """Print the list of tuples as returned by extract_tb() or
+ extract_stack() as a formatted stack trace to the given file."""
+ if file is None:
+ file = sys.stderr
+ for item in StackSummary.from_list(extracted_list).format():
+ print(item, file=file, end="")
+
+def format_list(extracted_list):
+ """Format a list of tuples or FrameSummary objects for printing.
+
+ Given a list of tuples or FrameSummary objects as returned by
+ extract_tb() or extract_stack(), return a list of strings ready
+ for printing.
+
+ Each string in the resulting list corresponds to the item with the
+ same index in the argument list. Each string ends in a newline;
+ the strings may contain internal newlines as well, for those items
+ whose source text line is not None.
+ """
+ return StackSummary.from_list(extracted_list).format()
+
+#
+# Printing and Extracting Tracebacks.
+#
+
+def print_tb(tb, limit=None, file=None):
+ """Print up to 'limit' stack trace entries from the traceback 'tb'.
+
+ If 'limit' is omitted or None, all entries are printed. If 'file'
+ is omitted or None, the output goes to sys.stderr; otherwise
+ 'file' should be an open file or file-like object with a write()
+ method.
+ """
+ print_list(extract_tb(tb, limit=limit), file=file)
+
+def format_tb(tb, limit=None):
+ """A shorthand for 'format_list(extract_tb(tb, limit))'."""
+ return extract_tb(tb, limit=limit).format()
+
+def extract_tb(tb, limit=None):
+ """
+ Return a StackSummary object representing a list of
+ pre-processed entries from traceback.
+
+ This is useful for alternate formatting of stack traces. If
+ 'limit' is omitted or None, all entries are extracted. A
+ pre-processed stack trace entry is a FrameSummary object
+ containing attributes filename, lineno, name, and line
+ representing the information that is usually printed for a stack
+ trace. The line is a string with leading and trailing
+ whitespace stripped; if the source is not available it is None.
+ """
+ return StackSummary.extract(walk_tb(tb), limit=limit)
+
+#
+# Exception formatting and output.
+#
+
+_cause_message = (
+ "\nThe above exception was the direct cause "
+ "of the following exception:\n\n")
+
+_context_message = (
+ "\nDuring handling of the above exception, "
+ "another exception occurred:\n\n")
+
+
+class _Sentinel:
+ def __repr__(self):
+ return ""
+
+_sentinel = _Sentinel()
+
+def _parse_value_tb(exc, value, tb):
+ if (value is _sentinel) != (tb is _sentinel):
+ raise ValueError("Both or neither of value and tb must be given")
+ if value is tb is _sentinel:
+ if exc is not None:
+ return exc, exc.__traceback__
+ else:
+ return None, None
+ return value, tb
+
+
+def print_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \
+ file=None, chain=True):
+ """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
+
+ This differs from print_tb() in the following ways: (1) if
+ traceback is not None, it prints a header "Traceback (most recent
+ call last):"; (2) it prints the exception type and value after the
+ stack trace; (3) if type is SyntaxError and value has the
+ appropriate format, it prints the line where the syntax error
+ occurred with a caret on the next line indicating the approximate
+ position of the error.
+ """
+ value, tb = _parse_value_tb(exc, value, tb)
+ if file is None:
+ file = sys.stderr
+ te = TracebackException(type(value), value, tb, limit=limit, compact=True)
+ for line in te.format(chain=chain):
+ print(line, file=file, end="")
+
+
+def format_exception(exc, /, value=_sentinel, tb=_sentinel, limit=None, \
+ chain=True):
+ """Format a stack trace and the exception information.
+
+ The arguments have the same meaning as the corresponding arguments
+ to print_exception(). The return value is a list of strings, each
+ ending in a newline and some containing internal newlines. When
+ these lines are concatenated and printed, exactly the same text is
+ printed as does print_exception().
+ """
+ value, tb = _parse_value_tb(exc, value, tb)
+ te = TracebackException(type(value), value, tb, limit=limit, compact=True)
+ return list(te.format(chain=chain))
+
+
+def format_exception_only(exc, /, value=_sentinel):
+ """Format the exception part of a traceback.
+
+ The return value is a list of strings, each ending in a newline.
+
+ Normally, the list contains a single string; however, for
+ SyntaxError exceptions, it contains several lines that (when
+ printed) display detailed information about where the syntax
+ error occurred.
+
+ The message indicating which exception occurred is always the last
+ string in the list.
+
+ """
+ if value is _sentinel:
+ value = exc
+ te = TracebackException(type(value), value, None, compact=True)
+ return list(te.format_exception_only())
+
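+# Illustrative: rendering a caught exception to a string, using the
+# single-argument form accepted since Python 3.10:
+#
+#     try:
+#         1 / 0
+#     except ZeroDivisionError as e:
+#         text = "".join(format_exception(e))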
+
+# -- not official API but folk probably use these two functions.
+
+def _format_final_exc_line(etype, value):
+ valuestr = _some_str(value)
+ if value is None or not valuestr:
+ line = "%s\n" % etype
+ else:
+ line = "%s: %s\n" % (etype, valuestr)
+ return line
+
+def _some_str(value):
+ try:
+ return str(value)
+ except:
+        return '<unprintable %s object>' % type(value).__name__
+
+# --
+
+def print_exc(limit=None, file=None, chain=True):
+ """Shorthand for 'print_exception(*sys.exc_info(), limit, file)'."""
+ print_exception(*sys.exc_info(), limit=limit, file=file, chain=chain)
+
+def format_exc(limit=None, chain=True):
+ """Like print_exc() but return a string."""
+ return "".join(format_exception(*sys.exc_info(), limit=limit, chain=chain))
+
+def print_last(limit=None, file=None, chain=True):
+ """This is a shorthand for 'print_exception(sys.last_type,
+ sys.last_value, sys.last_traceback, limit, file)'."""
+ if not hasattr(sys, "last_type"):
+ raise ValueError("no last exception")
+ print_exception(sys.last_type, sys.last_value, sys.last_traceback,
+ limit, file, chain)
+
+#
+# Printing and Extracting Stacks.
+#
+
+def print_stack(f=None, limit=None, file=None):
+ """Print a stack trace from its invocation point.
+
+ The optional 'f' argument can be used to specify an alternate
+ stack frame at which to start. The optional 'limit' and 'file'
+ arguments have the same meaning as for print_exception().
+ """
+ if f is None:
+ f = sys._getframe().f_back
+ print_list(extract_stack(f, limit=limit), file=file)
+
+
+def format_stack(f=None, limit=None):
+ """Shorthand for 'format_list(extract_stack(f, limit))'."""
+ if f is None:
+ f = sys._getframe().f_back
+ return format_list(extract_stack(f, limit=limit))
+
+
+def extract_stack(f=None, limit=None):
+ """Extract the raw traceback from the current stack frame.
+
+ The return value has the same format as for extract_tb(). The
+ optional 'f' and 'limit' arguments have the same meaning as for
+ print_stack(). Each item in the list is a quadruple (filename,
+ line number, function name, text), and the entries are in order
+ from oldest to newest stack frame.
+ """
+ if f is None:
+ f = sys._getframe().f_back
+ stack = StackSummary.extract(walk_stack(f), limit=limit)
+ stack.reverse()
+ return stack
+
+
+def clear_frames(tb):
+ "Clear all references to local variables in the frames of a traceback."
+ while tb is not None:
+ try:
+ tb.tb_frame.clear()
+ except RuntimeError:
+ # Ignore the exception raised if the frame is still executing.
+ pass
+ tb = tb.tb_next
+
+
+class FrameSummary:
+ """A single frame from a traceback.
+
+ - :attr:`filename` The filename for the frame.
+ - :attr:`lineno` The line within filename for the frame that was
+ active when the frame was captured.
+ - :attr:`name` The name of the function or method that was executing
+ when the frame was captured.
+    - :attr:`line` The text from the linecache module for the line
+      of code that was running when the frame was captured.
+ - :attr:`locals` Either None if locals were not supplied, or a dict
+ mapping the name to the repr() of the variable.
+ """
+
+ __slots__ = ('filename', 'lineno', 'name', '_line', 'locals')
+
+ def __init__(self, filename, lineno, name, *, lookup_line=True,
+ locals=None, line=None):
+ """Construct a FrameSummary.
+
+ :param lookup_line: If True, `linecache` is consulted for the source
+ code line. Otherwise, the line will be looked up when first needed.
+ :param locals: If supplied the frame locals, which will be captured as
+ object representations.
+ :param line: If provided, use this instead of looking up the line in
+ the linecache.
+ """
+ self.filename = filename
+ self.lineno = lineno
+ self.name = name
+ self._line = line
+ if lookup_line:
+ self.line
+ self.locals = {k: repr(v) for k, v in locals.items()} if locals else None
+
+ def __eq__(self, other):
+ if isinstance(other, FrameSummary):
+ return (self.filename == other.filename and
+ self.lineno == other.lineno and
+ self.name == other.name and
+ self.locals == other.locals)
+ if isinstance(other, tuple):
+ return (self.filename, self.lineno, self.name, self.line) == other
+ return NotImplemented
+
+ def __getitem__(self, pos):
+ return (self.filename, self.lineno, self.name, self.line)[pos]
+
+ def __iter__(self):
+ return iter([self.filename, self.lineno, self.name, self.line])
+
+ def __repr__(self):
+ return "".format(
+ filename=self.filename, lineno=self.lineno, name=self.name)
+
+ def __len__(self):
+ return 4
+
+ @property
+ def line(self):
+ if self._line is None:
+ if self.lineno is None:
+ return None
+ self._line = linecache.getline(self.filename, self.lineno)
+ return self._line.strip()
+
+def walk_stack(f):
+ """Walk a stack yielding the frame and line number for each frame.
+
+ This will follow f.f_back from the given frame. If no frame is given, the
+ current stack is used. Usually used with StackSummary.extract.
+ """
+ if f is None:
+ f = sys._getframe().f_back.f_back
+ while f is not None:
+ yield f, f.f_lineno
+ f = f.f_back
+
+
+def walk_tb(tb):
+ """Walk a traceback yielding the frame and line number for each frame.
+
+ This will follow tb.tb_next (and thus is in the opposite order to
+ walk_stack). Usually used with StackSummary.extract.
+ """
+ while tb is not None:
+ yield tb.tb_frame, tb.tb_lineno
+ tb = tb.tb_next
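+
+# Illustrative: building a summary from a live traceback without keeping
+# frame references alive:
+#
+#     import sys
+#     try:
+#         1 / 0
+#     except ZeroDivisionError:
+#         summary = StackSummary.extract(walk_tb(sys.exc_info()[2]))
+#         print("".join(summary.format()))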
+
+
+_RECURSIVE_CUTOFF = 3 # Also hardcoded in traceback.c.
+
+class StackSummary(list):
+ """A stack of frames."""
+
+ @classmethod
+ def extract(klass, frame_gen, *, limit=None, lookup_lines=True,
+ capture_locals=False):
+ """Create a StackSummary from a traceback or stack object.
+
+ :param frame_gen: A generator that yields (frame, lineno) tuples to
+ include in the stack.
+ :param limit: None to include all frames or the number of frames to
+ include.
+ :param lookup_lines: If True, lookup lines for each frame immediately,
+ otherwise lookup is deferred until the frame is rendered.
+ :param capture_locals: If True, the local variables from each frame will
+ be captured as object representations into the FrameSummary.
+ """
+ if limit is None:
+ limit = getattr(sys, 'tracebacklimit', None)
+ if limit is not None and limit < 0:
+ limit = 0
+ if limit is not None:
+ if limit >= 0:
+ frame_gen = itertools.islice(frame_gen, limit)
+ else:
+ frame_gen = collections.deque(frame_gen, maxlen=-limit)
+
+ result = klass()
+ fnames = set()
+ for f, lineno in frame_gen:
+ co = f.f_code
+ filename = co.co_filename
+ name = co.co_name
+
+ fnames.add(filename)
+ linecache.lazycache(filename, f.f_globals)
+ # Must defer line lookups until we have called checkcache.
+ if capture_locals:
+ f_locals = f.f_locals
+ else:
+ f_locals = None
+ result.append(FrameSummary(
+ filename, lineno, name, lookup_line=False, locals=f_locals))
+ for filename in fnames:
+ linecache.checkcache(filename)
+ # If immediate lookup was desired, trigger lookups now.
+ if lookup_lines:
+ for f in result:
+ f.line
+ return result
+
+ @classmethod
+ def from_list(klass, a_list):
+ """
+ Create a StackSummary object from a supplied list of
+ FrameSummary objects or old-style list of tuples.
+ """
+ # While doing a fast-path check for isinstance(a_list, StackSummary) is
+ # appealing, idlelib.run.cleanup_traceback and other similar code may
+ # break this by making arbitrary frames plain tuples, so we need to
+ # check on a frame by frame basis.
+ result = StackSummary()
+ for frame in a_list:
+ if isinstance(frame, FrameSummary):
+ result.append(frame)
+ else:
+ filename, lineno, name, line = frame
+ result.append(FrameSummary(filename, lineno, name, line=line))
+ return result
+
+ def format(self):
+ """Format the stack ready for printing.
+
+ Returns a list of strings ready for printing. Each string in the
+ resulting list corresponds to a single frame from the stack.
+ Each string ends in a newline; the strings may contain internal
+ newlines as well, for those items with source text lines.
+
+ For long sequences of the same frame and line, the first few
+ repetitions are shown, followed by a summary line stating the exact
+ number of further repetitions.
+ """
+ result = []
+ last_file = None
+ last_line = None
+ last_name = None
+ count = 0
+ for frame in self:
+ if (last_file is None or last_file != frame.filename or
+ last_line is None or last_line != frame.lineno or
+ last_name is None or last_name != frame.name):
+ if count > _RECURSIVE_CUTOFF:
+ count -= _RECURSIVE_CUTOFF
+ result.append(
+ f' [Previous line repeated {count} more '
+ f'time{"s" if count > 1 else ""}]\n'
+ )
+ last_file = frame.filename
+ last_line = frame.lineno
+ last_name = frame.name
+ count = 0
+ count += 1
+ if count > _RECURSIVE_CUTOFF:
+ continue
+ row = []
+ row.append(' File "{}", line {}, in {}\n'.format(
+ frame.filename, frame.lineno, frame.name))
+ if frame.line:
+ row.append(' {}\n'.format(frame.line.strip()))
+ if frame.locals:
+ for name, value in sorted(frame.locals.items()):
+ row.append(' {name} = {value}\n'.format(name=name, value=value))
+ result.append(''.join(row))
+ if count > _RECURSIVE_CUTOFF:
+ count -= _RECURSIVE_CUTOFF
+ result.append(
+ f' [Previous line repeated {count} more '
+ f'time{"s" if count > 1 else ""}]\n'
+ )
+ return result
+
+
+class TracebackException:
+ """An exception ready for rendering.
+
+ The traceback module captures enough attributes from the original exception
+ to this intermediary form to ensure that no references are held, while
+ still being able to fully print or format it.
+
+ Use `from_exception` to create TracebackException instances from exception
+ objects, or the constructor to create TracebackException instances from
+ individual components.
+
+ - :attr:`__cause__` A TracebackException of the original *__cause__*.
+ - :attr:`__context__` A TracebackException of the original *__context__*.
+ - :attr:`__suppress_context__` The *__suppress_context__* value from the
+ original exception.
+ - :attr:`stack` A `StackSummary` representing the traceback.
+    - :attr:`exc_type` The class of the original exception.
+ - :attr:`filename` For syntax errors - the filename where the error
+ occurred.
+ - :attr:`lineno` For syntax errors - the linenumber where the error
+ occurred.
+ - :attr:`end_lineno` For syntax errors - the end linenumber where the error
+ occurred. Can be `None` if not present.
+ - :attr:`text` For syntax errors - the text where the error
+ occurred.
+ - :attr:`offset` For syntax errors - the offset into the text where the
+ error occurred.
+ - :attr:`end_offset` For syntax errors - the end offset into the text where
+ the error occurred. Can be `None` if not present.
+ - :attr:`msg` For syntax errors - the compiler error message.
+ """
+
+ def __init__(self, exc_type, exc_value, exc_traceback, *, limit=None,
+ lookup_lines=True, capture_locals=False, compact=False,
+ _seen=None):
+        # NB: we need to accept exc_type, exc_value, exc_traceback to
+ # permit backwards compat with the existing API, otherwise we
+ # need stub thunk objects just to glue it together.
+ # Handle loops in __cause__ or __context__.
+ is_recursive_call = _seen is not None
+ if _seen is None:
+ _seen = set()
+ _seen.add(id(exc_value))
+
+ # TODO: locals.
+ self.stack = StackSummary.extract(
+ walk_tb(exc_traceback), limit=limit, lookup_lines=lookup_lines,
+ capture_locals=capture_locals)
+ self.exc_type = exc_type
+ # Capture now to permit freeing resources: only complication is in the
+ # unofficial API _format_final_exc_line
+ self._str = _some_str(exc_value)
+ if exc_type and issubclass(exc_type, SyntaxError):
+ # Handle SyntaxError's specially
+ self.filename = exc_value.filename
+ lno = exc_value.lineno
+ self.lineno = str(lno) if lno is not None else None
+ end_lno = exc_value.end_lineno
+ self.end_lineno = str(end_lno) if end_lno is not None else None
+ self.text = exc_value.text
+ self.offset = exc_value.offset
+ self.end_offset = exc_value.end_offset
+ self.msg = exc_value.msg
+ if lookup_lines:
+ self._load_lines()
+ self.__suppress_context__ = \
+ exc_value.__suppress_context__ if exc_value is not None else False
+
+ # Convert __cause__ and __context__ to `TracebackExceptions`s, use a
+ # queue to avoid recursion (only the top-level call gets _seen == None)
+ if not is_recursive_call:
+ queue = [(self, exc_value)]
+ while queue:
+ te, e = queue.pop()
+ if (e and e.__cause__ is not None
+ and id(e.__cause__) not in _seen):
+ cause = TracebackException(
+ type(e.__cause__),
+ e.__cause__,
+ e.__cause__.__traceback__,
+ limit=limit,
+ lookup_lines=lookup_lines,
+ capture_locals=capture_locals,
+ _seen=_seen)
+ else:
+ cause = None
+
+ if compact:
+ need_context = (cause is None and
+ e is not None and
+ not e.__suppress_context__)
+ else:
+ need_context = True
+ if (e and e.__context__ is not None
+ and need_context and id(e.__context__) not in _seen):
+ context = TracebackException(
+ type(e.__context__),
+ e.__context__,
+ e.__context__.__traceback__,
+ limit=limit,
+ lookup_lines=lookup_lines,
+ capture_locals=capture_locals,
+ _seen=_seen)
+ else:
+ context = None
+ te.__cause__ = cause
+ te.__context__ = context
+ if cause:
+ queue.append((te.__cause__, e.__cause__))
+ if context:
+ queue.append((te.__context__, e.__context__))
+
+ @classmethod
+ def from_exception(cls, exc, *args, **kwargs):
+ """Create a TracebackException from an exception."""
+ return cls(type(exc), exc, exc.__traceback__, *args, **kwargs)
+
+ def _load_lines(self):
+ """Private API. force all lines in the stack to be loaded."""
+ for frame in self.stack:
+ frame.line
+
+ def __eq__(self, other):
+ if isinstance(other, TracebackException):
+ return self.__dict__ == other.__dict__
+ return NotImplemented
+
+ def __str__(self):
+ return self._str
+
+ def format_exception_only(self):
+ """Format the exception part of the traceback.
+
+ The return value is a generator of strings, each ending in a newline.
+
+ Normally, the generator emits a single string; however, for
+ SyntaxError exceptions, it emits several lines that (when
+ printed) display detailed information about where the syntax
+ error occurred.
+
+ The message indicating which exception occurred is always the last
+ string in the output.
+ """
+ if self.exc_type is None:
+ yield _format_final_exc_line(None, self._str)
+ return
+
+ stype = self.exc_type.__qualname__
+ smod = self.exc_type.__module__
+ if smod not in ("__main__", "builtins"):
+ if not isinstance(smod, str):
+ smod = ""
+ stype = smod + '.' + stype
+
+ if not issubclass(self.exc_type, SyntaxError):
+ yield _format_final_exc_line(stype, self._str)
+ else:
+ yield from self._format_syntax_error(stype)
+
+ def _format_syntax_error(self, stype):
+ """Format SyntaxError exceptions (internal helper)."""
+ # Show exactly where the problem was found.
+ filename_suffix = ''
+ if self.lineno is not None:
+ yield ' File "{}", line {}\n'.format(
+ self.filename or "", self.lineno)
+ elif self.filename is not None:
+ filename_suffix = ' ({})'.format(self.filename)
+
+ text = self.text
+ if text is not None:
+ # text = " foo\n"
+ # rtext = " foo"
+ # ltext = "foo"
+ rtext = text.rstrip('\n')
+ ltext = rtext.lstrip(' \n\f')
+ spaces = len(rtext) - len(ltext)
+ yield ' {}\n'.format(ltext)
+
+ if self.offset is not None:
+ offset = self.offset
+ end_offset = self.end_offset if self.end_offset not in {None, 0} else offset
+ if offset == end_offset or end_offset == -1:
+ end_offset = offset + 1
+
+ # Convert 1-based column offset to 0-based index into stripped text
+ colno = offset - 1 - spaces
+ end_colno = end_offset - 1 - spaces
+ if colno >= 0:
+ # non-space whitespace (likes tabs) must be kept for alignment
+ caretspace = ((c if c.isspace() else ' ') for c in ltext[:colno])
+ yield ' {}{}'.format("".join(caretspace), ('^' * (end_colno - colno) + "\n"))
+ msg = self.msg or ""
+ yield "{}: {}{}\n".format(stype, msg, filename_suffix)
+
+ def format(self, *, chain=True):
+ """Format the exception.
+
+ If chain is not *True*, *__cause__* and *__context__* will not be formatted.
+
+ The return value is a generator of strings, each ending in a newline and
+ some containing internal newlines. `print_exception` is a wrapper around
+ this method which just prints the lines to a file.
+
+ The message indicating which exception occurred is always the last
+ string in the output.
+ """
+
+ output = []
+ exc = self
+ while exc:
+ if chain:
+ if exc.__cause__ is not None:
+ chained_msg = _cause_message
+ chained_exc = exc.__cause__
+ elif (exc.__context__ is not None and
+ not exc.__suppress_context__):
+ chained_msg = _context_message
+ chained_exc = exc.__context__
+ else:
+ chained_msg = None
+ chained_exc = None
+
+ output.append((chained_msg, exc))
+ exc = chained_exc
+ else:
+ output.append((None, exc))
+ exc = None
+
+ for msg, exc in reversed(output):
+ if msg is not None:
+ yield msg
+ if exc.stack:
+ yield 'Traceback (most recent call last):\n'
+ yield from exc.stack.format()
+ yield from exc.format_exception_only()
diff --git a/infer_4_37_2/lib/python3.10/tracemalloc.py b/infer_4_37_2/lib/python3.10/tracemalloc.py
new file mode 100644
index 0000000000000000000000000000000000000000..cec99c59700fe05e457f3668d2b947cc6bfef9c6
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/tracemalloc.py
@@ -0,0 +1,560 @@
+from collections.abc import Sequence, Iterable
+from functools import total_ordering
+import fnmatch
+import linecache
+import os.path
+import pickle
+
+# Import types and functions implemented in C
+from _tracemalloc import *
+from _tracemalloc import _get_object_traceback, _get_traces
+
+
+def _format_size(size, sign):
+ for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
+ if abs(size) < 100 and unit != 'B':
+ # 3 digits (xx.x UNIT)
+ if sign:
+ return "%+.1f %s" % (size, unit)
+ else:
+ return "%.1f %s" % (size, unit)
+ if abs(size) < 10 * 1024 or unit == 'TiB':
+ # 4 or 5 digits (xxxx UNIT)
+ if sign:
+ return "%+.0f %s" % (size, unit)
+ else:
+ return "%.0f %s" % (size, unit)
+ size /= 1024
+
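+# Illustrative outputs: _format_size(512, False) == '512 B',
+# _format_size(20480, False) == '20.0 KiB', and, with a sign,
+# _format_size(-2048, True) == '-2048 B'.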
+
+class Statistic:
+ """
+ Statistic difference on memory allocations between two Snapshot instance.
+ """
+
+ __slots__ = ('traceback', 'size', 'count')
+
+ def __init__(self, traceback, size, count):
+ self.traceback = traceback
+ self.size = size
+ self.count = count
+
+ def __hash__(self):
+ return hash((self.traceback, self.size, self.count))
+
+ def __eq__(self, other):
+ if not isinstance(other, Statistic):
+ return NotImplemented
+ return (self.traceback == other.traceback
+ and self.size == other.size
+ and self.count == other.count)
+
+ def __str__(self):
+ text = ("%s: size=%s, count=%i"
+ % (self.traceback,
+ _format_size(self.size, False),
+ self.count))
+ if self.count:
+ average = self.size / self.count
+ text += ", average=%s" % _format_size(average, False)
+ return text
+
+ def __repr__(self):
+        return ('<Statistic traceback=%r size=%i count=%i>'
+                % (self.traceback, self.size, self.count))
+
+ def _sort_key(self):
+ return (self.size, self.count, self.traceback)
+
+
+class StatisticDiff:
+ """
+ Statistic difference on memory allocations between an old and a new
+ Snapshot instance.
+ """
+ __slots__ = ('traceback', 'size', 'size_diff', 'count', 'count_diff')
+
+ def __init__(self, traceback, size, size_diff, count, count_diff):
+ self.traceback = traceback
+ self.size = size
+ self.size_diff = size_diff
+ self.count = count
+ self.count_diff = count_diff
+
+ def __hash__(self):
+ return hash((self.traceback, self.size, self.size_diff,
+ self.count, self.count_diff))
+
+ def __eq__(self, other):
+ if not isinstance(other, StatisticDiff):
+ return NotImplemented
+ return (self.traceback == other.traceback
+ and self.size == other.size
+ and self.size_diff == other.size_diff
+ and self.count == other.count
+ and self.count_diff == other.count_diff)
+
+ def __str__(self):
+ text = ("%s: size=%s (%s), count=%i (%+i)"
+ % (self.traceback,
+ _format_size(self.size, False),
+ _format_size(self.size_diff, True),
+ self.count,
+ self.count_diff))
+ if self.count:
+ average = self.size / self.count
+ text += ", average=%s" % _format_size(average, False)
+ return text
+
+ def __repr__(self):
+        return ('<StatisticDiff traceback=%r size=%i (%+i) count=%i (%+i)>'
+                % (self.traceback, self.size, self.size_diff,
+                   self.count, self.count_diff))
+
+ def _sort_key(self):
+ return (abs(self.size_diff), self.size,
+ abs(self.count_diff), self.count,
+ self.traceback)
+
+
+def _compare_grouped_stats(old_group, new_group):
+ statistics = []
+ for traceback, stat in new_group.items():
+ previous = old_group.pop(traceback, None)
+ if previous is not None:
+ stat = StatisticDiff(traceback,
+ stat.size, stat.size - previous.size,
+ stat.count, stat.count - previous.count)
+ else:
+ stat = StatisticDiff(traceback,
+ stat.size, stat.size,
+ stat.count, stat.count)
+ statistics.append(stat)
+
+ for traceback, stat in old_group.items():
+ stat = StatisticDiff(traceback, 0, -stat.size, 0, -stat.count)
+ statistics.append(stat)
+ return statistics
+
+
+@total_ordering
+class Frame:
+ """
+ Frame of a traceback.
+ """
+ __slots__ = ("_frame",)
+
+ def __init__(self, frame):
+ # frame is a tuple: (filename: str, lineno: int)
+ self._frame = frame
+
+ @property
+ def filename(self):
+ return self._frame[0]
+
+ @property
+ def lineno(self):
+ return self._frame[1]
+
+ def __eq__(self, other):
+ if not isinstance(other, Frame):
+ return NotImplemented
+ return (self._frame == other._frame)
+
+ def __lt__(self, other):
+ if not isinstance(other, Frame):
+ return NotImplemented
+ return (self._frame < other._frame)
+
+ def __hash__(self):
+ return hash(self._frame)
+
+ def __str__(self):
+ return "%s:%s" % (self.filename, self.lineno)
+
+ def __repr__(self):
+ return "" % (self.filename, self.lineno)
+
+
+@total_ordering
+class Traceback(Sequence):
+ """
+ Sequence of Frame instances sorted from the oldest frame
+ to the most recent frame.
+ """
+ __slots__ = ("_frames", '_total_nframe')
+
+ def __init__(self, frames, total_nframe=None):
+ Sequence.__init__(self)
+ # frames is a tuple of frame tuples: see Frame constructor for the
+ # format of a frame tuple; it is reversed, because _tracemalloc
+ # returns frames sorted from most recent to oldest, but the
+ # Python API expects oldest to most recent
+ self._frames = tuple(reversed(frames))
+ self._total_nframe = total_nframe
+
+ @property
+ def total_nframe(self):
+ return self._total_nframe
+
+ def __len__(self):
+ return len(self._frames)
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ return tuple(Frame(trace) for trace in self._frames[index])
+ else:
+ return Frame(self._frames[index])
+
+ def __contains__(self, frame):
+ return frame._frame in self._frames
+
+ def __hash__(self):
+ return hash(self._frames)
+
+ def __eq__(self, other):
+ if not isinstance(other, Traceback):
+ return NotImplemented
+ return (self._frames == other._frames)
+
+ def __lt__(self, other):
+ if not isinstance(other, Traceback):
+ return NotImplemented
+ return (self._frames < other._frames)
+
+ def __str__(self):
+ return str(self[0])
+
+ def __repr__(self):
+ s = f""
+ else:
+ s += f" total_nframe={self.total_nframe}>"
+ return s
+
+ def format(self, limit=None, most_recent_first=False):
+ lines = []
+ if limit is not None:
+ if limit > 0:
+ frame_slice = self[-limit:]
+ else:
+ frame_slice = self[:limit]
+ else:
+ frame_slice = self
+
+ if most_recent_first:
+ frame_slice = reversed(frame_slice)
+ for frame in frame_slice:
+ lines.append(' File "%s", line %s'
+ % (frame.filename, frame.lineno))
+ line = linecache.getline(frame.filename, frame.lineno).strip()
+ if line:
+ lines.append(' %s' % line)
+ return lines
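+
+    # Example (sketch; 'tb' stands for any Traceback instance): render the
+    # traceback with the most recent call first, capped at 5 frames:
+    #
+    #   for line in tb.format(limit=5, most_recent_first=True):
+    #       print(line)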
+
+
+def get_object_traceback(obj):
+ """
+ Get the traceback where the Python object *obj* was allocated.
+ Return a Traceback instance.
+
+ Return None if the tracemalloc module is not tracing memory allocations or
+ did not trace the allocation of the object.
+ """
+ frames = _get_object_traceback(obj)
+ if frames is not None:
+ return Traceback(frames)
+ else:
+ return None
+
+
+class Trace:
+ """
+ Trace of a memory block.
+ """
+ __slots__ = ("_trace",)
+
+ def __init__(self, trace):
+        # trace is a tuple: (domain: int, size: int, traceback: tuple,
+        # total_nframe: int); see the Traceback constructor for the
+        # format of the traceback tuple.
+ self._trace = trace
+
+ @property
+ def domain(self):
+ return self._trace[0]
+
+ @property
+ def size(self):
+ return self._trace[1]
+
+ @property
+ def traceback(self):
+ return Traceback(*self._trace[2:])
+
+ def __eq__(self, other):
+ if not isinstance(other, Trace):
+ return NotImplemented
+ return (self._trace == other._trace)
+
+ def __hash__(self):
+ return hash(self._trace)
+
+ def __str__(self):
+ return "%s: %s" % (self.traceback, _format_size(self.size, False))
+
+ def __repr__(self):
+ return (""
+ % (self.domain, _format_size(self.size, False), self.traceback))
+
+
+class _Traces(Sequence):
+ def __init__(self, traces):
+ Sequence.__init__(self)
+ # traces is a tuple of trace tuples: see Trace constructor
+ self._traces = traces
+
+ def __len__(self):
+ return len(self._traces)
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ return tuple(Trace(trace) for trace in self._traces[index])
+ else:
+ return Trace(self._traces[index])
+
+ def __contains__(self, trace):
+ return trace._trace in self._traces
+
+ def __eq__(self, other):
+ if not isinstance(other, _Traces):
+ return NotImplemented
+ return (self._traces == other._traces)
+
+ def __repr__(self):
+ return "" % len(self)
+
+
+def _normalize_filename(filename):
+ filename = os.path.normcase(filename)
+ if filename.endswith('.pyc'):
+ filename = filename[:-1]
+ return filename
+
+
+class BaseFilter:
+ def __init__(self, inclusive):
+ self.inclusive = inclusive
+
+ def _match(self, trace):
+ raise NotImplementedError
+
+
+class Filter(BaseFilter):
+ def __init__(self, inclusive, filename_pattern,
+ lineno=None, all_frames=False, domain=None):
+        super().__init__(inclusive)
+ self._filename_pattern = _normalize_filename(filename_pattern)
+ self.lineno = lineno
+ self.all_frames = all_frames
+ self.domain = domain
+
+ @property
+ def filename_pattern(self):
+ return self._filename_pattern
+
+ def _match_frame_impl(self, filename, lineno):
+ filename = _normalize_filename(filename)
+ if not fnmatch.fnmatch(filename, self._filename_pattern):
+ return False
+ if self.lineno is None:
+ return True
+ else:
+ return (lineno == self.lineno)
+
+ def _match_frame(self, filename, lineno):
+ return self._match_frame_impl(filename, lineno) ^ (not self.inclusive)
+
+ def _match_traceback(self, traceback):
+ if self.all_frames:
+ if any(self._match_frame_impl(filename, lineno)
+ for filename, lineno in traceback):
+ return self.inclusive
+ else:
+ return (not self.inclusive)
+ else:
+ filename, lineno = traceback[0]
+ return self._match_frame(filename, lineno)
+
+ def _match(self, trace):
+ domain, size, traceback, total_nframe = trace
+ res = self._match_traceback(traceback)
+ if self.domain is not None:
+ if self.inclusive:
+ return res and (domain == self.domain)
+ else:
+ return res or (domain != self.domain)
+ return res
+
+
+class DomainFilter(BaseFilter):
+ def __init__(self, inclusive, domain):
+ super().__init__(inclusive)
+ self._domain = domain
+
+ @property
+ def domain(self):
+ return self._domain
+
+ def _match(self, trace):
+ domain, size, traceback, total_nframe = trace
+ return (domain == self.domain) ^ (not self.inclusive)
+
+
+class Snapshot:
+ """
+ Snapshot of traces of memory blocks allocated by Python.
+ """
+
+ def __init__(self, traces, traceback_limit):
+ # traces is a tuple of trace tuples: see _Traces constructor for
+ # the exact format
+ self.traces = _Traces(traces)
+ self.traceback_limit = traceback_limit
+
+ def dump(self, filename):
+ """
+ Write the snapshot into a file.
+ """
+ with open(filename, "wb") as fp:
+ pickle.dump(self, fp, pickle.HIGHEST_PROTOCOL)
+
+ @staticmethod
+ def load(filename):
+ """
+ Load a snapshot from a file.
+ """
+ with open(filename, "rb") as fp:
+ return pickle.load(fp)
+
+ def _filter_trace(self, include_filters, exclude_filters, trace):
+ if include_filters:
+ if not any(trace_filter._match(trace)
+ for trace_filter in include_filters):
+ return False
+ if exclude_filters:
+ if any(not trace_filter._match(trace)
+ for trace_filter in exclude_filters):
+ return False
+ return True
+
+ def filter_traces(self, filters):
+ """
+        Create a new Snapshot instance with a filtered traces sequence; filters
+ is a list of Filter or DomainFilter instances. If filters is an empty
+ list, return a new Snapshot instance with a copy of the traces.
+ """
+ if not isinstance(filters, Iterable):
+ raise TypeError("filters must be a list of filters, not %s"
+ % type(filters).__name__)
+ if filters:
+ include_filters = []
+ exclude_filters = []
+ for trace_filter in filters:
+ if trace_filter.inclusive:
+ include_filters.append(trace_filter)
+ else:
+ exclude_filters.append(trace_filter)
+ new_traces = [trace for trace in self.traces._traces
+ if self._filter_trace(include_filters,
+ exclude_filters,
+ trace)]
+ else:
+ new_traces = self.traces._traces.copy()
+ return Snapshot(new_traces, self.traceback_limit)
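+
+    # Example (sketch; the filename pattern is made up): keep only traces
+    # from 'myapp.py' while dropping allocation domain 1:
+    #
+    #   snapshot = snapshot.filter_traces((
+    #       Filter(True, '*/myapp.py'),
+    #       DomainFilter(False, 1),
+    #   ))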
+
+ def _group_by(self, key_type, cumulative):
+ if key_type not in ('traceback', 'filename', 'lineno'):
+ raise ValueError("unknown key_type: %r" % (key_type,))
+ if cumulative and key_type not in ('lineno', 'filename'):
+ raise ValueError("cumulative mode cannot by used "
+ "with key type %r" % key_type)
+
+ stats = {}
+ tracebacks = {}
+ if not cumulative:
+ for trace in self.traces._traces:
+ domain, size, trace_traceback, total_nframe = trace
+ try:
+ traceback = tracebacks[trace_traceback]
+ except KeyError:
+ if key_type == 'traceback':
+ frames = trace_traceback
+ elif key_type == 'lineno':
+ frames = trace_traceback[:1]
+ else: # key_type == 'filename':
+ frames = ((trace_traceback[0][0], 0),)
+ traceback = Traceback(frames)
+ tracebacks[trace_traceback] = traceback
+ try:
+ stat = stats[traceback]
+ stat.size += size
+ stat.count += 1
+ except KeyError:
+ stats[traceback] = Statistic(traceback, size, 1)
+ else:
+ # cumulative statistics
+ for trace in self.traces._traces:
+ domain, size, trace_traceback, total_nframe = trace
+ for frame in trace_traceback:
+ try:
+ traceback = tracebacks[frame]
+ except KeyError:
+ if key_type == 'lineno':
+ frames = (frame,)
+ else: # key_type == 'filename':
+ frames = ((frame[0], 0),)
+ traceback = Traceback(frames)
+ tracebacks[frame] = traceback
+ try:
+ stat = stats[traceback]
+ stat.size += size
+ stat.count += 1
+ except KeyError:
+ stats[traceback] = Statistic(traceback, size, 1)
+ return stats
+
+ def statistics(self, key_type, cumulative=False):
+ """
+ Group statistics by key_type. Return a sorted list of Statistic
+ instances.
+ """
+ grouped = self._group_by(key_type, cumulative)
+ statistics = list(grouped.values())
+ statistics.sort(reverse=True, key=Statistic._sort_key)
+ return statistics
+
+ def compare_to(self, old_snapshot, key_type, cumulative=False):
+ """
+ Compute the differences with an old snapshot old_snapshot. Get
+ statistics as a sorted list of StatisticDiff instances, grouped by
+        key_type.
+ """
+ new_group = self._group_by(key_type, cumulative)
+ old_group = old_snapshot._group_by(key_type, cumulative)
+ statistics = _compare_grouped_stats(old_group, new_group)
+ statistics.sort(reverse=True, key=StatisticDiff._sort_key)
+ return statistics
+
+
+def take_snapshot():
+ """
+ Take a snapshot of traces of memory blocks allocated by Python.
+ """
+ if not is_tracing():
+ raise RuntimeError("the tracemalloc module must be tracing memory "
+ "allocations to take a snapshot")
+ traces = _get_traces()
+ traceback_limit = get_traceback_limit()
+ return Snapshot(traces, traceback_limit)
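+
+
+# Example (sketch): typical workflow, comparing two snapshots to find the
+# largest allocation growth per source line:
+#
+#   import tracemalloc
+#   tracemalloc.start()
+#   old = tracemalloc.take_snapshot()
+#   ...  # run the workload being measured
+#   new = tracemalloc.take_snapshot()
+#   for stat in new.compare_to(old, 'lineno')[:10]:
+#       print(stat)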
diff --git a/infer_4_37_2/lib/python3.10/types.py b/infer_4_37_2/lib/python3.10/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..62122a994866fe2fd49f94d10146f040e1bf2e8c
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/types.py
@@ -0,0 +1,307 @@
+"""
+Define names for built-in types that aren't directly accessible as a builtin.
+"""
+import sys
+
+# Iterators in Python aren't a matter of type but of protocol. A large
+# and changing number of builtin types implement *some* flavor of
+# iterator. Don't check the type! Use hasattr to check for both
+# "__iter__" and "__next__" attributes instead.
+
+def _f(): pass
+FunctionType = type(_f)
+LambdaType = type(lambda: None) # Same as FunctionType
+CodeType = type(_f.__code__)
+MappingProxyType = type(type.__dict__)
+SimpleNamespace = type(sys.implementation)
+
+def _cell_factory():
+ a = 1
+ def f():
+ nonlocal a
+ return f.__closure__[0]
+CellType = type(_cell_factory())
+
+def _g():
+ yield 1
+GeneratorType = type(_g())
+
+async def _c(): pass
+_c = _c()
+CoroutineType = type(_c)
+_c.close() # Prevent ResourceWarning
+
+async def _ag():
+ yield
+_ag = _ag()
+AsyncGeneratorType = type(_ag)
+
+class _C:
+ def _m(self): pass
+MethodType = type(_C()._m)
+
+BuiltinFunctionType = type(len)
+BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
+
+WrapperDescriptorType = type(object.__init__)
+MethodWrapperType = type(object().__str__)
+MethodDescriptorType = type(str.join)
+ClassMethodDescriptorType = type(dict.__dict__['fromkeys'])
+
+ModuleType = type(sys)
+
+try:
+ raise TypeError
+except TypeError:
+ tb = sys.exc_info()[2]
+ TracebackType = type(tb)
+ FrameType = type(tb.tb_frame)
+ tb = None; del tb
+
+# For Jython, the following two types are identical
+GetSetDescriptorType = type(FunctionType.__code__)
+MemberDescriptorType = type(FunctionType.__globals__)
+
+del sys, _f, _g, _C, _c, _ag # Not for export
+
+
+# Provide a PEP 3115 compliant mechanism for class creation
+def new_class(name, bases=(), kwds=None, exec_body=None):
+ """Create a class object dynamically using the appropriate metaclass."""
+ resolved_bases = resolve_bases(bases)
+ meta, ns, kwds = prepare_class(name, resolved_bases, kwds)
+ if exec_body is not None:
+ exec_body(ns)
+ if resolved_bases is not bases:
+ ns['__orig_bases__'] = bases
+ return meta(name, resolved_bases, ns, **kwds)
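+
+# Example (sketch; 'Base' and the body callback are illustrative): roughly
+# equivalent to "class C(Base): x = 1":
+#
+#   class Base: pass
+#   C = new_class('C', (Base,), exec_body=lambda ns: ns.update(x=1))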
+
+def resolve_bases(bases):
+ """Resolve MRO entries dynamically as specified by PEP 560."""
+ new_bases = list(bases)
+ updated = False
+ shift = 0
+ for i, base in enumerate(bases):
+ if isinstance(base, type) and not isinstance(base, GenericAlias):
+ continue
+ if not hasattr(base, "__mro_entries__"):
+ continue
+ new_base = base.__mro_entries__(bases)
+ updated = True
+ if not isinstance(new_base, tuple):
+ raise TypeError("__mro_entries__ must return a tuple")
+ else:
+ new_bases[i+shift:i+shift+1] = new_base
+ shift += len(new_base) - 1
+ if not updated:
+ return bases
+ return tuple(new_bases)
+
+def prepare_class(name, bases=(), kwds=None):
+ """Call the __prepare__ method of the appropriate metaclass.
+
+ Returns (metaclass, namespace, kwds) as a 3-tuple
+
+ *metaclass* is the appropriate metaclass
+ *namespace* is the prepared class namespace
+ *kwds* is an updated copy of the passed in kwds argument with any
+ 'metaclass' entry removed. If no kwds argument is passed in, this will
+ be an empty dict.
+ """
+ if kwds is None:
+ kwds = {}
+ else:
+ kwds = dict(kwds) # Don't alter the provided mapping
+ if 'metaclass' in kwds:
+ meta = kwds.pop('metaclass')
+ else:
+ if bases:
+ meta = type(bases[0])
+ else:
+ meta = type
+ if isinstance(meta, type):
+ # when meta is a type, we first determine the most-derived metaclass
+ # instead of invoking the initial candidate directly
+ meta = _calculate_meta(meta, bases)
+ if hasattr(meta, '__prepare__'):
+ ns = meta.__prepare__(name, bases, **kwds)
+ else:
+ ns = {}
+ return meta, ns, kwds
+
+def _calculate_meta(meta, bases):
+ """Calculate the most derived metaclass."""
+ winner = meta
+ for base in bases:
+ base_meta = type(base)
+ if issubclass(winner, base_meta):
+ continue
+ if issubclass(base_meta, winner):
+ winner = base_meta
+ continue
+ # else:
+ raise TypeError("metaclass conflict: "
+ "the metaclass of a derived class "
+ "must be a (non-strict) subclass "
+ "of the metaclasses of all its bases")
+ return winner
+
+class DynamicClassAttribute:
+ """Route attribute access on a class to __getattr__.
+
+ This is a descriptor, used to define attributes that act differently when
+ accessed through an instance and through a class. Instance access remains
+ normal, but access to an attribute through a class will be routed to the
+ class's __getattr__ method; this is done by raising AttributeError.
+
+ This allows one to have properties active on an instance, and have virtual
+ attributes on the class with the same name. (Enum used this between Python
+    versions 3.4 to 3.9.)
+
+    Subclass from this to use a different method of accessing virtual
+    attributes and still be treated properly by the inspect module. (Enum
+    uses this since Python 3.10.)
+
+ """
+ def __init__(self, fget=None, fset=None, fdel=None, doc=None):
+ self.fget = fget
+ self.fset = fset
+ self.fdel = fdel
+ # next two lines make DynamicClassAttribute act the same as property
+ self.__doc__ = doc or fget.__doc__
+ self.overwrite_doc = doc is None
+ # support for abstract methods
+ self.__isabstractmethod__ = bool(getattr(fget, '__isabstractmethod__', False))
+
+ def __get__(self, instance, ownerclass=None):
+ if instance is None:
+ if self.__isabstractmethod__:
+ return self
+ raise AttributeError()
+ elif self.fget is None:
+ raise AttributeError("unreadable attribute")
+ return self.fget(instance)
+
+ def __set__(self, instance, value):
+ if self.fset is None:
+ raise AttributeError("can't set attribute")
+ self.fset(instance, value)
+
+ def __delete__(self, instance):
+ if self.fdel is None:
+ raise AttributeError("can't delete attribute")
+ self.fdel(instance)
+
+ def getter(self, fget):
+ fdoc = fget.__doc__ if self.overwrite_doc else None
+ result = type(self)(fget, self.fset, self.fdel, fdoc or self.__doc__)
+ result.overwrite_doc = self.overwrite_doc
+ return result
+
+ def setter(self, fset):
+ result = type(self)(self.fget, fset, self.fdel, self.__doc__)
+ result.overwrite_doc = self.overwrite_doc
+ return result
+
+ def deleter(self, fdel):
+ result = type(self)(self.fget, self.fset, fdel, self.__doc__)
+ result.overwrite_doc = self.overwrite_doc
+ return result
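+
+    # Example (sketch): instance access goes through the descriptor, while
+    # class access falls back to the metaclass __getattr__:
+    #
+    #   class Meta(type):
+    #       def __getattr__(cls, name):
+    #           if name == 'value':
+    #               return 'class-level'
+    #           raise AttributeError(name)
+    #
+    #   class C(metaclass=Meta):
+    #       @DynamicClassAttribute
+    #       def value(self):
+    #           return 'instance-level'
+    #
+    #   C().value   # 'instance-level'
+    #   C.value     # 'class-level'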
+
+
+class _GeneratorWrapper:
+ # TODO: Implement this in C.
+ def __init__(self, gen):
+ self.__wrapped = gen
+ self.__isgen = gen.__class__ is GeneratorType
+ self.__name__ = getattr(gen, '__name__', None)
+ self.__qualname__ = getattr(gen, '__qualname__', None)
+ def send(self, val):
+ return self.__wrapped.send(val)
+ def throw(self, tp, *rest):
+ return self.__wrapped.throw(tp, *rest)
+ def close(self):
+ return self.__wrapped.close()
+ @property
+ def gi_code(self):
+ return self.__wrapped.gi_code
+ @property
+ def gi_frame(self):
+ return self.__wrapped.gi_frame
+ @property
+ def gi_running(self):
+ return self.__wrapped.gi_running
+ @property
+ def gi_yieldfrom(self):
+ return self.__wrapped.gi_yieldfrom
+ cr_code = gi_code
+ cr_frame = gi_frame
+ cr_running = gi_running
+ cr_await = gi_yieldfrom
+ def __next__(self):
+ return next(self.__wrapped)
+ def __iter__(self):
+ if self.__isgen:
+ return self.__wrapped
+ return self
+ __await__ = __iter__
+
+def coroutine(func):
+ """Convert regular generator function to a coroutine."""
+
+ if not callable(func):
+ raise TypeError('types.coroutine() expects a callable')
+
+ if (func.__class__ is FunctionType and
+ getattr(func, '__code__', None).__class__ is CodeType):
+
+ co_flags = func.__code__.co_flags
+
+ # Check if 'func' is a coroutine function.
+ # (0x180 == CO_COROUTINE | CO_ITERABLE_COROUTINE)
+ if co_flags & 0x180:
+ return func
+
+ # Check if 'func' is a generator function.
+ # (0x20 == CO_GENERATOR)
+ if co_flags & 0x20:
+ # TODO: Implement this in C.
+ co = func.__code__
+ # 0x100 == CO_ITERABLE_COROUTINE
+ func.__code__ = co.replace(co_flags=co.co_flags | 0x100)
+ return func
+
+ # The following code is primarily to support functions that
+ # return generator-like objects (for instance generators
+ # compiled with Cython).
+
+ # Delay functools and _collections_abc import for speeding up types import.
+ import functools
+ import _collections_abc
+ @functools.wraps(func)
+ def wrapped(*args, **kwargs):
+ coro = func(*args, **kwargs)
+ if (coro.__class__ is CoroutineType or
+ coro.__class__ is GeneratorType and coro.gi_code.co_flags & 0x100):
+ # 'coro' is a native coroutine object or an iterable coroutine
+ return coro
+ if (isinstance(coro, _collections_abc.Generator) and
+ not isinstance(coro, _collections_abc.Coroutine)):
+ # 'coro' is either a pure Python generator iterator, or it
+ # implements collections.abc.Generator (and does not implement
+ # collections.abc.Coroutine).
+ return _GeneratorWrapper(coro)
+ # 'coro' is either an instance of collections.abc.Coroutine or
+ # some other object -- pass it through.
+ return coro
+
+ return wrapped
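+
+# Example (sketch): after decoration, a plain generator can be awaited from
+# a native coroutine:
+#
+#   @coroutine
+#   def ticker():
+#       yield
+#
+#   async def main():
+#       await ticker()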
+
+GenericAlias = type(list[int])
+UnionType = type(int | str)
+
+EllipsisType = type(Ellipsis)
+NoneType = type(None)
+NotImplementedType = type(NotImplemented)
+
+__all__ = [n for n in globals() if n[:1] != '_']
diff --git a/infer_4_37_2/lib/python3.10/uu.py b/infer_4_37_2/lib/python3.10/uu.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fe252a639eace1d288c31a4993508a2e40b0427
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/uu.py
@@ -0,0 +1,213 @@
+#! /usr/bin/env python3
+
+# Copyright 1994 by Lance Ellinghouse
+# Cathedral City, California Republic, United States of America.
+# All Rights Reserved
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose and without fee is hereby granted,
+# provided that the above copyright notice appear in all copies and that
+# both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of Lance Ellinghouse
+# not be used in advertising or publicity pertaining to distribution
+# of the software without specific, written prior permission.
+# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
+# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
+# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+# Modified by Jack Jansen, CWI, July 1995:
+# - Use binascii module to do the actual line-by-line conversion
+# between ascii and binary. This results in a 1000-fold speedup. The C
+# version is still 5 times faster, though.
+# - Arguments more compliant with python standard
+
+"""Implementation of the UUencode and UUdecode functions.
+
+encode(in_file, out_file [,name, mode], *, backtick=False)
+decode(in_file [, out_file, mode, quiet])
+"""
+
+import binascii
+import os
+import sys
+
+__all__ = ["Error", "encode", "decode"]
+
+class Error(Exception):
+ pass
+
+def encode(in_file, out_file, name=None, mode=None, *, backtick=False):
+ """Uuencode file"""
+ #
+ # If in_file is a pathname open it and change defaults
+ #
+ opened_files = []
+ try:
+ if in_file == '-':
+ in_file = sys.stdin.buffer
+ elif isinstance(in_file, str):
+ if name is None:
+ name = os.path.basename(in_file)
+ if mode is None:
+ try:
+ mode = os.stat(in_file).st_mode
+ except AttributeError:
+ pass
+ in_file = open(in_file, 'rb')
+ opened_files.append(in_file)
+ #
+ # Open out_file if it is a pathname
+ #
+ if out_file == '-':
+ out_file = sys.stdout.buffer
+ elif isinstance(out_file, str):
+ out_file = open(out_file, 'wb')
+ opened_files.append(out_file)
+ #
+ # Set defaults for name and mode
+ #
+ if name is None:
+ name = '-'
+ if mode is None:
+ mode = 0o666
+
+ #
+ # Remove newline chars from name
+ #
+ name = name.replace('\n','\\n')
+ name = name.replace('\r','\\r')
+
+ #
+ # Write the data
+ #
+ out_file.write(('begin %o %s\n' % ((mode & 0o777), name)).encode("ascii"))
+ data = in_file.read(45)
+ while len(data) > 0:
+ out_file.write(binascii.b2a_uu(data, backtick=backtick))
+ data = in_file.read(45)
+ if backtick:
+ out_file.write(b'`\nend\n')
+ else:
+ out_file.write(b' \nend\n')
+ finally:
+ for f in opened_files:
+ f.close()
+
+
+def decode(in_file, out_file=None, mode=None, quiet=False):
+ """Decode uuencoded file"""
+ #
+ # Open the input file, if needed.
+ #
+ opened_files = []
+ if in_file == '-':
+ in_file = sys.stdin.buffer
+ elif isinstance(in_file, str):
+ in_file = open(in_file, 'rb')
+ opened_files.append(in_file)
+
+ try:
+ #
+ # Read until a begin is encountered or we've exhausted the file
+ #
+ while True:
+ hdr = in_file.readline()
+ if not hdr:
+ raise Error('No valid begin line found in input file')
+ if not hdr.startswith(b'begin'):
+ continue
+ hdrfields = hdr.split(b' ', 2)
+ if len(hdrfields) == 3 and hdrfields[0] == b'begin':
+ try:
+ int(hdrfields[1], 8)
+ break
+ except ValueError:
+ pass
+ if out_file is None:
+ # If the filename isn't ASCII, what's up with that?!?
+ out_file = hdrfields[2].rstrip(b' \t\r\n\f').decode("ascii")
+ if os.path.exists(out_file):
+ raise Error(f'Cannot overwrite existing file: {out_file}')
+ if (out_file.startswith(os.sep) or
+ f'..{os.sep}' in out_file or (
+ os.altsep and
+ (out_file.startswith(os.altsep) or
+ f'..{os.altsep}' in out_file))
+ ):
+ raise Error(f'Refusing to write to {out_file} due to directory traversal')
+ if mode is None:
+ mode = int(hdrfields[1], 8)
+ #
+ # Open the output file
+ #
+ if out_file == '-':
+ out_file = sys.stdout.buffer
+ elif isinstance(out_file, str):
+ fp = open(out_file, 'wb')
+ os.chmod(out_file, mode)
+ out_file = fp
+ opened_files.append(out_file)
+ #
+ # Main decoding loop
+ #
+ s = in_file.readline()
+ while s and s.strip(b' \t\r\n\f') != b'end':
+ try:
+ data = binascii.a2b_uu(s)
+ except binascii.Error as v:
+ # Workaround for broken uuencoders by /Fredrik Lundh
+ nbytes = (((s[0]-32) & 63) * 4 + 5) // 3
+ data = binascii.a2b_uu(s[:nbytes])
+ if not quiet:
+ sys.stderr.write("Warning: %s\n" % v)
+ out_file.write(data)
+ s = in_file.readline()
+ if not s:
+ raise Error('Truncated input file')
+ finally:
+ for f in opened_files:
+ f.close()
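+
+# Example (sketch; file names are illustrative):
+#
+#   encode('report.bin', 'report.uu')        # uuencode a file
+#   decode('report.uu', 'report_copy.bin')   # and decode it back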
+
+def test():
+ """uuencode/uudecode main program"""
+
+ import optparse
+ parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
+ parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
+    parser.add_option('-t', '--text', dest='text', help='data is text, encoded format is Unix-compatible text', default=False, action='store_true')
+
+ (options, args) = parser.parse_args()
+ if len(args) > 2:
+        parser.error('incorrect number of arguments')   # error() exits
+
+ # Use the binary streams underlying stdin/stdout
+ input = sys.stdin.buffer
+ output = sys.stdout.buffer
+ if len(args) > 0:
+ input = args[0]
+ if len(args) > 1:
+ output = args[1]
+
+ if options.decode:
+ if options.text:
+ if isinstance(output, str):
+ output = open(output, 'wb')
+ else:
+ print(sys.argv[0], ': cannot do -t to stdout')
+ sys.exit(1)
+ decode(input, output)
+ else:
+ if options.text:
+ if isinstance(input, str):
+ input = open(input, 'rb')
+ else:
+ print(sys.argv[0], ': cannot do -t from stdin')
+ sys.exit(1)
+ encode(input, output)
+
+if __name__ == '__main__':
+ test()
diff --git a/infer_4_37_2/lib/python3.10/warnings.py b/infer_4_37_2/lib/python3.10/warnings.py
new file mode 100644
index 0000000000000000000000000000000000000000..691ccddfa450ad242e45458e693dbdead2d56bf4
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/warnings.py
@@ -0,0 +1,549 @@
+"""Python part of the warnings subsystem."""
+
+import sys
+
+
+__all__ = ["warn", "warn_explicit", "showwarning",
+ "formatwarning", "filterwarnings", "simplefilter",
+ "resetwarnings", "catch_warnings"]
+
+def showwarning(message, category, filename, lineno, file=None, line=None):
+ """Hook to write a warning to a file; replace if you like."""
+ msg = WarningMessage(message, category, filename, lineno, file, line)
+ _showwarnmsg_impl(msg)
+
+def formatwarning(message, category, filename, lineno, line=None):
+ """Function to format a warning the standard way."""
+ msg = WarningMessage(message, category, filename, lineno, None, line)
+ return _formatwarnmsg_impl(msg)
+
+def _showwarnmsg_impl(msg):
+ file = msg.file
+ if file is None:
+ file = sys.stderr
+ if file is None:
+ # sys.stderr is None when run with pythonw.exe:
+ # warnings get lost
+ return
+ text = _formatwarnmsg(msg)
+ try:
+ file.write(text)
+ except OSError:
+ # the file (probably stderr) is invalid - this warning gets lost.
+ pass
+
+def _formatwarnmsg_impl(msg):
+ category = msg.category.__name__
+ s = f"{msg.filename}:{msg.lineno}: {category}: {msg.message}\n"
+
+ if msg.line is None:
+ try:
+ import linecache
+ line = linecache.getline(msg.filename, msg.lineno)
+ except Exception:
+ # When a warning is logged during Python shutdown, linecache
+ # and the import machinery don't work anymore
+ line = None
+ linecache = None
+ else:
+ line = msg.line
+ if line:
+ line = line.strip()
+ s += " %s\n" % line
+
+ if msg.source is not None:
+ try:
+ import tracemalloc
+ # Logging a warning should not raise a new exception:
+ # catch Exception, not only ImportError and RecursionError.
+ except Exception:
+ # don't suggest to enable tracemalloc if it's not available
+ tracing = True
+ tb = None
+ else:
+ tracing = tracemalloc.is_tracing()
+ try:
+ tb = tracemalloc.get_object_traceback(msg.source)
+ except Exception:
+ # When a warning is logged during Python shutdown, tracemalloc
+ # and the import machinery don't work anymore
+ tb = None
+
+ if tb is not None:
+ s += 'Object allocated at (most recent call last):\n'
+ for frame in tb:
+ s += (' File "%s", lineno %s\n'
+ % (frame.filename, frame.lineno))
+
+ try:
+ if linecache is not None:
+ line = linecache.getline(frame.filename, frame.lineno)
+ else:
+ line = None
+ except Exception:
+ line = None
+ if line:
+ line = line.strip()
+ s += ' %s\n' % line
+ elif not tracing:
+ s += (f'{category}: Enable tracemalloc to get the object '
+ f'allocation traceback\n')
+ return s
+
+# Keep a reference to check if the function was replaced
+_showwarning_orig = showwarning
+
+def _showwarnmsg(msg):
+ """Hook to write a warning to a file; replace if you like."""
+ try:
+ sw = showwarning
+ except NameError:
+ pass
+ else:
+ if sw is not _showwarning_orig:
+ # warnings.showwarning() was replaced
+ if not callable(sw):
+ raise TypeError("warnings.showwarning() must be set to a "
+ "function or method")
+
+ sw(msg.message, msg.category, msg.filename, msg.lineno,
+ msg.file, msg.line)
+ return
+ _showwarnmsg_impl(msg)
+
+# Keep a reference to check if the function was replaced
+_formatwarning_orig = formatwarning
+
+def _formatwarnmsg(msg):
+ """Function to format a warning the standard way."""
+ try:
+ fw = formatwarning
+ except NameError:
+ pass
+ else:
+ if fw is not _formatwarning_orig:
+ # warnings.formatwarning() was replaced
+ return fw(msg.message, msg.category,
+ msg.filename, msg.lineno, msg.line)
+ return _formatwarnmsg_impl(msg)
+
+def filterwarnings(action, message="", category=Warning, module="", lineno=0,
+ append=False):
+ """Insert an entry into the list of warnings filters (at the front).
+
+ 'action' -- one of "error", "ignore", "always", "default", "module",
+ or "once"
+ 'message' -- a regex that the warning message must match
+ 'category' -- a class that the warning must be a subclass of
+ 'module' -- a regex that the module name must match
+ 'lineno' -- an integer line number, 0 matches all warnings
+ 'append' -- if true, append to the list of filters
+ """
+ assert action in ("error", "ignore", "always", "default", "module",
+ "once"), "invalid action: %r" % (action,)
+ assert isinstance(message, str), "message must be a string"
+ assert isinstance(category, type), "category must be a class"
+ assert issubclass(category, Warning), "category must be a Warning subclass"
+ assert isinstance(module, str), "module must be a string"
+ assert isinstance(lineno, int) and lineno >= 0, \
+ "lineno must be an int >= 0"
+
+ if message or module:
+ import re
+
+ if message:
+ message = re.compile(message, re.I)
+ else:
+ message = None
+ if module:
+ module = re.compile(module)
+ else:
+ module = None
+
+ _add_filter(action, message, category, module, lineno, append=append)
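+
+# Example (sketch; the regexes are illustrative): turn matching
+# DeprecationWarnings raised from package 'mypkg' into errors:
+#
+#   filterwarnings('error', message=r'deprecated',
+#                  category=DeprecationWarning, module=r'mypkg')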
+
+def simplefilter(action, category=Warning, lineno=0, append=False):
+ """Insert a simple entry into the list of warnings filters (at the front).
+
+ A simple filter matches all modules and messages.
+ 'action' -- one of "error", "ignore", "always", "default", "module",
+ or "once"
+ 'category' -- a class that the warning must be a subclass of
+ 'lineno' -- an integer line number, 0 matches all warnings
+ 'append' -- if true, append to the list of filters
+ """
+ assert action in ("error", "ignore", "always", "default", "module",
+ "once"), "invalid action: %r" % (action,)
+ assert isinstance(lineno, int) and lineno >= 0, \
+ "lineno must be an int >= 0"
+ _add_filter(action, None, category, None, lineno, append=append)
+
+def _add_filter(*item, append):
+ # Remove possible duplicate filters, so new one will be placed
+ # in correct place. If append=True and duplicate exists, do nothing.
+ if not append:
+ try:
+ filters.remove(item)
+ except ValueError:
+ pass
+ filters.insert(0, item)
+ else:
+ if item not in filters:
+ filters.append(item)
+ _filters_mutated()
+
+def resetwarnings():
+ """Clear the list of warning filters, so that no filters are active."""
+ filters[:] = []
+ _filters_mutated()
+
+class _OptionError(Exception):
+ """Exception used by option processing helpers."""
+ pass
+
+# Helper to process -W options passed via sys.warnoptions
+def _processoptions(args):
+ for arg in args:
+ try:
+ _setoption(arg)
+ except _OptionError as msg:
+ print("Invalid -W option ignored:", msg, file=sys.stderr)
+
+# Helper for _processoptions()
+def _setoption(arg):
+ parts = arg.split(':')
+ if len(parts) > 5:
+ raise _OptionError("too many fields (max 5): %r" % (arg,))
+ while len(parts) < 5:
+ parts.append('')
+ action, message, category, module, lineno = [s.strip()
+ for s in parts]
+ action = _getaction(action)
+ category = _getcategory(category)
+ if message or module:
+ import re
+ if message:
+ message = re.escape(message)
+ if module:
+ module = re.escape(module) + r'\Z'
+ if lineno:
+ try:
+ lineno = int(lineno)
+ if lineno < 0:
+ raise ValueError
+ except (ValueError, OverflowError):
+ raise _OptionError("invalid lineno %r" % (lineno,)) from None
+ else:
+ lineno = 0
+ filterwarnings(action, message, category, module, lineno)
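+
+# Example (sketch): the command line option -W "error::DeprecationWarning:mypkg"
+# is parsed by _setoption() into action='error', message='',
+# category=DeprecationWarning, module='mypkg' (escaped and anchored with \Z),
+# and lineno=0.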
+
+# Helper for _setoption()
+def _getaction(action):
+ if not action:
+ return "default"
+ if action == "all": return "always" # Alias
+ for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
+ if a.startswith(action):
+ return a
+ raise _OptionError("invalid action: %r" % (action,))
+
+# Helper for _setoption()
+def _getcategory(category):
+ if not category:
+ return Warning
+ if '.' not in category:
+ import builtins as m
+ klass = category
+ else:
+ module, _, klass = category.rpartition('.')
+ try:
+ m = __import__(module, None, None, [klass])
+ except ImportError:
+ raise _OptionError("invalid module name: %r" % (module,)) from None
+ try:
+ cat = getattr(m, klass)
+ except AttributeError:
+ raise _OptionError("unknown warning category: %r" % (category,)) from None
+ if not issubclass(cat, Warning):
+ raise _OptionError("invalid warning category: %r" % (category,))
+ return cat
+
+
+def _is_internal_frame(frame):
+ """Signal whether the frame is an internal CPython implementation detail."""
+ filename = frame.f_code.co_filename
+ return 'importlib' in filename and '_bootstrap' in filename
+
+
+def _next_external_frame(frame):
+ """Find the next frame that doesn't involve CPython internals."""
+ frame = frame.f_back
+ while frame is not None and _is_internal_frame(frame):
+ frame = frame.f_back
+ return frame
+
+
+# Code typically replaced by _warnings
+def warn(message, category=None, stacklevel=1, source=None):
+ """Issue a warning, or maybe ignore it or raise an exception."""
+ # Check if message is already a Warning object
+ if isinstance(message, Warning):
+ category = message.__class__
+ # Check category argument
+ if category is None:
+ category = UserWarning
+ if not (isinstance(category, type) and issubclass(category, Warning)):
+ raise TypeError("category must be a Warning subclass, "
+ "not '{:s}'".format(type(category).__name__))
+ # Get context information
+ try:
+ if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)):
+ # If frame is too small to care or if the warning originated in
+ # internal code, then do not try to hide any frames.
+ frame = sys._getframe(stacklevel)
+ else:
+ frame = sys._getframe(1)
+ # Look for one frame less since the above line starts us off.
+ for x in range(stacklevel-1):
+ frame = _next_external_frame(frame)
+ if frame is None:
+ raise ValueError
+ except ValueError:
+ globals = sys.__dict__
+ filename = "sys"
+ lineno = 1
+ else:
+ globals = frame.f_globals
+ filename = frame.f_code.co_filename
+ lineno = frame.f_lineno
+ if '__name__' in globals:
+ module = globals['__name__']
+ else:
+ module = ""
+ registry = globals.setdefault("__warningregistry__", {})
+ warn_explicit(message, category, filename, lineno, module, registry,
+ globals, source)
+
+def warn_explicit(message, category, filename, lineno,
+ module=None, registry=None, module_globals=None,
+ source=None):
+ lineno = int(lineno)
+ if module is None:
+ module = filename or ""
+ if module[-3:].lower() == ".py":
+ module = module[:-3] # XXX What about leading pathname?
+ if registry is None:
+ registry = {}
+ if registry.get('version', 0) != _filters_version:
+ registry.clear()
+ registry['version'] = _filters_version
+ if isinstance(message, Warning):
+ text = str(message)
+ category = message.__class__
+ else:
+ text = message
+ message = category(message)
+ key = (text, category, lineno)
+ # Quick test for common case
+ if registry.get(key):
+ return
+ # Search the filters
+ for item in filters:
+ action, msg, cat, mod, ln = item
+ if ((msg is None or msg.match(text)) and
+ issubclass(category, cat) and
+ (mod is None or mod.match(module)) and
+ (ln == 0 or lineno == ln)):
+ break
+ else:
+ action = defaultaction
+ # Early exit actions
+ if action == "ignore":
+ return
+
+ # Prime the linecache for formatting, in case the
+ # "file" is actually in a zipfile or something.
+ import linecache
+ linecache.getlines(filename, module_globals)
+
+ if action == "error":
+ raise message
+ # Other actions
+ if action == "once":
+ registry[key] = 1
+ oncekey = (text, category)
+ if onceregistry.get(oncekey):
+ return
+ onceregistry[oncekey] = 1
+ elif action == "always":
+ pass
+ elif action == "module":
+ registry[key] = 1
+ altkey = (text, category, 0)
+ if registry.get(altkey):
+ return
+ registry[altkey] = 1
+ elif action == "default":
+ registry[key] = 1
+ else:
+ # Unrecognized actions are errors
+ raise RuntimeError(
+ "Unrecognized action (%r) in warnings.filters:\n %s" %
+ (action, item))
+ # Print message and context
+ msg = WarningMessage(message, category, filename, lineno, source)
+ _showwarnmsg(msg)
+
+
+class WarningMessage(object):
+
+ _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
+ "line", "source")
+
+ def __init__(self, message, category, filename, lineno, file=None,
+ line=None, source=None):
+ self.message = message
+ self.category = category
+ self.filename = filename
+ self.lineno = lineno
+ self.file = file
+ self.line = line
+ self.source = source
+ self._category_name = category.__name__ if category else None
+
+ def __str__(self):
+ return ("{message : %r, category : %r, filename : %r, lineno : %s, "
+ "line : %r}" % (self.message, self._category_name,
+ self.filename, self.lineno, self.line))
+
+
+class catch_warnings(object):
+
+ """A context manager that copies and restores the warnings filter upon
+ exiting the context.
+
+ The 'record' argument specifies whether warnings should be captured by a
+ custom implementation of warnings.showwarning() and be appended to a list
+ returned by the context manager. Otherwise None is returned by the context
+ manager. The objects appended to the list are arguments whose attributes
+ mirror the arguments to showwarning().
+
+ The 'module' argument is to specify an alternative module to the module
+ named 'warnings' and imported under that name. This argument is only useful
+ when testing the warnings module itself.
+
+ """
+
+ def __init__(self, *, record=False, module=None):
+ """Specify whether to record warnings and if an alternative module
+ should be used other than sys.modules['warnings'].
+
+ For compatibility with Python 3.0, please consider all arguments to be
+ keyword-only.
+
+ """
+ self._record = record
+ self._module = sys.modules['warnings'] if module is None else module
+ self._entered = False
+
+ def __repr__(self):
+ args = []
+ if self._record:
+ args.append("record=True")
+ if self._module is not sys.modules['warnings']:
+ args.append("module=%r" % self._module)
+ name = type(self).__name__
+ return "%s(%s)" % (name, ", ".join(args))
+
+ def __enter__(self):
+ if self._entered:
+ raise RuntimeError("Cannot enter %r twice" % self)
+ self._entered = True
+ self._filters = self._module.filters
+ self._module.filters = self._filters[:]
+ self._module._filters_mutated()
+ self._showwarning = self._module.showwarning
+ self._showwarnmsg_impl = self._module._showwarnmsg_impl
+ if self._record:
+ log = []
+ self._module._showwarnmsg_impl = log.append
+ # Reset showwarning() to the default implementation to make sure
+ # that _showwarnmsg() calls _showwarnmsg_impl()
+ self._module.showwarning = self._module._showwarning_orig
+ return log
+ else:
+ return None
+
+ def __exit__(self, *exc_info):
+ if not self._entered:
+ raise RuntimeError("Cannot exit %r without entering first" % self)
+ self._module.filters = self._filters
+ self._module._filters_mutated()
+ self._module.showwarning = self._showwarning
+ self._module._showwarnmsg_impl = self._showwarnmsg_impl
+
+
+# Private utility function called by _PyErr_WarnUnawaitedCoroutine
+def _warn_unawaited_coroutine(coro):
+ msg_lines = [
+ f"coroutine '{coro.__qualname__}' was never awaited\n"
+ ]
+ if coro.cr_origin is not None:
+ import linecache, traceback
+ def extract():
+ for filename, lineno, funcname in reversed(coro.cr_origin):
+ line = linecache.getline(filename, lineno)
+ yield (filename, lineno, funcname, line)
+ msg_lines.append("Coroutine created at (most recent call last)\n")
+ msg_lines += traceback.format_list(list(extract()))
+ msg = "".join(msg_lines).rstrip("\n")
+ # Passing source= here means that if the user happens to have tracemalloc
+ # enabled and tracking where the coroutine was created, the warning will
+ # contain that traceback. This does mean that if they have *both*
+ # coroutine origin tracking *and* tracemalloc enabled, they'll get two
+ # partially-redundant tracebacks. If we wanted to be clever we could
+ # probably detect this case and avoid it, but for now we don't bother.
+ warn(msg, category=RuntimeWarning, stacklevel=2, source=coro)
+
+
+# filters contains a sequence of filter 5-tuples
+# The components of the 5-tuple are:
+# - an action: error, ignore, always, default, module, or once
+# - a compiled regex that must match the warning message
+# - a class representing the warning category
+# - a compiled regex that must match the module that is being warned
+# - a line number for the line being warned about, or 0 to mean any line
+# If either of the compiled regexes is None, it matches anything.
+try:
+ from _warnings import (filters, _defaultaction, _onceregistry,
+ warn, warn_explicit, _filters_mutated)
+ defaultaction = _defaultaction
+ onceregistry = _onceregistry
+ _warnings_defaults = True
+except ImportError:
+ filters = []
+ defaultaction = "default"
+ onceregistry = {}
+
+ _filters_version = 1
+
+ def _filters_mutated():
+ global _filters_version
+ _filters_version += 1
+
+ _warnings_defaults = False
+
+
+# Module initialization
+_processoptions(sys.warnoptions)
+if not _warnings_defaults:
+ # Several warning categories are ignored by default in regular builds
+ if not hasattr(sys, 'gettotalrefcount'):
+ filterwarnings("default", category=DeprecationWarning,
+ module="__main__", append=1)
+ simplefilter("ignore", category=DeprecationWarning, append=1)
+ simplefilter("ignore", category=PendingDeprecationWarning, append=1)
+ simplefilter("ignore", category=ImportWarning, append=1)
+ simplefilter("ignore", category=ResourceWarning, append=1)
+
+del _warnings_defaults
diff --git a/infer_4_37_2/lib/python3.10/wave.py b/infer_4_37_2/lib/python3.10/wave.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7071198e6b8413cea89839a221f613779506714
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/wave.py
@@ -0,0 +1,513 @@
+"""Stuff to parse WAVE files.
+
+Usage.
+
+Reading WAVE files:
+ f = wave.open(file, 'r')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods read(), seek(), and close().
+When the setpos() and rewind() methods are not used, the seek()
+method is not necessary.
+
+This returns an instance of a class with the following public methods:
+ getnchannels() -- returns number of audio channels (1 for
+ mono, 2 for stereo)
+ getsampwidth() -- returns sample width in bytes
+ getframerate() -- returns sampling frequency
+ getnframes() -- returns number of audio frames
+ getcomptype() -- returns compression type ('NONE' for linear samples)
+    getcompname() -- returns human-readable version of
+                compression type ('not compressed' for linear samples)
+ getparams() -- returns a namedtuple consisting of all of the
+ above in the above order
+ getmarkers() -- returns None (for compatibility with the
+ aifc module)
+ getmark(id) -- raises an error since the mark does not
+ exist (for compatibility with the aifc module)
+ readframes(n) -- returns at most n frames of audio
+ rewind() -- rewind to the beginning of the audio stream
+ setpos(pos) -- seek to the specified position
+ tell() -- return the current position
+ close() -- close the instance (make it unusable)
+The position returned by tell() and the position given to setpos()
+are compatible and have nothing to do with the actual position in the
+file.
+The close() method is called automatically when the class instance
+is destroyed.
+
+Writing WAVE files:
+ f = wave.open(file, 'w')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods write(), tell(), seek(), and
+close().
+
+This returns an instance of a class with the following public methods:
+ setnchannels(n) -- set the number of channels
+ setsampwidth(n) -- set the sample width
+ setframerate(n) -- set the frame rate
+ setnframes(n) -- set the number of frames
+ setcomptype(type, name)
+ -- set the compression type and the
+ human-readable compression type
+ setparams(tuple)
+ -- set all parameters at once
+ tell() -- return current position in output file
+ writeframesraw(data)
+ -- write audio frames without patching up the
+ file header
+ writeframes(data)
+ -- write audio frames and patch up the file header
+ close() -- patch up the file header and close the
+ output file
+You should set the parameters before the first writeframesraw or
+writeframes. The total number of frames does not need to be set,
+but when it is set to the correct value, the header does not have to
+be patched up.
+It is best to first set all parameters, except possibly the
+compression type, and then write audio frames using writeframesraw.
+When all frames have been written, either call writeframes(b'') or
+close() to patch up the sizes in the header.
+The close() method is called automatically when the class instance
+is destroyed.
+"""
+
+from chunk import Chunk
+from collections import namedtuple
+import audioop
+import builtins
+import struct
+import sys
+
+
+__all__ = ["open", "Error", "Wave_read", "Wave_write"]
+
+class Error(Exception):
+ pass
+
+WAVE_FORMAT_PCM = 0x0001
+
+_array_fmts = None, 'b', 'h', None, 'i'
+
+_wave_params = namedtuple('_wave_params',
+ 'nchannels sampwidth framerate nframes comptype compname')
+
+class Wave_read:
+ """Variables used in this class:
+
+    These variables are available to the user through appropriate
+ methods of this class:
+ _file -- the open file with methods read(), close(), and seek()
+ set through the __init__() method
+ _nchannels -- the number of audio channels
+ available through the getnchannels() method
+ _nframes -- the number of audio frames
+ available through the getnframes() method
+ _sampwidth -- the number of bytes per audio sample
+ available through the getsampwidth() method
+ _framerate -- the sampling frequency
+ available through the getframerate() method
+ _comptype -- the AIFF-C compression type ('NONE' if AIFF)
+ available through the getcomptype() method
+ _compname -- the human-readable AIFF-C compression type
+ available through the getcomptype() method
+ _soundpos -- the position in the audio stream
+ available through the tell() method, set through the
+ setpos() method
+
+ These variables are used internally only:
+ _fmt_chunk_read -- 1 iff the FMT chunk has been read
+ _data_seek_needed -- 1 iff positioned correctly in audio
+ file for readframes()
+ _data_chunk -- instantiation of a chunk class for the DATA chunk
+ _framesize -- size of one frame in the file
+ """
+
+ def initfp(self, file):
+ self._convert = None
+ self._soundpos = 0
+ self._file = Chunk(file, bigendian = 0)
+ if self._file.getname() != b'RIFF':
+ raise Error('file does not start with RIFF id')
+ if self._file.read(4) != b'WAVE':
+ raise Error('not a WAVE file')
+ self._fmt_chunk_read = 0
+ self._data_chunk = None
+ while 1:
+ self._data_seek_needed = 1
+ try:
+ chunk = Chunk(self._file, bigendian = 0)
+ except EOFError:
+ break
+ chunkname = chunk.getname()
+ if chunkname == b'fmt ':
+ self._read_fmt_chunk(chunk)
+ self._fmt_chunk_read = 1
+ elif chunkname == b'data':
+ if not self._fmt_chunk_read:
+ raise Error('data chunk before fmt chunk')
+ self._data_chunk = chunk
+ self._nframes = chunk.chunksize // self._framesize
+ self._data_seek_needed = 0
+ break
+ chunk.skip()
+ if not self._fmt_chunk_read or not self._data_chunk:
+ raise Error('fmt chunk and/or data chunk missing')
+
+ def __init__(self, f):
+ self._i_opened_the_file = None
+ if isinstance(f, str):
+ f = builtins.open(f, 'rb')
+ self._i_opened_the_file = f
+ # else, assume it is an open file object already
+ try:
+ self.initfp(f)
+ except:
+ if self._i_opened_the_file:
+ f.close()
+ raise
+
+ def __del__(self):
+ self.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+ #
+ # User visible methods.
+ #
+ def getfp(self):
+ return self._file
+
+ def rewind(self):
+ self._data_seek_needed = 1
+ self._soundpos = 0
+
+ def close(self):
+ self._file = None
+ file = self._i_opened_the_file
+ if file:
+ self._i_opened_the_file = None
+ file.close()
+
+ def tell(self):
+ return self._soundpos
+
+ def getnchannels(self):
+ return self._nchannels
+
+ def getnframes(self):
+ return self._nframes
+
+ def getsampwidth(self):
+ return self._sampwidth
+
+ def getframerate(self):
+ return self._framerate
+
+ def getcomptype(self):
+ return self._comptype
+
+ def getcompname(self):
+ return self._compname
+
+ def getparams(self):
+ return _wave_params(self.getnchannels(), self.getsampwidth(),
+ self.getframerate(), self.getnframes(),
+ self.getcomptype(), self.getcompname())
+
+ def getmarkers(self):
+ return None
+
+ def getmark(self, id):
+ raise Error('no marks')
+
+ def setpos(self, pos):
+ if pos < 0 or pos > self._nframes:
+ raise Error('position not in range')
+ self._soundpos = pos
+ self._data_seek_needed = 1
+
+ def readframes(self, nframes):
+ if self._data_seek_needed:
+ self._data_chunk.seek(0, 0)
+ pos = self._soundpos * self._framesize
+ if pos:
+ self._data_chunk.seek(pos, 0)
+ self._data_seek_needed = 0
+ if nframes == 0:
+ return b''
+ data = self._data_chunk.read(nframes * self._framesize)
+ if self._sampwidth != 1 and sys.byteorder == 'big':
+ data = audioop.byteswap(data, self._sampwidth)
+ if self._convert and data:
+ data = self._convert(data)
+ self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
+ return data
+
+ #
+ # Internal methods.
+ #
+
+ def _read_fmt_chunk(self, chunk):
+ try:
+            wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack_from('<HHLLH', chunk.read(14))
+        except struct.error:
+            raise EOFError from None
+        if wFormatTag == WAVE_FORMAT_PCM:
+            try:
+                sampwidth = struct.unpack_from('<H', chunk.read(2))[0]
+            except struct.error:
+                raise EOFError from None
+            self._sampwidth = (sampwidth + 7) // 8
+            if not self._sampwidth:
+                raise Error('bad sample width')
+        else:
+            raise Error('unknown format: %r' % (wFormatTag,))
+        if not self._nchannels:
+            raise Error('bad # of channels')
+        self._framesize = self._nchannels * self._sampwidth
+        self._comptype = 'NONE'
+        self._compname = 'not compressed'
+
+
+class Wave_write:
+    """Variables used in this class:
+
+    These variables are user settable through appropriate methods
+    of this class:
+    _file -- the open file with methods write(), close(), tell(), seek()
+              set through the __init__() method
+    _comptype -- the AIFF-C compression type ('NONE' in AIFF)
+              set through the setcomptype() or setparams() method
+    _compname -- the human-readable AIFF-C compression type
+              set through the setcomptype() or setparams() method
+    _nchannels -- the number of audio channels
+              set through the setnchannels() or setparams() method
+    _sampwidth -- the number of bytes per audio sample
+              set through the setsampwidth() or setparams() method
+    _framerate -- the sampling frequency
+              set through the setframerate() or setparams() method
+    _nframes -- the number of audio frames written to the header
+              set through the setnframes() or setparams() method
+    _nframeswritten -- the number of frames actually written
+              calculated internally
+    _datawritten -- the size of the audio samples written to the header
+              calculated internally
+    _datalength -- the size of the audio samples written to the header
+              calculated internally
+    _headerwritten -- whether the header has been written or not
+              calculated internally
+    """
+
+    def __init__(self, f):
+        self._i_opened_the_file = None
+        if isinstance(f, str):
+            f = builtins.open(f, 'wb')
+            self._i_opened_the_file = f
+        try:
+            self.initfp(f)
+        except:
+            if self._i_opened_the_file:
+                f.close()
+            raise
+
+    def initfp(self, file):
+        self._file = file
+        self._convert = None
+        self._nchannels = 0
+        self._sampwidth = 0
+        self._framerate = 0
+        self._nframes = 0
+        self._nframeswritten = 0
+        self._datawritten = 0
+        self._datalength = 0
+        self._headerwritten = False
+
+    def __del__(self):
+        self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    #
+    # User visible methods.
+    #
+    def setnchannels(self, nchannels):
+        if self._datawritten:
+            raise Error('cannot change parameters after starting to write')
+        if nchannels < 1:
+            raise Error('bad # of channels')
+        self._nchannels = nchannels
+
+    def getnchannels(self):
+        if not self._nchannels:
+            raise Error('number of channels not set')
+        return self._nchannels
+
+    def setsampwidth(self, sampwidth):
+        if self._datawritten:
+            raise Error('cannot change parameters after starting to write')
+        if sampwidth < 1 or sampwidth > 4:
+ raise Error('bad sample width')
+ self._sampwidth = sampwidth
+
+ def getsampwidth(self):
+ if not self._sampwidth:
+ raise Error('sample width not set')
+ return self._sampwidth
+
+ def setframerate(self, framerate):
+ if self._datawritten:
+ raise Error('cannot change parameters after starting to write')
+ if framerate <= 0:
+ raise Error('bad frame rate')
+ self._framerate = int(round(framerate))
+
+ def getframerate(self):
+ if not self._framerate:
+ raise Error('frame rate not set')
+ return self._framerate
+
+ def setnframes(self, nframes):
+ if self._datawritten:
+ raise Error('cannot change parameters after starting to write')
+ self._nframes = nframes
+
+ def getnframes(self):
+ return self._nframeswritten
+
+ def setcomptype(self, comptype, compname):
+ if self._datawritten:
+ raise Error('cannot change parameters after starting to write')
+ if comptype not in ('NONE',):
+ raise Error('unsupported compression type')
+ self._comptype = comptype
+ self._compname = compname
+
+ def getcomptype(self):
+ return self._comptype
+
+ def getcompname(self):
+ return self._compname
+
+ def setparams(self, params):
+ nchannels, sampwidth, framerate, nframes, comptype, compname = params
+ if self._datawritten:
+ raise Error('cannot change parameters after starting to write')
+ self.setnchannels(nchannels)
+ self.setsampwidth(sampwidth)
+ self.setframerate(framerate)
+ self.setnframes(nframes)
+ self.setcomptype(comptype, compname)
+
+ def getparams(self):
+ if not self._nchannels or not self._sampwidth or not self._framerate:
+ raise Error('not all parameters set')
+ return _wave_params(self._nchannels, self._sampwidth, self._framerate,
+ self._nframes, self._comptype, self._compname)
+
+ def setmark(self, id, pos, name):
+ raise Error('setmark() not supported')
+
+ def getmark(self, id):
+ raise Error('no marks')
+
+ def getmarkers(self):
+ return None
+
+ def tell(self):
+ return self._nframeswritten
+
+ def writeframesraw(self, data):
+ if not isinstance(data, (bytes, bytearray)):
+ data = memoryview(data).cast('B')
+ self._ensure_header_written(len(data))
+ nframes = len(data) // (self._sampwidth * self._nchannels)
+ if self._convert:
+ data = self._convert(data)
+ if self._sampwidth != 1 and sys.byteorder == 'big':
+ data = audioop.byteswap(data, self._sampwidth)
+ self._file.write(data)
+ self._datawritten += len(data)
+ self._nframeswritten = self._nframeswritten + nframes
+
+ def writeframes(self, data):
+ self.writeframesraw(data)
+ if self._datalength != self._datawritten:
+ self._patchheader()
+
+ def close(self):
+ try:
+ if self._file:
+ self._ensure_header_written(0)
+ if self._datalength != self._datawritten:
+ self._patchheader()
+ self._file.flush()
+ finally:
+ self._file = None
+ file = self._i_opened_the_file
+ if file:
+ self._i_opened_the_file = None
+ file.close()
+
+ #
+ # Internal methods.
+ #
+
+ def _ensure_header_written(self, datasize):
+ if not self._headerwritten:
+ if not self._nchannels:
+ raise Error('# channels not specified')
+ if not self._sampwidth:
+ raise Error('sample width not specified')
+ if not self._framerate:
+ raise Error('sampling rate not specified')
+ self._write_header(datasize)
+
+ def _write_header(self, initlength):
+ assert not self._headerwritten
+ self._file.write(b'RIFF')
+ if not self._nframes:
+ self._nframes = initlength // (self._nchannels * self._sampwidth)
+ self._datalength = self._nframes * self._nchannels * self._sampwidth
+ try:
+ self._form_length_pos = self._file.tell()
+ except (AttributeError, OSError):
+ self._form_length_pos = None
+ self._file.write(struct.pack('" % (self.__class__.__name__, id(self))
+
+ def __setitem__(self, key, value):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data[key] = KeyedRef(value, self._remove, key)
+
+ def copy(self):
+ if self._pending_removals:
+ self._commit_removals()
+ new = WeakValueDictionary()
+ with _IterationGuard(self):
+ for key, wr in self.data.items():
+ o = wr()
+ if o is not None:
+ new[key] = o
+ return new
+
+ __copy__ = copy
+
+ def __deepcopy__(self, memo):
+ from copy import deepcopy
+ if self._pending_removals:
+ self._commit_removals()
+ new = self.__class__()
+ with _IterationGuard(self):
+ for key, wr in self.data.items():
+ o = wr()
+ if o is not None:
+ new[deepcopy(key, memo)] = o
+ return new
+
+ def get(self, key, default=None):
+ if self._pending_removals:
+ self._commit_removals()
+ try:
+ wr = self.data[key]
+ except KeyError:
+ return default
+ else:
+ o = wr()
+ if o is None:
+                # This should only happen if the referent has been
+                # collected but the dead entry was not removed yet
+ return default
+ else:
+ return o
+
+ def items(self):
+ if self._pending_removals:
+ self._commit_removals()
+ with _IterationGuard(self):
+ for k, wr in self.data.items():
+ v = wr()
+ if v is not None:
+ yield k, v
+
+ def keys(self):
+ if self._pending_removals:
+ self._commit_removals()
+ with _IterationGuard(self):
+ for k, wr in self.data.items():
+ if wr() is not None:
+ yield k
+
+ __iter__ = keys
+
+ def itervaluerefs(self):
+ """Return an iterator that yields the weak references to the values.
+
+ The references are not guaranteed to be 'live' at the time
+ they are used, so the result of calling the references needs
+ to be checked before being used. This can be used to avoid
+ creating references that will cause the garbage collector to
+ keep the values around longer than needed.
+
+ """
+ if self._pending_removals:
+ self._commit_removals()
+ with _IterationGuard(self):
+ yield from self.data.values()
+
+ def values(self):
+ if self._pending_removals:
+ self._commit_removals()
+ with _IterationGuard(self):
+ for wr in self.data.values():
+ obj = wr()
+ if obj is not None:
+ yield obj
+
+ def popitem(self):
+ if self._pending_removals:
+ self._commit_removals()
+ while True:
+ key, wr = self.data.popitem()
+ o = wr()
+ if o is not None:
+ return key, o
+
+ def pop(self, key, *args):
+ if self._pending_removals:
+ self._commit_removals()
+ try:
+ o = self.data.pop(key)()
+ except KeyError:
+ o = None
+ if o is None:
+ if args:
+ return args[0]
+ else:
+ raise KeyError(key)
+ else:
+ return o
+
+ def setdefault(self, key, default=None):
+ try:
+ o = self.data[key]()
+ except KeyError:
+ o = None
+ if o is None:
+ if self._pending_removals:
+ self._commit_removals()
+ self.data[key] = KeyedRef(default, self._remove, key)
+ return default
+ else:
+ return o
+
+ def update(self, other=None, /, **kwargs):
+ if self._pending_removals:
+ self._commit_removals()
+ d = self.data
+ if other is not None:
+ if not hasattr(other, "items"):
+ other = dict(other)
+ for key, o in other.items():
+ d[key] = KeyedRef(o, self._remove, key)
+ for key, o in kwargs.items():
+ d[key] = KeyedRef(o, self._remove, key)
+
+ def valuerefs(self):
+ """Return a list of weak references to the values.
+
+ The references are not guaranteed to be 'live' at the time
+ they are used, so the result of calling the references needs
+ to be checked before being used. This can be used to avoid
+ creating references that will cause the garbage collector to
+ keep the values around longer than needed.
+
+ """
+ if self._pending_removals:
+ self._commit_removals()
+ return list(self.data.values())
+
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+ def __or__(self, other):
+ if isinstance(other, _collections_abc.Mapping):
+ c = self.copy()
+ c.update(other)
+ return c
+ return NotImplemented
+
+ def __ror__(self, other):
+ if isinstance(other, _collections_abc.Mapping):
+ c = self.__class__()
+ c.update(other)
+ c.update(self)
+ return c
+ return NotImplemented
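+
+# Illustrative sketch (not part of the stdlib module): a tiny demo of
+# WeakValueDictionary semantics -- an entry vanishes once the last strong
+# reference to its value is dropped. The helper name _demo_weak_value_dict
+# is hypothetical.
+def _demo_weak_value_dict():
+    class Obj:
+        pass
+    d = WeakValueDictionary()
+    v = Obj()
+    d["k"] = v           # stores a KeyedRef to v, not v itself
+    assert d["k"] is v
+    del v                # drop the only strong reference to the value
+    import gc
+    gc.collect()         # make collection deterministic off CPython
+    assert "k" not in d  # the shared callback removed the entry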
+
+
+class KeyedRef(ref):
+ """Specialized reference that includes a key corresponding to the value.
+
+ This is used in the WeakValueDictionary to avoid having to create
+ a function object for each key stored in the mapping. A shared
+ callback object can use the 'key' attribute of a KeyedRef instead
+ of getting a reference to the key from an enclosing scope.
+
+ """
+
+ __slots__ = "key",
+
+    def __new__(cls, ob, callback, key):
+        self = ref.__new__(cls, ob, callback)
+ self.key = key
+ return self
+
+ def __init__(self, ob, callback, key):
+ super().__init__(ob, callback)
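+
+# Illustrative sketch (hypothetical helper name): the shared removal callback
+# is handed the dying KeyedRef itself, so it can read its .key attribute
+# instead of closing over the key.
+def _demo_keyed_ref():
+    class Obj:
+        pass
+    seen = []
+    o = Obj()
+    kr = KeyedRef(o, lambda wr: seen.append(wr.key), "some-key")
+    del o                # the referent dies; the callback receives kr
+    import gc
+    gc.collect()
+    assert kr() is None and seen == ["some-key"]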
+
+
+class WeakKeyDictionary(_collections_abc.MutableMapping):
+ """ Mapping class that references keys weakly.
+
+ Entries in the dictionary will be discarded when there is no
+ longer a strong reference to the key. This can be used to
+ associate additional data with an object owned by other parts of
+ an application without adding attributes to those objects. This
+ can be especially useful with objects that override attribute
+ accesses.
+ """
+
+ def __init__(self, dict=None):
+ self.data = {}
+ def remove(k, selfref=ref(self)):
+ self = selfref()
+ if self is not None:
+ if self._iterating:
+ self._pending_removals.append(k)
+ else:
+ try:
+ del self.data[k]
+ except KeyError:
+ pass
+ self._remove = remove
+ # A list of dead weakrefs (keys to be removed)
+ self._pending_removals = []
+ self._iterating = set()
+ self._dirty_len = False
+ if dict is not None:
+ self.update(dict)
+
+ def _commit_removals(self):
+ # NOTE: We don't need to call this method before mutating the dict,
+ # because a dead weakref never compares equal to a live weakref,
+ # even if they happened to refer to equal objects.
+ # However, it means keys may already have been removed.
+ pop = self._pending_removals.pop
+ d = self.data
+ while True:
+ try:
+ key = pop()
+ except IndexError:
+ return
+
+ try:
+ del d[key]
+ except KeyError:
+ pass
+
+ def _scrub_removals(self):
+ d = self.data
+ self._pending_removals = [k for k in self._pending_removals if k in d]
+ self._dirty_len = False
+
+ def __delitem__(self, key):
+ self._dirty_len = True
+ del self.data[ref(key)]
+
+ def __getitem__(self, key):
+ return self.data[ref(key)]
+
+ def __len__(self):
+ if self._dirty_len and self._pending_removals:
+ # self._pending_removals may still contain keys which were
+ # explicitly removed, we have to scrub them (see issue #21173).
+ self._scrub_removals()
+ return len(self.data) - len(self._pending_removals)
+
+ def __repr__(self):
+ return "<%s at %#x>" % (self.__class__.__name__, id(self))
+
+ def __setitem__(self, key, value):
+ self.data[ref(key, self._remove)] = value
+
+ def copy(self):
+ new = WeakKeyDictionary()
+ with _IterationGuard(self):
+ for key, value in self.data.items():
+ o = key()
+ if o is not None:
+ new[o] = value
+ return new
+
+ __copy__ = copy
+
+ def __deepcopy__(self, memo):
+ from copy import deepcopy
+ new = self.__class__()
+ with _IterationGuard(self):
+ for key, value in self.data.items():
+ o = key()
+ if o is not None:
+ new[o] = deepcopy(value, memo)
+ return new
+
+ def get(self, key, default=None):
+        return self.data.get(ref(key), default)
+
+ def __contains__(self, key):
+ try:
+ wr = ref(key)
+ except TypeError:
+ return False
+ return wr in self.data
+
+ def items(self):
+ with _IterationGuard(self):
+ for wr, value in self.data.items():
+ key = wr()
+ if key is not None:
+ yield key, value
+
+ def keys(self):
+ with _IterationGuard(self):
+ for wr in self.data:
+ obj = wr()
+ if obj is not None:
+ yield obj
+
+ __iter__ = keys
+
+ def values(self):
+ with _IterationGuard(self):
+ for wr, value in self.data.items():
+ if wr() is not None:
+ yield value
+
+ def keyrefs(self):
+ """Return a list of weak references to the keys.
+
+ The references are not guaranteed to be 'live' at the time
+ they are used, so the result of calling the references needs
+ to be checked before being used. This can be used to avoid
+ creating references that will cause the garbage collector to
+ keep the keys around longer than needed.
+
+ """
+ return list(self.data)
+
+ def popitem(self):
+ self._dirty_len = True
+ while True:
+ key, value = self.data.popitem()
+ o = key()
+ if o is not None:
+ return o, value
+
+ def pop(self, key, *args):
+ self._dirty_len = True
+ return self.data.pop(ref(key), *args)
+
+ def setdefault(self, key, default=None):
+        return self.data.setdefault(ref(key, self._remove), default)
+
+ def update(self, dict=None, /, **kwargs):
+ d = self.data
+ if dict is not None:
+ if not hasattr(dict, "items"):
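+                # The parameter shadows the builtin "dict", so type({}) is
+                # used to reach the real dict constructor.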
+ dict = type({})(dict)
+ for key, value in dict.items():
+ d[ref(key, self._remove)] = value
+ if len(kwargs):
+ self.update(kwargs)
+
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+ def __or__(self, other):
+ if isinstance(other, _collections_abc.Mapping):
+ c = self.copy()
+ c.update(other)
+ return c
+ return NotImplemented
+
+ def __ror__(self, other):
+ if isinstance(other, _collections_abc.Mapping):
+ c = self.__class__()
+ c.update(other)
+ c.update(self)
+ return c
+ return NotImplemented
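+
+# Illustrative sketch (hypothetical helper name): WeakKeyDictionary as a side
+# table -- data is attached to an object without adding attributes to it, and
+# disappears together with the object.
+def _demo_weak_key_dict():
+    class Node:
+        pass
+    extra = WeakKeyDictionary()
+    n = Node()
+    extra[n] = {"color": "red"}        # keyed on ref(n), not on n itself
+    assert extra[n]["color"] == "red"
+    del n                              # last strong reference to the key
+    import gc
+    gc.collect()
+    assert len(extra) == 0             # the entry was discarded with the key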
+
+
+class finalize:
+ """Class for finalization of weakrefable objects
+
+ finalize(obj, func, *args, **kwargs) returns a callable finalizer
+ object which will be called when obj is garbage collected. The
+    first time the finalizer is called it evaluates func(*args, **kwargs)
+ and returns the result. After this the finalizer is dead, and
+ calling it just returns None.
+
+ When the program exits any remaining finalizers for which the
+ atexit attribute is true will be run in reverse order of creation.
+ By default atexit is true.
+ """
+
+ # Finalizer objects don't have any state of their own. They are
+ # just used as keys to lookup _Info objects in the registry. This
+ # ensures that they cannot be part of a ref-cycle.
+
+ __slots__ = ()
+ _registry = {}
+ _shutdown = False
+ _index_iter = itertools.count()
+ _dirty = False
+ _registered_with_atexit = False
+
+ class _Info:
+ __slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
+
+ def __init__(self, obj, func, /, *args, **kwargs):
+ if not self._registered_with_atexit:
+ # We may register the exit function more than once because
+ # of a thread race, but that is harmless
+ import atexit
+ atexit.register(self._exitfunc)
+ finalize._registered_with_atexit = True
+ info = self._Info()
+ info.weakref = ref(obj, self)
+ info.func = func
+ info.args = args
+ info.kwargs = kwargs or None
+ info.atexit = True
+ info.index = next(self._index_iter)
+ self._registry[self] = info
+ finalize._dirty = True
+
+ def __call__(self, _=None):
+ """If alive then mark as dead and return func(*args, **kwargs);
+ otherwise return None"""
+ info = self._registry.pop(self, None)
+ if info and not self._shutdown:
+ return info.func(*info.args, **(info.kwargs or {}))
+
+ def detach(self):
+ """If alive then mark as dead and return (obj, func, args, kwargs);
+ otherwise return None"""
+ info = self._registry.get(self)
+ obj = info and info.weakref()
+ if obj is not None and self._registry.pop(self, None):
+ return (obj, info.func, info.args, info.kwargs or {})
+
+ def peek(self):
+ """If alive then return (obj, func, args, kwargs);
+ otherwise return None"""
+ info = self._registry.get(self)
+ obj = info and info.weakref()
+ if obj is not None:
+ return (obj, info.func, info.args, info.kwargs or {})
+
+ @property
+ def alive(self):
+ """Whether finalizer is alive"""
+ return self in self._registry
+
+ @property
+ def atexit(self):
+ """Whether finalizer should be called at exit"""
+ info = self._registry.get(self)
+ return bool(info) and info.atexit
+
+ @atexit.setter
+ def atexit(self, value):
+ info = self._registry.get(self)
+ if info:
+ info.atexit = bool(value)
+
+ def __repr__(self):
+ info = self._registry.get(self)
+ obj = info and info.weakref()
+ if obj is None:
+ return '<%s object at %#x; dead>' % (type(self).__name__, id(self))
+ else:
+ return '<%s object at %#x; for %r at %#x>' % \
+ (type(self).__name__, id(self), type(obj).__name__, id(obj))
+
+ @classmethod
+ def _select_for_exit(cls):
+ # Return live finalizers marked for exit, oldest first
+ L = [(f,i) for (f,i) in cls._registry.items() if i.atexit]
+        L.sort(key=lambda item: item[1].index)
+ return [f for (f,i) in L]
+
+ @classmethod
+ def _exitfunc(cls):
+ # At shutdown invoke finalizers for which atexit is true.
+ # This is called once all other non-daemonic threads have been
+ # joined.
+ reenable_gc = False
+ try:
+ if cls._registry:
+ import gc
+ if gc.isenabled():
+ reenable_gc = True
+ gc.disable()
+ pending = None
+ while True:
+ if pending is None or finalize._dirty:
+ pending = cls._select_for_exit()
+ finalize._dirty = False
+ if not pending:
+ break
+ f = pending.pop()
+ try:
+ # gc is disabled, so (assuming no daemonic
+ # threads) the following is the only line in
+ # this function which might trigger creation
+ # of a new finalizer
+ f()
+ except Exception:
+ sys.excepthook(*sys.exc_info())
+ assert f not in cls._registry
+ finally:
+ # prevent any more finalizers from executing during shutdown
+ finalize._shutdown = True
+ if reenable_gc:
+ gc.enable()
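+
+
+# Illustrative sketch (hypothetical helper name): a finalize object runs its
+# callback exactly once, when the referent is collected, and is dead after.
+def _demo_finalize():
+    class Res:
+        pass
+    calls = []
+    r = Res()
+    fin = finalize(r, calls.append, "cleaned up")
+    assert fin.alive
+    del r                  # collection triggers the finalizer
+    import gc
+    gc.collect()
+    assert calls == ["cleaned up"]
+    assert not fin.alive and fin() is None   # dead finalizers return None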
diff --git a/infer_4_37_2/lib/python3.10/zipapp.py b/infer_4_37_2/lib/python3.10/zipapp.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce77632516c646ecc11e231a1d855e3a48670a71
--- /dev/null
+++ b/infer_4_37_2/lib/python3.10/zipapp.py
@@ -0,0 +1,206 @@
+import contextlib
+import os
+import pathlib
+import shutil
+import stat
+import sys
+import zipfile
+
+__all__ = ['ZipAppError', 'create_archive', 'get_interpreter']
+
+
+# The __main__.py used if the user specifies "-m module:fn".
+# Note that this will always be written as UTF-8 (module and
+# function names can be non-ASCII in Python 3).
+# We add a coding cookie even though UTF-8 is the default in Python 3
+# because the resulting archive may be intended to be run under Python 2.
+MAIN_TEMPLATE = """\
+# -*- coding: utf-8 -*-
+import {module}
+{module}.{fn}()
+"""
+
+
+# The Windows launcher defaults to UTF-8 when parsing shebang lines if the
+# file has no BOM. So use UTF-8 on Windows.
+# On Unix, use the filesystem encoding.
+if sys.platform.startswith('win'):
+ shebang_encoding = 'utf-8'
+else:
+ shebang_encoding = sys.getfilesystemencoding()
+
+
+class ZipAppError(ValueError):
+ pass
+
+
+@contextlib.contextmanager
+def _maybe_open(archive, mode):
+ if isinstance(archive, (str, os.PathLike)):
+ with open(archive, mode) as f:
+ yield f
+ else:
+ yield archive
+
+
+def _write_file_prefix(f, interpreter):
+ """Write a shebang line."""
+ if interpreter:
+ shebang = b'#!' + interpreter.encode(shebang_encoding) + b'\n'
+ f.write(shebang)
+
+
+def _copy_archive(archive, new_archive, interpreter=None):
+ """Copy an application archive, modifying the shebang line."""
+ with _maybe_open(archive, 'rb') as src:
+ # Skip the shebang line from the source.
+ # Read 2 bytes of the source and check if they are #!.
+ first_2 = src.read(2)
+ if first_2 == b'#!':
+ # Discard the initial 2 bytes and the rest of the shebang line.
+ first_2 = b''
+ src.readline()
+
+ with _maybe_open(new_archive, 'wb') as dst:
+ _write_file_prefix(dst, interpreter)
+ # If there was no shebang, "first_2" contains the first 2 bytes
+ # of the source file, so write them before copying the rest
+ # of the file.
+ dst.write(first_2)
+ shutil.copyfileobj(src, dst)
+
+ if interpreter and isinstance(new_archive, str):
+ os.chmod(new_archive, os.stat(new_archive).st_mode | stat.S_IEXEC)
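+
+# Illustrative usage of _copy_archive (hypothetical paths):
+#   _copy_archive("old.pyz", "new.pyz", "/usr/bin/env python3")
+# Only the shebang prefix is rewritten; the zip payload is copied through
+# unchanged by shutil.copyfileobj.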
+
+
+def create_archive(source, target=None, interpreter=None, main=None,
+ filter=None, compressed=False):
+ """Create an application archive from SOURCE.
+
+ The SOURCE can be the name of a directory, or a filename or a file-like
+ object referring to an existing archive.
+
+ The content of SOURCE is packed into an application archive in TARGET,
+ which can be a filename or a file-like object. If SOURCE is a directory,
+ TARGET can be omitted and will default to the name of SOURCE with .pyz
+ appended.
+
+ The created application archive will have a shebang line specifying
+ that it should run with INTERPRETER (there will be no shebang line if
+ INTERPRETER is None), and a __main__.py which runs MAIN (if MAIN is
+ not specified, an existing __main__.py will be used). It is an error
+ to specify MAIN for anything other than a directory source with no
+ __main__.py, and it is an error to omit MAIN if the directory has no
+ __main__.py.
+ """
+ # Are we copying an existing archive?
+ source_is_file = False
+ if hasattr(source, 'read') and hasattr(source, 'readline'):
+ source_is_file = True
+ else:
+ source = pathlib.Path(source)
+ if source.is_file():
+ source_is_file = True
+
+ if source_is_file:
+ _copy_archive(source, target, interpreter)
+ return
+
+ # We are creating a new archive from a directory.
+ if not source.exists():
+ raise ZipAppError("Source does not exist")
+ has_main = (source / '__main__.py').is_file()
+ if main and has_main:
+ raise ZipAppError(
+ "Cannot specify entry point if the source has __main__.py")
+ if not (main or has_main):
+ raise ZipAppError("Archive has no entry point")
+
+ main_py = None
+ if main:
+ # Check that main has the right format.
+ mod, sep, fn = main.partition(':')
+ mod_ok = all(part.isidentifier() for part in mod.split('.'))
+ fn_ok = all(part.isidentifier() for part in fn.split('.'))
+ if not (sep == ':' and mod_ok and fn_ok):
+ raise ZipAppError("Invalid entry point: " + main)
+ main_py = MAIN_TEMPLATE.format(module=mod, fn=fn)
+
+ if target is None:
+ target = source.with_suffix('.pyz')
+ elif not hasattr(target, 'write'):
+ target = pathlib.Path(target)
+
+ with _maybe_open(target, 'wb') as fd:
+ _write_file_prefix(fd, interpreter)
+ compression = (zipfile.ZIP_DEFLATED if compressed else
+ zipfile.ZIP_STORED)
+ with zipfile.ZipFile(fd, 'w', compression=compression) as z:
+ for child in source.rglob('*'):
+ arcname = child.relative_to(source)
+ if filter is None or filter(arcname):
+ z.write(child, arcname.as_posix())
+ if main_py:
+ z.writestr('__main__.py', main_py.encode('utf-8'))
+
+ if interpreter and not hasattr(target, 'write'):
+ target.chmod(target.stat().st_mode | stat.S_IEXEC)
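+
+
+# Illustrative sketch (hypothetical names and paths): packing a source tree
+# into a runnable .pyz archive. "myapp" must have no __main__.py of its own
+# for the main= argument to be valid.
+def _demo_create_archive():
+    create_archive(
+        "myapp",                             # source directory
+        target="myapp.pyz",                  # default would be "myapp.pyz" anyway
+        interpreter="/usr/bin/env python3",  # becomes the shebang line
+        main="myapp.cli:main",               # generates a __main__.py calling main()
+        compressed=True,                     # ZIP_DEFLATED instead of ZIP_STORED
+    )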
+
+
+def get_interpreter(archive):
+ with _maybe_open(archive, 'rb') as f:
+ if f.read(2) == b'#!':
+ return f.readline().strip().decode(shebang_encoding)
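+
+# Illustrative (hypothetical path): get_interpreter("myapp.pyz") returns the
+# shebang interpreter string, or None if the archive has no shebang line.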
+
+
+def main(args=None):
+ """Run the zipapp command line interface.
+
+ The ARGS parameter lets you specify the argument list directly.
+ Omitting ARGS (or setting it to None) works as for argparse, using
+ sys.argv[1:] as the argument list.
+ """
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--output', '-o', default=None,
+ help="The name of the output archive. "
+ "Required if SOURCE is an archive.")
+ parser.add_argument('--python', '-p', default=None,
+ help="The name of the Python interpreter to use "
+ "(default: no shebang line).")
+ parser.add_argument('--main', '-m', default=None,
+ help="The main function of the application "
+ "(default: use an existing __main__.py).")
+ parser.add_argument('--compress', '-c', action='store_true',
+ help="Compress files with the deflate method. "
+ "Files are stored uncompressed by default.")
+ parser.add_argument('--info', default=False, action='store_true',
+ help="Display the interpreter from the archive.")
+ parser.add_argument('source',
+ help="Source directory (or existing archive).")
+
+ args = parser.parse_args(args)
+
+ # Handle `python -m zipapp archive.pyz --info`.
+ if args.info:
+ if not os.path.isfile(args.source):
+ raise SystemExit("Can only get info for an archive file")
+ interpreter = get_interpreter(args.source)
+ print("Interpreter: {}".format(interpreter or ""))
+ sys.exit(0)
+
+ if os.path.isfile(args.source):
+ if args.output is None or (os.path.exists(args.output) and
+ os.path.samefile(args.source, args.output)):
+ raise SystemExit("In-place editing of archives is not supported")
+ if args.main:
+ raise SystemExit("Cannot change the main function when copying")
+
+ create_archive(args.source, args.output,
+ interpreter=args.python, main=args.main,
+ compressed=args.compress)
+
+
+if __name__ == '__main__':
+ main()
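+
+# Illustrative command lines (hypothetical names):
+#   python -m zipapp myapp -m myapp.cli:main -p "/usr/bin/env python3" -o myapp.pyz
+#   python -m zipapp myapp.pyz --info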