diff --git a/.gitattributes b/.gitattributes index a0a8d1a783c06183d559c61423dedac2df38775c..46af1c57ea14d704215538c8152798bdda710c92 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1038,3 +1038,4 @@ emu3/lib/libpython3.10.so filter=lfs diff=lfs merge=lfs -text emu3/lib/python3.10/site-packages/pandas/core/__pycache__/series.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text infer_4_33_0/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_constructors.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text infer_4_33_0/lib/python3.10/site-packages/tokenizers.libs/libk5crypto-b1f99d5c.so.3.1 filter=lfs diff=lfs merge=lfs -text +infer_4_33_0/lib/python3.10/site-packages/opencv_python.libs/libswresample-d02fa90a.so.4.7.100 filter=lfs diff=lfs merge=lfs -text diff --git a/emu3/lib/python3.10/site-packages/pandas/api/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15b21aa86ebb1539a0e7f10db2f4cf9cebbf5771 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/api/extensions/__init__.py b/emu3/lib/python3.10/site-packages/pandas/api/extensions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea5f1ba926899f9d11e34e70181ed77cae7ead1d --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/api/extensions/__init__.py @@ -0,0 +1,33 @@ +""" +Public API for extending pandas objects. +""" + +from pandas._libs.lib import no_default + +from pandas.core.dtypes.base import ( + ExtensionDtype, + register_extension_dtype, +) + +from pandas.core.accessor import ( + register_dataframe_accessor, + register_index_accessor, + register_series_accessor, +) +from pandas.core.algorithms import take +from pandas.core.arrays import ( + ExtensionArray, + ExtensionScalarOpsMixin, +) + +__all__ = [ + "no_default", + "ExtensionDtype", + "register_extension_dtype", + "register_dataframe_accessor", + "register_index_accessor", + "register_series_accessor", + "take", + "ExtensionArray", + "ExtensionScalarOpsMixin", +] diff --git a/emu3/lib/python3.10/site-packages/pandas/api/extensions/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/api/extensions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9462bb7947da1dd0591dd9e589026e20674b8eff Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/api/extensions/__pycache__/__init__.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/api/indexers/__init__.py b/emu3/lib/python3.10/site-packages/pandas/api/indexers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..78357f11dc3b79f13490b91c69ef5457fbfa9768 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/api/indexers/__init__.py @@ -0,0 +1,17 @@ +""" +Public API for Rolling Window Indexers. 
+""" + +from pandas.core.indexers import check_array_indexer +from pandas.core.indexers.objects import ( + BaseIndexer, + FixedForwardWindowIndexer, + VariableOffsetWindowIndexer, +) + +__all__ = [ + "check_array_indexer", + "BaseIndexer", + "FixedForwardWindowIndexer", + "VariableOffsetWindowIndexer", +] diff --git a/emu3/lib/python3.10/site-packages/pandas/api/indexers/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/api/indexers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c37e89987f6d5a8eef4f9ca09834a763010b6fbd Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/api/indexers/__pycache__/__init__.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/api/interchange/__init__.py b/emu3/lib/python3.10/site-packages/pandas/api/interchange/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2f3a73bc46b3109c3c13e1a3468a69aed2ffb2e8 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/api/interchange/__init__.py @@ -0,0 +1,8 @@ +""" +Public API for DataFrame interchange protocol. +""" + +from pandas.core.interchange.dataframe_protocol import DataFrame +from pandas.core.interchange.from_dataframe import from_dataframe + +__all__ = ["from_dataframe", "DataFrame"] diff --git a/emu3/lib/python3.10/site-packages/pandas/api/typing/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/api/typing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec5792e0881dbb1a01b57f139d3004d93a638c16 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/api/typing/__pycache__/__init__.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_cov_corr.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_cov_corr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..629b64092e6dbe1c09b7ca66d6f798630638638e Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_cov_corr.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_is_unique.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_is_unique.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22935d22e0324124f90667ce3c1870b5cc9327f6 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_is_unique.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_nunique.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_nunique.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82c8790b8c89ed65695bfd741c225d70caa97f9f Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_nunique.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_pct_change.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_pct_change.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36af37c356798af14925c27b9b1b5a00979bef8f Binary files /dev/null 
and b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_pct_change.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_rename.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_rename.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90bbc34e502d3f12551a71779d86a86c82e8177b Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_rename.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_replace.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_replace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c986bb7218f55713f500fe7fefa6fe428e5fb0a Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_replace.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_round.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_round.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a9c41e1504b202d0984c4f162aee9eca8ce5d3a Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_round.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_sort_values.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_sort_values.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c38a0515872609ddc1a09de4465cc0de32a9dd83 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_sort_values.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_to_numpy.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_to_numpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea3876f68a4f88fd59f0d9408b685fd722456934 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/__pycache__/test_to_numpy.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/test_interpolate.py b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/test_interpolate.py new file mode 100644 index 0000000000000000000000000000000000000000..d854f0b7877595fba5ac0050a281aa3708240b0e --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/tests/series/methods/test_interpolate.py @@ -0,0 +1,868 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Index, + MultiIndex, + Series, + date_range, + isna, +) +import pandas._testing as tm + + +@pytest.fixture( + params=[ + "linear", + "index", + "values", + "nearest", + "slinear", + "zero", + "quadratic", + "cubic", + "barycentric", + "krogh", + "polynomial", + "spline", + "piecewise_polynomial", + "from_derivatives", + "pchip", + "akima", + "cubicspline", + ] +) +def nontemporal_method(request): + """Fixture that returns a (method name, required kwargs) pair. 
+ + This fixture does not include method 'time' as a parameterization; that + method requires a Series with a DatetimeIndex, and is generally tested + separately from these non-temporal methods. + """ + method = request.param + kwargs = {"order": 1} if method in ("spline", "polynomial") else {} + return method, kwargs + + +@pytest.fixture( + params=[ + "linear", + "slinear", + "zero", + "quadratic", + "cubic", + "barycentric", + "krogh", + "polynomial", + "spline", + "piecewise_polynomial", + "from_derivatives", + "pchip", + "akima", + "cubicspline", + ] +) +def interp_methods_ind(request): + """Fixture that returns a (method name, required kwargs) pair to + be tested for various Index types. + + This fixture does not include the methods 'time', 'index', 'nearest', + and 'values' as parameterizations. + """ + method = request.param + kwargs = {"order": 1} if method in ("spline", "polynomial") else {} + return method, kwargs + + +class TestSeriesInterpolateData: + @pytest.mark.xfail(reason="EA.fillna does not handle 'linear' method") + def test_interpolate_period_values(self): + orig = Series(date_range("2012-01-01", periods=5)) + ser = orig.copy() + ser[2] = pd.NaT + + # period cast + ser_per = ser.dt.to_period("D") + res_per = ser_per.interpolate() + expected_per = orig.dt.to_period("D") + tm.assert_series_equal(res_per, expected_per) + + def test_interpolate(self, datetime_series): + ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index) + + ts_copy = ts.copy() + ts_copy[5:10] = np.nan + + linear_interp = ts_copy.interpolate(method="linear") + tm.assert_series_equal(linear_interp, ts) + + ord_ts = Series( + [d.toordinal() for d in datetime_series.index], index=datetime_series.index + ).astype(float) + + ord_ts_copy = ord_ts.copy() + ord_ts_copy[5:10] = np.nan + + time_interp = ord_ts_copy.interpolate(method="time") + tm.assert_series_equal(time_interp, ord_ts) + + def test_interpolate_time_raises_for_non_timeseries(self): + # When method='time' is used on a non-TimeSeries that contains a null + # value, a ValueError should be raised. 
+ non_ts = Series([0, 1, 2, np.nan]) + msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex" + with pytest.raises(ValueError, match=msg): + non_ts.interpolate(method="time") + + def test_interpolate_cubicspline(self): + pytest.importorskip("scipy") + ser = Series([10, 11, 12, 13]) + + expected = Series( + [11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00], + index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]), + ) + # interpolate at new_index + new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype( + float + ) + result = ser.reindex(new_index).interpolate(method="cubicspline").loc[1:3] + tm.assert_series_equal(result, expected) + + def test_interpolate_pchip(self): + pytest.importorskip("scipy") + ser = Series(np.sort(np.random.default_rng(2).uniform(size=100))) + + # interpolate at new_index + new_index = ser.index.union( + Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75]) + ).astype(float) + interp_s = ser.reindex(new_index).interpolate(method="pchip") + # does not blow up, GH5977 + interp_s.loc[49:51] + + def test_interpolate_akima(self): + pytest.importorskip("scipy") + ser = Series([10, 11, 12, 13]) + + # interpolate at new_index where `der` is zero + expected = Series( + [11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00], + index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]), + ) + new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype( + float + ) + interp_s = ser.reindex(new_index).interpolate(method="akima") + tm.assert_series_equal(interp_s.loc[1:3], expected) + + # interpolate at new_index where `der` is a non-zero int + expected = Series( + [11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0], + index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]), + ) + new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype( + float + ) + interp_s = ser.reindex(new_index).interpolate(method="akima", der=1) + tm.assert_series_equal(interp_s.loc[1:3], expected) + + def test_interpolate_piecewise_polynomial(self): + pytest.importorskip("scipy") + ser = Series([10, 11, 12, 13]) + + expected = Series( + [11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00], + index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]), + ) + # interpolate at new_index + new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype( + float + ) + interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial") + tm.assert_series_equal(interp_s.loc[1:3], expected) + + def test_interpolate_from_derivatives(self): + pytest.importorskip("scipy") + ser = Series([10, 11, 12, 13]) + + expected = Series( + [11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00], + index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]), + ) + # interpolate at new_index + new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype( + float + ) + interp_s = ser.reindex(new_index).interpolate(method="from_derivatives") + tm.assert_series_equal(interp_s.loc[1:3], expected) + + @pytest.mark.parametrize( + "kwargs", + [ + {}, + pytest.param( + {"method": "polynomial", "order": 1}, marks=td.skip_if_no("scipy") + ), + ], + ) + def test_interpolate_corners(self, kwargs): + s = Series([np.nan, np.nan]) + tm.assert_series_equal(s.interpolate(**kwargs), s) + + s = Series([], dtype=object).interpolate() + tm.assert_series_equal(s.interpolate(**kwargs), s) + + def test_interpolate_index_values(self): + s = Series(np.nan, 
index=np.sort(np.random.default_rng(2).random(30))) + s.loc[::3] = np.random.default_rng(2).standard_normal(10) + + vals = s.index.values.astype(float) + + result = s.interpolate(method="index") + + expected = s.copy() + bad = isna(expected.values) + good = ~bad + expected = Series( + np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad] + ) + + tm.assert_series_equal(result[bad], expected) + + # 'values' is synonymous with 'index' for the method kwarg + other_result = s.interpolate(method="values") + + tm.assert_series_equal(other_result, result) + tm.assert_series_equal(other_result[bad], expected) + + def test_interpolate_non_ts(self): + s = Series([1, 3, np.nan, np.nan, np.nan, 11]) + msg = ( + "time-weighted interpolation only works on Series or DataFrames " + "with a DatetimeIndex" + ) + with pytest.raises(ValueError, match=msg): + s.interpolate(method="time") + + @pytest.mark.parametrize( + "kwargs", + [ + {}, + pytest.param( + {"method": "polynomial", "order": 1}, marks=td.skip_if_no("scipy") + ), + ], + ) + def test_nan_interpolate(self, kwargs): + s = Series([0, 1, np.nan, 3]) + result = s.interpolate(**kwargs) + expected = Series([0.0, 1.0, 2.0, 3.0]) + tm.assert_series_equal(result, expected) + + def test_nan_irregular_index(self): + s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9]) + result = s.interpolate() + expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9]) + tm.assert_series_equal(result, expected) + + def test_nan_str_index(self): + s = Series([0, 1, 2, np.nan], index=list("abcd")) + result = s.interpolate() + expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd")) + tm.assert_series_equal(result, expected) + + def test_interp_quad(self): + pytest.importorskip("scipy") + sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4]) + result = sq.interpolate(method="quadratic") + expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4]) + tm.assert_series_equal(result, expected) + + def test_interp_scipy_basic(self): + pytest.importorskip("scipy") + s = Series([1, 3, np.nan, 12, np.nan, 25]) + # slinear + expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0]) + result = s.interpolate(method="slinear") + tm.assert_series_equal(result, expected) + + msg = "The 'downcast' keyword in Series.interpolate is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.interpolate(method="slinear", downcast="infer") + tm.assert_series_equal(result, expected) + # nearest + expected = Series([1, 3, 3, 12, 12, 25]) + result = s.interpolate(method="nearest") + tm.assert_series_equal(result, expected.astype("float")) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.interpolate(method="nearest", downcast="infer") + tm.assert_series_equal(result, expected) + # zero + expected = Series([1, 3, 3, 12, 12, 25]) + result = s.interpolate(method="zero") + tm.assert_series_equal(result, expected.astype("float")) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.interpolate(method="zero", downcast="infer") + tm.assert_series_equal(result, expected) + # quadratic + # GH #15662. 
+ expected = Series([1, 3.0, 6.823529, 12.0, 18.058824, 25.0]) + result = s.interpolate(method="quadratic") + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.interpolate(method="quadratic", downcast="infer") + tm.assert_series_equal(result, expected) + # cubic + expected = Series([1.0, 3.0, 6.8, 12.0, 18.2, 25.0]) + result = s.interpolate(method="cubic") + tm.assert_series_equal(result, expected) + + def test_interp_limit(self): + s = Series([1, 3, np.nan, np.nan, np.nan, 11]) + + expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0]) + result = s.interpolate(method="linear", limit=2) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("limit", [-1, 0]) + def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit): + # GH 9217: make sure limit is greater than zero. + s = Series([1, 2, np.nan, 4]) + method, kwargs = nontemporal_method + with pytest.raises(ValueError, match="Limit must be greater than 0"): + s.interpolate(limit=limit, method=method, **kwargs) + + def test_interpolate_invalid_float_limit(self, nontemporal_method): + # GH 9217: make sure limit is an integer. + s = Series([1, 2, np.nan, 4]) + method, kwargs = nontemporal_method + limit = 2.0 + with pytest.raises(ValueError, match="Limit must be an integer"): + s.interpolate(limit=limit, method=method, **kwargs) + + @pytest.mark.parametrize("invalid_method", [None, "nonexistent_method"]) + def test_interp_invalid_method(self, invalid_method): + s = Series([1, 3, np.nan, 12, np.nan, 25]) + + msg = f"method must be one of.* Got '{invalid_method}' instead" + if invalid_method is None: + msg = "'method' should be a string, not None" + with pytest.raises(ValueError, match=msg): + s.interpolate(method=invalid_method) + + # When an invalid method and invalid limit (such as -1) are + # provided, the error message reflects the invalid method. + with pytest.raises(ValueError, match=msg): + s.interpolate(method=invalid_method, limit=-1) + + def test_interp_invalid_method_and_value(self): + # GH#36624 + ser = Series([1, 3, np.nan, 12, np.nan, 25]) + + msg = "'fill_value' is not a valid keyword for Series.interpolate" + msg2 = "Series.interpolate with method=pad" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg2): + ser.interpolate(fill_value=3, method="pad") + + def test_interp_limit_forward(self): + s = Series([1, 3, np.nan, np.nan, np.nan, 11]) + + # Provide 'forward' (the default) explicitly here. 
+ expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0]) + + result = s.interpolate(method="linear", limit=2, limit_direction="forward") + tm.assert_series_equal(result, expected) + + result = s.interpolate(method="linear", limit=2, limit_direction="FORWARD") + tm.assert_series_equal(result, expected) + + def test_interp_unlimited(self): + # these tests are for issue #16282; the default limit=None is unlimited + s = Series([np.nan, 1.0, 3.0, np.nan, np.nan, np.nan, 11.0, np.nan]) + expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0]) + result = s.interpolate(method="linear", limit_direction="both") + tm.assert_series_equal(result, expected) + + expected = Series([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0]) + result = s.interpolate(method="linear", limit_direction="forward") + tm.assert_series_equal(result, expected) + + expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, np.nan]) + result = s.interpolate(method="linear", limit_direction="backward") + tm.assert_series_equal(result, expected) + + def test_interp_limit_bad_direction(self): + s = Series([1, 3, np.nan, np.nan, np.nan, 11]) + + msg = ( + r"Invalid limit_direction: expecting one of \['forward', " + r"'backward', 'both'\], got 'abc'" + ) + with pytest.raises(ValueError, match=msg): + s.interpolate(method="linear", limit=2, limit_direction="abc") + + # raises an error even if no limit is specified. + with pytest.raises(ValueError, match=msg): + s.interpolate(method="linear", limit_direction="abc") + + # limit_area introduced GH #16284 + def test_interp_limit_area(self): + # These tests are for issue #9218 -- fill NaNs in both directions. + s = Series([np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan]) + + expected = Series([np.nan, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, np.nan, np.nan]) + result = s.interpolate(method="linear", limit_area="inside") + tm.assert_series_equal(result, expected) + + expected = Series( + [np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan] + ) + result = s.interpolate(method="linear", limit_area="inside", limit=1) + tm.assert_series_equal(result, expected) + + expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan]) + result = s.interpolate( + method="linear", limit_area="inside", limit_direction="both", limit=1 + ) + tm.assert_series_equal(result, expected) + + expected = Series([np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0]) + result = s.interpolate(method="linear", limit_area="outside") + tm.assert_series_equal(result, expected) + + expected = Series( + [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan] + ) + result = s.interpolate(method="linear", limit_area="outside", limit=1) + tm.assert_series_equal(result, expected) + + expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]) + result = s.interpolate( + method="linear", limit_area="outside", limit_direction="both", limit=1 + ) + tm.assert_series_equal(result, expected) + + expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan]) + result = s.interpolate( + method="linear", limit_area="outside", limit_direction="backward" + ) + tm.assert_series_equal(result, expected) + + # raises an error even if limit type is wrong. 
+ msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc" + with pytest.raises(ValueError, match=msg): + s.interpolate(method="linear", limit_area="abc") + + @pytest.mark.parametrize( + "method, limit_direction, expected", + [ + ("pad", "backward", "forward"), + ("ffill", "backward", "forward"), + ("backfill", "forward", "backward"), + ("bfill", "forward", "backward"), + ("pad", "both", "forward"), + ("ffill", "both", "forward"), + ("backfill", "both", "backward"), + ("bfill", "both", "backward"), + ], + ) + def test_interp_limit_direction_raises(self, method, limit_direction, expected): + # https://github.com/pandas-dev/pandas/pull/34746 + s = Series([1, 2, 3]) + + msg = f"`limit_direction` must be '{expected}' for method `{method}`" + msg2 = "Series.interpolate with method=" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg2): + s.interpolate(method=method, limit_direction=limit_direction) + + @pytest.mark.parametrize( + "data, expected_data, kwargs", + ( + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, 3.0, 3.0, 3.0, 7.0, np.nan, np.nan], + {"method": "pad", "limit_area": "inside"}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, 3.0, np.nan, np.nan, 7.0, np.nan, np.nan], + {"method": "pad", "limit_area": "inside", "limit": 1}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0], + {"method": "pad", "limit_area": "outside"}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan], + {"method": "pad", "limit_area": "outside", "limit": 1}, + ), + ( + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + {"method": "pad", "limit_area": "outside", "limit": 1}, + ), + ( + range(5), + range(5), + {"method": "pad", "limit_area": "outside", "limit": 1}, + ), + ), + ) + def test_interp_limit_area_with_pad(self, data, expected_data, kwargs): + # GH26796 + + s = Series(data) + expected = Series(expected_data) + msg = "Series.interpolate with method=pad" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.interpolate(**kwargs) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data, expected_data, kwargs", + ( + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, 7.0, 7.0, 7.0, 7.0, np.nan, np.nan], + {"method": "bfill", "limit_area": "inside"}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, np.nan, 3.0, np.nan, np.nan, 7.0, 7.0, np.nan, np.nan], + {"method": "bfill", "limit_area": "inside", "limit": 1}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan], + {"method": "bfill", "limit_area": "outside"}, + ), + ( + [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], + [np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan], + {"method": "bfill", "limit_area": "outside", "limit": 1}, + ), + ), + ) + def test_interp_limit_area_with_backfill(self, data, expected_data, kwargs): + # GH26796 + + s = Series(data) + expected = Series(expected_data) + msg = "Series.interpolate with method=bfill" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = 
s.interpolate(**kwargs) + tm.assert_series_equal(result, expected) + + def test_interp_limit_direction(self): + # These tests are for issue #9218 -- fill NaNs in both directions. + s = Series([1, 3, np.nan, np.nan, np.nan, 11]) + + expected = Series([1.0, 3.0, np.nan, 7.0, 9.0, 11.0]) + result = s.interpolate(method="linear", limit=2, limit_direction="backward") + tm.assert_series_equal(result, expected) + + expected = Series([1.0, 3.0, 5.0, np.nan, 9.0, 11.0]) + result = s.interpolate(method="linear", limit=1, limit_direction="both") + tm.assert_series_equal(result, expected) + + # Check that this works on a longer series of nans. + s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan]) + + expected = Series([1.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0]) + result = s.interpolate(method="linear", limit=2, limit_direction="both") + tm.assert_series_equal(result, expected) + + expected = Series( + [1.0, 3.0, 4.0, np.nan, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0] + ) + result = s.interpolate(method="linear", limit=1, limit_direction="both") + tm.assert_series_equal(result, expected) + + def test_interp_limit_to_ends(self): + # These tests are for issue #10420 -- flow back to beginning. + s = Series([np.nan, np.nan, 5, 7, 9, np.nan]) + + expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, np.nan]) + result = s.interpolate(method="linear", limit=2, limit_direction="backward") + tm.assert_series_equal(result, expected) + + expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, 9.0]) + result = s.interpolate(method="linear", limit=2, limit_direction="both") + tm.assert_series_equal(result, expected) + + def test_interp_limit_before_ends(self): + # These tests are for issue #11115 -- limit ends properly. + s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan]) + + expected = Series([np.nan, np.nan, 5.0, 7.0, 7.0, np.nan]) + result = s.interpolate(method="linear", limit=1, limit_direction="forward") + tm.assert_series_equal(result, expected) + + expected = Series([np.nan, 5.0, 5.0, 7.0, np.nan, np.nan]) + result = s.interpolate(method="linear", limit=1, limit_direction="backward") + tm.assert_series_equal(result, expected) + + expected = Series([np.nan, 5.0, 5.0, 7.0, 7.0, np.nan]) + result = s.interpolate(method="linear", limit=1, limit_direction="both") + tm.assert_series_equal(result, expected) + + def test_interp_all_good(self): + pytest.importorskip("scipy") + s = Series([1, 2, 3]) + result = s.interpolate(method="polynomial", order=1) + tm.assert_series_equal(result, s) + + # non-scipy + result = s.interpolate() + tm.assert_series_equal(result, s) + + @pytest.mark.parametrize( + "check_scipy", [False, pytest.param(True, marks=td.skip_if_no("scipy"))] + ) + def test_interp_multiIndex(self, check_scipy): + idx = MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")]) + s = Series([1, 2, np.nan], index=idx) + + expected = s.copy() + expected.loc[2] = 2 + result = s.interpolate() + tm.assert_series_equal(result, expected) + + msg = "Only `method=linear` interpolation is supported on MultiIndexes" + if check_scipy: + with pytest.raises(ValueError, match=msg): + s.interpolate(method="polynomial", order=1) + + def test_interp_nonmono_raise(self): + pytest.importorskip("scipy") + s = Series([1, np.nan, 3], index=[0, 2, 1]) + msg = "krogh interpolation requires that the index be monotonic" + with pytest.raises(ValueError, match=msg): + s.interpolate(method="krogh") + + @pytest.mark.parametrize("method", ["nearest", "pad"]) + def test_interp_datetime64(self, method, tz_naive_fixture): 
pytest.importorskip("scipy") + df = Series( + [1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture) + ) + warn = None if method == "nearest" else FutureWarning + msg = "Series.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(warn, match=msg): + result = df.interpolate(method=method) + if warn is not None: + # check the "use ffill instead" is equivalent + alt = df.ffill() + tm.assert_series_equal(result, alt) + + expected = Series( + [1.0, 1.0, 3.0], + index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture), + ) + tm.assert_series_equal(result, expected) + + def test_interp_pad_datetime64tz_values(self): + # GH#27628 missing.interpolate_2d should handle datetimetz values + dti = date_range("2015-04-05", periods=3, tz="US/Central") + ser = Series(dti) + ser[1] = pd.NaT + + msg = "Series.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.interpolate(method="pad") + # check the "use ffill instead" is equivalent + alt = ser.ffill() + tm.assert_series_equal(result, alt) + + expected = Series(dti) + expected[1] = expected[0] + tm.assert_series_equal(result, expected) + + def test_interp_limit_no_nans(self): + # GH 7173 + s = Series([1.0, 2.0, 3.0]) + result = s.interpolate(limit=1) + expected = s + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("method", ["polynomial", "spline"]) + def test_no_order(self, method): + # see GH-10633, GH-24014 + pytest.importorskip("scipy") + s = Series([0, 1, np.nan, 3]) + msg = "You must specify the order of the spline or polynomial" + with pytest.raises(ValueError, match=msg): + s.interpolate(method=method) + + @pytest.mark.parametrize("order", [-1, -1.0, 0, 0.0, np.nan]) + def test_interpolate_spline_invalid_order(self, order): + pytest.importorskip("scipy") + s = Series([0, 1, np.nan, 3]) + msg = "order needs to be specified and greater than 0" + with pytest.raises(ValueError, match=msg): + s.interpolate(method="spline", order=order) + + def test_spline(self): + pytest.importorskip("scipy") + s = Series([1, 2, np.nan, 4, 5, np.nan, 7]) + result = s.interpolate(method="spline", order=1) + expected = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]) + tm.assert_series_equal(result, expected) + + def test_spline_extrapolate(self): + pytest.importorskip("scipy") + s = Series([1, 2, 3, 4, np.nan, 6, np.nan]) + result3 = s.interpolate(method="spline", order=1, ext=3) + expected3 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0]) + tm.assert_series_equal(result3, expected3) + + result1 = s.interpolate(method="spline", order=1, ext=0) + expected1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]) + tm.assert_series_equal(result1, expected1) + + def test_spline_smooth(self): + pytest.importorskip("scipy") + s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7]) + assert ( + s.interpolate(method="spline", order=3, s=0)[5] + != s.interpolate(method="spline", order=3)[5] + ) + + def test_spline_interpolation(self): + # Explicit cast to float to avoid implicit cast when setting np.nan + pytest.importorskip("scipy") + s = Series(np.arange(10) ** 2, dtype="float") + s[np.random.default_rng(2).integers(0, 9, 3)] = np.nan + result1 = s.interpolate(method="spline", order=1) + expected1 = s.interpolate(method="spline", order=1) + tm.assert_series_equal(result1, expected1) + + def test_interp_timedelta64(self): + # GH 6424 + df = Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 3])) + result = df.interpolate(method="time") + expected = Series([1.0, 2.0, 
3.0], index=pd.to_timedelta([1, 2, 3])) + tm.assert_series_equal(result, expected) + + # test for non uniform spacing + df = Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 4])) + result = df.interpolate(method="time") + expected = Series([1.0, 1.666667, 3.0], index=pd.to_timedelta([1, 2, 4])) + tm.assert_series_equal(result, expected) + + def test_series_interpolate_method_values(self): + # GH#1646 + rng = date_range("1/1/2000", "1/20/2000", freq="D") + ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng) + + ts[::2] = np.nan + + result = ts.interpolate(method="values") + exp = ts.interpolate() + tm.assert_series_equal(result, exp) + + def test_series_interpolate_intraday(self): + # #1698 + index = date_range("1/1/2012", periods=4, freq="12D") + ts = Series([0, 12, 24, 36], index) + new_index = index.append(index + pd.DateOffset(days=1)).sort_values() + + exp = ts.reindex(new_index).interpolate(method="time") + + index = date_range("1/1/2012", periods=4, freq="12h") + ts = Series([0, 12, 24, 36], index) + new_index = index.append(index + pd.DateOffset(hours=1)).sort_values() + result = ts.reindex(new_index).interpolate(method="time") + + tm.assert_numpy_array_equal(result.values, exp.values) + + @pytest.mark.parametrize( + "ind", + [ + ["a", "b", "c", "d"], + pd.period_range(start="2019-01-01", periods=4), + pd.interval_range(start=0, end=4), + ], + ) + def test_interp_non_timedelta_index(self, interp_methods_ind, ind): + # gh 21662 + df = pd.DataFrame([0, 1, np.nan, 3], index=ind) + + method, kwargs = interp_methods_ind + if method == "pchip": + pytest.importorskip("scipy") + + if method == "linear": + result = df[0].interpolate(**kwargs) + expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind) + tm.assert_series_equal(result, expected) + else: + expected_error = ( + "Index column must be numeric or datetime type when " + f"using {method} method other than linear. " + "Try setting a numeric or datetime index column before " + "interpolating." + ) + with pytest.raises(ValueError, match=expected_error): + df[0].interpolate(method=method, **kwargs) + + def test_interpolate_timedelta_index(self, request, interp_methods_ind): + """ + Tests for non numerical index types - object, period, timedelta + Note that all methods except time, index, nearest and values + are tested here. 
+ """ + # gh 21662 + pytest.importorskip("scipy") + ind = pd.timedelta_range(start=1, periods=4) + df = pd.DataFrame([0, 1, np.nan, 3], index=ind) + + method, kwargs = interp_methods_ind + + if method in {"cubic", "zero"}: + request.applymarker( + pytest.mark.xfail( + reason=f"{method} interpolation is not supported for TimedeltaIndex" + ) + ) + result = df[0].interpolate(method=method, **kwargs) + expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "ascending, expected_values", + [(True, [1, 2, 3, 9, 10]), (False, [10, 9, 3, 2, 1])], + ) + def test_interpolate_unsorted_index(self, ascending, expected_values): + # GH 21037 + ts = Series(data=[10, 9, np.nan, 2, 1], index=[10, 9, 3, 2, 1]) + result = ts.sort_index(ascending=ascending).interpolate(method="index") + expected = Series(data=expected_values, index=expected_values, dtype=float) + tm.assert_series_equal(result, expected) + + def test_interpolate_asfreq_raises(self): + ser = Series(["a", None, "b"], dtype=object) + msg2 = "Series.interpolate with object dtype" + msg = "Invalid fill method" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg2): + ser.interpolate(method="asfreq") + + def test_interpolate_fill_value(self): + # GH#54920 + pytest.importorskip("scipy") + ser = Series([np.nan, 0, 1, np.nan, 3, np.nan]) + result = ser.interpolate(method="nearest", fill_value=0) + expected = Series([np.nan, 0, 1, 1, 3, 0]) + tm.assert_series_equal(result, expected) diff --git a/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/INSTALLER b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/METADATA b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b4ced65633f7c8ef9bf38d0c26707ff1514fc7fe --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/METADATA @@ -0,0 +1,55 @@ +Metadata-Version: 2.3 +Name: jsonschema-specifications +Version: 2024.10.1 +Summary: The JSON Schema meta-schemas and vocabularies, exposed as a Registry +Project-URL: Documentation, https://jsonschema-specifications.readthedocs.io/ +Project-URL: Homepage, https://github.com/python-jsonschema/jsonschema-specifications +Project-URL: Issues, https://github.com/python-jsonschema/jsonschema-specifications/issues/ +Project-URL: Funding, https://github.com/sponsors/Julian +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-jsonschema-specifications?utm_source=pypi-jsonschema-specifications&utm_medium=referral&utm_campaign=pypi-link +Project-URL: Source, https://github.com/python-jsonschema/jsonschema-specifications +Author-email: Julian Berman +License-File: COPYING +Keywords: data validation,json,json schema,jsonschema,validation +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: 
Programming Language :: Python +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: File Formats :: JSON +Classifier: Topic :: File Formats :: JSON :: JSON Schema +Requires-Python: >=3.9 +Requires-Dist: referencing>=0.31.0 +Description-Content-Type: text/x-rst + +============================= +``jsonschema-specifications`` +============================= + +|PyPI| |Pythons| |CI| |ReadTheDocs| + +JSON support files from the `JSON Schema Specifications `_ (metaschemas, vocabularies, etc.), packaged for runtime access from Python as a `referencing-based Schema Registry `_. + +.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema-specifications.svg + :alt: PyPI version + :target: https://pypi.org/project/jsonschema-specifications/ + +.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema-specifications.svg + :alt: Supported Python versions + :target: https://pypi.org/project/jsonschema-specifications/ + +.. |CI| image:: https://github.com/python-jsonschema/jsonschema-specifications/workflows/CI/badge.svg + :alt: Build status + :target: https://github.com/python-jsonschema/jsonschema-specifications/actions?query=workflow%3ACI + +.. |ReadTheDocs| image:: https://readthedocs.org/projects/jsonschema-specifications/badge/?version=stable&style=flat + :alt: ReadTheDocs status + :target: https://jsonschema-specifications.readthedocs.io/en/stable/ diff --git a/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/RECORD b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..0f10ac285a4f55af378fc7d94c8909d6f687535c --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/RECORD @@ -0,0 +1,34 @@ +jsonschema_specifications-2024.10.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jsonschema_specifications-2024.10.1.dist-info/METADATA,sha256=-jCfClPka5D4aDTtJ683zNiEcSHXhPBLuk9r9XWwyHI,2985 +jsonschema_specifications-2024.10.1.dist-info/RECORD,, +jsonschema_specifications-2024.10.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jsonschema_specifications-2024.10.1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87 +jsonschema_specifications-2024.10.1.dist-info/licenses/COPYING,sha256=QtzWNJX4e063x3V6-jebtVpT-Ur9el9lfZrfVyNuUVw,1057 +jsonschema_specifications/__init__.py,sha256=qoTB2DKY7qvNrGhMPH6gtmAJRLilmVQ-fFZwT6ryqw0,386 +jsonschema_specifications/__pycache__/__init__.cpython-310.pyc,, +jsonschema_specifications/__pycache__/_core.cpython-310.pyc,, +jsonschema_specifications/_core.py,sha256=tFhc1CMleJ3AJOK_bjxOpFQTdrsUClFGfFxPBU_CebM,1140 +jsonschema_specifications/schemas/draft201909/metaschema.json,sha256=e3YbPhIfCgyh6ioLjizIVrz4AWBLgmjXG6yqICvAwTs,1785 +jsonschema_specifications/schemas/draft201909/vocabularies/applicator,sha256=aJUQDplyb7sQcFhRK77D7P1LJOj9L6zuPlBe5ysNTDE,1860 +jsonschema_specifications/schemas/draft201909/vocabularies/content,sha256=m31PVaTi_bAsQwBo_f-rxzKt3OI42j8d8mkCScM1MnQ,517 
+jsonschema_specifications/schemas/draft201909/vocabularies/core,sha256=taLElX9kldClCB8ECevooU5BOayyA_x0hHH47eKvWyw,1531 +jsonschema_specifications/schemas/draft201909/vocabularies/meta-data,sha256=1H4kRd1qgicaKY2DzGxsuNSuHhXg3Fa-zTehY-zwEoY,892 +jsonschema_specifications/schemas/draft201909/vocabularies/validation,sha256=HlJsHTNac0gF_ILPV5jBK5YK19olF8Zs2lobCTWcPBw,2834 +jsonschema_specifications/schemas/draft202012/metaschema.json,sha256=Qdp29a-3zgYtJI92JGOpL3ykfk4PkFsiS6av7vkd7Q8,2452 +jsonschema_specifications/schemas/draft202012/vocabularies/applicator,sha256=xKbkFHuR_vf-ptwFjLG_k0AvdBS3ZXiosWqvHa1qrO8,1659 +jsonschema_specifications/schemas/draft202012/vocabularies/content,sha256=CDQ3R3ZOSlgUJieTz01lIFenkThjxZUNQyl-jh_axbY,519 +jsonschema_specifications/schemas/draft202012/vocabularies/core,sha256=wtEqjk3RHTNt_IOj9mOqTGnwtJs76wlP_rJbUxb0gD0,1564 +jsonschema_specifications/schemas/draft202012/vocabularies/format,sha256=UOu_55BhGoSbjMQAoJwdDg-2q1wNQ6DyIgH9NiUFa_Q,403 +jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation,sha256=q8d1rf79idIjWBcNm_k_Tr0jSVY7u-3WDwK-98gSvMA,448 +jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion,sha256=xSJCuaG7eGsmw-gset1CjDH5yW5XXc6Z5W6l_qptogw,445 +jsonschema_specifications/schemas/draft202012/vocabularies/meta-data,sha256=j3bW4U9Bubku-TO3CM3FFEyLUmhlGtEZGEhfsXVPHHY,892 +jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated,sha256=Lb-8tzmUtnCwl2SSre4f_7RsIWgnhNL1pMpWH54tDLQ,506 +jsonschema_specifications/schemas/draft202012/vocabularies/validation,sha256=cBCjHlQfMtK-ch4t40jfdcmzaHaj7TBId_wKvaHTelg,2834 +jsonschema_specifications/schemas/draft3/metaschema.json,sha256=LPdfZENvtb43Si6qJ6uLfh_WUcm0ba6mxnsC_WTiRYs,2600 +jsonschema_specifications/schemas/draft4/metaschema.json,sha256=4UidC0dV8CeTMCWR0_y48Htok6gqlPJIlfjk7fEbguI,4357 +jsonschema_specifications/schemas/draft6/metaschema.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437 +jsonschema_specifications/schemas/draft7/metaschema.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819 +jsonschema_specifications/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jsonschema_specifications/tests/__pycache__/__init__.cpython-310.pyc,, +jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-310.pyc,, +jsonschema_specifications/tests/test_jsonschema_specifications.py,sha256=WkbYRW6A6FoZ0rivShfqVLSCsAiHJ2x8TxqECJTXPTY,1106 diff --git a/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/REQUESTED b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/WHEEL b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..cdd68a497cdfa8d3f2b837225beacef711b85047 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.25.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/licenses/COPYING b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/licenses/COPYING 
new file mode 100644 index 0000000000000000000000000000000000000000..a9f853e43069b8e3f8a156a4af2b1198a004230d --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/licenses/COPYING @@ -0,0 +1,19 @@ +Copyright (c) 2022 Julian Berman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/INSTALLER b/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/RECORD b/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..c7c54e8ff37370dccc44782aca566ecdd7440fc8 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/RECORD @@ -0,0 +1,21 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cufft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cufft/__pycache__/__init__.cpython-310.pyc,, +nvidia/cufft/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cufft/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cufft/include/cudalibxt.h,sha256=9GDuRiOzJuO61zRDhIpWpF7XHp8FXSOIlHJNoIMwOZQ,4105 +nvidia/cufft/include/cufft.h,sha256=Ui7ajKuYZcP-2bm9mpH96YN1igLKeDLgrttyc4jMQJE,12570 +nvidia/cufft/include/cufftXt.h,sha256=LfRdibvAlaNQ35vYqI4n8YcMpPYROrIjpZu2L0tISi4,11463 +nvidia/cufft/include/cufftw.h,sha256=DBrJQf-dnCWD-OYgdhnEzn8OiAX0U3xdteEaNdhs7mU,19412 +nvidia/cufft/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cufft/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cufft/lib/libcufft.so.10,sha256=ylxvgdWEkGtNMrmErYcE3WW_db2rQzTtIs5-73UBqVo,279161544 +nvidia/cufft/lib/libcufftw.so.10,sha256=GlkqWy81mpB3VQ7h_a3VjrLPnMC_q4_jl6N0-5SdoUM,1618440 +nvidia_cufft_cu11-10.9.0.58.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cufft_cu11-10.9.0.58.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 
+nvidia_cufft_cu11-10.9.0.58.dist-info/METADATA,sha256=8ZTc0J0I8IQvk3daH0k2WK62JYpm2a9XDaL3gUzZXA8,1506 +nvidia_cufft_cu11-10.9.0.58.dist-info/RECORD,, +nvidia_cufft_cu11-10.9.0.58.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia_cufft_cu11-10.9.0.58.dist-info/WHEEL,sha256=6NTOzyjaR-9aJxJquqd0X0mRtq4MUYsP0w_ppLcEVkQ,108 +nvidia_cufft_cu11-10.9.0.58.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/REQUESTED b/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/WHEEL b/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..2cbd0cba57b06979b7006f8f837931386d6836ce --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (72.2.0) +Root-Is-Purelib: true +Tag: py3-none-manylinux2014_x86_64 + diff --git a/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/top_level.txt b/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/nvidia_cufft_cu11-10.9.0.58.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/infer_4_33_0/lib/python3.10/site-packages/opencv_python.libs/libswresample-d02fa90a.so.4.7.100 b/infer_4_33_0/lib/python3.10/site-packages/opencv_python.libs/libswresample-d02fa90a.so.4.7.100 new file mode 100644 index 0000000000000000000000000000000000000000..d2871bc979410a396782e8bf881a1837c9b16442 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/opencv_python.libs/libswresample-d02fa90a.so.4.7.100 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90374fd1304c04382b387a609dd1ca9c5fe381e02814249719ffdbc1cbad5cf9 +size 132417 diff --git a/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/AUTHORS.txt b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/AUTHORS.txt new file mode 100644 index 0000000000000000000000000000000000000000..f42daec02e222ccf1badd81f348a76e4f4641115 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/AUTHORS.txt @@ -0,0 +1,806 @@ +@Switch01 +A_Rog +Aakanksha Agrawal +Abhinav Sagar +ABHYUDAY PRATAP SINGH +abs51295 +AceGentile +Adam Chainz +Adam Tse +Adam Wentz +admin +Adolfo Ochagavía +Adrien Morison +Agus +ahayrapetyan +Ahilya +AinsworthK +Akash Srivastava +Alan Yee +Albert Tugushev +Albert-Guan +albertg +Alberto Sottile +Aleks Bunin +Ales Erjavec +Alethea Flowers +Alex Gaynor +Alex Grönholm +Alex Hedges +Alex Loosley +Alex Morega +Alex Stachowiak +Alexander Shtyrov +Alexandre Conrad +Alexey Popravka +Aleš Erjavec +Alli +Ami Fischman +Ananya Maiti +Anatoly Techtonik +Anders Kaseorg +Andre Aguiar +Andreas Lutro +Andrei Geacar +Andrew Gaul +Andrew Shymanel +Andrey Bienkowski +Andrey Bulgakov +Andrés Delfino +Andy Freeland +Andy Kluger +Ani Hayrapetyan +Aniruddha Basak +Anish Tambe +Anrs Hu +Anthony Sottile +Antoine Musso +Anton Ovchinnikov +Anton Patrushev 
+Anton Zelenov +Antonio Alvarado Hernandez +Antony Lee +Antti Kaihola +Anubhav Patel +Anudit Nagar +Anuj Godase +AQNOUCH Mohammed +AraHaan +arena +arenasys +Arindam Choudhury +Armin Ronacher +Arnon Yaari +Artem +Arun Babu Neelicattu +Ashley Manton +Ashwin Ramaswami +atse +Atsushi Odagiri +Avinash Karhana +Avner Cohen +Awit (Ah-Wit) Ghirmai +Baptiste Mispelon +Barney Gale +barneygale +Bartek Ogryczak +Bastian Venthur +Ben Bodenmiller +Ben Darnell +Ben Hoyt +Ben Mares +Ben Rosser +Bence Nagy +Benjamin Peterson +Benjamin VanEvery +Benoit Pierre +Berker Peksag +Bernard +Bernard Tyers +Bernardo B. Marques +Bernhard M. Wiedemann +Bertil Hatt +Bhavam Vidyarthi +Blazej Michalik +Bogdan Opanchuk +BorisZZZ +Brad Erickson +Bradley Ayers +Branch Vincent +Brandon L. Reiss +Brandt Bucher +Brannon Dorsey +Brett Randall +Brett Rosen +Brian Cristante +Brian Rosner +briantracy +BrownTruck +Bruno Oliveira +Bruno Renié +Bruno S +Bstrdsmkr +Buck Golemon +burrows +Bussonnier Matthias +bwoodsend +c22 +Caleb Brown +Caleb Martinez +Calvin Smith +Carl Meyer +Carlos Liam +Carol Willing +Carter Thayer +Cass +Chandrasekhar Atina +Charlie Marsh +charwick +Chih-Hsuan Yen +Chris Brinker +Chris Hunt +Chris Jerdonek +Chris Kuehl +Chris Markiewicz +Chris McDonough +Chris Pawley +Chris Pryer +Chris Wolfe +Christian Clauss +Christian Heimes +Christian Oudard +Christoph Reiter +Christopher Hunt +Christopher Snyder +chrysle +cjc7373 +Clark Boylan +Claudio Jolowicz +Clay McClure +Cody +Cody Soyland +Colin Watson +Collin Anderson +Connor Osborn +Cooper Lees +Cooper Ry Lees +Cory Benfield +Cory Wright +Craig Kerstiens +Cristian Sorinel +Cristina +Cristina Muñoz +ctg123 +Curtis Doty +cytolentino +Daan De Meyer +Dale +Damian +Damian Quiroga +Damian Shaw +Dan Black +Dan Savilonis +Dan Sully +Dane Hillard +daniel +Daniel Collins +Daniel Hahler +Daniel Holth +Daniel Jost +Daniel Katz +Daniel Shaulov +Daniele Esposti +Daniele Nicolodi +Daniele Procida +Daniil Konovalenko +Danny Hermes +Danny McClanahan +Darren Kavanagh +Dav Clark +Dave Abrahams +Dave Jones +David Aguilar +David Black +David Bordeynik +David Caro +David D Lowe +David Evans +David Hewitt +David Linke +David Poggi +David Poznik +David Pursehouse +David Runge +David Tucker +David Wales +Davidovich +ddelange +Deepak Sharma +Deepyaman Datta +Denise Yu +dependabot[bot] +derwolfe +Desetude +Devesh Kumar Singh +devsagul +Diego Caraballo +Diego Ramirez +DiegoCaraballo +Dimitri Merejkowsky +Dimitri Papadopoulos +Dimitri Papadopoulos Orfanos +Dirk Stolle +Dmitry Gladkov +Dmitry Volodin +Domen Kožar +Dominic Davis-Foster +Donald Stufft +Dongweiming +doron zarhi +Dos Moonen +Douglas Thor +DrFeathers +Dustin Ingram +Dustin Rodrigues +Dwayne Bailey +Ed Morley +Edgar Ramírez +Edgar Ramírez Mondragón +Ee Durbin +Efflam Lemaillet +efflamlemaillet +Eitan Adler +ekristina +elainechan +Eli Schwartz +Elisha Hollander +Ellen Marie Dash +Emil Burzo +Emil Styrke +Emmanuel Arias +Endoh Takanao +enoch +Erdinc Mutlu +Eric Cousineau +Eric Gillingham +Eric Hanchrow +Eric Hopper +Erik M. 
Bray +Erik Rose +Erwin Janssen +Eugene Vereshchagin +everdimension +Federico +Felipe Peter +Felix Yan +fiber-space +Filip Kokosiński +Filipe Laíns +Finn Womack +finnagin +Flavio Amurrio +Florian Briand +Florian Rathgeber +Francesco +Francesco Montesano +Fredrik Orderud +Frost Ming +Gabriel Curio +Gabriel de Perthuis +Garry Polley +gavin +gdanielson +Geoffrey Sneddon +George Song +Georgi Valkov +Georgy Pchelkin +ghost +Giftlin Rajaiah +gizmoguy1 +gkdoc +Godefroid Chapelle +Gopinath M +GOTO Hayato +gousaiyang +gpiks +Greg Roodt +Greg Ward +Guilherme Espada +Guillaume Seguin +gutsytechster +Guy Rozendorn +Guy Tuval +gzpan123 +Hanjun Kim +Hari Charan +Harsh Vardhan +harupy +Harutaka Kawamura +hauntsaninja +Henrich Hartzer +Henry Schreiner +Herbert Pfennig +Holly Stotelmyer +Honnix +Hsiaoming Yang +Hugo Lopes Tavares +Hugo van Kemenade +Hugues Bruant +Hynek Schlawack +Ian Bicking +Ian Cordasco +Ian Lee +Ian Stapleton Cordasco +Ian Wienand +Igor Kuzmitshov +Igor Sobreira +Ikko Ashimine +Ilan Schnell +Illia Volochii +Ilya Baryshev +Inada Naoki +Ionel Cristian Mărieș +Ionel Maries Cristian +Itamar Turner-Trauring +Ivan Pozdeev +J. Nick Koston +Jacob Kim +Jacob Walls +Jaime Sanz +jakirkham +Jakub Kuczys +Jakub Stasiak +Jakub Vysoky +Jakub Wilk +James Cleveland +James Curtin +James Firth +James Gerity +James Polley +Jan Pokorný +Jannis Leidel +Jarek Potiuk +jarondl +Jason Curtis +Jason R. Coombs +JasonMo +JasonMo1 +Jay Graves +Jean Abou Samra +Jean-Christophe Fillion-Robin +Jeff Barber +Jeff Dairiki +Jeff Widman +Jelmer Vernooij +jenix21 +Jeremy Fleischman +Jeremy Stanley +Jeremy Zafran +Jesse Rittner +Jiashuo Li +Jim Fisher +Jim Garrison +Jinzhe Zeng +Jiun Bae +Jivan Amara +Joe Bylund +Joe Michelini +John Paton +John Sirois +John T. Wodder II +John-Scott Atlakson +johnthagen +Jon Banafato +Jon Dufresne +Jon Parise +Jonas Nockert +Jonathan Herbert +Joonatan Partanen +Joost Molenaar +Jorge Niedbalski +Joseph Bylund +Joseph Long +Josh Bronson +Josh Cannon +Josh Hansen +Josh Schneier +Joshua +JoshuaPerdue +Juan Luis Cano Rodríguez +Juanjo Bazán +Judah Rand +Julian Berman +Julian Gethmann +Julien Demoor +July Tikhonov +Jussi Kukkonen +Justin van Heek +jwg4 +Jyrki Pulliainen +Kai Chen +Kai Mueller +Kamal Bin Mustafa +Karolina Surma +kasium +kaustav haldar +keanemind +Keith Maxwell +Kelsey Hightower +Kenneth Belitzky +Kenneth Reitz +Kevin Burke +Kevin Carter +Kevin Frommelt +Kevin R Patterson +Kexuan Sun +Kit Randel +Klaas van Schelven +KOLANICH +konstin +kpinc +Krishna Oza +Kumar McMillan +Kuntal Majumder +Kurt McKee +Kyle Persohn +lakshmanaram +Laszlo Kiss-Kollar +Laurent Bristiel +Laurent LAPORTE +Laurie O +Laurie Opperman +layday +Leon Sasson +Lev Givon +Lincoln de Sousa +Lipis +lorddavidiii +Loren Carvalho +Lucas Cimon +Ludovic Gasc +Luis Medel +Lukas Geiger +Lukas Juhrich +Luke Macken +Luo Jiebin +luojiebin +luz.paz +László Kiss Kollár +M00nL1ght +Marc Abramowitz +Marc Tamlyn +Marcus Smith +Mariatta +Mark Kohler +Mark McLoughlin +Mark Williams +Markus Hametner +Martey Dodoo +Martin Fischer +Martin Häcker +Martin Pavlasek +Masaki +Masklinn +Matej Stuchlik +Mathew Jennings +Mathieu Bridon +Mathieu Kniewallner +Matt Bacchi +Matt Good +Matt Maker +Matt Robenolt +Matt Wozniski +matthew +Matthew Einhorn +Matthew Feickert +Matthew Gilliard +Matthew Hughes +Matthew Iversen +Matthew Treinish +Matthew Trumbell +Matthew Willson +Matthias Bussonnier +mattip +Maurits van Rees +Max W Chase +Maxim Kurnikov +Maxime Rouyrre +mayeut +mbaluna +mdebi +memoselyk +meowmeowcat +Michael +Michael Aquilina +Michael E. 
Karpeles +Michael Klich +Michael Mintz +Michael Williamson +michaelpacer +Michał Górny +Mickaël Schoentgen +Miguel Araujo Perez +Mihir Singh +Mike +Mike Hendricks +Min RK +MinRK +Miro Hrončok +Monica Baluna +montefra +Monty Taylor +morotti +mrKazzila +Muha Ajjan +Nadav Wexler +Nahuel Ambrosini +Nate Coraor +Nate Prewitt +Nathan Houghton +Nathaniel J. Smith +Nehal J Wani +Neil Botelho +Nguyễn Gia Phong +Nicholas Serra +Nick Coghlan +Nick Stenning +Nick Timkovich +Nicolas Bock +Nicole Harris +Nikhil Benesch +Nikhil Ladha +Nikita Chepanov +Nikolay Korolev +Nipunn Koorapati +Nitesh Sharma +Niyas Sait +Noah +Noah Gorny +Nowell Strite +NtaleGrey +nvdv +OBITORASU +Ofek Lev +ofrinevo +Oliver Freund +Oliver Jeeves +Oliver Mannion +Oliver Tonnhofer +Olivier Girardot +Olivier Grisel +Ollie Rutherfurd +OMOTO Kenji +Omry Yadan +onlinejudge95 +Oren Held +Oscar Benjamin +Oz N Tiram +Pachwenko +Patrick Dubroy +Patrick Jenkins +Patrick Lawson +patricktokeeffe +Patrik Kopkan +Paul Ganssle +Paul Kehrer +Paul Moore +Paul Nasrat +Paul Oswald +Paul van der Linden +Paulus Schoutsen +Pavel Safronov +Pavithra Eswaramoorthy +Pawel Jasinski +Paweł Szramowski +Pekka Klärck +Peter Gessler +Peter Lisák +Peter Shen +Peter Waller +Petr Viktorin +petr-tik +Phaneendra Chiruvella +Phil Elson +Phil Freo +Phil Pennock +Phil Whelan +Philip Jägenstedt +Philip Molloy +Philippe Ombredanne +Pi Delport +Pierre-Yves Rofes +Pieter Degroote +pip +Prabakaran Kumaresshan +Prabhjyotsing Surjit Singh Sodhi +Prabhu Marappan +Pradyun Gedam +Prashant Sharma +Pratik Mallya +pre-commit-ci[bot] +Preet Thakkar +Preston Holmes +Przemek Wrzos +Pulkit Goyal +q0w +Qiangning Hong +Qiming Xu +Quentin Lee +Quentin Pradet +R. David Murray +Rafael Caricio +Ralf Schmitt +Ran Benita +Randy Döring +Razzi Abuissa +rdb +Reece Dunham +Remi Rampin +Rene Dudfield +Riccardo Magliocchetti +Riccardo Schirone +Richard Jones +Richard Si +Ricky Ng-Adam +Rishi +rmorotti +RobberPhex +Robert Collins +Robert McGibbon +Robert Pollak +Robert T. McGibbon +robin elisha robinson +Roey Berman +Rohan Jain +Roman Bogorodskiy +Roman Donchenko +Romuald Brunet +ronaudinho +Ronny Pfannschmidt +Rory McCann +Ross Brattain +Roy Wellington Ⅳ +Ruairidh MacLeod +Russell Keith-Magee +Ryan Shepherd +Ryan Wooden +ryneeverett +S. Guliaev +Sachi King +Salvatore Rinchiera +sandeepkiran-js +Sander Van Balen +Savio Jomton +schlamar +Scott Kitterman +Sean +seanj +Sebastian Jordan +Sebastian Schaetz +Segev Finer +SeongSoo Cho +Sergey Vasilyev +Seth Michael Larson +Seth Woodworth +Shahar Epstein +Shantanu +shenxianpeng +shireenrao +Shivansh-007 +Shixian Sheng +Shlomi Fish +Shovan Maity +Simeon Visser +Simon Cross +Simon Pichugin +sinoroc +sinscary +snook92 +socketubs +Sorin Sbarnea +Srinivas Nyayapati +Srishti Hegde +Stavros Korokithakis +Stefan Scherfke +Stefano Rivera +Stephan Erb +Stephen Rosen +stepshal +Steve (Gadget) Barnes +Steve Barnes +Steve Dower +Steve Kowalik +Steven Myint +Steven Silvester +stonebig +studioj +Stéphane Bidoul +Stéphane Bidoul (ACSONE) +Stéphane Klein +Sumana Harihareswara +Surbhi Sharma +Sviatoslav Sydorenko +Sviatoslav Sydorenko (Святослав Сидоренко) +Swat009 +Sylvain +Takayuki SHIMIZUKAWA +Taneli Hukkinen +tbeswick +Thiago +Thijs Triemstra +Thomas Fenzl +Thomas Grainger +Thomas Guettler +Thomas Johansson +Thomas Kluyver +Thomas Smith +Thomas VINCENT +Tim D. 
Smith +Tim Gates +Tim Harder +Tim Heap +tim smith +tinruufu +Tobias Hermann +Tom Forbes +Tom Freudenheim +Tom V +Tomas Hrnciar +Tomas Orsava +Tomer Chachamu +Tommi Enenkel | AnB +Tomáš Hrnčiar +Tony Beswick +Tony Narlock +Tony Zhaocheng Tan +TonyBeswick +toonarmycaptain +Toshio Kuratomi +toxinu +Travis Swicegood +Tushar Sadhwani +Tzu-ping Chung +Valentin Haenel +Victor Stinner +victorvpaulo +Vikram - Google +Viktor Szépe +Ville Skyttä +Vinay Sajip +Vincent Philippon +Vinicyus Macedo +Vipul Kumar +Vitaly Babiy +Vladimir Fokow +Vladimir Rutsky +W. Trevor King +Wil Tan +Wilfred Hughes +William Edwards +William ML Leslie +William T Olson +William Woodruff +Wilson Mo +wim glenn +Winson Luk +Wolfgang Maier +Wu Zhenyu +XAMES3 +Xavier Fernandez +Xianpeng Shen +xoviat +xtreak +YAMAMOTO Takashi +Yen Chi Hsuan +Yeray Diaz Diaz +Yoval P +Yu Jian +Yuan Jing Vincent Yan +Yusuke Hayashi +Zearin +Zhiping Deng +ziebam +Zvezdan Petkovic +Łukasz Langa +Роман Донченко +Семён Марьясин diff --git a/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/INSTALLER b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/LICENSE.txt b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..8e7b65eaf628360e6f32f4140fcdd7ec7c2b7077 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008-present The pip developers (see AUTHORS.txt file) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/METADATA b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3315c0635745a36012b57aa783435b0bfa425789 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/METADATA @@ -0,0 +1,90 @@ +Metadata-Version: 2.2 +Name: pip +Version: 25.0.1 +Summary: The PyPA recommended tool for installing Python packages. 
+Author-email: The pip developers +License: MIT +Project-URL: Homepage, https://pip.pypa.io/ +Project-URL: Documentation, https://pip.pypa.io +Project-URL: Source, https://github.com/pypa/pip +Project-URL: Changelog, https://pip.pypa.io/en/stable/news/ +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Topic :: Software Development :: Build Tools +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=3.8 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +License-File: AUTHORS.txt + +pip - The Python Package Installer +================================== + +.. |pypi-version| image:: https://img.shields.io/pypi/v/pip.svg + :target: https://pypi.org/project/pip/ + :alt: PyPI + +.. |python-versions| image:: https://img.shields.io/pypi/pyversions/pip + :target: https://pypi.org/project/pip + :alt: PyPI - Python Version + +.. |docs-badge| image:: https://readthedocs.org/projects/pip/badge/?version=latest + :target: https://pip.pypa.io/en/latest + :alt: Documentation + +|pypi-version| |python-versions| |docs-badge| + +pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes. + +Please take a look at our documentation for how to install and use pip: + +* `Installation`_ +* `Usage`_ + +We release updates regularly, with a new version every 3 months. Find more details in our documentation: + +* `Release notes`_ +* `Release process`_ + +If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms: + +* `Issue tracking`_ +* `Discourse channel`_ +* `User IRC`_ + +If you want to get involved head over to GitHub to get the source code, look at our development documentation and feel free to jump on the developer mailing lists and chat rooms: + +* `GitHub page`_ +* `Development documentation`_ +* `Development IRC`_ + +Code of Conduct +--------------- + +Everyone interacting in the pip project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_. + +.. _package installer: https://packaging.python.org/guides/tool-recommendations/ +.. _Python Package Index: https://pypi.org +.. _Installation: https://pip.pypa.io/en/stable/installation/ +.. _Usage: https://pip.pypa.io/en/stable/ +.. _Release notes: https://pip.pypa.io/en/stable/news.html +.. _Release process: https://pip.pypa.io/en/latest/development/release-process/ +.. _GitHub page: https://github.com/pypa/pip +.. _Development documentation: https://pip.pypa.io/en/latest/development +.. _Issue tracking: https://github.com/pypa/pip/issues +.. _Discourse channel: https://discuss.python.org/c/packaging +.. _User IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa +.. _Development IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa-dev +.. 
_PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md diff --git a/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/RECORD b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..0e608fb0aea0bc0fd101bbb4c983fe96c286cf67 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/RECORD @@ -0,0 +1,854 @@ +../../../bin/pip,sha256=TPQWN6GWIRSnaH8ZhYSzdRwVqHP54T91zpj6CykxwLw,239 +../../../bin/pip3,sha256=TPQWN6GWIRSnaH8ZhYSzdRwVqHP54T91zpj6CykxwLw,239 +../../../bin/pip3.10,sha256=TPQWN6GWIRSnaH8ZhYSzdRwVqHP54T91zpj6CykxwLw,239 +pip-25.0.1.dist-info/AUTHORS.txt,sha256=HqzpBVLfT1lBthqQfiDlVeFkg65hJ7ZQvvWhoq-BAsA,11018 +pip-25.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip-25.0.1.dist-info/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093 +pip-25.0.1.dist-info/METADATA,sha256=T6cxjPMPl523zsRcEsu8K0-IoV8S7vv1mmKR0KA6-SY,3677 +pip-25.0.1.dist-info/RECORD,, +pip-25.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip-25.0.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91 +pip-25.0.1.dist-info/entry_points.txt,sha256=eeIjuzfnfR2PrhbjnbzFU6MnSS70kZLxwaHHq6M-bD0,87 +pip-25.0.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip/__init__.py,sha256=aKiv_sTe7UbE7qmtCinJutFjqN0MndZQZ1fKLNwFFLE,357 +pip/__main__.py,sha256=WzbhHXTbSE6gBY19mNN9m4s5o_365LOvTYSgqgbdBhE,854 +pip/__pip-runner__.py,sha256=cPPWuJ6NK_k-GzfvlejLFgwzmYUROmpAR6QC3Q-vkXQ,1450 +pip/__pycache__/__init__.cpython-310.pyc,, +pip/__pycache__/__main__.cpython-310.pyc,, +pip/__pycache__/__pip-runner__.cpython-310.pyc,, +pip/_internal/__init__.py,sha256=MfcoOluDZ8QMCFYal04IqOJ9q6m2V7a0aOsnI-WOxUo,513 +pip/_internal/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/__pycache__/build_env.cpython-310.pyc,, +pip/_internal/__pycache__/cache.cpython-310.pyc,, +pip/_internal/__pycache__/configuration.cpython-310.pyc,, +pip/_internal/__pycache__/exceptions.cpython-310.pyc,, +pip/_internal/__pycache__/main.cpython-310.pyc,, +pip/_internal/__pycache__/pyproject.cpython-310.pyc,, +pip/_internal/__pycache__/self_outdated_check.cpython-310.pyc,, +pip/_internal/__pycache__/wheel_builder.cpython-310.pyc,, +pip/_internal/build_env.py,sha256=Dv4UCClSg4uNaal_hL-trg5-zl3Is9CuIDxkChCkXF4,10700 +pip/_internal/cache.py,sha256=Jb698p5PNigRtpW5o26wQNkkUv4MnQ94mc471wL63A0,10369 +pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132 +pip/_internal/cli/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/cli/__pycache__/autocompletion.cpython-310.pyc,, +pip/_internal/cli/__pycache__/base_command.cpython-310.pyc,, +pip/_internal/cli/__pycache__/cmdoptions.cpython-310.pyc,, +pip/_internal/cli/__pycache__/command_context.cpython-310.pyc,, +pip/_internal/cli/__pycache__/index_command.cpython-310.pyc,, +pip/_internal/cli/__pycache__/main.cpython-310.pyc,, +pip/_internal/cli/__pycache__/main_parser.cpython-310.pyc,, +pip/_internal/cli/__pycache__/parser.cpython-310.pyc,, +pip/_internal/cli/__pycache__/progress_bars.cpython-310.pyc,, +pip/_internal/cli/__pycache__/req_command.cpython-310.pyc,, +pip/_internal/cli/__pycache__/spinners.cpython-310.pyc,, +pip/_internal/cli/__pycache__/status_codes.cpython-310.pyc,, +pip/_internal/cli/autocompletion.py,sha256=Lli3Mr6aDNu7ZkJJFFvwD2-hFxNI6Avz8OwMyS5TVrs,6865 
+pip/_internal/cli/base_command.py,sha256=NZin6KMzW9NSYzKk4Tc8isb_TQYKR4CKd5j9mSm46PI,8625 +pip/_internal/cli/cmdoptions.py,sha256=V3BB22F4_v_RkHaZ5onWnszhbBtjYZvNhbn9M0NO0HI,30116 +pip/_internal/cli/command_context.py,sha256=RHgIPwtObh5KhMrd3YZTkl8zbVG-6Okml7YbFX4Ehg0,774 +pip/_internal/cli/index_command.py,sha256=i_sgNlPmXC5iHUaY-dmmrHKKTgc5O4hWzisr5Al1rr0,5677 +pip/_internal/cli/main.py,sha256=BDZef-bWe9g9Jpr4OVs4dDf-845HJsKw835T7AqEnAc,2817 +pip/_internal/cli/main_parser.py,sha256=laDpsuBDl6kyfywp9eMMA9s84jfH2TJJn-vmL0GG90w,4338 +pip/_internal/cli/parser.py,sha256=VCMtduzECUV87KaHNu-xJ-wLNL82yT3x16V4XBxOAqI,10825 +pip/_internal/cli/progress_bars.py,sha256=9GcgusWtwfqou2zhAQp1XNbQHIDslqyyz9UwLzw7Jgc,2717 +pip/_internal/cli/req_command.py,sha256=DqeFhmUMs6o6Ev8qawAcOoYNdAZsfyKS0MZI5jsJYwQ,12250 +pip/_internal/cli/spinners.py,sha256=hIJ83GerdFgFCdobIA23Jggetegl_uC4Sp586nzFbPE,5118 +pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116 +pip/_internal/commands/__init__.py,sha256=5oRO9O3dM2vGuh0bFw4HOVletryrz5HHMmmPWwJrH9U,3882 +pip/_internal/commands/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/commands/__pycache__/cache.cpython-310.pyc,, +pip/_internal/commands/__pycache__/check.cpython-310.pyc,, +pip/_internal/commands/__pycache__/completion.cpython-310.pyc,, +pip/_internal/commands/__pycache__/configuration.cpython-310.pyc,, +pip/_internal/commands/__pycache__/debug.cpython-310.pyc,, +pip/_internal/commands/__pycache__/download.cpython-310.pyc,, +pip/_internal/commands/__pycache__/freeze.cpython-310.pyc,, +pip/_internal/commands/__pycache__/hash.cpython-310.pyc,, +pip/_internal/commands/__pycache__/help.cpython-310.pyc,, +pip/_internal/commands/__pycache__/index.cpython-310.pyc,, +pip/_internal/commands/__pycache__/inspect.cpython-310.pyc,, +pip/_internal/commands/__pycache__/install.cpython-310.pyc,, +pip/_internal/commands/__pycache__/list.cpython-310.pyc,, +pip/_internal/commands/__pycache__/search.cpython-310.pyc,, +pip/_internal/commands/__pycache__/show.cpython-310.pyc,, +pip/_internal/commands/__pycache__/uninstall.cpython-310.pyc,, +pip/_internal/commands/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/commands/cache.py,sha256=IOezTicHjGE5sWdBx2nwPVgbjuJHM3s-BZEkpZLemuY,8107 +pip/_internal/commands/check.py,sha256=Hr_4eiMd9cgVDgEvjtIdw915NmL7ROIWW8enkr8slPQ,2268 +pip/_internal/commands/completion.py,sha256=HT4lD0bgsflHq2IDgYfiEdp7IGGtE7s6MgI3xn0VQEw,4287 +pip/_internal/commands/configuration.py,sha256=n98enwp6y0b5G6fiRQjaZo43FlJKYve_daMhN-4BRNc,9766 +pip/_internal/commands/debug.py,sha256=DNDRgE9YsKrbYzU0s3VKi8rHtKF4X13CJ_br_8PUXO0,6797 +pip/_internal/commands/download.py,sha256=0qB0nys6ZEPsog451lDsjL5Bx7Z97t-B80oFZKhpzKM,5273 +pip/_internal/commands/freeze.py,sha256=2Vt72BYTSm9rzue6d8dNzt8idxWK4Db6Hd-anq7GQ80,3203 +pip/_internal/commands/hash.py,sha256=EVVOuvGtoPEdFi8SNnmdqlCQrhCxV-kJsdwtdcCnXGQ,1703 +pip/_internal/commands/help.py,sha256=gcc6QDkcgHMOuAn5UxaZwAStsRBrnGSn_yxjS57JIoM,1132 +pip/_internal/commands/index.py,sha256=RAXxmJwFhVb5S1BYzb5ifX3sn9Na8v2CCVYwSMP8pao,4731 +pip/_internal/commands/inspect.py,sha256=PGrY9TRTRCM3y5Ml8Bdk8DEOXquWRfscr4DRo1LOTPc,3189 +pip/_internal/commands/install.py,sha256=r3yHQUxvxt7gD5j9n6zRDslAvtx9CT_whLuQJcktp6M,29390 +pip/_internal/commands/list.py,sha256=oiIzSjLP6__d7dIS3q0Xb5ywsaOThBWRqMyjjKzkPdM,12769 +pip/_internal/commands/search.py,sha256=fWkUQVx_gm8ebbFAlCgqtxKXT9rNahpJ-BI__3HNZpg,5626 +pip/_internal/commands/show.py,sha256=0YBhCga3PAd81vT3l7UWflktSpB5-aYqQcJxBVPazVM,7857 
+pip/_internal/commands/uninstall.py,sha256=7pOR7enK76gimyxQbzxcG1OsyLXL3DvX939xmM8Fvtg,3892 +pip/_internal/commands/wheel.py,sha256=eJRhr_qoNNxWAkkdJCNiQM7CXd4E1_YyQhsqJnBPGGg,6414 +pip/_internal/configuration.py,sha256=-KOok6jh3hFzXMPQFPJ1_EFjBpAsge-RSreQuLHLmzo,14005 +pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858 +pip/_internal/distributions/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/distributions/__pycache__/base.cpython-310.pyc,, +pip/_internal/distributions/__pycache__/installed.cpython-310.pyc,, +pip/_internal/distributions/__pycache__/sdist.cpython-310.pyc,, +pip/_internal/distributions/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/distributions/base.py,sha256=QeB9qvKXDIjLdPBDE5fMgpfGqMMCr-govnuoQnGuiF8,1783 +pip/_internal/distributions/installed.py,sha256=QinHFbWAQ8oE0pbD8MFZWkwlnfU1QYTccA1vnhrlYOU,842 +pip/_internal/distributions/sdist.py,sha256=PlcP4a6-R6c98XnOM-b6Lkb3rsvh9iG4ok8shaanrzs,6751 +pip/_internal/distributions/wheel.py,sha256=THBYfnv7VVt8mYhMYUtH13S1E7FDwtDyDfmUcl8ai0E,1317 +pip/_internal/exceptions.py,sha256=2_byISIv3kSnI_9T-Esfxrt0LnTRgcUHyxu0twsHjQY,26481 +pip/_internal/index/__init__.py,sha256=vpt-JeTZefh8a-FC22ZeBSXFVbuBcXSGiILhQZJaNpQ,30 +pip/_internal/index/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/index/__pycache__/collector.cpython-310.pyc,, +pip/_internal/index/__pycache__/package_finder.cpython-310.pyc,, +pip/_internal/index/__pycache__/sources.cpython-310.pyc,, +pip/_internal/index/collector.py,sha256=RdPO0JLAlmyBWPAWYHPyRoGjz3GNAeTngCNkbGey_mE,16265 +pip/_internal/index/package_finder.py,sha256=mJHAljlHeHuclyuxtjvBZO6DtovKjsZjF_tCh_wux5E,38076 +pip/_internal/index/sources.py,sha256=lPBLK5Xiy8Q6IQMio26Wl7ocfZOKkgGklIBNyUJ23fI,8632 +pip/_internal/locations/__init__.py,sha256=UaAxeZ_f93FyouuFf4p7SXYF-4WstXuEvd3LbmPCAno,14925 +pip/_internal/locations/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/locations/__pycache__/_distutils.cpython-310.pyc,, +pip/_internal/locations/__pycache__/_sysconfig.cpython-310.pyc,, +pip/_internal/locations/__pycache__/base.cpython-310.pyc,, +pip/_internal/locations/_distutils.py,sha256=x6nyVLj7X11Y4khIdf-mFlxMl2FWadtVEgeb8upc_WI,6013 +pip/_internal/locations/_sysconfig.py,sha256=IGzds60qsFneRogC-oeBaY7bEh3lPt_v47kMJChQXsU,7724 +pip/_internal/locations/base.py,sha256=RQiPi1d4FVM2Bxk04dQhXZ2PqkeljEL2fZZ9SYqIQ78,2556 +pip/_internal/main.py,sha256=r-UnUe8HLo5XFJz8inTcOOTiu_sxNhgHb6VwlGUllOI,340 +pip/_internal/metadata/__init__.py,sha256=CU8jK1TZso7jOLdr0sX9xDjrcs5iy8d7IRK-hvaIO5Y,4337 +pip/_internal/metadata/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/metadata/__pycache__/_json.cpython-310.pyc,, +pip/_internal/metadata/__pycache__/base.cpython-310.pyc,, +pip/_internal/metadata/__pycache__/pkg_resources.cpython-310.pyc,, +pip/_internal/metadata/_json.py,sha256=ezrIYazHCINM2QUk1eA9wEAMj3aeGWeDVgGalgUzKpc,2707 +pip/_internal/metadata/base.py,sha256=ft0K5XNgI4ETqZnRv2-CtvgYiMOMAeGMAzxT-f6VLJA,25298 +pip/_internal/metadata/importlib/__init__.py,sha256=jUUidoxnHcfITHHaAWG1G2i5fdBYklv_uJcjo2x7VYE,135 +pip/_internal/metadata/importlib/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/metadata/importlib/__pycache__/_compat.cpython-310.pyc,, +pip/_internal/metadata/importlib/__pycache__/_dists.cpython-310.pyc,, +pip/_internal/metadata/importlib/__pycache__/_envs.cpython-310.pyc,, +pip/_internal/metadata/importlib/_compat.py,sha256=c6av8sP8BBjAZuFSJow1iWfygUXNM3xRTCn5nqw6B9M,2796 
+pip/_internal/metadata/importlib/_dists.py,sha256=ftmYiyfUGUIjnVwt6W-Ijsimy5c28KgmXly5Q5IQ2P4,8279 +pip/_internal/metadata/importlib/_envs.py,sha256=UUB980XSrDWrMpQ1_G45i0r8Hqlg_tg3IPQ63mEqbNc,7431 +pip/_internal/metadata/pkg_resources.py,sha256=U07ETAINSGeSRBfWUG93E4tZZbaW_f7PGzEqZN0hulc,10542 +pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63 +pip/_internal/models/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/models/__pycache__/candidate.cpython-310.pyc,, +pip/_internal/models/__pycache__/direct_url.cpython-310.pyc,, +pip/_internal/models/__pycache__/format_control.cpython-310.pyc,, +pip/_internal/models/__pycache__/index.cpython-310.pyc,, +pip/_internal/models/__pycache__/installation_report.cpython-310.pyc,, +pip/_internal/models/__pycache__/link.cpython-310.pyc,, +pip/_internal/models/__pycache__/scheme.cpython-310.pyc,, +pip/_internal/models/__pycache__/search_scope.cpython-310.pyc,, +pip/_internal/models/__pycache__/selection_prefs.cpython-310.pyc,, +pip/_internal/models/__pycache__/target_python.cpython-310.pyc,, +pip/_internal/models/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/models/candidate.py,sha256=zzgFRuw_kWPjKpGw7LC0ZUMD2CQ2EberUIYs8izjdCA,753 +pip/_internal/models/direct_url.py,sha256=uBtY2HHd3TO9cKQJWh0ThvE5FRr-MWRYChRU4IG9HZE,6578 +pip/_internal/models/format_control.py,sha256=wtsQqSK9HaUiNxQEuB-C62eVimw6G4_VQFxV9-_KDBE,2486 +pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030 +pip/_internal/models/installation_report.py,sha256=zRVZoaz-2vsrezj_H3hLOhMZCK9c7TbzWgC-jOalD00,2818 +pip/_internal/models/link.py,sha256=GQ8hq7x-FDFPv25Nbn2veIM-MlBrGZDGLd7aZeF4Xrg,21448 +pip/_internal/models/scheme.py,sha256=PakmHJM3e8OOWSZFtfz1Az7f1meONJnkGuQxFlt3wBE,575 +pip/_internal/models/search_scope.py,sha256=67NEnsYY84784S-MM7ekQuo9KXLH-7MzFntXjapvAo0,4531 +pip/_internal/models/selection_prefs.py,sha256=qaFfDs3ciqoXPg6xx45N1jPLqccLJw4N0s4P0PyHTQ8,2015 +pip/_internal/models/target_python.py,sha256=2XaH2rZ5ZF-K5wcJbEMGEl7SqrTToDDNkrtQ2v_v_-Q,4271 +pip/_internal/models/wheel.py,sha256=G7dND_s4ebPkEL7RJ1qCY0QhUUWIIK6AnjWgRATF5no,4539 +pip/_internal/network/__init__.py,sha256=jf6Tt5nV_7zkARBrKojIXItgejvoegVJVKUbhAa5Ioc,50 +pip/_internal/network/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/network/__pycache__/auth.cpython-310.pyc,, +pip/_internal/network/__pycache__/cache.cpython-310.pyc,, +pip/_internal/network/__pycache__/download.cpython-310.pyc,, +pip/_internal/network/__pycache__/lazy_wheel.cpython-310.pyc,, +pip/_internal/network/__pycache__/session.cpython-310.pyc,, +pip/_internal/network/__pycache__/utils.cpython-310.pyc,, +pip/_internal/network/__pycache__/xmlrpc.cpython-310.pyc,, +pip/_internal/network/auth.py,sha256=D4gASjUrqoDFlSt6gQ767KAAjv6PUyJU0puDlhXNVRE,20809 +pip/_internal/network/cache.py,sha256=0yGMA3Eet59xBSLtbPAenvI53dl29oUOeqZ2c0QL2Ss,4614 +pip/_internal/network/download.py,sha256=FLOP29dPYECBiAi7eEjvAbNkyzaKNqbyjOT2m8HPW8U,6048 +pip/_internal/network/lazy_wheel.py,sha256=PBdoMoNQQIA84Fhgne38jWF52W4x_KtsHjxgv4dkRKA,7622 +pip/_internal/network/session.py,sha256=msM4es16LmmNEYNkrYyg8fTc7gAHbKFltawfKP27LOI,18771 +pip/_internal/network/utils.py,sha256=Inaxel-NxBu4PQWkjyErdnfewsFCcgHph7dzR1-FboY,4088 +pip/_internal/network/xmlrpc.py,sha256=sAxzOacJ-N1NXGPvap9jC3zuYWSnnv3GXtgR2-E2APA,1838 +pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/operations/__pycache__/__init__.cpython-310.pyc,, 
+pip/_internal/operations/__pycache__/check.cpython-310.pyc,, +pip/_internal/operations/__pycache__/freeze.cpython-310.pyc,, +pip/_internal/operations/__pycache__/prepare.cpython-310.pyc,, +pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/operations/build/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/build_tracker.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/metadata.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/metadata_editable.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/wheel_editable.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-310.pyc,, +pip/_internal/operations/build/build_tracker.py,sha256=-ARW_TcjHCOX7D2NUOGntB4Fgc6b4aolsXkAK6BWL7w,4774 +pip/_internal/operations/build/metadata.py,sha256=9S0CUD8U3QqZeXp-Zyt8HxwU90lE4QrnYDgrqZDzBnc,1422 +pip/_internal/operations/build/metadata_editable.py,sha256=xlAwcP9q_8_fmv_3I39w9EZ7SQV9hnJZr9VuTsq2Y68,1510 +pip/_internal/operations/build/metadata_legacy.py,sha256=8i6i1QZX9m_lKPStEFsHKM0MT4a-CD408JOw99daLmo,2190 +pip/_internal/operations/build/wheel.py,sha256=sT12FBLAxDC6wyrDorh8kvcZ1jG5qInCRWzzP-UkJiQ,1075 +pip/_internal/operations/build/wheel_editable.py,sha256=yOtoH6zpAkoKYEUtr8FhzrYnkNHQaQBjWQ2HYae1MQg,1417 +pip/_internal/operations/build/wheel_legacy.py,sha256=K-6kNhmj-1xDF45ny1yheMerF0ui4EoQCLzEoHh6-tc,3045 +pip/_internal/operations/check.py,sha256=L24vRL8VWbyywdoeAhM89WCd8zLTnjIbULlKelUgIec,5912 +pip/_internal/operations/freeze.py,sha256=1_M79jAQKnCxWr-KCCmHuVXOVFGaUJHmoWLfFzgh7K4,9843 +pip/_internal/operations/install/__init__.py,sha256=mX7hyD2GNBO2mFGokDQ30r_GXv7Y_PLdtxcUv144e-s,51 +pip/_internal/operations/install/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/operations/install/__pycache__/editable_legacy.cpython-310.pyc,, +pip/_internal/operations/install/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/operations/install/editable_legacy.py,sha256=PoEsNEPGbIZ2yQphPsmYTKLOCMs4gv5OcCdzW124NcA,1283 +pip/_internal/operations/install/wheel.py,sha256=X5Iz9yUg5LlK5VNQ9g2ikc6dcRu8EPi_SUi5iuEDRgo,27615 +pip/_internal/operations/prepare.py,sha256=joWJwPkuqGscQgVNImLK71e9hRapwKvRCM8HclysmvU,28118 +pip/_internal/pyproject.py,sha256=GLJ6rWRS5_2noKdajohoLyDty57Z7QXhcUAYghmTnWc,7286 +pip/_internal/req/__init__.py,sha256=HxBFtZy_BbCclLgr26waMtpzYdO5T3vxePvpGAXSt5s,2653 +pip/_internal/req/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/req/__pycache__/constructors.cpython-310.pyc,, +pip/_internal/req/__pycache__/req_file.cpython-310.pyc,, +pip/_internal/req/__pycache__/req_install.cpython-310.pyc,, +pip/_internal/req/__pycache__/req_set.cpython-310.pyc,, +pip/_internal/req/__pycache__/req_uninstall.cpython-310.pyc,, +pip/_internal/req/constructors.py,sha256=v1qzCN1mIldwx-nCrPc8JO4lxkm3Fv8M5RWvt8LISjc,18430 +pip/_internal/req/req_file.py,sha256=eys82McgaICOGic2UZRHjD720piKJPwmeSYdXlWwl6w,20234 +pip/_internal/req/req_install.py,sha256=BMptxHYg2uG_b-7HFEULPb3nuw0FMAbuea8zTq2rE7w,35786 +pip/_internal/req/req_set.py,sha256=j3esG0s6SzoVReX9rWn4rpYNtyET_fwxbwJPRimvRxo,2858 +pip/_internal/req/req_uninstall.py,sha256=qzDIxJo-OETWqGais7tSMCDcWbATYABT-Tid3ityF0s,23853 +pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/__pycache__/__init__.cpython-310.pyc,, 
+pip/_internal/resolution/__pycache__/base.cpython-310.pyc,, +pip/_internal/resolution/base.py,sha256=qlmh325SBVfvG6Me9gc5Nsh5sdwHBwzHBq6aEXtKsLA,583 +pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/legacy/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/resolution/legacy/__pycache__/resolver.cpython-310.pyc,, +pip/_internal/resolution/legacy/resolver.py,sha256=3HZiJBRd1FTN6jQpI4qRO8-TbLYeIbUTS6PFvXnXs2w,24068 +pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/base.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/base.py,sha256=DCf669FsqyQY5uqXeePDHQY1e4QO-pBzWH8O0s9-K94,5023 +pip/_internal/resolution/resolvelib/candidates.py,sha256=5UZ1upNnmqsP-nmEZaDYxaBgCoejw_e2WVGmmAvBxXc,20001 +pip/_internal/resolution/resolvelib/factory.py,sha256=MJOLSZJY8_28PPdcutoQ6gjJ_1eBDt6Z1edtfTJyR4E,32659 +pip/_internal/resolution/resolvelib/found_candidates.py,sha256=9hrTyQqFvl9I7Tji79F1AxHv39Qh1rkJ_7deSHSMfQc,6383 +pip/_internal/resolution/resolvelib/provider.py,sha256=bcsFnYvlmtB80cwVdW1fIwgol8ZNr1f1VHyRTkz47SM,9935 +pip/_internal/resolution/resolvelib/reporter.py,sha256=00JtoXEkTlw0-rl_sl54d71avwOsJHt9GGHcrj5Sza0,3168 +pip/_internal/resolution/resolvelib/requirements.py,sha256=7JG4Z72e5Yk4vU0S5ulGvbqTy4FMQGYhY5zQhX9zTtY,8065 +pip/_internal/resolution/resolvelib/resolver.py,sha256=nLJOsVMEVi2gQUVJoUFKMZAeu2f7GRMjGMvNSWyz0Bc,12592 +pip/_internal/self_outdated_check.py,sha256=1PFtttvLAeyCVR3tPoBq2sOlPD0IJ-KSqU6bc1HUk9c,8318 +pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/utils/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/utils/__pycache__/_jaraco_text.cpython-310.pyc,, +pip/_internal/utils/__pycache__/_log.cpython-310.pyc,, +pip/_internal/utils/__pycache__/appdirs.cpython-310.pyc,, +pip/_internal/utils/__pycache__/compat.cpython-310.pyc,, +pip/_internal/utils/__pycache__/compatibility_tags.cpython-310.pyc,, +pip/_internal/utils/__pycache__/datetime.cpython-310.pyc,, +pip/_internal/utils/__pycache__/deprecation.cpython-310.pyc,, +pip/_internal/utils/__pycache__/direct_url_helpers.cpython-310.pyc,, +pip/_internal/utils/__pycache__/egg_link.cpython-310.pyc,, +pip/_internal/utils/__pycache__/entrypoints.cpython-310.pyc,, +pip/_internal/utils/__pycache__/filesystem.cpython-310.pyc,, +pip/_internal/utils/__pycache__/filetypes.cpython-310.pyc,, +pip/_internal/utils/__pycache__/glibc.cpython-310.pyc,, +pip/_internal/utils/__pycache__/hashes.cpython-310.pyc,, +pip/_internal/utils/__pycache__/logging.cpython-310.pyc,, +pip/_internal/utils/__pycache__/misc.cpython-310.pyc,, +pip/_internal/utils/__pycache__/packaging.cpython-310.pyc,, +pip/_internal/utils/__pycache__/retry.cpython-310.pyc,, +pip/_internal/utils/__pycache__/setuptools_build.cpython-310.pyc,, 
+pip/_internal/utils/__pycache__/subprocess.cpython-310.pyc,, +pip/_internal/utils/__pycache__/temp_dir.cpython-310.pyc,, +pip/_internal/utils/__pycache__/unpacking.cpython-310.pyc,, +pip/_internal/utils/__pycache__/urls.cpython-310.pyc,, +pip/_internal/utils/__pycache__/virtualenv.cpython-310.pyc,, +pip/_internal/utils/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/utils/_jaraco_text.py,sha256=M15uUPIh5NpP1tdUGBxRau6q1ZAEtI8-XyLEETscFfE,3350 +pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015 +pip/_internal/utils/appdirs.py,sha256=swgcTKOm3daLeXTW6v5BUS2Ti2RvEnGRQYH_yDXklAo,1665 +pip/_internal/utils/compat.py,sha256=ckkFveBiYQjRWjkNsajt_oWPS57tJvE8XxoC4OIYgCY,2399 +pip/_internal/utils/compatibility_tags.py,sha256=OWq5axHpW-MEEPztGdvgADrgJPAcV9a88Rxm4Z8VBs8,6272 +pip/_internal/utils/datetime.py,sha256=m21Y3wAtQc-ji6Veb6k_M5g6A0ZyFI4egchTdnwh-pQ,242 +pip/_internal/utils/deprecation.py,sha256=k7Qg_UBAaaTdyq82YVARA6D7RmcGTXGv7fnfcgigj4Q,3707 +pip/_internal/utils/direct_url_helpers.py,sha256=r2MRtkVDACv9AGqYODBUC9CjwgtsUU1s68hmgfCJMtA,3196 +pip/_internal/utils/egg_link.py,sha256=0FePZoUYKv4RGQ2t6x7w5Z427wbA_Uo3WZnAkrgsuqo,2463 +pip/_internal/utils/entrypoints.py,sha256=YlhLTRl2oHBAuqhc-zmL7USS67TPWVHImjeAQHreZTQ,3064 +pip/_internal/utils/filesystem.py,sha256=ajvA-q4ocliW9kPp8Yquh-4vssXbu-UKbo5FV9V4X64,4950 +pip/_internal/utils/filetypes.py,sha256=i8XAQ0eFCog26Fw9yV0Yb1ygAqKYB1w9Cz9n0fj8gZU,716 +pip/_internal/utils/glibc.py,sha256=vUkWq_1pJuzcYNcGKLlQmABoUiisK8noYY1yc8Wq4w4,3734 +pip/_internal/utils/hashes.py,sha256=XGGLL0AG8-RhWnyz87xF6MFZ--BKadHU35D47eApCKI,4972 +pip/_internal/utils/logging.py,sha256=ONfbrhaD248akkosK79if97n20EABxwjOxp5dE5RCRY,11845 +pip/_internal/utils/misc.py,sha256=DWnYxBUItjRp7hhxEg4ih6P6YpKrykM86dbi_EcU8SQ,23450 +pip/_internal/utils/packaging.py,sha256=cm-X_0HVHV_jRwUVZh6AuEWqSitzf8EpaJ7Uv2UGu6A,2142 +pip/_internal/utils/retry.py,sha256=mhFbykXjhTnZfgzeuy-vl9c8nECnYn_CMtwNJX2tYzQ,1392 +pip/_internal/utils/setuptools_build.py,sha256=ouXpud-jeS8xPyTPsXJ-m34NPvK5os45otAzdSV_IJE,4435 +pip/_internal/utils/subprocess.py,sha256=EsvqSRiSMHF98T8Txmu6NLU3U--MpTTQjtNgKP0P--M,8988 +pip/_internal/utils/temp_dir.py,sha256=5qOXe8M4JeY6vaFQM867d5zkp1bSwMZ-KT5jymmP0Zg,9310 +pip/_internal/utils/unpacking.py,sha256=_gVdyzTRDMYktpnYljn4OoxrZTtMCf4xknSm4rK0WaA,11967 +pip/_internal/utils/urls.py,sha256=qceSOZb5lbNDrHNsv7_S4L4Ytszja5NwPKUMnZHbYnM,1599 +pip/_internal/utils/virtualenv.py,sha256=S6f7csYorRpiD6cvn3jISZYc3I8PJC43H5iMFpRAEDU,3456 +pip/_internal/utils/wheel.py,sha256=b442jkydFHjXzDy6cMR7MpzWBJ1Q82hR5F33cmcHV3g,4494 +pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596 +pip/_internal/vcs/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/bazaar.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/git.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/mercurial.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/subversion.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/versioncontrol.cpython-310.pyc,, +pip/_internal/vcs/bazaar.py,sha256=EKStcQaKpNu0NK4p5Q10Oc4xb3DUxFw024XrJy40bFQ,3528 +pip/_internal/vcs/git.py,sha256=3tpc9LQA_J4IVW5r5NvWaaSeDzcmJOrSFZN0J8vIKfU,18177 +pip/_internal/vcs/mercurial.py,sha256=oULOhzJ2Uie-06d1omkL-_Gc6meGaUkyogvqG9ZCyPs,5249 +pip/_internal/vcs/subversion.py,sha256=ddTugHBqHzV3ebKlU5QXHPN4gUqlyXbOx8q8NgXKvs8,11735 +pip/_internal/vcs/versioncontrol.py,sha256=cvf_-hnTAjQLXJ3d17FMNhQfcO1AcKWUF10tfrYyP-c,22440 
+pip/_internal/wheel_builder.py,sha256=DL3A8LKeRj_ACp11WS5wSgASgPFqeyAeXJKdXfmaWXU,11799 +pip/_vendor/__init__.py,sha256=JYuAXvClhInxIrA2FTp5p-uuWVL7WV6-vEpTs46-Qh4,4873 +pip/_vendor/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/__pycache__/typing_extensions.cpython-310.pyc,, +pip/_vendor/cachecontrol/__init__.py,sha256=LMC5CBe94ZRL5xhlzwyPDmHXvBD0p7lT4R3Z73D6a_I,677 +pip/_vendor/cachecontrol/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/adapter.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/cache.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/controller.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/serialize.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-310.pyc,, +pip/_vendor/cachecontrol/_cmd.py,sha256=iist2EpzJvDVIhMAxXq8iFnTBsiZAd6iplxfmNboNyk,1737 +pip/_vendor/cachecontrol/adapter.py,sha256=febjY4LV87iiCIK3jcl8iH58iaSA7b9WkovsByIDK0Y,6348 +pip/_vendor/cachecontrol/cache.py,sha256=OXwv7Fn2AwnKNiahJHnjtvaKLndvVLv_-zO-ltlV9qI,1953 +pip/_vendor/cachecontrol/caches/__init__.py,sha256=dtrrroK5BnADR1GWjCZ19aZ0tFsMfvFBtLQQU1sp_ag,303 +pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-310.pyc,, +pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-310.pyc,, +pip/_vendor/cachecontrol/caches/file_cache.py,sha256=b7oMgsRSqPmEsonVJw6uFEYUlFgD6GF8TyacOGG1x3M,5399 +pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=9rmqwtYu_ljVkW6_oLqbC7EaX_a8YT_yLuna-eS0dgo,1386 +pip/_vendor/cachecontrol/controller.py,sha256=glbPj2iZlGqdBg8z09D2DtQOzoOGXnWvy7K2LEyBsEQ,18576 +pip/_vendor/cachecontrol/filewrapper.py,sha256=2ktXNPE0KqnyzF24aOsKCA58HQq1xeC6l2g6_zwjghc,4291 +pip/_vendor/cachecontrol/heuristics.py,sha256=gqMXU8w0gQuEQiSdu3Yg-0vd9kW7nrWKbLca75rheGE,4881 +pip/_vendor/cachecontrol/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/cachecontrol/serialize.py,sha256=HQd2IllQ05HzPkVLMXTF2uX5mjEQjDBkxCqUJUODpZk,5163 +pip/_vendor/cachecontrol/wrapper.py,sha256=hsGc7g8QGQTT-4f8tgz3AM5qwScg6FO0BSdLSRdEvpU,1417 +pip/_vendor/certifi/__init__.py,sha256=p_GYZrjUwPBUhpLlCZoGb0miKBKSqDAyZC5DvIuqbHQ,94 +pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255 +pip/_vendor/certifi/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/certifi/__pycache__/__main__.cpython-310.pyc,, +pip/_vendor/certifi/__pycache__/core.cpython-310.pyc,, +pip/_vendor/certifi/cacert.pem,sha256=lO3rZukXdPyuk6BWUJFOKQliWaXH6HGh9l1GGrUgG0c,299427 +pip/_vendor/certifi/core.py,sha256=2SRT5rIcQChFDbe37BQa-kULxAgJ8qN6l1jfqTp4HIs,4486 +pip/_vendor/certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/distlib/__init__.py,sha256=dcwgYGYGQqAEawBXPDtIx80DO_3cOmFv8HTc8JMzknQ,625 +pip/_vendor/distlib/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/compat.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/database.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/index.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/locators.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/manifest.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/markers.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/metadata.cpython-310.pyc,, 
+pip/_vendor/distlib/__pycache__/resources.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/scripts.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/util.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/version.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/wheel.cpython-310.pyc,, +pip/_vendor/distlib/compat.py,sha256=2jRSjRI4o-vlXeTK2BCGIUhkc6e9ZGhSsacRM5oseTw,41467 +pip/_vendor/distlib/database.py,sha256=mHy_LxiXIsIVRb-T0-idBrVLw3Ffij5teHCpbjmJ9YU,51160 +pip/_vendor/distlib/index.py,sha256=lTbw268rRhj8dw1sib3VZ_0EhSGgoJO3FKJzSFMOaeA,20797 +pip/_vendor/distlib/locators.py,sha256=oBeAZpFuPQSY09MgNnLfQGGAXXvVO96BFpZyKMuK4tM,51026 +pip/_vendor/distlib/manifest.py,sha256=3qfmAmVwxRqU1o23AlfXrQGZzh6g_GGzTAP_Hb9C5zQ,14168 +pip/_vendor/distlib/markers.py,sha256=X6sDvkFGcYS8gUW8hfsWuKEKAqhQZAJ7iXOMLxRYjYk,5164 +pip/_vendor/distlib/metadata.py,sha256=zil3sg2EUfLXVigljY2d_03IJt-JSs7nX-73fECMX2s,38724 +pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820 +pip/_vendor/distlib/scripts.py,sha256=BJliaDAZaVB7WAkwokgC3HXwLD2iWiHaVI50H7C6eG8,18608 +pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792 +pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784 +pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032 +pip/_vendor/distlib/util.py,sha256=vMPGvsS4j9hF6Y9k3Tyom1aaHLb0rFmZAEyzeAdel9w,66682 +pip/_vendor/distlib/version.py,sha256=s5VIs8wBn0fxzGxWM_aA2ZZyx525HcZbMvcTlTyZ3Rg,23727 +pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648 +pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448 +pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888 +pip/_vendor/distlib/wheel.py,sha256=DFIVguEQHCdxnSdAO0dfFsgMcvVZitg7bCOuLwZ7A_s,43979 +pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981 +pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64 +pip/_vendor/distro/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/distro/__pycache__/__main__.cpython-310.pyc,, +pip/_vendor/distro/__pycache__/distro.cpython-310.pyc,, +pip/_vendor/distro/distro.py,sha256=XqbefacAhDT4zr_trnbA15eY8vdK4GTghgmvUGrEM_4,49430 +pip/_vendor/distro/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868 +pip/_vendor/idna/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/codec.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/compat.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/core.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/idnadata.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/intranges.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/package_data.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/uts46data.cpython-310.pyc,, +pip/_vendor/idna/codec.py,sha256=PEew3ItwzjW4hymbasnty2N2OXvNcgHB-JjrBuxHPYY,3422 +pip/_vendor/idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316 +pip/_vendor/idna/core.py,sha256=YJYyAMnwiQEPjVC4-Fqu_p4CJ6yKKuDGmppBNQNQpFs,13239 +pip/_vendor/idna/idnadata.py,sha256=W30GcIGvtOWYwAjZj4ZjuouUutC6ffgNuyjJy7fZ-lo,78306 +pip/_vendor/idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898 +pip/_vendor/idna/package_data.py,sha256=q59S3OXsc5VI8j6vSD0sGBMyk6zZ4vWFREE88yCJYKs,21 +pip/_vendor/idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pip/_vendor/idna/uts46data.py,sha256=rt90K9J40gUSwppDPCrhjgi5AA6pWM65dEGRSf6rIhM,239289 +pip/_vendor/msgpack/__init__.py,sha256=reRaiOtEzSjPnr7TpxjgIvbfln5pV66FhricAs2eC-g,1109 +pip/_vendor/msgpack/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/msgpack/__pycache__/exceptions.cpython-310.pyc,, +pip/_vendor/msgpack/__pycache__/ext.cpython-310.pyc,, +pip/_vendor/msgpack/__pycache__/fallback.cpython-310.pyc,, +pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081 +pip/_vendor/msgpack/ext.py,sha256=kteJv03n9tYzd5oo3xYopVTo4vRaAxonBQQJhXohZZo,5726 +pip/_vendor/msgpack/fallback.py,sha256=0g1Pzp0vtmBEmJ5w9F3s_-JMVURP8RS4G1cc5TRaAsI,32390 +pip/_vendor/packaging/__init__.py,sha256=dk4Ta_vmdVJxYHDcfyhvQNw8V3PgSBomKNXqg-D2JDY,494 +pip/_vendor/packaging/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/_elffile.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/_parser.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/_structures.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/_tokenizer.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/markers.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/metadata.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/requirements.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/tags.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/utils.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/version.cpython-310.pyc,, +pip/_vendor/packaging/_elffile.py,sha256=cflAQAkE25tzhYmq_aCi72QfbT_tn891tPzfpbeHOwE,3306 +pip/_vendor/packaging/_manylinux.py,sha256=vl5OCoz4kx80H5rwXKeXWjl9WNISGmr4ZgTpTP9lU9c,9612 +pip/_vendor/packaging/_musllinux.py,sha256=p9ZqNYiOItGee8KcZFeHF_YcdhVwGHdK6r-8lgixvGQ,2694 +pip/_vendor/packaging/_parser.py,sha256=s_TvTvDNK0NrM2QB3VKThdWFM4Nc0P6JnkObkl3MjpM,10236 +pip/_vendor/packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 +pip/_vendor/packaging/_tokenizer.py,sha256=J6v5H7Jzvb-g81xp_2QACKwO7LxHQA6ikryMU7zXwN8,5273 +pip/_vendor/packaging/licenses/__init__.py,sha256=A116-FU49_Dz4162M4y1uAiZN4Rgdc83FxNd8EjlfqI,5727 +pip/_vendor/packaging/licenses/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/packaging/licenses/__pycache__/_spdx.cpython-310.pyc,, +pip/_vendor/packaging/licenses/_spdx.py,sha256=oAm1ztPFwlsmCKe7lAAsv_OIOfS1cWDu9bNBkeu-2ns,48398 +pip/_vendor/packaging/markers.py,sha256=c89TNzB7ZdGYhkovm6PYmqGyHxXlYVaLW591PHUNKD8,10561 +pip/_vendor/packaging/metadata.py,sha256=YJibM7GYe4re8-0a3OlXmGS-XDgTEoO4tlBt2q25Bng,34762 +pip/_vendor/packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/packaging/requirements.py,sha256=gYyRSAdbrIyKDY66ugIDUQjRMvxkH2ALioTmX3tnL6o,2947 +pip/_vendor/packaging/specifiers.py,sha256=hGU6kuCd77bL-msIL6yLCp6MNT75RSMUKZDuju26c8U,40098 +pip/_vendor/packaging/tags.py,sha256=CFqrJzAzc2XNGexerH__T-Y5Iwq7WbsYXsiLERLWxY0,21014 +pip/_vendor/packaging/utils.py,sha256=0F3Hh9OFuRgrhTgGZUl5K22Fv1YP2tZl1z_2gO6kJiA,5050 +pip/_vendor/packaging/version.py,sha256=oiHqzTUv_p12hpjgsLDVcaF5hT7pDaSOViUNMD4GTW0,16688 +pip/_vendor/pkg_resources/__init__.py,sha256=jrhDRbOubP74QuPXxd7U7Po42PH2l-LZ2XfcO7llpZ4,124463 +pip/_vendor/pkg_resources/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/platformdirs/__init__.py,sha256=JueR2cRLkxY7iwik-qNWJCwKOrAlBgVgcZ_IHQzqGLE,22344 
+pip/_vendor/platformdirs/__main__.py,sha256=jBJ8zb7Mpx5ebcqF83xrpO94MaeCpNGHVf9cvDN2JLg,1505 +pip/_vendor/platformdirs/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/__main__.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/android.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/api.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/macos.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/unix.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/version.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/windows.cpython-310.pyc,, +pip/_vendor/platformdirs/android.py,sha256=kV5oL3V3DZ6WZKu9yFiQupv18yp_jlSV2ChH1TmPcds,9007 +pip/_vendor/platformdirs/api.py,sha256=2dfUDNbEXeDhDKarqtR5NY7oUikUZ4RZhs3ozstmhBQ,9246 +pip/_vendor/platformdirs/macos.py,sha256=UlbyFZ8Rzu3xndCqQEHrfsYTeHwYdFap1Ioz-yxveT4,6154 +pip/_vendor/platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/platformdirs/unix.py,sha256=uRPJWRyQEtv7yOSvU94rUmsblo5XKDLA1SzFg55kbK0,10393 +pip/_vendor/platformdirs/version.py,sha256=oH4KgTfK4AklbTYVcV_yynvJ9JLI3pyvDVay0hRsLCs,411 +pip/_vendor/platformdirs/windows.py,sha256=IFpiohUBwxPtCzlyKwNtxyW4Jk8haa6W8o59mfrDXVo,10125 +pip/_vendor/pygments/__init__.py,sha256=7N1oiaWulw_nCsTY4EEixYLz15pWY5u4uPAFFi-ielU,2983 +pip/_vendor/pygments/__main__.py,sha256=isIhBxLg65nLlXukG4VkMuPfNdd7gFzTZ_R_z3Q8diY,353 +pip/_vendor/pygments/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/__main__.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/cmdline.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/console.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/filter.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/formatter.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/lexer.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/modeline.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/plugin.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/regexopt.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/scanner.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/sphinxext.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/style.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/token.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/unistring.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/util.cpython-310.pyc,, +pip/_vendor/pygments/cmdline.py,sha256=LIVzmAunlk9sRJJp54O4KRy9GDIN4Wu13v9p9QzfGPM,23656 +pip/_vendor/pygments/console.py,sha256=yhP9UsLAVmWKVQf2446JJewkA7AiXeeTf4Ieg3Oi2fU,1718 +pip/_vendor/pygments/filter.py,sha256=_ADNPCskD8_GmodHi6_LoVgPU3Zh336aBCT5cOeTMs0,1910 +pip/_vendor/pygments/filters/__init__.py,sha256=RdedK2KWKXlKwR7cvkfr3NUj9YiZQgMgilRMFUg2jPA,40392 +pip/_vendor/pygments/filters/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/formatter.py,sha256=jDWBTndlBH2Z5IYZFVDnP0qn1CaTQjTWt7iAGtCnJEg,4390 +pip/_vendor/pygments/formatters/__init__.py,sha256=8No-NUs8rBTSSBJIv4hSEQt2M0cFB4hwAT0snVc2QGE,5385 +pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/bbcode.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/groff.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/html.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/img.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/irc.cpython-310.pyc,, 
+pip/_vendor/pygments/formatters/__pycache__/latex.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/other.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/pangomarkup.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/rtf.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/svg.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/terminal.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/terminal256.cpython-310.pyc,, +pip/_vendor/pygments/formatters/_mapping.py,sha256=1Cw37FuQlNacnxRKmtlPX4nyLoX9_ttko5ZwscNUZZ4,4176 +pip/_vendor/pygments/formatters/bbcode.py,sha256=3JQLI45tcrQ_kRUMjuab6C7Hb0XUsbVWqqbSn9cMjkI,3320 +pip/_vendor/pygments/formatters/groff.py,sha256=M39k0PaSSZRnxWjqBSVPkF0mu1-Vr7bm6RsFvs-CNN4,5106 +pip/_vendor/pygments/formatters/html.py,sha256=SE2jc3YCqbMS3rZW9EAmDlAUhdVxJ52gA4dileEvCGU,35669 +pip/_vendor/pygments/formatters/img.py,sha256=MwA4xWPLOwh6j7Yc6oHzjuqSPt0M1fh5r-5BTIIUfsU,23287 +pip/_vendor/pygments/formatters/irc.py,sha256=dp1Z0l_ObJ5NFh9MhqLGg5ptG5hgJqedT2Vkutt9v0M,4981 +pip/_vendor/pygments/formatters/latex.py,sha256=XMmhOCqUKDBQtG5mGJNAFYxApqaC5puo5cMmPfK3944,19306 +pip/_vendor/pygments/formatters/other.py,sha256=56PMJOliin-rAUdnRM0i1wsV1GdUPd_dvQq0_UPfF9c,5034 +pip/_vendor/pygments/formatters/pangomarkup.py,sha256=y16U00aVYYEFpeCfGXlYBSMacG425CbfoG8oKbKegIg,2218 +pip/_vendor/pygments/formatters/rtf.py,sha256=ZT90dmcKyJboIB0mArhL7IhE467GXRN0G7QAUgG03To,11957 +pip/_vendor/pygments/formatters/svg.py,sha256=KKsiophPupHuxm0So-MsbQEWOT54IAiSF7hZPmxtKXE,7174 +pip/_vendor/pygments/formatters/terminal.py,sha256=AojNG4MlKq2L6IsC_VnXHu4AbHCBn9Otog6u45XvxeI,4674 +pip/_vendor/pygments/formatters/terminal256.py,sha256=kGkNUVo3FpwjytIDS0if79EuUoroAprcWt3igrcIqT0,11753 +pip/_vendor/pygments/lexer.py,sha256=TYHDt___gNW4axTl2zvPZff-VQi8fPaIh5OKRcVSjUM,35349 +pip/_vendor/pygments/lexers/__init__.py,sha256=pIlxyQJuu_syh9lE080cq8ceVbEVcKp0osAFU5fawJU,12115 +pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-310.pyc,, +pip/_vendor/pygments/lexers/__pycache__/python.cpython-310.pyc,, +pip/_vendor/pygments/lexers/_mapping.py,sha256=61-h3zr103m01OS5BUq_AfUiL9YI06Ves9ipQ7k4vr4,76097 +pip/_vendor/pygments/lexers/python.py,sha256=2J_YJrPTr_A6fJY_qKiKv0GpgPwHMrlMSeo59qN3fe4,53687 +pip/_vendor/pygments/modeline.py,sha256=gtRYZBS-CKOCDXHhGZqApboHBaZwGH8gznN3O6nuxj4,1005 +pip/_vendor/pygments/plugin.py,sha256=ioeJ3QeoJ-UQhZpY9JL7vbxsTVuwwM7BCu-Jb8nN0AU,1891 +pip/_vendor/pygments/regexopt.py,sha256=Hky4EB13rIXEHQUNkwmCrYqtIlnXDehNR3MztafZ43w,3072 +pip/_vendor/pygments/scanner.py,sha256=NDy3ofK_fHRFK4hIDvxpamG871aewqcsIb6sgTi7Fhk,3092 +pip/_vendor/pygments/sphinxext.py,sha256=iOptJBcqOGPwMEJ2p70PvwpZPIGdvdZ8dxvq6kzxDgA,7981 +pip/_vendor/pygments/style.py,sha256=rSCZWFpg1_DwFMXDU0nEVmAcBHpuQGf9RxvOPPQvKLQ,6420 +pip/_vendor/pygments/styles/__init__.py,sha256=qUk6_1z5KmT8EdJFZYgESmG6P_HJF_2vVrDD7HSCGYY,2042 +pip/_vendor/pygments/styles/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/styles/__pycache__/_mapping.cpython-310.pyc,, +pip/_vendor/pygments/styles/_mapping.py,sha256=6lovFUE29tz6EsV3XYY4hgozJ7q1JL7cfO3UOlgnS8w,3312 +pip/_vendor/pygments/token.py,sha256=qZwT7LSPy5YBY3JgDjut642CCy7JdQzAfmqD9NmT5j0,6226 +pip/_vendor/pygments/unistring.py,sha256=p5c1i-HhoIhWemy9CUsaN9o39oomYHNxXll0Xfw6tEA,63208 +pip/_vendor/pygments/util.py,sha256=2tj2nS1X9_OpcuSjf8dOET2bDVZhs8cEKd_uT6-Fgg8,10031 
+pip/_vendor/pyproject_hooks/__init__.py,sha256=cPB_a9LXz5xvsRbX1o2qyAdjLatZJdQ_Lc5McNX-X7Y,691 +pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-310.pyc,, +pip/_vendor/pyproject_hooks/_impl.py,sha256=jY-raxnmyRyB57ruAitrJRUzEexuAhGTpgMygqx67Z4,14936 +pip/_vendor/pyproject_hooks/_in_process/__init__.py,sha256=MJNPpfIxcO-FghxpBbxkG1rFiQf6HOUbV4U5mq0HFns,557 +pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-310.pyc,, +pip/_vendor/pyproject_hooks/_in_process/_in_process.py,sha256=qcXMhmx__MIJq10gGHW3mA4Tl8dy8YzHMccwnNoKlw0,12216 +pip/_vendor/pyproject_hooks/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/requests/__init__.py,sha256=HlB_HzhrzGtfD_aaYUwUh1zWXLZ75_YCLyit75d0Vz8,5057 +pip/_vendor/requests/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/__version__.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/_internal_utils.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/adapters.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/api.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/auth.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/certs.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/compat.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/cookies.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/exceptions.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/help.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/hooks.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/models.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/packages.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/sessions.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/status_codes.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/structures.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/utils.cpython-310.pyc,, +pip/_vendor/requests/__version__.py,sha256=FVfglgZmNQnmYPXpOohDU58F5EUb_-VnSTaAesS187g,435 +pip/_vendor/requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495 +pip/_vendor/requests/adapters.py,sha256=J7VeVxKBvawbtlX2DERVo05J9BXTcWYLMHNd1Baa-bk,27607 +pip/_vendor/requests/api.py,sha256=_Zb9Oa7tzVIizTKwFrPjDEY9ejtm_OnSRERnADxGsQs,6449 +pip/_vendor/requests/auth.py,sha256=kF75tqnLctZ9Mf_hm9TZIj4cQWnN5uxRz8oWsx5wmR0,10186 +pip/_vendor/requests/certs.py,sha256=kHDlkK_beuHXeMPc5jta2wgl8gdKeUWt5f2nTDVrvt8,441 +pip/_vendor/requests/compat.py,sha256=Mo9f9xZpefod8Zm-n9_StJcVTmwSukXR2p3IQyyVXvU,1485 +pip/_vendor/requests/cookies.py,sha256=bNi-iqEj4NPZ00-ob-rHvzkvObzN3lEpgw3g6paS3Xw,18590 +pip/_vendor/requests/exceptions.py,sha256=D1wqzYWne1mS2rU43tP9CeN1G7QAy7eqL9o1god6Ejw,4272 +pip/_vendor/requests/help.py,sha256=hRKaf9u0G7fdwrqMHtF3oG16RKktRf6KiwtSq2Fo1_0,3813 +pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733 +pip/_vendor/requests/models.py,sha256=x4K4CmH-lC0l2Kb-iPfMN4dRXxHEcbOaEWBL_i09AwI,35483 +pip/_vendor/requests/packages.py,sha256=_ZQDCJTJ8SP3kVWunSqBsRZNPzj2c1WFVqbdr08pz3U,1057 +pip/_vendor/requests/sessions.py,sha256=ykTI8UWGSltOfH07HKollH7kTBGw4WhiBVaQGmckTw4,30495 +pip/_vendor/requests/status_codes.py,sha256=iJUAeA25baTdw-6PfD0eF4qhpINDJRJI-yaMqxs4LEI,4322 +pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912 +pip/_vendor/requests/utils.py,sha256=L79vnFbzJ3SFLKtJwpoWe41Tozi3RlZv94pY1TFIyow,33631 
+pip/_vendor/resolvelib/__init__.py,sha256=h509TdEcpb5-44JonaU3ex2TM15GVBLjM9CNCPwnTTs,537 +pip/_vendor/resolvelib/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/resolvelib/__pycache__/providers.cpython-310.pyc,, +pip/_vendor/resolvelib/__pycache__/reporters.cpython-310.pyc,, +pip/_vendor/resolvelib/__pycache__/resolvers.cpython-310.pyc,, +pip/_vendor/resolvelib/__pycache__/structs.cpython-310.pyc,, +pip/_vendor/resolvelib/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/resolvelib/compat/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/resolvelib/compat/__pycache__/collections_abc.cpython-310.pyc,, +pip/_vendor/resolvelib/compat/collections_abc.py,sha256=uy8xUZ-NDEw916tugUXm8HgwCGiMO0f-RcdnpkfXfOs,156 +pip/_vendor/resolvelib/providers.py,sha256=fuuvVrCetu5gsxPB43ERyjfO8aReS3rFQHpDgiItbs4,5871 +pip/_vendor/resolvelib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/resolvelib/reporters.py,sha256=TSbRmWzTc26w0ggsV1bxVpeWDB8QNIre6twYl7GIZBE,1601 +pip/_vendor/resolvelib/resolvers.py,sha256=G8rsLZSq64g5VmIq-lB7UcIJ1gjAxIQJmTF4REZleQ0,20511 +pip/_vendor/resolvelib/structs.py,sha256=0_1_XO8z_CLhegP3Vpf9VJ3zJcfLm0NOHRM-i0Ykz3o,4963 +pip/_vendor/rich/__init__.py,sha256=dRxjIL-SbFVY0q3IjSMrfgBTHrm1LZDgLOygVBwiYZc,6090 +pip/_vendor/rich/__main__.py,sha256=eO7Cq8JnrgG8zVoeImiAs92q3hXNMIfp0w5lMsO7Q2Y,8477 +pip/_vendor/rich/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/__main__.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_cell_widths.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_emoji_codes.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_emoji_replace.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_export_format.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_extension.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_fileno.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_inspect.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_log_render.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_loop.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_null_file.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_palettes.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_pick.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_ratio.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_spinners.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_stack.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_timer.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_win32_console.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_windows.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_windows_renderer.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_wrap.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/abc.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/align.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/ansi.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/bar.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/box.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/cells.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/color.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/color_triplet.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/columns.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/console.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/constrain.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/containers.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/control.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/default_styles.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/diagnose.cpython-310.pyc,, 
+pip/_vendor/rich/__pycache__/emoji.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/errors.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/file_proxy.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/filesize.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/highlighter.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/json.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/jupyter.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/layout.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/live.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/live_render.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/logging.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/markup.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/measure.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/padding.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/pager.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/palette.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/panel.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/pretty.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/progress.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/progress_bar.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/prompt.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/protocol.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/region.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/repr.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/rule.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/scope.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/screen.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/segment.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/spinner.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/status.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/style.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/styled.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/syntax.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/table.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/terminal_theme.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/text.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/theme.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/themes.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/traceback.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/tree.cpython-310.pyc,, +pip/_vendor/rich/_cell_widths.py,sha256=fbmeyetEdHjzE_Vx2l1uK7tnPOhMs2X1lJfO3vsKDpA,10209 +pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235 +pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064 +pip/_vendor/rich/_export_format.py,sha256=RI08pSrm5tBSzPMvnbTqbD9WIalaOoN5d4M1RTmLq1Y,2128 +pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265 +pip/_vendor/rich/_fileno.py,sha256=HWZxP5C2ajMbHryvAQZseflVfQoGzsKOHzKGsLD8ynQ,799 +pip/_vendor/rich/_inspect.py,sha256=QM05lEFnFoTaFqpnbx-zBEI6k8oIKrD3cvjEOQNhKig,9655 +pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225 +pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236 +pip/_vendor/rich/_null_file.py,sha256=ADGKp1yt-k70FMKV6tnqCqecB-rSJzp-WQsD7LPL-kg,1394 +pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063 +pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423 +pip/_vendor/rich/_ratio.py,sha256=Zt58apszI6hAAcXPpgdWKpu3c31UBWebOeR4mbyptvU,5471 +pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919 +pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351 
+pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417 +pip/_vendor/rich/_win32_console.py,sha256=BSaDRIMwBLITn_m0mTRLPqME5q-quGdSMuYMpYeYJwc,22755 +pip/_vendor/rich/_windows.py,sha256=aBwaD_S56SbgopIvayVmpk0Y28uwY2C5Bab1wl3Bp-I,1925 +pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783 +pip/_vendor/rich/_wrap.py,sha256=FlSsom5EX0LVkA3KWy34yHnCfLtqX-ZIepXKh-70rpc,3404 +pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890 +pip/_vendor/rich/align.py,sha256=Rh-3adnDaN1Ao07EjR2PhgE62PGLPgO8SMwJBku1urQ,10469 +pip/_vendor/rich/ansi.py,sha256=Avs1LHbSdcyOvDOdpELZUoULcBiYewY76eNBp6uFBhs,6921 +pip/_vendor/rich/bar.py,sha256=ldbVHOzKJOnflVNuv1xS7g6dLX2E3wMnXkdPbpzJTcs,3263 +pip/_vendor/rich/box.py,sha256=nr5fYIUghB_iUCEq6y0Z3LlCT8gFPDrzN9u2kn7tJl4,10831 +pip/_vendor/rich/cells.py,sha256=KrQkj5-LghCCpJLSNQIyAZjndc4bnEqOEmi5YuZ9UCY,5130 +pip/_vendor/rich/color.py,sha256=3HSULVDj7qQkXUdFWv78JOiSZzfy5y1nkcYhna296V0,18211 +pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054 +pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131 +pip/_vendor/rich/console.py,sha256=nKjrEx_7xy8KGmDVT-BgNII0R5hm1cexhAHDwdwNVqg,100156 +pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288 +pip/_vendor/rich/containers.py,sha256=c_56TxcedGYqDepHBMTuZdUIijitAQgnox-Qde0Z1qo,5502 +pip/_vendor/rich/control.py,sha256=DSkHTUQLorfSERAKE_oTAEUFefZnZp4bQb4q8rHbKws,6630 +pip/_vendor/rich/default_styles.py,sha256=dZxgaSD9VUy7SXQShO33aLYiAWspCr2sCQZFX_JK1j4,8159 +pip/_vendor/rich/diagnose.py,sha256=an6uouwhKPAlvQhYpNNpGq9EJysfMIOvvCbO3oSoR24,972 +pip/_vendor/rich/emoji.py,sha256=omTF9asaAnsM4yLY94eR_9dgRRSm1lHUszX20D1yYCQ,2501 +pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642 +pip/_vendor/rich/file_proxy.py,sha256=Tl9THMDZ-Pk5Wm8sI1gGg_U5DhusmxD-FZ0fUbcU0W0,1683 +pip/_vendor/rich/filesize.py,sha256=_iz9lIpRgvW7MNSeCZnLg-HwzbP4GETg543WqD8SFs0,2484 +pip/_vendor/rich/highlighter.py,sha256=G_sn-8DKjM1sEjLG_oc4ovkWmiUpWvj8bXi0yed2LnY,9586 +pip/_vendor/rich/json.py,sha256=vVEoKdawoJRjAFayPwXkMBPLy7RSTs-f44wSQDR2nJ0,5031 +pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252 +pip/_vendor/rich/layout.py,sha256=ajkSFAtEVv9EFTcFs-w4uZfft7nEXhNzL7ZVdgrT5rI,14004 +pip/_vendor/rich/live.py,sha256=DhzAPEnjTxQuq9_0Y2xh2MUwQcP_aGPkenLfKETslwM,14270 +pip/_vendor/rich/live_render.py,sha256=zJtB471jGziBtEwxc54x12wEQtH4BuQr1SA8v9kU82w,3666 +pip/_vendor/rich/logging.py,sha256=ZgpKMMBY_BuMAI_BYzo-UtXak6t5oH9VK8m9Q2Lm0f4,12458 +pip/_vendor/rich/markup.py,sha256=3euGKP5s41NCQwaSjTnJxus5iZMHjxpIM0W6fCxra38,8451 +pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305 +pip/_vendor/rich/padding.py,sha256=KVEI3tOwo9sgK1YNSuH__M1_jUWmLZwRVV_KmOtVzyM,4908 +pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828 +pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396 +pip/_vendor/rich/panel.py,sha256=fFRHcviXvWhk3V3zx5Zwmsb_RL9KJ3esD-sU0NYEVyw,11235 +pip/_vendor/rich/pretty.py,sha256=gy3S72u4FRg2ytoo7N1ZDWDIvB4unbzd5iUGdgm-8fc,36391 +pip/_vendor/rich/progress.py,sha256=MtmCjTk5zYU_XtRHxRHTAEHG6hF9PeF7EMWbEPleIC0,60357 +pip/_vendor/rich/progress_bar.py,sha256=mZTPpJUwcfcdgQCTTz3kyY-fc79ddLwtx6Ghhxfo064,8162 +pip/_vendor/rich/prompt.py,sha256=l0RhQU-0UVTV9e08xW1BbIj0Jq2IXyChX4lC0lFNzt4,12447 
+pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391 +pip/_vendor/rich/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166 +pip/_vendor/rich/repr.py,sha256=5MZJZmONgC6kud-QW-_m1okXwL2aR6u6y-pUcUCJz28,4431 +pip/_vendor/rich/rule.py,sha256=0fNaS_aERa3UMRc3T5WMpN_sumtDxfaor2y3of1ftBk,4602 +pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843 +pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591 +pip/_vendor/rich/segment.py,sha256=otnKeKGEV-WRlQVosfJVeFDcDxAKHpvJ_hLzSu5lumM,24743 +pip/_vendor/rich/spinner.py,sha256=PT5qgXPG3ZpqRj7n3EZQ6NW56mx3ldZqZCU7gEMyZk4,4364 +pip/_vendor/rich/status.py,sha256=kkPph3YeAZBo-X-4wPp8gTqZyU466NLwZBA4PZTTewo,4424 +pip/_vendor/rich/style.py,sha256=aSoUNbVgfP1PAnduAqgbbl4AMQy668qs2S1FEwr3Oqs,27067 +pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258 +pip/_vendor/rich/syntax.py,sha256=qqAnEUZ4K57Po81_5RBxnsuU4KRzSdvDPAhKw8ma_3E,35763 +pip/_vendor/rich/table.py,sha256=yXYUr0YsPpG466N50HCAw2bpb5ZUuuzdc-G66Zk-oTc,40103 +pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370 +pip/_vendor/rich/text.py,sha256=AO7JPCz6-gaN1thVLXMBntEmDPVYFgFNG1oM61_sanU,47552 +pip/_vendor/rich/theme.py,sha256=oNyhXhGagtDlbDye3tVu3esWOWk0vNkuxFw-_unlaK0,3771 +pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102 +pip/_vendor/rich/traceback.py,sha256=z8UoN7NbTQKW6YDDUVwOh7F8snZf6gYnUWtOrKsLE1w,31797 +pip/_vendor/rich/tree.py,sha256=yWnQ6rAvRGJ3qZGqBrxS2SW2TKBTNrP0SdY8QxOFPuw,9451 +pip/_vendor/tomli/__init__.py,sha256=PhNw_eyLgdn7McJ6nrAN8yIm3dXC75vr1sVGVVwDSpA,314 +pip/_vendor/tomli/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/tomli/__pycache__/_parser.cpython-310.pyc,, +pip/_vendor/tomli/__pycache__/_re.cpython-310.pyc,, +pip/_vendor/tomli/__pycache__/_types.cpython-310.pyc,, +pip/_vendor/tomli/_parser.py,sha256=9w8LG0jB7fwmZZWB0vVXbeejDHcl4ANIJxB2scEnDlA,25591 +pip/_vendor/tomli/_re.py,sha256=sh4sBDRgO94KJZwNIrgdcyV_qQast50YvzOAUGpRDKA,3171 +pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254 +pip/_vendor/tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +pip/_vendor/truststore/__init__.py,sha256=WIDeyzWm7EVX44g354M25vpRXbeY1lsPH6EmUJUcq4o,1264 +pip/_vendor/truststore/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/truststore/__pycache__/_api.cpython-310.pyc,, +pip/_vendor/truststore/__pycache__/_macos.cpython-310.pyc,, +pip/_vendor/truststore/__pycache__/_openssl.cpython-310.pyc,, +pip/_vendor/truststore/__pycache__/_ssl_constants.cpython-310.pyc,, +pip/_vendor/truststore/__pycache__/_windows.cpython-310.pyc,, +pip/_vendor/truststore/_api.py,sha256=GeXRNTlxPZ3kif4kNoh6JY0oE4QRzTGcgXr6l_X_Gk0,10555 +pip/_vendor/truststore/_macos.py,sha256=nZlLkOmszUE0g6ryRwBVGY5COzPyudcsiJtDWarM5LQ,20503 +pip/_vendor/truststore/_openssl.py,sha256=LLUZ7ZGaio-i5dpKKjKCSeSufmn6T8pi9lDcFnvSyq0,2324 +pip/_vendor/truststore/_ssl_constants.py,sha256=NUD4fVKdSD02ri7-db0tnO0VqLP9aHuzmStcW7tAl08,1130 +pip/_vendor/truststore/_windows.py,sha256=rAHyKYD8M7t-bXfG8VgOVa3TpfhVhbt4rZQlO45YuP8,17993 +pip/_vendor/truststore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/typing_extensions.py,sha256=78hFl0HpDY-ylHUVCnWdU5nTHxUP2-S-3wEZk6CQmLk,134499 +pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333 
+pip/_vendor/urllib3/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/_collections.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/_version.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/connection.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/connectionpool.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/exceptions.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/fields.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/filepost.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/poolmanager.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/request.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/response.cpython-310.pyc,, +pip/_vendor/urllib3/_collections.py,sha256=pyASJJhW7wdOpqJj9QJA8FyGRfr8E8uUUhqUvhF0728,11372 +pip/_vendor/urllib3/_version.py,sha256=t9wGB6ooOTXXgiY66K1m6BZS1CJyXHAU8EoWDTe6Shk,64 +pip/_vendor/urllib3/connection.py,sha256=ttIA909BrbTUzwkqEe_TzZVh4JOOj7g61Ysei2mrwGg,20314 +pip/_vendor/urllib3/connectionpool.py,sha256=e2eiAwNbFNCKxj4bwDKNK-w7HIdSz3OmMxU_TIt-evQ,40408 +pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957 +pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632 +pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922 +pip/_vendor/urllib3/contrib/appengine.py,sha256=VR68eAVE137lxTgjBDwCna5UiBZTOKa01Aj_-5BaCz4,11036 +pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528 +pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=hDJh4MhyY_p-oKlFcYcQaVQRDv6GMmBGuW9yjxyeejM,17081 +pip/_vendor/urllib3/contrib/securetransport.py,sha256=Fef1IIUUFHqpevzXiDPbIGkDKchY2FVKeVeLGR1Qq3g,34446 +pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097 +pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217 +pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579 +pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440 +pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/packages/__pycache__/six.cpython-310.pyc,, +pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-310.pyc,, 
+pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-310.pyc,, +pip/_vendor/urllib3/packages/backports/__pycache__/weakref_finalize.cpython-310.pyc,, +pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417 +pip/_vendor/urllib3/packages/backports/weakref_finalize.py,sha256=tRCal5OAhNSRyb0DhHp-38AtIlCsRP8BxF3NX-6rqIA,5343 +pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665 +pip/_vendor/urllib3/poolmanager.py,sha256=aWyhXRtNO4JUnCSVVqKTKQd8EXTvUm1VN9pgs2bcONo,19990 +pip/_vendor/urllib3/request.py,sha256=YTWFNr7QIwh7E1W9dde9LM77v2VWTJ5V78XuTTw7D1A,6691 +pip/_vendor/urllib3/response.py,sha256=fmDJAFkG71uFTn-sVSTh2Iw0WmcXQYqkbRjihvwBjU8,30641 +pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155 +pip/_vendor/urllib3/util/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/connection.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/proxy.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/queue.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/request.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/response.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/retry.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/timeout.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/url.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/wait.cpython-310.pyc,, +pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901 +pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605 +pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498 +pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997 +pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510 +pip/_vendor/urllib3/util/retry.py,sha256=6ENvOZ8PBDzh8kgixpql9lIrb2dxH-k7ZmBanJF2Ng4,22050 +pip/_vendor/urllib3/util/ssl_.py,sha256=QDuuTxPSCj1rYtZ4xpD7Ux-r20TD50aHyqKyhQ7Bq4A,17460 +pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758 +pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895 +pip/_vendor/urllib3/util/timeout.py,sha256=cwq4dMk87mJHSBktK1miYJ-85G-3T3RmT20v7SFCpno,10168 +pip/_vendor/urllib3/util/url.py,sha256=lCAE7M5myA8EDdW0sJuyyZhVB9K_j38ljWhHAnFaWoE,14296 +pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403 +pip/_vendor/vendor.txt,sha256=EW-E3cE5XEAtVFzGInikArOMDxGP0DLUWzXpY4RZfFY,333 +pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286 diff --git a/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/REQUESTED b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/WHEEL b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..505164bc02d63fe6b0b3299f849a77c5f1beeb41 --- /dev/null +++ 
b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.8.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/entry_points.txt b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..25fcf7e2cdf0943ad5c074d4777c721cd3340986 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +pip = pip._internal.cli.main:main +pip3 = pip._internal.cli.main:main diff --git a/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/top_level.txt b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/pip-25.0.1.dist-info/top_level.txt @@ -0,0 +1 @@ +pip diff --git a/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/INSTALLER b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/LICENSE.txt b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..12da46eb7c140efb1a684d015a4823a8f620b639 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/LICENSE.txt @@ -0,0 +1,26 @@ +Copyright (c) 2009-2022, Barthelemy Dagenais and individual contributors. All +rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +- The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/METADATA b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..44198aabca9ac7e94b88c6ef27389fcb2013f354 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/METADATA @@ -0,0 +1,25 @@ +Metadata-Version: 2.1 +Name: py4j +Version: 0.10.9.8 +Summary: Enables Python programs to dynamically access arbitrary Java objects +Home-page: https://www.py4j.org/ +Author: Barthelemy Dagenais +Author-email: barthelemy@infobart.com +License: BSD License +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Java +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Object Brokering +License-File: LICENSE.txt + +Py4J enables Python programs running in a Python interpreter to dynamically access Java objects in a Java Virtual Machine. Methods are called as if the Java objects resided in the Python interpreter and Java collections can be accessed through standard Python collection methods. Py4J also enables Java programs to call back Python objects. diff --git a/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/RECORD b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..1b903dfa0c74170a4a6c559141513c0c21a8f9c9 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/RECORD @@ -0,0 +1,74 @@ +../../../share/py4j/py4j0.10.9.8.jar,sha256=-JmT9ipxLJ2fVuBeH8E16OCsy26oUrw82wapxhZ2OUg,123954 +py4j-0.10.9.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +py4j-0.10.9.8.dist-info/LICENSE.txt,sha256=_rhU9vXqN6n-Xy-Ej0bu_g_SU_axboZx1XtC9e4fko4,1481 +py4j-0.10.9.8.dist-info/METADATA,sha256=N-oOEvRVgc5hAMtT__taQhMJ0vuqK2fXxSd-coNo8IA,1304 +py4j-0.10.9.8.dist-info/RECORD,, +py4j-0.10.9.8.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +py4j-0.10.9.8.dist-info/WHEEL,sha256=iYlv5fX357PQyRT2o6tw1bN-YcKFFHKqB_LwHO5wP-g,110 +py4j-0.10.9.8.dist-info/top_level.txt,sha256=Oift0DgHMEUO4lOv-LHdgkQ8pD4K-oecUngKB4wx78I,5 +py4j/__init__.py,sha256=MQ4CO-3lN3wGiyJX3zJcykIYqiBbYEzPrLmGDycXLu8,111 +py4j/__pycache__/__init__.cpython-310.pyc,, +py4j/__pycache__/clientserver.cpython-310.pyc,, +py4j/__pycache__/compat.cpython-310.pyc,, +py4j/__pycache__/finalizer.cpython-310.pyc,, +py4j/__pycache__/java_collections.cpython-310.pyc,, +py4j/__pycache__/java_gateway.cpython-310.pyc,, +py4j/__pycache__/protocol.cpython-310.pyc,, +py4j/__pycache__/signals.cpython-310.pyc,, +py4j/__pycache__/version.cpython-310.pyc,, +py4j/clientserver.py,sha256=GXeGW_HroVZXh5zaqTbFWMCoYQfJpgsEggnKpScI_dc,29765 +py4j/compat.py,sha256=21RecutHWClfyWmExGzhEpZIQzUO5LIGUj2d5pe86ak,2329 +py4j/finalizer.py,sha256=8lrKGBAXTq6fjA02zGZn8uOv4SBQwWyUrA4zDlfsOEw,4306 
+py4j/java_collections.py,sha256=AjzFAN9pJw7xSzxjY1AFaXcRfaZdpJATl8gdBAgQuD8,18289 +py4j/java_gateway.py,sha256=FJh-RCoqrXzmIDlwZ2pbVXsaUA7lUjMbU7MSkRqaiC4,99345 +py4j/protocol.py,sha256=UTJkjpzaM4fhGKyJ3RdoV7JNAnqN2UqnVyyhmHVaMUg,14805 +py4j/signals.py,sha256=ItjZOB0ayIouEAcnXR5B70jSEUn64BlYJwey0054cy0,4361 +py4j/tests/__init__.py,sha256=dKCGUUzNSP9gI6noIpdf1NzXVJ7CFeRlmTwmtCbV6E4,20 +py4j/tests/__pycache__/__init__.cpython-310.pyc,, +py4j/tests/__pycache__/benchmark1.cpython-310.pyc,, +py4j/tests/__pycache__/byte_string_test.cpython-310.pyc,, +py4j/tests/__pycache__/client_server_test.cpython-310.pyc,, +py4j/tests/__pycache__/finalizer_test.cpython-310.pyc,, +py4j/tests/__pycache__/instrumented.cpython-310.pyc,, +py4j/tests/__pycache__/java_array_test.cpython-310.pyc,, +py4j/tests/__pycache__/java_callback_test.cpython-310.pyc,, +py4j/tests/__pycache__/java_dir_test.cpython-310.pyc,, +py4j/tests/__pycache__/java_gateway_test.cpython-310.pyc,, +py4j/tests/__pycache__/java_help_test.cpython-310.pyc,, +py4j/tests/__pycache__/java_list_test.cpython-310.pyc,, +py4j/tests/__pycache__/java_map_test.cpython-310.pyc,, +py4j/tests/__pycache__/java_set_test.cpython-310.pyc,, +py4j/tests/__pycache__/java_tls_test.cpython-310.pyc,, +py4j/tests/__pycache__/memory_leak_test.cpython-310.pyc,, +py4j/tests/__pycache__/multithreadtest.cpython-310.pyc,, +py4j/tests/__pycache__/py4j_callback_example.cpython-310.pyc,, +py4j/tests/__pycache__/py4j_callback_example2.cpython-310.pyc,, +py4j/tests/__pycache__/py4j_callback_listener_example.cpython-310.pyc,, +py4j/tests/__pycache__/py4j_callback_recursive_example.cpython-310.pyc,, +py4j/tests/__pycache__/py4j_example.cpython-310.pyc,, +py4j/tests/__pycache__/py4j_signals_test.cpython-310.pyc,, +py4j/tests/__pycache__/signals_test.cpython-310.pyc,, +py4j/tests/benchmark1.py,sha256=3eKkSaqT02BQDEVza66OK84oV1vA98dnevqoGclJ2ZM,3946 +py4j/tests/byte_string_test.py,sha256=vfWHU-gqfwlZmREXKiUyZ6hGX3moXxQZBdPfX-vHk0s,1109 +py4j/tests/client_server_test.py,sha256=lblZq7QnNw_2G2g-_O0EavYCmoUAXRunwXXcq21XpjY,28344 +py4j/tests/finalizer_test.py,sha256=cGpcoRoe3k8IztW1X6UC81X0of7AJXO-QTfcHlxZDv4,4440 +py4j/tests/instrumented.py,sha256=FQXm_fS0Y-B-9ePV7skoSe-W_pyzOMvNEomCha9ZbCY,5570 +py4j/tests/java_array_test.py,sha256=DH8WpYZWZttJwHb8KPqSyVEznfiSbojIzY38elkI1S0,2785 +py4j/tests/java_callback_test.py,sha256=CNbKRKceeXkwbTSWIlAkK0FJKRzeFnPkorXfyBwN5rQ,17003 +py4j/tests/java_dir_test.py,sha256=Z2MlRoxb0Hf-mgau1A6rcB2T2pv3KFL6WHWZOhWS3o4,4659 +py4j/tests/java_gateway_test.py,sha256=SUFPN0v1NXS4_ZxARdChFwLgOFTkz9QQMSnMBr6RXkI,48355 +py4j/tests/java_help_test.py,sha256=QAoPPGT_XPjW3EsCk7ZcKrLhN1d4eZQD1KQzqVa2ROs,3182 +py4j/tests/java_list_test.py,sha256=MzLcS_OLjJsco9dKjWWQWJyXhSGiZ_EH4MG1NKnOuyk,10462 +py4j/tests/java_map_test.py,sha256=jy5wXM3pWuMuuwb2CvqOY4qyoo3ws5IUCiWV1gQG5mQ,2329 +py4j/tests/java_set_test.py,sha256=fZVXbWY5nG1SvFko7duSrpB9tISjcTTv7ZHPTbgrYDo,3056 +py4j/tests/java_tls_test.py,sha256=FSMjEtqGe4RTXqcg-yZkqvh7sPqCWMIoJ1a2xVk1-74,2847 +py4j/tests/memory_leak_test.py,sha256=QizfIGievrQJ4RA-Pb3O5po7gnrIDRY-sTu9evC0qoM,31233 +py4j/tests/multithreadtest.py,sha256=vgZxSz-TsVqTGhaUGTONneHE3Rkidjn9UyYXg8sqlt8,2686 +py4j/tests/py4j_callback_example.py,sha256=dJWLeLt7XEd8XjssKxAcHHNOBDf2lA--NXZkWP1pCgM,681 +py4j/tests/py4j_callback_example2.py,sha256=H8jtAICazAN9bfW2cngVsqrV4FmbJbPvEv5d1z85COE,400 +py4j/tests/py4j_callback_listener_example.py,sha256=IdHInp9uBlkY4KdY0QEGB4gQSAKgiEzt0t-EEAXhrUI,689 
+py4j/tests/py4j_callback_recursive_example.py,sha256=TiVzb1o_P1ICbsxpLacP3M1Ta47KqG_nS06KH-wfV1w,2024 +py4j/tests/py4j_example.py,sha256=nj7GxM4YJbrASVzMXdnWYvLsr42WZ0rfgkaL-h_DAn4,325 +py4j/tests/py4j_signals_test.py,sha256=_xD2_ZN-xIM78hN0I3PXjm1v3Cq7QKnpUC_uM9tAklo,6428 +py4j/tests/signals_test.py,sha256=9jsztB4Mw_kM28OM7OWlXM46WtXYqT87RzA9v_ionVY,3192 +py4j/version.py,sha256=rNMXVteSkb40YhTWJwwQgn-zqVv0qysyDg0QVDBMq4g,25 diff --git a/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/REQUESTED b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/WHEEL b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..c34f1162ef9a50c355df1261ef6194ffc1b39975 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/top_level.txt b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..4464d310fcc7d66be40469d67d0e6ec943dbb92d --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/py4j-0.10.9.8.dist-info/top_level.txt @@ -0,0 +1 @@ +py4j diff --git a/infer_4_33_0/lib/python3.10/site-packages/regex/__init__.py b/infer_4_33_0/lib/python3.10/site-packages/regex/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb06564ab033a2b0b501f7f41efb169dacd1f801 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/regex/__init__.py @@ -0,0 +1,3 @@ +from .regex import * +from . import regex +__all__ = regex.__all__ diff --git a/infer_4_33_0/lib/python3.10/site-packages/regex/__pycache__/__init__.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/regex/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69529085736958ebfef667c2b85b1301046125b2 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/regex/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/regex/__pycache__/regex.cpython-310.pyc b/infer_4_33_0/lib/python3.10/site-packages/regex/__pycache__/regex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eae931464043da9b66b0614f2d3f8d19db890cd0 Binary files /dev/null and b/infer_4_33_0/lib/python3.10/site-packages/regex/__pycache__/regex.cpython-310.pyc differ diff --git a/infer_4_33_0/lib/python3.10/site-packages/regex/_regex_core.py b/infer_4_33_0/lib/python3.10/site-packages/regex/_regex_core.py new file mode 100644 index 0000000000000000000000000000000000000000..b2ffeaea1f73ce386a7c23d29d34d97a6378fee6 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/regex/_regex_core.py @@ -0,0 +1,4495 @@ +# +# Secret Labs' Regular Expression Engine core module +# +# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. +# +# This version of the SRE library can be redistributed under CNRI's +# Python 1.6 license. For any other use, please contact Secret Labs +# AB (info@pythonware.com). 
+# +# Portions of this engine have been developed in cooperation with +# CNRI. Hewlett-Packard provided funding for 1.6 integration and +# other compatibility work. +# +# 2010-01-16 mrab Python front-end re-written and extended + +import enum +import string +import unicodedata +from collections import defaultdict + +import regex._regex as _regex + +__all__ = ["A", "ASCII", "B", "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH", + "F", "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "P", + "POSIX", "R", "REVERSE", "S", "DOTALL", "T", "TEMPLATE", "U", "UNICODE", + "V0", "VERSION0", "V1", "VERSION1", "W", "WORD", "X", "VERBOSE", "error", + "Scanner", "RegexFlag"] + +# The regex exception. +class error(Exception): + """Exception raised for invalid regular expressions. + + Attributes: + + msg: The unformatted error message + pattern: The regular expression pattern + pos: The position in the pattern where compilation failed, or None + lineno: The line number where compilation failed, unless pos is None + colno: The column number where compilation failed, unless pos is None + """ + + def __init__(self, message, pattern=None, pos=None): + newline = '\n' if isinstance(pattern, str) else b'\n' + self.msg = message + self.pattern = pattern + self.pos = pos + if pattern is not None and pos is not None: + self.lineno = pattern.count(newline, 0, pos) + 1 + self.colno = pos - pattern.rfind(newline, 0, pos) + + message = "{} at position {}".format(message, pos) + + if newline in pattern: + message += " (line {}, column {})".format(self.lineno, + self.colno) + + Exception.__init__(self, message) + +# The exception for when a positional flag has been turned on in the old +# behaviour. +class _UnscopedFlagSet(Exception): + pass + +# The exception for when parsing fails and we want to try something else. +class ParseError(Exception): + pass + +# The exception for when there isn't a valid first set. +class _FirstSetError(Exception): + pass + +# Flags. +class RegexFlag(enum.IntFlag): + A = ASCII = 0x80 # Assume ASCII locale. + B = BESTMATCH = 0x1000 # Best fuzzy match. + D = DEBUG = 0x200 # Print parsed pattern. + E = ENHANCEMATCH = 0x8000 # Attempt to improve the fit after finding the first + # fuzzy match. + F = FULLCASE = 0x4000 # Unicode full case-folding. + I = IGNORECASE = 0x2 # Ignore case. + L = LOCALE = 0x4 # Assume current 8-bit locale. + M = MULTILINE = 0x8 # Make anchors look for newline. + P = POSIX = 0x10000 # POSIX-style matching (leftmost longest). + R = REVERSE = 0x400 # Search backwards. + S = DOTALL = 0x10 # Make dot match newline. + U = UNICODE = 0x20 # Assume Unicode locale. + V0 = VERSION0 = 0x2000 # Old legacy behaviour. + V1 = VERSION1 = 0x100 # New enhanced behaviour. + W = WORD = 0x800 # Default Unicode word breaks. + X = VERBOSE = 0x40 # Ignore whitespace and comments. + T = TEMPLATE = 0x1 # Template (present because re module has it). 
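+
+    # Illustrative note (editorial comment, not executable behaviour): these
+    # members combine by bitwise OR, so the inline flags "(?im)" map to
+    # IGNORECASE | MULTILINE (0x2 | 0x8 == 0xA); the GLOBAL_FLAGS and
+    # SCOPED_FLAGS masks defined below partition these values into
+    # pattern-wide and scope-local flags.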
+ + def __repr__(self): + if self._name_ is not None: + return 'regex.%s' % self._name_ + + value = self._value_ + members = [] + negative = value < 0 + + if negative: + value = ~value + + for m in self.__class__: + if value & m._value_: + value &= ~m._value_ + members.append('regex.%s' % m._name_) + + if value: + members.append(hex(value)) + + res = '|'.join(members) + + if negative: + if len(members) > 1: + res = '~(%s)' % res + else: + res = '~%s' % res + + return res + + __str__ = object.__str__ + +globals().update(RegexFlag.__members__) + +DEFAULT_VERSION = VERSION1 + +_ALL_VERSIONS = VERSION0 | VERSION1 +_ALL_ENCODINGS = ASCII | LOCALE | UNICODE + +# The default flags for the various versions. +DEFAULT_FLAGS = {VERSION0: 0, VERSION1: FULLCASE} + +# The mask for the flags. +GLOBAL_FLAGS = (_ALL_VERSIONS | BESTMATCH | DEBUG | ENHANCEMATCH | POSIX | + REVERSE) +SCOPED_FLAGS = (FULLCASE | IGNORECASE | MULTILINE | DOTALL | WORD | VERBOSE | + _ALL_ENCODINGS) + +ALPHA = frozenset(string.ascii_letters) +DIGITS = frozenset(string.digits) +ALNUM = ALPHA | DIGITS +OCT_DIGITS = frozenset(string.octdigits) +HEX_DIGITS = frozenset(string.hexdigits) +SPECIAL_CHARS = frozenset("()|?*+{^$.[\\#") | frozenset([""]) +NAMED_CHAR_PART = ALNUM | frozenset(" -") +PROPERTY_NAME_PART = ALNUM | frozenset(" &_-.") +SET_OPS = ("||", "~~", "&&", "--") + +# The width of the code words inside the regex engine. +BYTES_PER_CODE = _regex.get_code_size() +BITS_PER_CODE = BYTES_PER_CODE * 8 + +# The repeat count which represents infinity. +UNLIMITED = (1 << BITS_PER_CODE) - 1 + +# The regular expression flags. +REGEX_FLAGS = {"a": ASCII, "b": BESTMATCH, "e": ENHANCEMATCH, "f": FULLCASE, + "i": IGNORECASE, "L": LOCALE, "m": MULTILINE, "p": POSIX, "r": REVERSE, + "s": DOTALL, "u": UNICODE, "V0": VERSION0, "V1": VERSION1, "w": WORD, "x": + VERBOSE} + +# The case flags. +CASE_FLAGS = FULLCASE | IGNORECASE +NOCASE = 0 +FULLIGNORECASE = FULLCASE | IGNORECASE + +FULL_CASE_FOLDING = UNICODE | FULLIGNORECASE + +CASE_FLAGS_COMBINATIONS = {0: 0, FULLCASE: 0, IGNORECASE: IGNORECASE, + FULLIGNORECASE: FULLIGNORECASE} + +# The number of digits in hexadecimal escapes. +HEX_ESCAPES = {"x": 2, "u": 4, "U": 8} + +# The names of the opcodes. +OPCODES = """ +FAILURE +SUCCESS +ANY +ANY_ALL +ANY_ALL_REV +ANY_REV +ANY_U +ANY_U_REV +ATOMIC +BOUNDARY +BRANCH +CALL_REF +CHARACTER +CHARACTER_IGN +CHARACTER_IGN_REV +CHARACTER_REV +CONDITIONAL +DEFAULT_BOUNDARY +DEFAULT_END_OF_WORD +DEFAULT_START_OF_WORD +END +END_OF_LINE +END_OF_LINE_U +END_OF_STRING +END_OF_STRING_LINE +END_OF_STRING_LINE_U +END_OF_WORD +FUZZY +GRAPHEME_BOUNDARY +GREEDY_REPEAT +GROUP +GROUP_CALL +GROUP_EXISTS +KEEP +LAZY_REPEAT +LOOKAROUND +NEXT +PROPERTY +PROPERTY_IGN +PROPERTY_IGN_REV +PROPERTY_REV +PRUNE +RANGE +RANGE_IGN +RANGE_IGN_REV +RANGE_REV +REF_GROUP +REF_GROUP_FLD +REF_GROUP_FLD_REV +REF_GROUP_IGN +REF_GROUP_IGN_REV +REF_GROUP_REV +SEARCH_ANCHOR +SET_DIFF +SET_DIFF_IGN +SET_DIFF_IGN_REV +SET_DIFF_REV +SET_INTER +SET_INTER_IGN +SET_INTER_IGN_REV +SET_INTER_REV +SET_SYM_DIFF +SET_SYM_DIFF_IGN +SET_SYM_DIFF_IGN_REV +SET_SYM_DIFF_REV +SET_UNION +SET_UNION_IGN +SET_UNION_IGN_REV +SET_UNION_REV +SKIP +START_OF_LINE +START_OF_LINE_U +START_OF_STRING +START_OF_WORD +STRING +STRING_FLD +STRING_FLD_REV +STRING_IGN +STRING_IGN_REV +STRING_REV +FUZZY_EXT +""" + +# Define the opcodes in a namespace. 
+class Namespace: + pass + +OP = Namespace() +for i, op in enumerate(OPCODES.split()): + setattr(OP, op, i) + +def _shrink_cache(cache_dict, args_dict, locale_sensitive, max_length, divisor=5): + """Make room in the given cache. + + Args: + cache_dict: The cache dictionary to modify. + args_dict: The dictionary of named list args used by patterns. + max_length: Maximum # of entries in cache_dict before it is shrunk. + divisor: Cache will shrink to max_length - 1/divisor*max_length items. + """ + # Toss out a fraction of the entries at random to make room for new ones. + # A random algorithm was chosen as opposed to simply cache_dict.popitem() + # as popitem could penalize the same regular expression repeatedly based + # on its internal hash value. Being random should spread the cache miss + # love around. + cache_keys = tuple(cache_dict.keys()) + overage = len(cache_keys) - max_length + if overage < 0: + # Cache is already within limits. Normally this should not happen + # but it could due to multithreading. + return + + number_to_toss = max_length // divisor + overage + + # The import is done here to avoid a circular dependency. + import random + if not hasattr(random, 'sample'): + # Do nothing while resolving the circular dependency: + # re->random->warnings->tokenize->string->re + return + + for doomed_key in random.sample(cache_keys, number_to_toss): + try: + del cache_dict[doomed_key] + except KeyError: + # Ignore problems if the cache changed from another thread. + pass + + # Rebuild the arguments and locale-sensitivity dictionaries. + args_dict.clear() + sensitivity_dict = {} + for pattern, pattern_type, flags, args, default_version, locale in tuple(cache_dict): + args_dict[pattern, pattern_type, flags, default_version, locale] = args + try: + sensitivity_dict[pattern_type, pattern] = locale_sensitive[pattern_type, pattern] + except KeyError: + pass + + locale_sensitive.clear() + locale_sensitive.update(sensitivity_dict) + +def _fold_case(info, string): + "Folds the case of a string." + flags = info.flags + if (flags & _ALL_ENCODINGS) == 0: + flags |= info.guess_encoding + + return _regex.fold_case(flags, string) + +def is_cased_i(info, char): + "Checks whether a character is cased." + return len(_regex.get_all_cases(info.flags, char)) > 1 + +def is_cased_f(flags, char): + "Checks whether a character is cased." + return len(_regex.get_all_cases(flags, char)) > 1 + +def _compile_firstset(info, fs): + "Compiles the firstset for the pattern." + reverse = bool(info.flags & REVERSE) + fs = _check_firstset(info, reverse, fs) + if not fs: + return [] + + # Compile the firstset. + return fs.compile(reverse) + +def _check_firstset(info, reverse, fs): + "Checks the firstset for the pattern." + if not fs or None in fs: + return None + + # If we ignore the case, for simplicity we won't build a firstset. + members = set() + case_flags = NOCASE + for i in fs: + if isinstance(i, Character) and not i.positive: + return None + +# if i.case_flags: +# if isinstance(i, Character): +# if is_cased_i(info, i.value): +# return [] +# elif isinstance(i, SetBase): +# return [] + case_flags |= i.case_flags + members.add(i.with_flags(case_flags=NOCASE)) + + if case_flags == (FULLCASE | IGNORECASE): + return None + + # Build the firstset. + fs = SetUnion(info, list(members), case_flags=case_flags & ~FULLCASE, + zerowidth=True) + fs = fs.optimise(info, reverse, in_set=True) + + return fs + +def _flatten_code(code): + "Flattens the code from a list of tuples." 
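+    # For example, [(0, 1), (2,), (3, 4)] is flattened to [0, 1, 2, 3, 4].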
+ flat_code = [] + for c in code: + flat_code.extend(c) + + return flat_code + +def make_case_flags(info): + "Makes the case flags." + flags = info.flags & CASE_FLAGS + + # Turn off FULLCASE if ASCII is turned on. + if info.flags & ASCII: + flags &= ~FULLCASE + + return flags + +def make_character(info, value, in_set=False): + "Makes a character literal." + if in_set: + # A character set is built case-sensitively. + return Character(value) + + return Character(value, case_flags=make_case_flags(info)) + +def make_ref_group(info, name, position): + "Makes a group reference." + return RefGroup(info, name, position, case_flags=make_case_flags(info)) + +def make_string_set(info, name): + "Makes a string set." + return StringSet(info, name, case_flags=make_case_flags(info)) + +def make_property(info, prop, in_set): + "Makes a property." + if in_set: + return prop + + return prop.with_flags(case_flags=make_case_flags(info)) + +def _parse_pattern(source, info): + "Parses a pattern, eg. 'a|b|c'." + branches = [parse_sequence(source, info)] + while source.match("|"): + branches.append(parse_sequence(source, info)) + + if len(branches) == 1: + return branches[0] + return Branch(branches) + +def parse_sequence(source, info): + "Parses a sequence, eg. 'abc'." + sequence = [None] + case_flags = make_case_flags(info) + while True: + saved_pos = source.pos + ch = source.get() + if ch in SPECIAL_CHARS: + if ch in ")|": + # The end of a sequence. At the end of the pattern ch is "". + source.pos = saved_pos + break + elif ch == "\\": + # An escape sequence outside a set. + sequence.append(parse_escape(source, info, False)) + elif ch == "(": + # A parenthesised subpattern or a flag. + element = parse_paren(source, info) + if element is None: + case_flags = make_case_flags(info) + else: + sequence.append(element) + elif ch == ".": + # Any character. + if info.flags & DOTALL: + sequence.append(AnyAll()) + elif info.flags & WORD: + sequence.append(AnyU()) + else: + sequence.append(Any()) + elif ch == "[": + # A character set. + sequence.append(parse_set(source, info)) + elif ch == "^": + # The start of a line or the string. + if info.flags & MULTILINE: + if info.flags & WORD: + sequence.append(StartOfLineU()) + else: + sequence.append(StartOfLine()) + else: + sequence.append(StartOfString()) + elif ch == "$": + # The end of a line or the string. + if info.flags & MULTILINE: + if info.flags & WORD: + sequence.append(EndOfLineU()) + else: + sequence.append(EndOfLine()) + else: + if info.flags & WORD: + sequence.append(EndOfStringLineU()) + else: + sequence.append(EndOfStringLine()) + elif ch in "?*+{": + # Looks like a quantifier. + counts = parse_quantifier(source, info, ch) + if counts: + # It _is_ a quantifier. + apply_quantifier(source, info, counts, case_flags, ch, + saved_pos, sequence) + sequence.append(None) + else: + # It's not a quantifier. Maybe it's a fuzzy constraint. + constraints = parse_fuzzy(source, info, ch, case_flags) + if constraints: + # It _is_ a fuzzy constraint. + apply_constraint(source, info, constraints, case_flags, + saved_pos, sequence) + sequence.append(None) + else: + # The element was just a literal. + sequence.append(Character(ord(ch), + case_flags=case_flags)) + else: + # A literal. + sequence.append(Character(ord(ch), case_flags=case_flags)) + else: + # A literal. 
+ sequence.append(Character(ord(ch), case_flags=case_flags)) + + sequence = [item for item in sequence if item is not None] + return Sequence(sequence) + +def apply_quantifier(source, info, counts, case_flags, ch, saved_pos, + sequence): + element = sequence.pop() + if element is None: + if sequence: + raise error("multiple repeat", source.string, saved_pos) + raise error("nothing to repeat", source.string, saved_pos) + + if isinstance(element, (GreedyRepeat, LazyRepeat, PossessiveRepeat)): + raise error("multiple repeat", source.string, saved_pos) + + min_count, max_count = counts + saved_pos = source.pos + ch = source.get() + if ch == "?": + # The "?" suffix that means it's a lazy repeat. + repeated = LazyRepeat + elif ch == "+": + # The "+" suffix that means it's a possessive repeat. + repeated = PossessiveRepeat + else: + # No suffix means that it's a greedy repeat. + source.pos = saved_pos + repeated = GreedyRepeat + + # Ignore the quantifier if it applies to a zero-width item or the number of + # repeats is fixed at 1. + if not element.is_empty() and (min_count != 1 or max_count != 1): + element = repeated(element, min_count, max_count) + + sequence.append(element) + +def apply_constraint(source, info, constraints, case_flags, saved_pos, + sequence): + element = sequence.pop() + if element is None: + raise error("nothing for fuzzy constraint", source.string, saved_pos) + + # If a group is marked as fuzzy then put all of the fuzzy part in the + # group. + if isinstance(element, Group): + element.subpattern = Fuzzy(element.subpattern, constraints) + sequence.append(element) + else: + sequence.append(Fuzzy(element, constraints)) + +_QUANTIFIERS = {"?": (0, 1), "*": (0, None), "+": (1, None)} + +def parse_quantifier(source, info, ch): + "Parses a quantifier." + q = _QUANTIFIERS.get(ch) + if q: + # It's a quantifier. + return q + + if ch == "{": + # Looks like a limited repeated element, eg. 'a{2,3}'. + counts = parse_limited_quantifier(source) + if counts: + return counts + + return None + +def is_above_limit(count): + "Checks whether a count is above the maximum." + return count is not None and count >= UNLIMITED + +def parse_limited_quantifier(source): + "Parses a limited quantifier." + saved_pos = source.pos + min_count = parse_count(source) + if source.match(","): + max_count = parse_count(source) + + # No minimum means 0 and no maximum means unlimited. + min_count = int(min_count or 0) + max_count = int(max_count) if max_count else None + else: + if not min_count: + source.pos = saved_pos + return None + + min_count = max_count = int(min_count) + + if not source.match ("}"): + source.pos = saved_pos + return None + + if is_above_limit(min_count) or is_above_limit(max_count): + raise error("repeat count too big", source.string, saved_pos) + + if max_count is not None and min_count > max_count: + raise error("min repeat greater than max repeat", source.string, + saved_pos) + + return min_count, max_count + +def parse_fuzzy(source, info, ch, case_flags): + "Parses a fuzzy setting, if present." 
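+    # A fuzzy setting follows the item it qualifies in braces, e.g.
+    # "(?:pattern){e<=2}" (at most two errors overall) or "{2i+2d+1s<=4}"
+    # (a cost equation weighting insertions, deletions and substitutions).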
+ saved_pos = source.pos + + if ch != "{": + return None + + constraints = {} + try: + parse_fuzzy_item(source, constraints) + while source.match(","): + parse_fuzzy_item(source, constraints) + except ParseError: + source.pos = saved_pos + return None + + if source.match(":"): + constraints["test"] = parse_fuzzy_test(source, info, case_flags) + + if not source.match("}"): + raise error("expected }", source.string, source.pos) + + return constraints + +def parse_fuzzy_item(source, constraints): + "Parses a fuzzy setting item." + saved_pos = source.pos + try: + parse_cost_constraint(source, constraints) + except ParseError: + source.pos = saved_pos + + parse_cost_equation(source, constraints) + +def parse_cost_constraint(source, constraints): + "Parses a cost constraint." + saved_pos = source.pos + ch = source.get() + if ch in ALPHA: + # Syntax: constraint [("<=" | "<") cost] + constraint = parse_constraint(source, constraints, ch) + + max_inc = parse_fuzzy_compare(source) + + if max_inc is None: + # No maximum cost. + constraints[constraint] = 0, None + else: + # There's a maximum cost. + cost_pos = source.pos + max_cost = parse_cost_limit(source) + + # Inclusive or exclusive limit? + if not max_inc: + max_cost -= 1 + + if max_cost < 0: + raise error("bad fuzzy cost limit", source.string, cost_pos) + + constraints[constraint] = 0, max_cost + elif ch in DIGITS: + # Syntax: cost ("<=" | "<") constraint ("<=" | "<") cost + source.pos = saved_pos + + # Minimum cost. + cost_pos = source.pos + min_cost = parse_cost_limit(source) + + min_inc = parse_fuzzy_compare(source) + if min_inc is None: + raise ParseError() + + constraint = parse_constraint(source, constraints, source.get()) + + max_inc = parse_fuzzy_compare(source) + if max_inc is None: + raise ParseError() + + # Maximum cost. + cost_pos = source.pos + max_cost = parse_cost_limit(source) + + # Inclusive or exclusive limits? + if not min_inc: + min_cost += 1 + if not max_inc: + max_cost -= 1 + + if not 0 <= min_cost <= max_cost: + raise error("bad fuzzy cost limit", source.string, cost_pos) + + constraints[constraint] = min_cost, max_cost + else: + raise ParseError() + +def parse_cost_limit(source): + "Parses a cost limit." + cost_pos = source.pos + digits = parse_count(source) + + try: + return int(digits) + except ValueError: + pass + + raise error("bad fuzzy cost limit", source.string, cost_pos) + +def parse_constraint(source, constraints, ch): + "Parses a constraint." + if ch not in "deis": + raise ParseError() + + if ch in constraints: + raise ParseError() + + return ch + +def parse_fuzzy_compare(source): + "Parses a cost comparator." + if source.match("<="): + return True + elif source.match("<"): + return False + else: + return None + +def parse_cost_equation(source, constraints): + "Parses a cost equation." + if "cost" in constraints: + raise error("more than one cost equation", source.string, source.pos) + + cost = {} + + parse_cost_term(source, cost) + while source.match("+"): + parse_cost_term(source, cost) + + max_inc = parse_fuzzy_compare(source) + if max_inc is None: + raise ParseError() + + max_cost = int(parse_count(source)) + + if not max_inc: + max_cost -= 1 + + if max_cost < 0: + raise error("bad fuzzy cost limit", source.string, source.pos) + + cost["max"] = max_cost + + constraints["cost"] = cost + +def parse_cost_term(source, cost): + "Parses a cost equation term." 
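+    # A term is an optional decimal coefficient followed by one of "d", "i"
+    # or "s", e.g. the "2i" in "{2i+2d+1s<=4}"; a missing coefficient
+    # defaults to 1 (see "int(coeff or 1)" below).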
+    coeff = parse_count(source)
+    ch = source.get()
+    if ch not in "dis":
+        raise ParseError()
+
+    if ch in cost:
+        raise error("repeated fuzzy cost", source.string, source.pos)
+
+    cost[ch] = int(coeff or 1)
+
+def parse_fuzzy_test(source, info, case_flags):
+    saved_pos = source.pos
+    ch = source.get()
+    if ch in SPECIAL_CHARS:
+        if ch == "\\":
+            # An escape sequence outside a set.
+            return parse_escape(source, info, False)
+        elif ch == ".":
+            # Any character.
+            if info.flags & DOTALL:
+                return AnyAll()
+            elif info.flags & WORD:
+                return AnyU()
+            else:
+                return Any()
+        elif ch == "[":
+            # A character set.
+            return parse_set(source, info)
+        else:
+            raise error("expected character set", source.string, saved_pos)
+    elif ch:
+        # A literal.
+        return Character(ord(ch), case_flags=case_flags)
+    else:
+        raise error("expected character set", source.string, saved_pos)
+
+def parse_count(source):
+    "Parses a quantifier's count, which can be empty."
+    return source.get_while(DIGITS)
+
+def parse_paren(source, info):
+    """Parses a parenthesised subpattern or a flag. Returns FLAGS if it's an
+    inline flag.
+    """
+    saved_pos = source.pos
+    ch = source.get(True)
+    if ch == "?":
+        # (?...
+        saved_pos_2 = source.pos
+        ch = source.get(True)
+        if ch == "<":
+            # (?<...
+            saved_pos_3 = source.pos
+            ch = source.get()
+            if ch in ("=", "!"):
+                # (?<=... or (?<!...: lookbehind.
+                return parse_lookaround(source, info, True, ch == "=")
+
+            # (?<...: a named capture group.
+            source.pos = saved_pos_3
+            name = parse_name(source)
+            group = info.open_group(name)
+            source.expect(">")
+            saved_flags = info.flags
+            try:
+                subpattern = _parse_pattern(source, info)
+                source.expect(")")
+            finally:
+                info.flags = saved_flags
+                source.ignore_space = bool(info.flags & VERBOSE)
+
+            info.close_group()
+            return Group(info, group, subpattern)
+        if ch in ("=", "!"):
+            # (?=... or (?!...: lookahead.
+            return parse_lookaround(source, info, False, ch == "=")
+        if ch == "P":
+            # (?P...: a Python extension.
+            return parse_extension(source, info)
+        if ch == "#":
+            # (?#...: a comment.
+            return parse_comment(source)
+        if ch == "(":
+            # (?(...: a conditional subpattern.
+            return parse_conditional(source, info)
+        if ch == ">":
+            # (?>...: an atomic subpattern.
+            return parse_atomic(source, info)
+        if ch == "|":
+            # (?|...: a common/reset groups branch.
+            return parse_common(source, info)
+        if ch == "R" or "0" <= ch <= "9":
+            # (?R...: probably a call to a group.
+            return parse_call_group(source, info, ch, saved_pos_2)
+        if ch == "&":
+            # (?&...: a call to a named group.
+            return parse_call_named_group(source, info, saved_pos_2)
+
+        # (?...: probably a flags subpattern.
+        source.pos = saved_pos_2
+        return parse_flags_subpattern(source, info)
+
+    if ch == "*":
+        # (*...
+        saved_pos_2 = source.pos
+        word = source.get_while(set(")>"), include=False)
+        if word[ : 1].isalpha():
+            verb = VERBS.get(word)
+            if not verb:
+                raise error("unknown verb", source.string, saved_pos_2)
+
+            source.expect(")")
+
+            return verb
+
+    # (...: an unnamed capture group.
+    source.pos = saved_pos
+    group = info.open_group()
+    saved_flags = info.flags
+    try:
+        subpattern = _parse_pattern(source, info)
+        source.expect(")")
+    finally:
+        info.flags = saved_flags
+        source.ignore_space = bool(info.flags & VERBOSE)
+
+    info.close_group()
+
+    return Group(info, group, subpattern)
+
+def parse_extension(source, info):
+    "Parses a Python extension."
+    saved_pos = source.pos
+    ch = source.get()
+    if ch == "<":
+        # (?P<...: a named capture group.
+        name = parse_name(source)
+        group = info.open_group(name)
+        source.expect(">")
+        saved_flags = info.flags
+        try:
+            subpattern = _parse_pattern(source, info)
+            source.expect(")")
+        finally:
+            info.flags = saved_flags
+            source.ignore_space = bool(info.flags & VERBOSE)
+
+        info.close_group()
+
+        return Group(info, group, subpattern)
+    if ch == "=":
+        # (?P=...: a named group reference.
+        name = parse_name(source, allow_numeric=True)
+        source.expect(")")
+        if info.is_open_group(name):
+            raise error("cannot refer to an open group", source.string,
+              saved_pos)
+
+        return make_ref_group(info, name, saved_pos)
+    if ch == ">" or ch == "&":
+        # (?P>...: a call to a group.
+        return parse_call_named_group(source, info, saved_pos)
+
+    source.pos = saved_pos
+    raise error("unknown extension", source.string, saved_pos)
+
+def parse_comment(source):
+    "Parses a comment."
+    while True:
+        saved_pos = source.pos
+        c = source.get(True)
+
+        if not c or c == ")":
+            break
+
+        if c == "\\":
+            c = source.get(True)
+
+    source.pos = saved_pos
+    source.expect(")")
+
+    return None
+
+def parse_lookaround(source, info, behind, positive):
+    "Parses a lookaround."
+    saved_flags = info.flags
+    try:
+        subpattern = _parse_pattern(source, info)
+        source.expect(")")
+    finally:
+        info.flags = saved_flags
+        source.ignore_space = bool(info.flags & VERBOSE)
+
+    return LookAround(behind, positive, subpattern)
+
+def parse_conditional(source, info):
+    "Parses a conditional subpattern."
+    saved_flags = info.flags
+    saved_pos = source.pos
+    ch = source.get()
+    if ch == "?":
+        # (?(?...
+        ch = source.get()
+        if ch in ("=", "!"):
+            # (?(?=... or (?(?!...: lookahead conditional.
+            return parse_lookaround_conditional(source, info, False,
+              ch == "=")
+        if ch == "<":
+            # (?(?<...
+            ch = source.get()
+            if ch in ("=", "!"):
+                # (?(?<=... or (?(?<!...: lookbehind conditional.
+                return parse_lookaround_conditional(source, info, True,
+                  ch == "=")
+
+def parse_name(source, allow_numeric=False, allow_group_0=False):
+    "Parses a name."
+    name = source.get_while(set(")>"), include=False)
+
+    if not name:
+        raise error("missing group name", source.string, source.pos)
+
+    if name.isdigit():
+        min_group = 0 if allow_group_0 else 1
+        if not allow_numeric or int(name) < min_group:
+            raise error("bad character in group name", source.string,
+              source.pos)
+    else:
+        if not name.isidentifier():
+            raise error("bad character in group name", source.string,
+              source.pos)
+
+    return name
+
+def is_octal(string):
+    "Checks whether a string is octal."
+    return all(ch in OCT_DIGITS for ch in string)
+
+def is_decimal(string):
+    "Checks whether a string is decimal."
+    return all(ch in DIGITS for ch in string)
+
+def is_hexadecimal(string):
+    "Checks whether a string is hexadecimal."
+    return all(ch in HEX_DIGITS for ch in string)
+
+def parse_escape(source, info, in_set):
+    "Parses an escape sequence."
+    saved_ignore = source.ignore_space
+    source.ignore_space = False
+    ch = source.get()
+    source.ignore_space = saved_ignore
+    if not ch:
+        # A backslash at the end of the pattern.
+        raise error("bad escape (end of pattern)", source.string, source.pos)
+    if ch in HEX_ESCAPES:
+        # A hexadecimal escape sequence.
+        return parse_hex_escape(source, info, ch, HEX_ESCAPES[ch], in_set, ch)
+    elif ch == "g" and not in_set:
+        # A group reference.
+        saved_pos = source.pos
+        try:
+            return parse_group_ref(source, info)
+        except error:
+            # Invalid as a group reference, so assume it's a literal.
+            source.pos = saved_pos
+
+        return make_character(info, ord(ch), in_set)
+    elif ch == "G" and not in_set:
+        # A search anchor.
+        return SearchAnchor()
+    elif ch == "L" and not in_set:
+        # A string set.
+        return parse_string_set(source, info)
+    elif ch == "N":
+        # A named codepoint.
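+        # e.g. \N{LATIN SMALL LETTER A}; if "{...}" doesn't follow,
+        # parse_named_char falls back to a literal "N".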
+ return parse_named_char(source, info, in_set) + elif ch in "pP": + # A Unicode property, positive or negative. + return parse_property(source, info, ch == "p", in_set) + elif ch == "R" and not in_set: + # A line ending. + charset = [0x0A, 0x0B, 0x0C, 0x0D] + if info.guess_encoding == UNICODE: + charset.extend([0x85, 0x2028, 0x2029]) + + return Atomic(Branch([String([0x0D, 0x0A]), SetUnion(info, [Character(c) + for c in charset])])) + elif ch == "X" and not in_set: + # A grapheme cluster. + return Grapheme() + elif ch in ALPHA: + # An alphabetic escape sequence. + # Positional escapes aren't allowed inside a character set. + if not in_set: + if info.flags & WORD: + value = WORD_POSITION_ESCAPES.get(ch) + else: + value = POSITION_ESCAPES.get(ch) + + if value: + return value + + value = CHARSET_ESCAPES.get(ch) + if value: + return value + + value = CHARACTER_ESCAPES.get(ch) + if value: + return Character(ord(value)) + + raise error("bad escape \\%s" % ch, source.string, source.pos) + elif ch in DIGITS: + # A numeric escape sequence. + return parse_numeric_escape(source, info, ch, in_set) + else: + # A literal. + return make_character(info, ord(ch), in_set) + +def parse_numeric_escape(source, info, ch, in_set): + "Parses a numeric escape sequence." + if in_set or ch == "0": + # Octal escape sequence, max 3 digits. + return parse_octal_escape(source, info, [ch], in_set) + + # At least 1 digit, so either octal escape or group. + digits = ch + saved_pos = source.pos + ch = source.get() + if ch in DIGITS: + # At least 2 digits, so either octal escape or group. + digits += ch + saved_pos = source.pos + ch = source.get() + if is_octal(digits) and ch in OCT_DIGITS: + # 3 octal digits, so octal escape sequence. + encoding = info.flags & _ALL_ENCODINGS + if encoding == ASCII or encoding == LOCALE: + octal_mask = 0xFF + else: + octal_mask = 0x1FF + + value = int(digits + ch, 8) & octal_mask + return make_character(info, value) + + # Group reference. + source.pos = saved_pos + if info.is_open_group(digits): + raise error("cannot refer to an open group", source.string, source.pos) + + return make_ref_group(info, digits, source.pos) + +def parse_octal_escape(source, info, digits, in_set): + "Parses an octal escape sequence." + saved_pos = source.pos + ch = source.get() + while len(digits) < 3 and ch in OCT_DIGITS: + digits.append(ch) + saved_pos = source.pos + ch = source.get() + + source.pos = saved_pos + try: + value = int("".join(digits), 8) + return make_character(info, value, in_set) + except ValueError: + if digits[0] in OCT_DIGITS: + raise error("incomplete escape \\%s" % ''.join(digits), + source.string, source.pos) + else: + raise error("bad escape \\%s" % digits[0], source.string, + source.pos) + +def parse_hex_escape(source, info, esc, expected_len, in_set, type): + "Parses a hex escape sequence." + saved_pos = source.pos + digits = [] + for i in range(expected_len): + ch = source.get() + if ch not in HEX_DIGITS: + raise error("incomplete escape \\%s%s" % (type, ''.join(digits)), + source.string, saved_pos) + digits.append(ch) + + try: + value = int("".join(digits), 16) + except ValueError: + pass + else: + if value < 0x110000: + return make_character(info, value, in_set) + + # Bad hex escape. + raise error("bad hex escape \\%s%s" % (esc, ''.join(digits)), + source.string, saved_pos) + +def parse_group_ref(source, info): + "Parses a group reference." 
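+    # e.g. \g<2> or \g<name>; referring to a group that is still open is
+    # rejected below.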
+ source.expect("<") + saved_pos = source.pos + name = parse_name(source, True) + source.expect(">") + if info.is_open_group(name): + raise error("cannot refer to an open group", source.string, source.pos) + + return make_ref_group(info, name, saved_pos) + +def parse_string_set(source, info): + "Parses a string set reference." + source.expect("<") + name = parse_name(source, True) + source.expect(">") + if name is None or name not in info.kwargs: + raise error("undefined named list", source.string, source.pos) + + return make_string_set(info, name) + +def parse_named_char(source, info, in_set): + "Parses a named character." + saved_pos = source.pos + if source.match("{"): + name = source.get_while(NAMED_CHAR_PART, keep_spaces=True) + if source.match("}"): + try: + value = unicodedata.lookup(name) + return make_character(info, ord(value), in_set) + except KeyError: + raise error("undefined character name", source.string, + source.pos) + + source.pos = saved_pos + return make_character(info, ord("N"), in_set) + +def parse_property(source, info, positive, in_set): + "Parses a Unicode property." + saved_pos = source.pos + ch = source.get() + if ch == "{": + negate = source.match("^") + prop_name, name = parse_property_name(source) + if source.match("}"): + # It's correctly delimited. + prop = lookup_property(prop_name, name, positive != negate, source) + return make_property(info, prop, in_set) + elif ch and ch in "CLMNPSZ": + # An abbreviated property, eg \pL. + prop = lookup_property(None, ch, positive, source) + return make_property(info, prop, in_set) + + # Not a property, so treat as a literal "p" or "P". + source.pos = saved_pos + ch = "p" if positive else "P" + return make_character(info, ord(ch), in_set) + +def parse_property_name(source): + "Parses a property name, which may be qualified." + name = source.get_while(PROPERTY_NAME_PART) + saved_pos = source.pos + + ch = source.get() + if ch and ch in ":=": + prop_name = name + name = source.get_while(ALNUM | set(" &_-./")).strip() + + if name: + # Name after the ":" or "=", so it's a qualified name. + saved_pos = source.pos + else: + # No name after the ":" or "=", so assume it's an unqualified name. + prop_name, name = None, prop_name + else: + prop_name = None + + source.pos = saved_pos + return prop_name, name + +def parse_set(source, info): + "Parses a character set." + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + + saved_ignore = source.ignore_space + source.ignore_space = False + # Negative set? + negate = source.match("^") + try: + if version == VERSION0: + item = parse_set_imp_union(source, info) + else: + item = parse_set_union(source, info) + + if not source.match("]"): + raise error("missing ]", source.string, source.pos) + finally: + source.ignore_space = saved_ignore + + if negate: + item = item.with_flags(positive=not item.positive) + + item = item.with_flags(case_flags=make_case_flags(info)) + + return item + +def parse_set_union(source, info): + "Parses a set union ([x||y])." + items = [parse_set_symm_diff(source, info)] + while source.match("||"): + items.append(parse_set_symm_diff(source, info)) + + if len(items) == 1: + return items[0] + return SetUnion(info, items) + +def parse_set_symm_diff(source, info): + "Parses a set symmetric difference ([x~~y])." 
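+    # e.g. [[a-z]~~[aeiou]] keeps the characters that are in exactly one of
+    # the two operands (a VERSION1 set operation).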
+ items = [parse_set_inter(source, info)] + while source.match("~~"): + items.append(parse_set_inter(source, info)) + + if len(items) == 1: + return items[0] + return SetSymDiff(info, items) + +def parse_set_inter(source, info): + "Parses a set intersection ([x&&y])." + items = [parse_set_diff(source, info)] + while source.match("&&"): + items.append(parse_set_diff(source, info)) + + if len(items) == 1: + return items[0] + return SetInter(info, items) + +def parse_set_diff(source, info): + "Parses a set difference ([x--y])." + items = [parse_set_imp_union(source, info)] + while source.match("--"): + items.append(parse_set_imp_union(source, info)) + + if len(items) == 1: + return items[0] + return SetDiff(info, items) + +def parse_set_imp_union(source, info): + "Parses a set implicit union ([xy])." + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + + items = [parse_set_member(source, info)] + while True: + saved_pos = source.pos + if source.match("]"): + # End of the set. + source.pos = saved_pos + break + + if version == VERSION1 and any(source.match(op) for op in SET_OPS): + # The new behaviour has set operators. + source.pos = saved_pos + break + + items.append(parse_set_member(source, info)) + + if len(items) == 1: + return items[0] + return SetUnion(info, items) + +def parse_set_member(source, info): + "Parses a member in a character set." + # Parse a set item. + start = parse_set_item(source, info) + saved_pos1 = source.pos + if (not isinstance(start, Character) or not start.positive or not + source.match("-")): + # It's not the start of a range. + return start + + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + + # It looks like the start of a range of characters. + saved_pos2 = source.pos + if version == VERSION1 and source.match("-"): + # It's actually the set difference operator '--', so return the + # character. + source.pos = saved_pos1 + return start + + if source.match("]"): + # We've reached the end of the set, so return both the character and + # hyphen. + source.pos = saved_pos2 + return SetUnion(info, [start, Character(ord("-"))]) + + # Parse a set item. + end = parse_set_item(source, info) + if not isinstance(end, Character) or not end.positive: + # It's not a range, so return the character, hyphen and property. + return SetUnion(info, [start, Character(ord("-")), end]) + + # It _is_ a range. + if start.value > end.value: + raise error("bad character range", source.string, source.pos) + + if start.value == end.value: + return start + + return Range(start.value, end.value) + +def parse_set_item(source, info): + "Parses an item in a character set." + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + + if source.match("\\"): + # An escape sequence in a set. + return parse_escape(source, info, True) + + saved_pos = source.pos + if source.match("[:"): + # Looks like a POSIX character class. + try: + return parse_posix_class(source, info) + except ParseError: + # Not a POSIX character class. + source.pos = saved_pos + + if version == VERSION1 and source.match("["): + # It's the start of a nested set. + + # Negative set? 
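+        # e.g. the inner sets in [[a-z]--[aeiou]]; nesting is only
+        # recognised under VERSION1.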
+ negate = source.match("^") + item = parse_set_union(source, info) + + if not source.match("]"): + raise error("missing ]", source.string, source.pos) + + if negate: + item = item.with_flags(positive=not item.positive) + + return item + + ch = source.get() + if not ch: + raise error("unterminated character set", source.string, source.pos) + + return Character(ord(ch)) + +def parse_posix_class(source, info): + "Parses a POSIX character class." + negate = source.match("^") + prop_name, name = parse_property_name(source) + if not source.match(":]"): + raise ParseError() + + return lookup_property(prop_name, name, not negate, source, posix=True) + +def float_to_rational(flt): + "Converts a float to a rational pair." + int_part = int(flt) + error = flt - int_part + if abs(error) < 0.0001: + return int_part, 1 + + den, num = float_to_rational(1.0 / error) + + return int_part * den + num, den + +def numeric_to_rational(numeric): + "Converts a numeric string to a rational string, if possible." + if numeric[ : 1] == "-": + sign, numeric = numeric[0], numeric[1 : ] + else: + sign = "" + + parts = numeric.split("/") + if len(parts) == 2: + num, den = float_to_rational(float(parts[0]) / float(parts[1])) + elif len(parts) == 1: + num, den = float_to_rational(float(parts[0])) + else: + raise ValueError() + + result = "{}{}/{}".format(sign, num, den) + if result.endswith("/1"): + return result[ : -2] + + return result + +def standardise_name(name): + "Standardises a property or value name." + try: + return numeric_to_rational("".join(name)) + except (ValueError, ZeroDivisionError): + return "".join(ch for ch in name if ch not in "_- ").upper() + +_POSIX_CLASSES = set('ALNUM DIGIT PUNCT XDIGIT'.split()) + +_BINARY_VALUES = set('YES Y NO N TRUE T FALSE F'.split()) + +def lookup_property(property, value, positive, source=None, posix=False): + "Looks up a property." + # Normalise the names (which may still be lists). + property = standardise_name(property) if property else None + value = standardise_name(value) + + if (property, value) == ("GENERALCATEGORY", "ASSIGNED"): + property, value, positive = "GENERALCATEGORY", "UNASSIGNED", not positive + + if posix and not property and value.upper() in _POSIX_CLASSES: + value = 'POSIX' + value + + if property: + # Both the property and the value are provided. + prop = PROPERTIES.get(property) + if not prop: + if not source: + raise error("unknown property") + + raise error("unknown property", source.string, source.pos) + + prop_id, value_dict = prop + val_id = value_dict.get(value) + if val_id is None: + if not source: + raise error("unknown property value") + + raise error("unknown property value", source.string, source.pos) + + return Property((prop_id << 16) | val_id, positive) + + # Only the value is provided. + # It might be the name of a GC, script or block value. + for property in ("GC", "SCRIPT", "BLOCK"): + prop_id, value_dict = PROPERTIES.get(property) + val_id = value_dict.get(value) + if val_id is not None: + return Property((prop_id << 16) | val_id, positive) + + # It might be the name of a binary property. + prop = PROPERTIES.get(value) + if prop: + prop_id, value_dict = prop + if set(value_dict) == _BINARY_VALUES: + return Property((prop_id << 16) | 1, positive) + + return Property(prop_id << 16, not positive) + + # It might be the name of a binary property starting with a prefix. 
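+    # e.g. \p{IsAlphabetic} is accepted as a synonym of \p{Alphabetic}.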
+ if value.startswith("IS"): + prop = PROPERTIES.get(value[2 : ]) + if prop: + prop_id, value_dict = prop + if "YES" in value_dict: + return Property((prop_id << 16) | 1, positive) + + # It might be the name of a script or block starting with a prefix. + for prefix, property in (("IS", "SCRIPT"), ("IN", "BLOCK")): + if value.startswith(prefix): + prop_id, value_dict = PROPERTIES.get(property) + val_id = value_dict.get(value[2 : ]) + if val_id is not None: + return Property((prop_id << 16) | val_id, positive) + + # Unknown property. + if not source: + raise error("unknown property") + + raise error("unknown property", source.string, source.pos) + +def _compile_replacement(source, pattern, is_unicode): + "Compiles a replacement template escape sequence." + ch = source.get() + if ch in ALPHA: + # An alphabetic escape sequence. + value = CHARACTER_ESCAPES.get(ch) + if value: + return False, [ord(value)] + + if ch in HEX_ESCAPES and (ch == "x" or is_unicode): + # A hexadecimal escape sequence. + return False, [parse_repl_hex_escape(source, HEX_ESCAPES[ch], ch)] + + if ch == "g": + # A group preference. + return True, [compile_repl_group(source, pattern)] + + if ch == "N" and is_unicode: + # A named character. + value = parse_repl_named_char(source) + if value is not None: + return False, [value] + + raise error("bad escape \\%s" % ch, source.string, source.pos) + + if isinstance(source.sep, bytes): + octal_mask = 0xFF + else: + octal_mask = 0x1FF + + if ch == "0": + # An octal escape sequence. + digits = ch + while len(digits) < 3: + saved_pos = source.pos + ch = source.get() + if ch not in OCT_DIGITS: + source.pos = saved_pos + break + digits += ch + + return False, [int(digits, 8) & octal_mask] + + if ch in DIGITS: + # Either an octal escape sequence (3 digits) or a group reference (max + # 2 digits). + digits = ch + saved_pos = source.pos + ch = source.get() + if ch in DIGITS: + digits += ch + saved_pos = source.pos + ch = source.get() + if ch and is_octal(digits + ch): + # An octal escape sequence. + return False, [int(digits + ch, 8) & octal_mask] + + # A group reference. + source.pos = saved_pos + return True, [int(digits)] + + if ch == "\\": + # An escaped backslash is a backslash. + return False, [ord("\\")] + + if not ch: + # A trailing backslash. + raise error("bad escape (end of pattern)", source.string, source.pos) + + # An escaped non-backslash is a backslash followed by the literal. + return False, [ord("\\"), ord(ch)] + +def parse_repl_hex_escape(source, expected_len, type): + "Parses a hex escape sequence in a replacement string." + digits = [] + for i in range(expected_len): + ch = source.get() + if ch not in HEX_DIGITS: + raise error("incomplete escape \\%s%s" % (type, ''.join(digits)), + source.string, source.pos) + digits.append(ch) + + return int("".join(digits), 16) + +def parse_repl_named_char(source): + "Parses a named character in a replacement string." + saved_pos = source.pos + if source.match("{"): + name = source.get_while(ALPHA | set(" ")) + + if source.match("}"): + try: + value = unicodedata.lookup(name) + return ord(value) + except KeyError: + raise error("undefined character name", source.string, + source.pos) + + source.pos = saved_pos + return None + +def compile_repl_group(source, pattern): + "Compiles a replacement template group reference." 
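+    # e.g. \g<1> or \g<name> in a replacement template such as
+    # sub(r"(\w+)", r"\g<1>!", ...).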
+ source.expect("<") + name = parse_name(source, True, True) + + source.expect(">") + if name.isdigit(): + index = int(name) + if not 0 <= index <= pattern.groups: + raise error("invalid group reference", source.string, source.pos) + + return index + + try: + return pattern.groupindex[name] + except KeyError: + raise IndexError("unknown group") + +# The regular expression is parsed into a syntax tree. The different types of +# node are defined below. + +INDENT = " " +POSITIVE_OP = 0x1 +ZEROWIDTH_OP = 0x2 +FUZZY_OP = 0x4 +REVERSE_OP = 0x8 +REQUIRED_OP = 0x10 + +POS_TEXT = {False: "NON-MATCH", True: "MATCH"} +CASE_TEXT = {NOCASE: "", IGNORECASE: " SIMPLE_IGNORE_CASE", FULLCASE: "", + FULLIGNORECASE: " FULL_IGNORE_CASE"} + +def make_sequence(items): + if len(items) == 1: + return items[0] + return Sequence(items) + +# Common base class for all nodes. +class RegexBase: + def __init__(self): + self._key = self.__class__ + + def with_flags(self, positive=None, case_flags=None, zerowidth=None): + if positive is None: + positive = self.positive + else: + positive = bool(positive) + if case_flags is None: + case_flags = self.case_flags + else: + case_flags = CASE_FLAGS_COMBINATIONS[case_flags & CASE_FLAGS] + if zerowidth is None: + zerowidth = self.zerowidth + else: + zerowidth = bool(zerowidth) + + if (positive == self.positive and case_flags == self.case_flags and + zerowidth == self.zerowidth): + return self + + return self.rebuild(positive, case_flags, zerowidth) + + def fix_groups(self, pattern, reverse, fuzzy): + pass + + def optimise(self, info, reverse): + return self + + def pack_characters(self, info): + return self + + def remove_captures(self): + return self + + def is_atomic(self): + return True + + def can_be_affix(self): + return True + + def contains_group(self): + return False + + def get_firstset(self, reverse): + raise _FirstSetError() + + def has_simple_start(self): + return False + + def compile(self, reverse=False, fuzzy=False): + return self._compile(reverse, fuzzy) + + def is_empty(self): + return False + + def __hash__(self): + return hash(self._key) + + def __eq__(self, other): + return type(self) is type(other) and self._key == other._key + + def __ne__(self, other): + return not self.__eq__(other) + + def get_required_string(self, reverse): + return self.max_width(), None + +# Base class for zero-width nodes. 
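+# These match a position rather than consuming characters; e.g. \b compiles
+# to the Boundary node defined below.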
+class ZeroWidthBase(RegexBase): + def __init__(self, positive=True): + RegexBase.__init__(self) + self.positive = bool(positive) + + self._key = self.__class__, self.positive + + def get_firstset(self, reverse): + return set([None]) + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if fuzzy: + flags |= FUZZY_OP + if reverse: + flags |= REVERSE_OP + return [(self._opcode, flags)] + + def dump(self, indent, reverse): + print("{}{} {}".format(INDENT * indent, self._op_name, + POS_TEXT[self.positive])) + + def max_width(self): + return 0 + +class Any(RegexBase): + _opcode = {False: OP.ANY, True: OP.ANY_REV} + _op_name = "ANY" + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if fuzzy: + flags |= FUZZY_OP + return [(self._opcode[reverse], flags)] + + def dump(self, indent, reverse): + print("{}{}".format(INDENT * indent, self._op_name)) + + def max_width(self): + return 1 + +class AnyAll(Any): + _opcode = {False: OP.ANY_ALL, True: OP.ANY_ALL_REV} + _op_name = "ANY_ALL" + +class AnyU(Any): + _opcode = {False: OP.ANY_U, True: OP.ANY_U_REV} + _op_name = "ANY_U" + +class Atomic(RegexBase): + def __init__(self, subpattern): + RegexBase.__init__(self) + self.subpattern = subpattern + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + self.subpattern = self.subpattern.optimise(info, reverse) + + if self.subpattern.is_empty(): + return self.subpattern + return self + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + self.subpattern = self.subpattern.remove_captures() + return self + + def can_be_affix(self): + return self.subpattern.can_be_affix() + + def contains_group(self): + return self.subpattern.contains_group() + + def get_firstset(self, reverse): + return self.subpattern.get_firstset(reverse) + + def has_simple_start(self): + return self.subpattern.has_simple_start() + + def _compile(self, reverse, fuzzy): + return ([(OP.ATOMIC, )] + self.subpattern.compile(reverse, fuzzy) + + [(OP.END, )]) + + def dump(self, indent, reverse): + print("{}ATOMIC".format(INDENT * indent)) + self.subpattern.dump(indent + 1, reverse) + + def is_empty(self): + return self.subpattern.is_empty() + + def __eq__(self, other): + return (type(self) is type(other) and self.subpattern == + other.subpattern) + + def max_width(self): + return self.subpattern.max_width() + + def get_required_string(self, reverse): + return self.subpattern.get_required_string(reverse) + +class Boundary(ZeroWidthBase): + _opcode = OP.BOUNDARY + _op_name = "BOUNDARY" + +class Branch(RegexBase): + def __init__(self, branches): + RegexBase.__init__(self) + self.branches = branches + + def fix_groups(self, pattern, reverse, fuzzy): + for b in self.branches: + b.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + if not self.branches: + return Sequence([]) + + # Flatten branches within branches. + branches = Branch._flatten_branches(info, reverse, self.branches) + + # Move any common prefix or suffix out of the branches. + if reverse: + suffix, branches = Branch._split_common_suffix(info, branches) + prefix = [] + else: + prefix, branches = Branch._split_common_prefix(info, branches) + suffix = [] + + # Try to reduce adjacent single-character branches to sets. 
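+        # e.g. a|b|c can be matched as the single set [abc].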
+ branches = Branch._reduce_to_set(info, reverse, branches) + + if len(branches) > 1: + sequence = [Branch(branches)] + + if not prefix or not suffix: + # We might be able to add a quick precheck before the branches. + firstset = self._add_precheck(info, reverse, branches) + + if firstset: + if reverse: + sequence.append(firstset) + else: + sequence.insert(0, firstset) + else: + sequence = branches + + return make_sequence(prefix + sequence + suffix) + + def _add_precheck(self, info, reverse, branches): + charset = set() + pos = -1 if reverse else 0 + + for branch in branches: + if type(branch) is Literal and branch.case_flags == NOCASE: + charset.add(branch.characters[pos]) + else: + return + + if not charset: + return None + + return _check_firstset(info, reverse, [Character(c) for c in charset]) + + def pack_characters(self, info): + self.branches = [b.pack_characters(info) for b in self.branches] + return self + + def remove_captures(self): + self.branches = [b.remove_captures() for b in self.branches] + return self + + def is_atomic(self): + return all(b.is_atomic() for b in self.branches) + + def can_be_affix(self): + return all(b.can_be_affix() for b in self.branches) + + def contains_group(self): + return any(b.contains_group() for b in self.branches) + + def get_firstset(self, reverse): + fs = set() + for b in self.branches: + fs |= b.get_firstset(reverse) + + return fs or set([None]) + + def _compile(self, reverse, fuzzy): + if not self.branches: + return [] + + code = [(OP.BRANCH, )] + for b in self.branches: + code.extend(b.compile(reverse, fuzzy)) + code.append((OP.NEXT, )) + + code[-1] = (OP.END, ) + + return code + + def dump(self, indent, reverse): + print("{}BRANCH".format(INDENT * indent)) + self.branches[0].dump(indent + 1, reverse) + for b in self.branches[1 : ]: + print("{}OR".format(INDENT * indent)) + b.dump(indent + 1, reverse) + + @staticmethod + def _flatten_branches(info, reverse, branches): + # Flatten the branches so that there aren't branches of branches. + new_branches = [] + for b in branches: + b = b.optimise(info, reverse) + if isinstance(b, Branch): + new_branches.extend(b.branches) + else: + new_branches.append(b) + + return new_branches + + @staticmethod + def _split_common_prefix(info, branches): + # Common leading items can be moved out of the branches. + # Get the items in the branches. + alternatives = [] + for b in branches: + if isinstance(b, Sequence): + alternatives.append(b.items) + else: + alternatives.append([b]) + + # What is the maximum possible length of the prefix? + max_count = min(len(a) for a in alternatives) + + # What is the longest common prefix? + prefix = alternatives[0] + pos = 0 + end_pos = max_count + while pos < end_pos and prefix[pos].can_be_affix() and all(a[pos] == + prefix[pos] for a in alternatives): + pos += 1 + count = pos + + if info.flags & UNICODE: + # We need to check that we're not splitting a sequence of + # characters which could form part of full case-folding. + count = pos + while count > 0 and not all(Branch._can_split(a, count) for a in + alternatives): + count -= 1 + + # No common prefix is possible. + if count == 0: + return [], branches + + # Rebuild the branches. + new_branches = [] + for a in alternatives: + new_branches.append(make_sequence(a[count : ])) + + return prefix[ : count], new_branches + + @staticmethod + def _split_common_suffix(info, branches): + # Common trailing items can be moved out of the branches. + # Get the items in the branches. 
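+        # e.g. for abc|xbc the common suffix bc is hoisted out, leaving
+        # (?:a|x)bc.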
+ alternatives = [] + for b in branches: + if isinstance(b, Sequence): + alternatives.append(b.items) + else: + alternatives.append([b]) + + # What is the maximum possible length of the suffix? + max_count = min(len(a) for a in alternatives) + + # What is the longest common suffix? + suffix = alternatives[0] + pos = -1 + end_pos = -1 - max_count + while pos > end_pos and suffix[pos].can_be_affix() and all(a[pos] == + suffix[pos] for a in alternatives): + pos -= 1 + count = -1 - pos + + if info.flags & UNICODE: + # We need to check that we're not splitting a sequence of + # characters which could form part of full case-folding. + while count > 0 and not all(Branch._can_split_rev(a, count) for a + in alternatives): + count -= 1 + + # No common suffix is possible. + if count == 0: + return [], branches + + # Rebuild the branches. + new_branches = [] + for a in alternatives: + new_branches.append(make_sequence(a[ : -count])) + + return suffix[-count : ], new_branches + + @staticmethod + def _can_split(items, count): + # Check the characters either side of the proposed split. + if not Branch._is_full_case(items, count - 1): + return True + + if not Branch._is_full_case(items, count): + return True + + # Check whether a 1-1 split would be OK. + if Branch._is_folded(items[count - 1 : count + 1]): + return False + + # Check whether a 1-2 split would be OK. + if (Branch._is_full_case(items, count + 2) and + Branch._is_folded(items[count - 1 : count + 2])): + return False + + # Check whether a 2-1 split would be OK. + if (Branch._is_full_case(items, count - 2) and + Branch._is_folded(items[count - 2 : count + 1])): + return False + + return True + + @staticmethod + def _can_split_rev(items, count): + end = len(items) + + # Check the characters either side of the proposed split. + if not Branch._is_full_case(items, end - count): + return True + + if not Branch._is_full_case(items, end - count - 1): + return True + + # Check whether a 1-1 split would be OK. + if Branch._is_folded(items[end - count - 1 : end - count + 1]): + return False + + # Check whether a 1-2 split would be OK. + if (Branch._is_full_case(items, end - count + 2) and + Branch._is_folded(items[end - count - 1 : end - count + 2])): + return False + + # Check whether a 2-1 split would be OK. + if (Branch._is_full_case(items, end - count - 2) and + Branch._is_folded(items[end - count - 2 : end - count + 1])): + return False + + return True + + @staticmethod + def _merge_common_prefixes(info, reverse, branches): + # Branches with the same case-sensitive character prefix can be grouped + # together if they are separated only by other branches with a + # character prefix. + prefixed = defaultdict(list) + order = {} + new_branches = [] + for b in branches: + if Branch._is_simple_character(b): + # Branch starts with a simple character. + prefixed[b.value].append([b]) + order.setdefault(b.value, len(order)) + elif (isinstance(b, Sequence) and b.items and + Branch._is_simple_character(b.items[0])): + # Branch starts with a simple character. 
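+                # e.g. cat|car both file under the prefix character "c".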
+                prefixed[b.items[0].value].append(b.items)
+                order.setdefault(b.items[0].value, len(order))
+            else:
+                Branch._flush_char_prefix(info, reverse, prefixed, order,
+                  new_branches)
+
+                new_branches.append(b)
+
+        Branch._flush_char_prefix(info, reverse, prefixed, order,
+          new_branches)
+
+        return new_branches
+
+    @staticmethod
+    def _is_simple_character(c):
+        return isinstance(c, Character) and c.positive and not c.case_flags
+
+    @staticmethod
+    def _reduce_to_set(info, reverse, branches):
+        # Can the branches be reduced to a set?
+        new_branches = []
+        items = set()
+        case_flags = NOCASE
+        for b in branches:
+            if isinstance(b, (Character, Property, SetBase)):
+                # Branch starts with a single character.
+                if b.case_flags != case_flags:
+                    # Different case sensitivity, so flush.
+                    Branch._flush_set_members(info, reverse, items,
+                      case_flags, new_branches)
+
+                    case_flags = b.case_flags
+
+                items.add(b.with_flags(case_flags=NOCASE))
+            else:
+                Branch._flush_set_members(info, reverse, items, case_flags,
+                  new_branches)
+
+                new_branches.append(b)
+
+        Branch._flush_set_members(info, reverse, items, case_flags,
+          new_branches)
+
+        return new_branches
+
+    @staticmethod
+    def _flush_char_prefix(info, reverse, prefixed, order, new_branches):
+        # Flush the prefixed branches.
+        if not prefixed:
+            return
+
+        for value, branches in sorted(prefixed.items(), key=lambda pair:
+          order[pair[0]]):
+            if len(branches) == 1:
+                new_branches.append(make_sequence(branches[0]))
+            else:
+                subbranches = []
+                optional = False
+                for b in branches:
+                    if len(b) > 1:
+                        subbranches.append(make_sequence(b[1 : ]))
+                    elif not optional:
+                        subbranches.append(Sequence())
+                        optional = True
+
+                sequence = Sequence([Character(value), Branch(subbranches)])
+                new_branches.append(sequence.optimise(info, reverse))
+
+        prefixed.clear()
+        order.clear()
+
+    @staticmethod
+    def _flush_set_members(info, reverse, items, case_flags, new_branches):
+        # Flush the set members.
+        if not items:
+            return
+
+        if len(items) == 1:
+            item = list(items)[0]
+        else:
+            item = SetUnion(info, list(items)).optimise(info, reverse)
+
+        new_branches.append(item.with_flags(case_flags=case_flags))
+
+        items.clear()
+
+    @staticmethod
+    def _is_full_case(items, i):
+        if not 0 <= i < len(items):
+            return False
+
+        item = items[i]
+        return (isinstance(item, Character) and item.positive and
+          (item.case_flags & FULLIGNORECASE) == FULLIGNORECASE)
+
+    @staticmethod
+    def _is_folded(items):
+        if len(items) < 2:
+            return False
+
+        for i in items:
+            if (not isinstance(i, Character) or not i.positive or not
+              i.case_flags):
+                return False
+
+        folded = "".join(chr(i.value) for i in items)
+        folded = _regex.fold_case(FULL_CASE_FOLDING, folded)
+
+        # Get the characters which expand to multiple codepoints on folding.
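+        # e.g. U+00DF (ß) expands to the two-character string "ss" under
+        # full case-folding.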
+ expanding_chars = _regex.get_expand_on_folding() + + for c in expanding_chars: + if folded == _regex.fold_case(FULL_CASE_FOLDING, c): + return True + + return False + + def is_empty(self): + return all(b.is_empty() for b in self.branches) + + def __eq__(self, other): + return type(self) is type(other) and self.branches == other.branches + + def max_width(self): + return max(b.max_width() for b in self.branches) + +class CallGroup(RegexBase): + def __init__(self, info, group, position): + RegexBase.__init__(self) + self.info = info + self.group = group + self.position = position + + self._key = self.__class__, self.group + + def fix_groups(self, pattern, reverse, fuzzy): + try: + self.group = int(self.group) + except ValueError: + try: + self.group = self.info.group_index[self.group] + except KeyError: + raise error("invalid group reference", pattern, self.position) + + if not 0 <= self.group <= self.info.group_count: + raise error("unknown group", pattern, self.position) + + if self.group > 0 and self.info.open_group_count[self.group] > 1: + raise error("ambiguous group reference", pattern, self.position) + + self.info.group_calls.append((self, reverse, fuzzy)) + + self._key = self.__class__, self.group + + def remove_captures(self): + raise error("group reference not allowed", pattern, self.position) + + def _compile(self, reverse, fuzzy): + return [(OP.GROUP_CALL, self.call_ref)] + + def dump(self, indent, reverse): + print("{}GROUP_CALL {}".format(INDENT * indent, self.group)) + + def __eq__(self, other): + return type(self) is type(other) and self.group == other.group + + def max_width(self): + return UNLIMITED + + def __del__(self): + self.info = None + +class CallRef(RegexBase): + def __init__(self, ref, parsed): + self.ref = ref + self.parsed = parsed + + def _compile(self, reverse, fuzzy): + return ([(OP.CALL_REF, self.ref)] + self.parsed._compile(reverse, + fuzzy) + [(OP.END, )]) + +class Character(RegexBase): + _opcode = {(NOCASE, False): OP.CHARACTER, (IGNORECASE, False): + OP.CHARACTER_IGN, (FULLCASE, False): OP.CHARACTER, (FULLIGNORECASE, + False): OP.CHARACTER_IGN, (NOCASE, True): OP.CHARACTER_REV, (IGNORECASE, + True): OP.CHARACTER_IGN_REV, (FULLCASE, True): OP.CHARACTER_REV, + (FULLIGNORECASE, True): OP.CHARACTER_IGN_REV} + + def __init__(self, value, positive=True, case_flags=NOCASE, + zerowidth=False): + RegexBase.__init__(self) + self.value = value + self.positive = bool(positive) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + self.zerowidth = bool(zerowidth) + + if (self.positive and (self.case_flags & FULLIGNORECASE) == + FULLIGNORECASE): + self.folded = _regex.fold_case(FULL_CASE_FOLDING, chr(self.value)) + else: + self.folded = chr(self.value) + + self._key = (self.__class__, self.value, self.positive, + self.case_flags, self.zerowidth) + + def rebuild(self, positive, case_flags, zerowidth): + return Character(self.value, positive, case_flags, zerowidth) + + def optimise(self, info, reverse, in_set=False): + return self + + def get_firstset(self, reverse): + return set([self]) + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + + code = PrecompiledCode([self._opcode[self.case_flags, reverse], flags, + self.value]) + + if len(self.folded) > 1: + # The character expands on full case-folding. 
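+            # e.g. a case-insensitive ß must also try its folded form "ss",
+            # so branch between the single character and the folded string.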
+ code = Branch([code, String([ord(c) for c in self.folded], + case_flags=self.case_flags)]) + + return code.compile(reverse, fuzzy) + + def dump(self, indent, reverse): + display = ascii(chr(self.value)).lstrip("bu") + print("{}CHARACTER {} {}{}".format(INDENT * indent, + POS_TEXT[self.positive], display, CASE_TEXT[self.case_flags])) + + def matches(self, ch): + return (ch == self.value) == self.positive + + def max_width(self): + return len(self.folded) + + def get_required_string(self, reverse): + if not self.positive: + return 1, None + + self.folded_characters = tuple(ord(c) for c in self.folded) + + return 0, self + +class Conditional(RegexBase): + def __init__(self, info, group, yes_item, no_item, position): + RegexBase.__init__(self) + self.info = info + self.group = group + self.yes_item = yes_item + self.no_item = no_item + self.position = position + + def fix_groups(self, pattern, reverse, fuzzy): + try: + self.group = int(self.group) + except ValueError: + try: + self.group = self.info.group_index[self.group] + except KeyError: + if self.group == 'DEFINE': + # 'DEFINE' is a special name unless there's a group with + # that name. + self.group = 0 + else: + raise error("unknown group", pattern, self.position) + + if not 0 <= self.group <= self.info.group_count: + raise error("invalid group reference", pattern, self.position) + + self.yes_item.fix_groups(pattern, reverse, fuzzy) + self.no_item.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + yes_item = self.yes_item.optimise(info, reverse) + no_item = self.no_item.optimise(info, reverse) + + return Conditional(info, self.group, yes_item, no_item, self.position) + + def pack_characters(self, info): + self.yes_item = self.yes_item.pack_characters(info) + self.no_item = self.no_item.pack_characters(info) + return self + + def remove_captures(self): + self.yes_item = self.yes_item.remove_captures() + self.no_item = self.no_item.remove_captures() + + def is_atomic(self): + return self.yes_item.is_atomic() and self.no_item.is_atomic() + + def can_be_affix(self): + return self.yes_item.can_be_affix() and self.no_item.can_be_affix() + + def contains_group(self): + return self.yes_item.contains_group() or self.no_item.contains_group() + + def get_firstset(self, reverse): + return (self.yes_item.get_firstset(reverse) | + self.no_item.get_firstset(reverse)) + + def _compile(self, reverse, fuzzy): + code = [(OP.GROUP_EXISTS, self.group)] + code.extend(self.yes_item.compile(reverse, fuzzy)) + add_code = self.no_item.compile(reverse, fuzzy) + if add_code: + code.append((OP.NEXT, )) + code.extend(add_code) + + code.append((OP.END, )) + + return code + + def dump(self, indent, reverse): + print("{}GROUP_EXISTS {}".format(INDENT * indent, self.group)) + self.yes_item.dump(indent + 1, reverse) + if not self.no_item.is_empty(): + print("{}OR".format(INDENT * indent)) + self.no_item.dump(indent + 1, reverse) + + def is_empty(self): + return self.yes_item.is_empty() and self.no_item.is_empty() + + def __eq__(self, other): + return type(self) is type(other) and (self.group, self.yes_item, + self.no_item) == (other.group, other.yes_item, other.no_item) + + def max_width(self): + return max(self.yes_item.max_width(), self.no_item.max_width()) + + def __del__(self): + self.info = None + +class DefaultBoundary(ZeroWidthBase): + _opcode = OP.DEFAULT_BOUNDARY + _op_name = "DEFAULT_BOUNDARY" + +class DefaultEndOfWord(ZeroWidthBase): + _opcode = OP.DEFAULT_END_OF_WORD + _op_name = "DEFAULT_END_OF_WORD" + +class 
DefaultStartOfWord(ZeroWidthBase): + _opcode = OP.DEFAULT_START_OF_WORD + _op_name = "DEFAULT_START_OF_WORD" + +class EndOfLine(ZeroWidthBase): + _opcode = OP.END_OF_LINE + _op_name = "END_OF_LINE" + +class EndOfLineU(EndOfLine): + _opcode = OP.END_OF_LINE_U + _op_name = "END_OF_LINE_U" + +class EndOfString(ZeroWidthBase): + _opcode = OP.END_OF_STRING + _op_name = "END_OF_STRING" + +class EndOfStringLine(ZeroWidthBase): + _opcode = OP.END_OF_STRING_LINE + _op_name = "END_OF_STRING_LINE" + +class EndOfStringLineU(EndOfStringLine): + _opcode = OP.END_OF_STRING_LINE_U + _op_name = "END_OF_STRING_LINE_U" + +class EndOfWord(ZeroWidthBase): + _opcode = OP.END_OF_WORD + _op_name = "END_OF_WORD" + +class Failure(ZeroWidthBase): + _op_name = "FAILURE" + + def _compile(self, reverse, fuzzy): + return [(OP.FAILURE, )] + +class Fuzzy(RegexBase): + def __init__(self, subpattern, constraints=None): + RegexBase.__init__(self) + if constraints is None: + constraints = {} + self.subpattern = subpattern + self.constraints = constraints + + # If an error type is mentioned in the cost equation, then its maximum + # defaults to unlimited. + if "cost" in constraints: + for e in "dis": + if e in constraints["cost"]: + constraints.setdefault(e, (0, None)) + + # If any error type is mentioned, then all the error maxima default to + # 0, otherwise they default to unlimited. + if set(constraints) & set("dis"): + for e in "dis": + constraints.setdefault(e, (0, 0)) + else: + for e in "dis": + constraints.setdefault(e, (0, None)) + + # The maximum of the generic error type defaults to unlimited. + constraints.setdefault("e", (0, None)) + + # The cost equation defaults to equal costs. Also, the cost of any + # error type not mentioned in the cost equation defaults to 0. + if "cost" in constraints: + for e in "dis": + constraints["cost"].setdefault(e, 0) + else: + constraints["cost"] = {"d": 1, "i": 1, "s": 1, "max": + constraints["e"][1]} + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, reverse, True) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + self.subpattern = self.subpattern.remove_captures() + return self + + def is_atomic(self): + return self.subpattern.is_atomic() + + def contains_group(self): + return self.subpattern.contains_group() + + def _compile(self, reverse, fuzzy): + # The individual limits. + arguments = [] + for e in "dise": + v = self.constraints[e] + arguments.append(v[0]) + arguments.append(UNLIMITED if v[1] is None else v[1]) + + # The coeffs of the cost equation. + for e in "dis": + arguments.append(self.constraints["cost"][e]) + + # The maximum of the cost equation. 
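+        # e.g. for {2i+2d+1s<=4} the maximum is 4; None means unlimited.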
+ v = self.constraints["cost"]["max"] + arguments.append(UNLIMITED if v is None else v) + + flags = 0 + if reverse: + flags |= REVERSE_OP + + test = self.constraints.get("test") + + if test: + return ([(OP.FUZZY_EXT, flags) + tuple(arguments)] + + test.compile(reverse, True) + [(OP.NEXT,)] + + self.subpattern.compile(reverse, True) + [(OP.END,)]) + + return ([(OP.FUZZY, flags) + tuple(arguments)] + + self.subpattern.compile(reverse, True) + [(OP.END,)]) + + def dump(self, indent, reverse): + constraints = self._constraints_to_string() + if constraints: + constraints = " " + constraints + print("{}FUZZY{}".format(INDENT * indent, constraints)) + self.subpattern.dump(indent + 1, reverse) + + def is_empty(self): + return self.subpattern.is_empty() + + def __eq__(self, other): + return (type(self) is type(other) and self.subpattern == + other.subpattern and self.constraints == other.constraints) + + def max_width(self): + return UNLIMITED + + def _constraints_to_string(self): + constraints = [] + + for name in "ids": + min, max = self.constraints[name] + if max == 0: + continue + + con = "" + + if min > 0: + con = "{}<=".format(min) + + con += name + + if max is not None: + con += "<={}".format(max) + + constraints.append(con) + + cost = [] + for name in "ids": + coeff = self.constraints["cost"][name] + if coeff > 0: + cost.append("{}{}".format(coeff, name)) + + limit = self.constraints["cost"]["max"] + if limit is not None and limit > 0: + cost = "{}<={}".format("+".join(cost), limit) + constraints.append(cost) + + return ",".join(constraints) + +class Grapheme(RegexBase): + def _compile(self, reverse, fuzzy): + # Match at least 1 character until a grapheme boundary is reached. Note + # that this is the same whether matching forwards or backwards. + grapheme_matcher = Atomic(Sequence([LazyRepeat(AnyAll(), 1, None), + GraphemeBoundary()])) + + return grapheme_matcher.compile(reverse, fuzzy) + + def dump(self, indent, reverse): + print("{}GRAPHEME".format(INDENT * indent)) + + def max_width(self): + return UNLIMITED + +class GraphemeBoundary: + def compile(self, reverse, fuzzy): + return [(OP.GRAPHEME_BOUNDARY, 1)] + +class GreedyRepeat(RegexBase): + _opcode = OP.GREEDY_REPEAT + _op_name = "GREEDY_REPEAT" + + def __init__(self, subpattern, min_count, max_count): + RegexBase.__init__(self) + self.subpattern = subpattern + self.min_count = min_count + self.max_count = max_count + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + subpattern = self.subpattern.optimise(info, reverse) + + return type(self)(subpattern, self.min_count, self.max_count) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + self.subpattern = self.subpattern.remove_captures() + return self + + def is_atomic(self): + return self.min_count == self.max_count and self.subpattern.is_atomic() + + def can_be_affix(self): + return False + + def contains_group(self): + return self.subpattern.contains_group() + + def get_firstset(self, reverse): + fs = self.subpattern.get_firstset(reverse) + if self.min_count == 0: + fs.add(None) + + return fs + + def _compile(self, reverse, fuzzy): + repeat = [self._opcode, self.min_count] + if self.max_count is None: + repeat.append(UNLIMITED) + else: + repeat.append(self.max_count) + + subpattern = self.subpattern.compile(reverse, fuzzy) + if not subpattern: + return [] + + return ([tuple(repeat)] + subpattern + 
[(OP.END, )]) + + def dump(self, indent, reverse): + if self.max_count is None: + limit = "INF" + else: + limit = self.max_count + print("{}{} {} {}".format(INDENT * indent, self._op_name, + self.min_count, limit)) + + self.subpattern.dump(indent + 1, reverse) + + def is_empty(self): + return self.subpattern.is_empty() + + def __eq__(self, other): + return type(self) is type(other) and (self.subpattern, self.min_count, + self.max_count) == (other.subpattern, other.min_count, + other.max_count) + + def max_width(self): + if self.max_count is None: + return UNLIMITED + + return self.subpattern.max_width() * self.max_count + + def get_required_string(self, reverse): + max_count = UNLIMITED if self.max_count is None else self.max_count + if self.min_count == 0: + w = self.subpattern.max_width() * max_count + return min(w, UNLIMITED), None + + ofs, req = self.subpattern.get_required_string(reverse) + if req: + return ofs, req + + w = self.subpattern.max_width() * max_count + return min(w, UNLIMITED), None + +class PossessiveRepeat(GreedyRepeat): + def is_atomic(self): + return True + + def _compile(self, reverse, fuzzy): + subpattern = self.subpattern.compile(reverse, fuzzy) + if not subpattern: + return [] + + repeat = [self._opcode, self.min_count] + if self.max_count is None: + repeat.append(UNLIMITED) + else: + repeat.append(self.max_count) + + return ([(OP.ATOMIC, ), tuple(repeat)] + subpattern + [(OP.END, ), + (OP.END, )]) + + def dump(self, indent, reverse): + print("{}ATOMIC".format(INDENT * indent)) + + if self.max_count is None: + limit = "INF" + else: + limit = self.max_count + print("{}{} {} {}".format(INDENT * (indent + 1), self._op_name, + self.min_count, limit)) + + self.subpattern.dump(indent + 2, reverse) + +class Group(RegexBase): + def __init__(self, info, group, subpattern): + RegexBase.__init__(self) + self.info = info + self.group = group + self.subpattern = subpattern + + self.call_ref = None + + def fix_groups(self, pattern, reverse, fuzzy): + self.info.defined_groups[self.group] = (self, reverse, fuzzy) + self.subpattern.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + subpattern = self.subpattern.optimise(info, reverse) + + return Group(self.info, self.group, subpattern) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + return self.subpattern.remove_captures() + + def is_atomic(self): + return self.subpattern.is_atomic() + + def can_be_affix(self): + return False + + def contains_group(self): + return True + + def get_firstset(self, reverse): + return self.subpattern.get_firstset(reverse) + + def has_simple_start(self): + return self.subpattern.has_simple_start() + + def _compile(self, reverse, fuzzy): + code = [] + + public_group = private_group = self.group + if private_group < 0: + public_group = self.info.private_groups[private_group] + private_group = self.info.group_count - private_group + + key = self.group, reverse, fuzzy + ref = self.info.call_refs.get(key) + if ref is not None: + code += [(OP.CALL_REF, ref)] + + code += [(OP.GROUP, int(not reverse), private_group, public_group)] + code += self.subpattern.compile(reverse, fuzzy) + code += [(OP.END, )] + + if ref is not None: + code += [(OP.END, )] + + return code + + def dump(self, indent, reverse): + group = self.group + if group < 0: + group = private_groups[group] + print("{}GROUP {}".format(INDENT * indent, group)) + self.subpattern.dump(indent + 1, reverse) + + def __eq__(self, other): + 
return (type(self) is type(other) and (self.group, self.subpattern) == + (other.group, other.subpattern)) + + def max_width(self): + return self.subpattern.max_width() + + def get_required_string(self, reverse): + return self.subpattern.get_required_string(reverse) + + def __del__(self): + self.info = None + +class Keep(ZeroWidthBase): + _opcode = OP.KEEP + _op_name = "KEEP" + +class LazyRepeat(GreedyRepeat): + _opcode = OP.LAZY_REPEAT + _op_name = "LAZY_REPEAT" + +class LookAround(RegexBase): + _dir_text = {False: "AHEAD", True: "BEHIND"} + + def __init__(self, behind, positive, subpattern): + RegexBase.__init__(self) + self.behind = bool(behind) + self.positive = bool(positive) + self.subpattern = subpattern + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, self.behind, fuzzy) + + def optimise(self, info, reverse): + subpattern = self.subpattern.optimise(info, self.behind) + if self.positive and subpattern.is_empty(): + return subpattern + + return LookAround(self.behind, self.positive, subpattern) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + return self + + def remove_captures(self): + return self.subpattern.remove_captures() + + def is_atomic(self): + return self.subpattern.is_atomic() + + def can_be_affix(self): + return self.subpattern.can_be_affix() + + def contains_group(self): + return self.subpattern.contains_group() + + def get_firstset(self, reverse): + if self.positive and self.behind == reverse: + return self.subpattern.get_firstset(reverse) + + return set([None]) + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if fuzzy: + flags |= FUZZY_OP + if reverse: + flags |= REVERSE_OP + + return ([(OP.LOOKAROUND, flags, int(not self.behind))] + + self.subpattern.compile(self.behind) + [(OP.END, )]) + + def dump(self, indent, reverse): + print("{}LOOK{} {}".format(INDENT * indent, + self._dir_text[self.behind], POS_TEXT[self.positive])) + self.subpattern.dump(indent + 1, self.behind) + + def is_empty(self): + return self.positive and self.subpattern.is_empty() + + def __eq__(self, other): + return type(self) is type(other) and (self.behind, self.positive, + self.subpattern) == (other.behind, other.positive, other.subpattern) + + def max_width(self): + return 0 + +class LookAroundConditional(RegexBase): + _dir_text = {False: "AHEAD", True: "BEHIND"} + + def __init__(self, behind, positive, subpattern, yes_item, no_item): + RegexBase.__init__(self) + self.behind = bool(behind) + self.positive = bool(positive) + self.subpattern = subpattern + self.yes_item = yes_item + self.no_item = no_item + + def fix_groups(self, pattern, reverse, fuzzy): + self.subpattern.fix_groups(pattern, reverse, fuzzy) + self.yes_item.fix_groups(pattern, reverse, fuzzy) + self.no_item.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + subpattern = self.subpattern.optimise(info, self.behind) + yes_item = self.yes_item.optimise(info, self.behind) + no_item = self.no_item.optimise(info, self.behind) + + return LookAroundConditional(self.behind, self.positive, subpattern, + yes_item, no_item) + + def pack_characters(self, info): + self.subpattern = self.subpattern.pack_characters(info) + self.yes_item = self.yes_item.pack_characters(info) + self.no_item = self.no_item.pack_characters(info) + return self + + def remove_captures(self): + self.subpattern = self.subpattern.remove_captures() + self.yes_item = self.yes_item.remove_captures() + self.no_item = 
self.no_item.remove_captures() + + def is_atomic(self): + return (self.subpattern.is_atomic() and self.yes_item.is_atomic() and + self.no_item.is_atomic()) + + def can_be_affix(self): + return (self.subpattern.can_be_affix() and self.yes_item.can_be_affix() + and self.no_item.can_be_affix()) + + def contains_group(self): + return (self.subpattern.contains_group() or + self.yes_item.contains_group() or self.no_item.contains_group()) + + def _compile(self, reverse, fuzzy): + code = [(OP.CONDITIONAL, int(self.positive), int(not self.behind))] + code.extend(self.subpattern.compile(self.behind, fuzzy)) + code.append((OP.NEXT, )) + code.extend(self.yes_item.compile(reverse, fuzzy)) + add_code = self.no_item.compile(reverse, fuzzy) + if add_code: + code.append((OP.NEXT, )) + code.extend(add_code) + + code.append((OP.END, )) + + return code + + def dump(self, indent, reverse): + print("{}CONDITIONAL {} {}".format(INDENT * indent, + self._dir_text[self.behind], POS_TEXT[self.positive])) + self.subpattern.dump(indent + 1, self.behind) + print("{}EITHER".format(INDENT * indent)) + self.yes_item.dump(indent + 1, reverse) + if not self.no_item.is_empty(): + print("{}OR".format(INDENT * indent)) + self.no_item.dump(indent + 1, reverse) + + def is_empty(self): + return (self.subpattern.is_empty() and self.yes_item.is_empty() or + self.no_item.is_empty()) + + def __eq__(self, other): + return type(self) is type(other) and (self.subpattern, self.yes_item, + self.no_item) == (other.subpattern, other.yes_item, other.no_item) + + def max_width(self): + return max(self.yes_item.max_width(), self.no_item.max_width()) + + def get_required_string(self, reverse): + return self.max_width(), None + +class PrecompiledCode(RegexBase): + def __init__(self, code): + self.code = code + + def _compile(self, reverse, fuzzy): + return [tuple(self.code)] + +class Property(RegexBase): + _opcode = {(NOCASE, False): OP.PROPERTY, (IGNORECASE, False): + OP.PROPERTY_IGN, (FULLCASE, False): OP.PROPERTY, (FULLIGNORECASE, False): + OP.PROPERTY_IGN, (NOCASE, True): OP.PROPERTY_REV, (IGNORECASE, True): + OP.PROPERTY_IGN_REV, (FULLCASE, True): OP.PROPERTY_REV, (FULLIGNORECASE, + True): OP.PROPERTY_IGN_REV} + + def __init__(self, value, positive=True, case_flags=NOCASE, + zerowidth=False): + RegexBase.__init__(self) + self.value = value + self.positive = bool(positive) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + self.zerowidth = bool(zerowidth) + + self._key = (self.__class__, self.value, self.positive, + self.case_flags, self.zerowidth) + + def rebuild(self, positive, case_flags, zerowidth): + return Property(self.value, positive, case_flags, zerowidth) + + def optimise(self, info, reverse, in_set=False): + return self + + def get_firstset(self, reverse): + return set([self]) + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + return [(self._opcode[self.case_flags, reverse], flags, self.value)] + + def dump(self, indent, reverse): + prop = PROPERTY_NAMES[self.value >> 16] + name, value = prop[0], prop[1][self.value & 0xFFFF] + print("{}PROPERTY {} {}:{}{}".format(INDENT * indent, + POS_TEXT[self.positive], name, value, CASE_TEXT[self.case_flags])) + + def matches(self, ch): + return _regex.has_property_value(self.value, ch) == self.positive + + def max_width(self): + return 1 + +class Prune(ZeroWidthBase): + _op_name = "PRUNE" + + def _compile(self, reverse, 
fuzzy): + return [(OP.PRUNE, )] + +class Range(RegexBase): + _opcode = {(NOCASE, False): OP.RANGE, (IGNORECASE, False): OP.RANGE_IGN, + (FULLCASE, False): OP.RANGE, (FULLIGNORECASE, False): OP.RANGE_IGN, + (NOCASE, True): OP.RANGE_REV, (IGNORECASE, True): OP.RANGE_IGN_REV, + (FULLCASE, True): OP.RANGE_REV, (FULLIGNORECASE, True): OP.RANGE_IGN_REV} + _op_name = "RANGE" + + def __init__(self, lower, upper, positive=True, case_flags=NOCASE, + zerowidth=False): + RegexBase.__init__(self) + self.lower = lower + self.upper = upper + self.positive = bool(positive) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + self.zerowidth = bool(zerowidth) + + self._key = (self.__class__, self.lower, self.upper, self.positive, + self.case_flags, self.zerowidth) + + def rebuild(self, positive, case_flags, zerowidth): + return Range(self.lower, self.upper, positive, case_flags, zerowidth) + + def optimise(self, info, reverse, in_set=False): + # Is the range case-sensitive? + if not self.positive or not (self.case_flags & IGNORECASE) or in_set: + return self + + # Is full case-folding possible? + if (not (info.flags & UNICODE) or (self.case_flags & FULLIGNORECASE) != + FULLIGNORECASE): + return self + + # Get the characters which expand to multiple codepoints on folding. + expanding_chars = _regex.get_expand_on_folding() + + # Get the folded characters in the range. + items = [] + for ch in expanding_chars: + if self.lower <= ord(ch) <= self.upper: + folded = _regex.fold_case(FULL_CASE_FOLDING, ch) + items.append(String([ord(c) for c in folded], + case_flags=self.case_flags)) + + if not items: + # We can fall back to simple case-folding. + return self + + if len(items) < self.upper - self.lower + 1: + # Not all the characters are covered by the full case-folding. 
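+            # Keep the range itself as the first alternative, ahead of the
+            # strings that the expanding characters fold to.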
+ items.insert(0, self) + + return Branch(items) + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + return [(self._opcode[self.case_flags, reverse], flags, self.lower, + self.upper)] + + def dump(self, indent, reverse): + display_lower = ascii(chr(self.lower)).lstrip("bu") + display_upper = ascii(chr(self.upper)).lstrip("bu") + print("{}RANGE {} {} {}{}".format(INDENT * indent, + POS_TEXT[self.positive], display_lower, display_upper, + CASE_TEXT[self.case_flags])) + + def matches(self, ch): + return (self.lower <= ch <= self.upper) == self.positive + + def max_width(self): + return 1 + +class RefGroup(RegexBase): + _opcode = {(NOCASE, False): OP.REF_GROUP, (IGNORECASE, False): + OP.REF_GROUP_IGN, (FULLCASE, False): OP.REF_GROUP, (FULLIGNORECASE, + False): OP.REF_GROUP_FLD, (NOCASE, True): OP.REF_GROUP_REV, (IGNORECASE, + True): OP.REF_GROUP_IGN_REV, (FULLCASE, True): OP.REF_GROUP_REV, + (FULLIGNORECASE, True): OP.REF_GROUP_FLD_REV} + + def __init__(self, info, group, position, case_flags=NOCASE): + RegexBase.__init__(self) + self.info = info + self.group = group + self.position = position + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + + self._key = self.__class__, self.group, self.case_flags + + def fix_groups(self, pattern, reverse, fuzzy): + try: + self.group = int(self.group) + except ValueError: + try: + self.group = self.info.group_index[self.group] + except KeyError: + raise error("unknown group", pattern, self.position) + + if not 1 <= self.group <= self.info.group_count: + raise error("invalid group reference", pattern, self.position) + + self._key = self.__class__, self.group, self.case_flags + + def remove_captures(self): + raise error("group reference not allowed", pattern, self.position) + + def _compile(self, reverse, fuzzy): + flags = 0 + if fuzzy: + flags |= FUZZY_OP + return [(self._opcode[self.case_flags, reverse], flags, self.group)] + + def dump(self, indent, reverse): + print("{}REF_GROUP {}{}".format(INDENT * indent, self.group, + CASE_TEXT[self.case_flags])) + + def max_width(self): + return UNLIMITED + + def __del__(self): + self.info = None + +class SearchAnchor(ZeroWidthBase): + _opcode = OP.SEARCH_ANCHOR + _op_name = "SEARCH_ANCHOR" + +class Sequence(RegexBase): + def __init__(self, items=None): + RegexBase.__init__(self) + if items is None: + items = [] + + self.items = items + + def fix_groups(self, pattern, reverse, fuzzy): + for s in self.items: + s.fix_groups(pattern, reverse, fuzzy) + + def optimise(self, info, reverse): + # Flatten the sequences. + items = [] + for s in self.items: + s = s.optimise(info, reverse) + if isinstance(s, Sequence): + items.extend(s.items) + else: + items.append(s) + + return make_sequence(items) + + def pack_characters(self, info): + "Packs sequences of characters into strings." + items = [] + characters = [] + case_flags = NOCASE + for s in self.items: + if type(s) is Character and s.positive and not s.zerowidth: + if s.case_flags != case_flags: + # Different case sensitivity, so flush, unless neither the + # previous nor the new character are cased. 
+                    if s.case_flags or is_cased_i(info, s.value):
+                        Sequence._flush_characters(info, characters,
+                          case_flags, items)
+
+                        case_flags = s.case_flags
+
+                characters.append(s.value)
+            elif type(s) is String or type(s) is Literal:
+                if s.case_flags != case_flags:
+                    # Different case sensitivity, so flush, unless neither
+                    # the previous nor the new string are cased.
+                    if s.case_flags or any(is_cased_i(info, c) for c in
+                      characters):
+                        Sequence._flush_characters(info, characters,
+                          case_flags, items)
+
+                        case_flags = s.case_flags
+
+                characters.extend(s.characters)
+            else:
+                Sequence._flush_characters(info, characters, case_flags, items)
+
+                items.append(s.pack_characters(info))
+
+        Sequence._flush_characters(info, characters, case_flags, items)
+
+        return make_sequence(items)
+
+    def remove_captures(self):
+        self.items = [s.remove_captures() for s in self.items]
+        return self
+
+    def is_atomic(self):
+        return all(s.is_atomic() for s in self.items)
+
+    def can_be_affix(self):
+        return False
+
+    def contains_group(self):
+        return any(s.contains_group() for s in self.items)
+
+    def get_firstset(self, reverse):
+        fs = set()
+        items = self.items
+        if reverse:
+            items.reverse()
+        for s in items:
+            fs |= s.get_firstset(reverse)
+            if None not in fs:
+                return fs
+            fs.discard(None)
+
+        return fs | set([None])
+
+    def has_simple_start(self):
+        return bool(self.items) and self.items[0].has_simple_start()
+
+    def _compile(self, reverse, fuzzy):
+        seq = self.items
+        if reverse:
+            seq = seq[::-1]
+
+        code = []
+        for s in seq:
+            code.extend(s.compile(reverse, fuzzy))
+
+        return code
+
+    def dump(self, indent, reverse):
+        for s in self.items:
+            s.dump(indent, reverse)
+
+    @staticmethod
+    def _flush_characters(info, characters, case_flags, items):
+        if not characters:
+            return
+
+        # Disregard case_flags if all of the characters are case-less.
+        if case_flags & IGNORECASE:
+            if not any(is_cased_i(info, c) for c in characters):
+                case_flags = NOCASE
+
+        if (case_flags & FULLIGNORECASE) == FULLIGNORECASE:
+            literals = Sequence._fix_full_casefold(characters)
+
+            for item in literals:
+                chars = item.characters
+
+                if len(chars) == 1:
+                    items.append(Character(chars[0], case_flags=item.case_flags))
+                else:
+                    items.append(String(chars, case_flags=item.case_flags))
+        else:
+            if len(characters) == 1:
+                items.append(Character(characters[0], case_flags=case_flags))
+            else:
+                items.append(String(characters, case_flags=case_flags))
+
+        characters[:] = []
+
+    @staticmethod
+    def _fix_full_casefold(characters):
+        # Split a literal needing full case-folding into chunks that need it
+        # and chunks that can use simple case-folding, which is faster.
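+        # (For example, U+00DF, the German sharp s, full-case-folds to "ss",
+        # so a literal containing it cannot be matched with simple folding.)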
+ expanded = [_regex.fold_case(FULL_CASE_FOLDING, c) for c in + _regex.get_expand_on_folding()] + string = _regex.fold_case(FULL_CASE_FOLDING, ''.join(chr(c) + for c in characters)).lower() + chunks = [] + + for e in expanded: + found = string.find(e) + + while found >= 0: + chunks.append((found, found + len(e))) + found = string.find(e, found + 1) + + pos = 0 + literals = [] + + for start, end in Sequence._merge_chunks(chunks): + if pos < start: + literals.append(Literal(characters[pos : start], + case_flags=IGNORECASE)) + + literals.append(Literal(characters[start : end], + case_flags=FULLIGNORECASE)) + pos = end + + if pos < len(characters): + literals.append(Literal(characters[pos : ], case_flags=IGNORECASE)) + + return literals + + @staticmethod + def _merge_chunks(chunks): + if len(chunks) < 2: + return chunks + + chunks.sort() + + start, end = chunks[0] + new_chunks = [] + + for s, e in chunks[1 : ]: + if s <= end: + end = max(end, e) + else: + new_chunks.append((start, end)) + start, end = s, e + + new_chunks.append((start, end)) + + return new_chunks + + def is_empty(self): + return all(i.is_empty() for i in self.items) + + def __eq__(self, other): + return type(self) is type(other) and self.items == other.items + + def max_width(self): + return sum(s.max_width() for s in self.items) + + def get_required_string(self, reverse): + seq = self.items + if reverse: + seq = seq[::-1] + + offset = 0 + + for s in seq: + ofs, req = s.get_required_string(reverse) + offset += ofs + if req: + return offset, req + + return offset, None + +class SetBase(RegexBase): + def __init__(self, info, items, positive=True, case_flags=NOCASE, + zerowidth=False): + RegexBase.__init__(self) + self.info = info + self.items = tuple(items) + self.positive = bool(positive) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + self.zerowidth = bool(zerowidth) + + self.char_width = 1 + + self._key = (self.__class__, self.items, self.positive, + self.case_flags, self.zerowidth) + + def rebuild(self, positive, case_flags, zerowidth): + return type(self)(self.info, self.items, positive, case_flags, + zerowidth).optimise(self.info, False) + + def get_firstset(self, reverse): + return set([self]) + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + code = [(self._opcode[self.case_flags, reverse], flags)] + for m in self.items: + code.extend(m.compile()) + + code.append((OP.END, )) + + return code + + def dump(self, indent, reverse): + print("{}{} {}{}".format(INDENT * indent, self._op_name, + POS_TEXT[self.positive], CASE_TEXT[self.case_flags])) + for i in self.items: + i.dump(indent + 1, reverse) + + def _handle_case_folding(self, info, in_set): + # Is the set case-sensitive? + if not self.positive or not (self.case_flags & IGNORECASE) or in_set: + return self + + # Is full case-folding possible? + if (not (self.info.flags & UNICODE) or (self.case_flags & + FULLIGNORECASE) != FULLIGNORECASE): + return self + + # Get the characters which expand to multiple codepoints on folding. + expanding_chars = _regex.get_expand_on_folding() + + # Get the folded characters in the set. 
+ items = [] + seen = set() + for ch in expanding_chars: + if self.matches(ord(ch)): + folded = _regex.fold_case(FULL_CASE_FOLDING, ch) + if folded not in seen: + items.append(String([ord(c) for c in folded], + case_flags=self.case_flags)) + seen.add(folded) + + if not items: + # We can fall back to simple case-folding. + return self + + return Branch([self] + items) + + def max_width(self): + # Is the set case-sensitive? + if not self.positive or not (self.case_flags & IGNORECASE): + return 1 + + # Is full case-folding possible? + if (not (self.info.flags & UNICODE) or (self.case_flags & + FULLIGNORECASE) != FULLIGNORECASE): + return 1 + + # Get the characters which expand to multiple codepoints on folding. + expanding_chars = _regex.get_expand_on_folding() + + # Get the folded characters in the set. + seen = set() + for ch in expanding_chars: + if self.matches(ord(ch)): + folded = _regex.fold_case(FULL_CASE_FOLDING, ch) + seen.add(folded) + + if not seen: + return 1 + + return max(len(folded) for folded in seen) + + def __del__(self): + self.info = None + +class SetDiff(SetBase): + _opcode = {(NOCASE, False): OP.SET_DIFF, (IGNORECASE, False): + OP.SET_DIFF_IGN, (FULLCASE, False): OP.SET_DIFF, (FULLIGNORECASE, False): + OP.SET_DIFF_IGN, (NOCASE, True): OP.SET_DIFF_REV, (IGNORECASE, True): + OP.SET_DIFF_IGN_REV, (FULLCASE, True): OP.SET_DIFF_REV, (FULLIGNORECASE, + True): OP.SET_DIFF_IGN_REV} + _op_name = "SET_DIFF" + + def optimise(self, info, reverse, in_set=False): + items = self.items + if len(items) > 2: + items = [items[0], SetUnion(info, items[1 : ])] + + if len(items) == 1: + return items[0].with_flags(case_flags=self.case_flags, + zerowidth=self.zerowidth).optimise(info, reverse, in_set) + + self.items = tuple(m.optimise(info, reverse, in_set=True) for m in + items) + + return self._handle_case_folding(info, in_set) + + def matches(self, ch): + m = self.items[0].matches(ch) and not self.items[1].matches(ch) + return m == self.positive + +class SetInter(SetBase): + _opcode = {(NOCASE, False): OP.SET_INTER, (IGNORECASE, False): + OP.SET_INTER_IGN, (FULLCASE, False): OP.SET_INTER, (FULLIGNORECASE, + False): OP.SET_INTER_IGN, (NOCASE, True): OP.SET_INTER_REV, (IGNORECASE, + True): OP.SET_INTER_IGN_REV, (FULLCASE, True): OP.SET_INTER_REV, + (FULLIGNORECASE, True): OP.SET_INTER_IGN_REV} + _op_name = "SET_INTER" + + def optimise(self, info, reverse, in_set=False): + items = [] + for m in self.items: + m = m.optimise(info, reverse, in_set=True) + if isinstance(m, SetInter) and m.positive: + # Intersection in intersection. 
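+                # A nested positive intersection can be flattened into its
+                # parent without changing what it matches.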
+ items.extend(m.items) + else: + items.append(m) + + if len(items) == 1: + return items[0].with_flags(case_flags=self.case_flags, + zerowidth=self.zerowidth).optimise(info, reverse, in_set) + + self.items = tuple(items) + + return self._handle_case_folding(info, in_set) + + def matches(self, ch): + m = all(i.matches(ch) for i in self.items) + return m == self.positive + +class SetSymDiff(SetBase): + _opcode = {(NOCASE, False): OP.SET_SYM_DIFF, (IGNORECASE, False): + OP.SET_SYM_DIFF_IGN, (FULLCASE, False): OP.SET_SYM_DIFF, (FULLIGNORECASE, + False): OP.SET_SYM_DIFF_IGN, (NOCASE, True): OP.SET_SYM_DIFF_REV, + (IGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV, (FULLCASE, True): + OP.SET_SYM_DIFF_REV, (FULLIGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV} + _op_name = "SET_SYM_DIFF" + + def optimise(self, info, reverse, in_set=False): + items = [] + for m in self.items: + m = m.optimise(info, reverse, in_set=True) + if isinstance(m, SetSymDiff) and m.positive: + # Symmetric difference in symmetric difference. + items.extend(m.items) + else: + items.append(m) + + if len(items) == 1: + return items[0].with_flags(case_flags=self.case_flags, + zerowidth=self.zerowidth).optimise(info, reverse, in_set) + + self.items = tuple(items) + + return self._handle_case_folding(info, in_set) + + def matches(self, ch): + m = False + for i in self.items: + m = m != i.matches(ch) + + return m == self.positive + +class SetUnion(SetBase): + _opcode = {(NOCASE, False): OP.SET_UNION, (IGNORECASE, False): + OP.SET_UNION_IGN, (FULLCASE, False): OP.SET_UNION, (FULLIGNORECASE, + False): OP.SET_UNION_IGN, (NOCASE, True): OP.SET_UNION_REV, (IGNORECASE, + True): OP.SET_UNION_IGN_REV, (FULLCASE, True): OP.SET_UNION_REV, + (FULLIGNORECASE, True): OP.SET_UNION_IGN_REV} + _op_name = "SET_UNION" + + def optimise(self, info, reverse, in_set=False): + items = [] + for m in self.items: + m = m.optimise(info, reverse, in_set=True) + if isinstance(m, SetUnion) and m.positive: + # Union in union. 
+ items.extend(m.items) + else: + items.append(m) + + if len(items) == 1: + i = items[0] + return i.with_flags(positive=i.positive == self.positive, + case_flags=self.case_flags, + zerowidth=self.zerowidth).optimise(info, reverse, in_set) + + self.items = tuple(items) + + return self._handle_case_folding(info, in_set) + + def _compile(self, reverse, fuzzy): + flags = 0 + if self.positive: + flags |= POSITIVE_OP + if self.zerowidth: + flags |= ZEROWIDTH_OP + if fuzzy: + flags |= FUZZY_OP + + characters, others = defaultdict(list), [] + for m in self.items: + if isinstance(m, Character): + characters[m.positive].append(m.value) + else: + others.append(m) + + code = [(self._opcode[self.case_flags, reverse], flags)] + + for positive, values in characters.items(): + flags = 0 + if positive: + flags |= POSITIVE_OP + if len(values) == 1: + code.append((OP.CHARACTER, flags, values[0])) + else: + code.append((OP.STRING, flags, len(values)) + tuple(values)) + + for m in others: + code.extend(m.compile()) + + code.append((OP.END, )) + + return code + + def matches(self, ch): + m = any(i.matches(ch) for i in self.items) + return m == self.positive + +class Skip(ZeroWidthBase): + _op_name = "SKIP" + _opcode = OP.SKIP + +class StartOfLine(ZeroWidthBase): + _opcode = OP.START_OF_LINE + _op_name = "START_OF_LINE" + +class StartOfLineU(StartOfLine): + _opcode = OP.START_OF_LINE_U + _op_name = "START_OF_LINE_U" + +class StartOfString(ZeroWidthBase): + _opcode = OP.START_OF_STRING + _op_name = "START_OF_STRING" + +class StartOfWord(ZeroWidthBase): + _opcode = OP.START_OF_WORD + _op_name = "START_OF_WORD" + +class String(RegexBase): + _opcode = {(NOCASE, False): OP.STRING, (IGNORECASE, False): OP.STRING_IGN, + (FULLCASE, False): OP.STRING, (FULLIGNORECASE, False): OP.STRING_FLD, + (NOCASE, True): OP.STRING_REV, (IGNORECASE, True): OP.STRING_IGN_REV, + (FULLCASE, True): OP.STRING_REV, (FULLIGNORECASE, True): + OP.STRING_FLD_REV} + + def __init__(self, characters, case_flags=NOCASE): + self.characters = tuple(characters) + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + + if (self.case_flags & FULLIGNORECASE) == FULLIGNORECASE: + folded_characters = [] + for char in self.characters: + folded = _regex.fold_case(FULL_CASE_FOLDING, chr(char)) + folded_characters.extend(ord(c) for c in folded) + else: + folded_characters = self.characters + + self.folded_characters = tuple(folded_characters) + self.required = False + + self._key = self.__class__, self.characters, self.case_flags + + def get_firstset(self, reverse): + if reverse: + pos = -1 + else: + pos = 0 + return set([Character(self.characters[pos], + case_flags=self.case_flags)]) + + def has_simple_start(self): + return True + + def _compile(self, reverse, fuzzy): + flags = 0 + if fuzzy: + flags |= FUZZY_OP + if self.required: + flags |= REQUIRED_OP + return [(self._opcode[self.case_flags, reverse], flags, + len(self.folded_characters)) + self.folded_characters] + + def dump(self, indent, reverse): + display = ascii("".join(chr(c) for c in self.characters)).lstrip("bu") + print("{}STRING {}{}".format(INDENT * indent, display, + CASE_TEXT[self.case_flags])) + + def max_width(self): + return len(self.folded_characters) + + def get_required_string(self, reverse): + return 0, self + +class Literal(String): + def dump(self, indent, reverse): + literal = ''.join(chr(c) for c in self.characters) + display = ascii(literal).lstrip("bu") + print("{}LITERAL MATCH {}{}".format(INDENT * indent, display, + CASE_TEXT[self.case_flags])) + +class StringSet(Branch): + 
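+    # A StringSet matches any of the literal strings supplied at compile time
+    # via a keyword argument (the \L<name> construct). It is built as a Branch
+    # over the strings, tried longest first.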
def __init__(self, info, name, case_flags=NOCASE): + self.info = info + self.name = name + self.case_flags = CASE_FLAGS_COMBINATIONS[case_flags] + + self._key = self.__class__, self.name, self.case_flags + + self.set_key = (name, self.case_flags) + if self.set_key not in info.named_lists_used: + info.named_lists_used[self.set_key] = len(info.named_lists_used) + + index = self.info.named_lists_used[self.set_key] + items = self.info.kwargs[self.name] + + case_flags = self.case_flags + + encoding = self.info.flags & _ALL_ENCODINGS + fold_flags = encoding | case_flags + + choices = [] + + for string in items: + if isinstance(string, str): + string = [ord(c) for c in string] + + choices.append([Character(c, case_flags=case_flags) for c in + string]) + + # Sort from longest to shortest. + choices.sort(key=len, reverse=True) + + self.branches = [Sequence(choice) for choice in choices] + + def dump(self, indent, reverse): + print("{}STRING_SET {}{}".format(INDENT * indent, self.name, + CASE_TEXT[self.case_flags])) + + def __del__(self): + self.info = None + +class Source: + "Scanner for the regular expression source string." + def __init__(self, string): + if isinstance(string, str): + self.string = string + self.char_type = chr + else: + self.string = string.decode("latin-1") + self.char_type = lambda c: bytes([c]) + + self.pos = 0 + self.ignore_space = False + self.sep = string[ : 0] + + def get(self, override_ignore=False): + string = self.string + pos = self.pos + + try: + if self.ignore_space and not override_ignore: + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + else: + break + + ch = string[pos] + self.pos = pos + 1 + return ch + except IndexError: + # We've reached the end of the string. + self.pos = pos + return string[ : 0] + except ValueError: + # The comment extended to the end of the string. + self.pos = len(string) + return string[ : 0] + + def get_many(self, count=1): + string = self.string + pos = self.pos + + try: + if self.ignore_space: + substring = [] + + while len(substring) < count: + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + else: + break + + substring.append(string[pos]) + pos += 1 + + substring = "".join(substring) + else: + substring = string[pos : pos + count] + pos += len(substring) + + self.pos = pos + return substring + except IndexError: + # We've reached the end of the string. + self.pos = len(string) + return "".join(substring) + except ValueError: + # The comment extended to the end of the string. + self.pos = len(string) + return "".join(substring) + + def get_while(self, test_set, include=True, keep_spaces=False): + string = self.string + pos = self.pos + + if self.ignore_space and not keep_spaces: + try: + substring = [] + + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + elif (string[pos] in test_set) == include: + substring.append(string[pos]) + pos += 1 + else: + break + + self.pos = pos + except IndexError: + # We've reached the end of the string. + self.pos = len(string) + except ValueError: + # The comment extended to the end of the string. 
+ self.pos = len(string) + + return "".join(substring) + else: + try: + while (string[pos] in test_set) == include: + pos += 1 + + substring = string[self.pos : pos] + + self.pos = pos + + return substring + except IndexError: + # We've reached the end of the string. + substring = string[self.pos : pos] + + self.pos = pos + + return substring + + def skip_while(self, test_set, include=True): + string = self.string + pos = self.pos + + try: + if self.ignore_space: + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + elif (string[pos] in test_set) == include: + pos += 1 + else: + break + else: + while (string[pos] in test_set) == include: + pos += 1 + + self.pos = pos + except IndexError: + # We've reached the end of the string. + self.pos = len(string) + except ValueError: + # The comment extended to the end of the string. + self.pos = len(string) + + def match(self, substring): + string = self.string + pos = self.pos + + if self.ignore_space: + try: + for c in substring: + while True: + if string[pos].isspace(): + # Skip over the whitespace. + pos += 1 + elif string[pos] == "#": + # Skip over the comment to the end of the line. + pos = string.index("\n", pos) + else: + break + + if string[pos] != c: + return False + + pos += 1 + + self.pos = pos + + return True + except IndexError: + # We've reached the end of the string. + return False + except ValueError: + # The comment extended to the end of the string. + return False + else: + if not string.startswith(substring, pos): + return False + + self.pos = pos + len(substring) + + return True + + def expect(self, substring): + if not self.match(substring): + raise error("missing {}".format(substring), self.string, self.pos) + + def at_end(self): + string = self.string + pos = self.pos + + try: + if self.ignore_space: + while True: + if string[pos].isspace(): + pos += 1 + elif string[pos] == "#": + pos = string.index("\n", pos) + else: + break + + return pos >= len(string) + except IndexError: + # We've reached the end of the string. + return True + except ValueError: + # The comment extended to the end of the string. + return True + +class Info: + "Info about the regular expression." + + def __init__(self, flags=0, char_type=None, kwargs={}): + flags |= DEFAULT_FLAGS[(flags & _ALL_VERSIONS) or DEFAULT_VERSION] + self.flags = flags + self.global_flags = flags + self.inline_locale = False + + self.kwargs = kwargs + + self.group_count = 0 + self.group_index = {} + self.group_name = {} + self.char_type = char_type + self.named_lists_used = {} + self.open_groups = [] + self.open_group_count = {} + self.defined_groups = {} + self.group_calls = [] + self.private_groups = {} + + def open_group(self, name=None): + group = self.group_index.get(name) + if group is None: + while True: + self.group_count += 1 + if name is None or self.group_count not in self.group_name: + break + + group = self.group_count + if name: + self.group_index[name] = group + self.group_name[group] = name + + if group in self.open_groups: + # We have a nested named group. We'll assign it a private group + # number, initially negative until we can assign a proper + # (positive) number. 
+ group_alias = -(len(self.private_groups) + 1) + self.private_groups[group_alias] = group + group = group_alias + + self.open_groups.append(group) + self.open_group_count[group] = self.open_group_count.get(group, 0) + 1 + + return group + + def close_group(self): + self.open_groups.pop() + + def is_open_group(self, name): + # In version 1, a group reference can refer to an open group. We'll + # just pretend the group isn't open. + version = (self.flags & _ALL_VERSIONS) or DEFAULT_VERSION + if version == VERSION1: + return False + + if name.isdigit(): + group = int(name) + else: + group = self.group_index.get(name) + + return group in self.open_groups + +def _check_group_features(info, parsed): + """Checks whether the reverse and fuzzy features of the group calls match + the groups which they call. + """ + call_refs = {} + additional_groups = [] + for call, reverse, fuzzy in info.group_calls: + # Look up the reference of this group call. + key = (call.group, reverse, fuzzy) + ref = call_refs.get(key) + if ref is None: + # This group doesn't have a reference yet, so look up its features. + if call.group == 0: + # Calling the pattern as a whole. + rev = bool(info.flags & REVERSE) + fuz = isinstance(parsed, Fuzzy) + if (rev, fuz) != (reverse, fuzzy): + # The pattern as a whole doesn't have the features we want, + # so we'll need to make a copy of it with the desired + # features. + additional_groups.append((CallRef(len(call_refs), parsed), + reverse, fuzzy)) + else: + # Calling a capture group. + def_info = info.defined_groups[call.group] + group = def_info[0] + if def_info[1 : ] != (reverse, fuzzy): + # The group doesn't have the features we want, so we'll + # need to make a copy of it with the desired features. + additional_groups.append((group, reverse, fuzzy)) + + ref = len(call_refs) + call_refs[key] = ref + + call.call_ref = ref + + info.call_refs = call_refs + info.additional_groups = additional_groups + +def _get_required_string(parsed, flags): + "Gets the required string and related info of a parsed pattern." + + req_offset, required = parsed.get_required_string(bool(flags & REVERSE)) + if required: + required.required = True + if req_offset >= UNLIMITED: + req_offset = -1 + + req_flags = required.case_flags + if not (flags & UNICODE): + req_flags &= ~UNICODE + + req_chars = required.folded_characters + else: + req_offset = 0 + req_chars = () + req_flags = 0 + + return req_offset, req_chars, req_flags + +class Scanner: + def __init__(self, lexicon, flags=0): + self.lexicon = lexicon + + # Combine phrases into a compound pattern. + patterns = [] + for phrase, action in lexicon: + # Parse the regular expression. + source = Source(phrase) + info = Info(flags, source.char_type) + source.ignore_space = bool(info.flags & VERBOSE) + parsed = _parse_pattern(source, info) + if not source.at_end(): + raise error("unbalanced parenthesis", source.string, + source.pos) + + # We want to forbid capture groups within each phrase. + patterns.append(parsed.remove_captures()) + + # Combine all the subpatterns into one pattern. + info = Info(flags) + patterns = [Group(info, g + 1, p) for g, p in enumerate(patterns)] + parsed = Branch(patterns) + + # Optimise the compound pattern. + reverse = bool(info.flags & REVERSE) + parsed = parsed.optimise(info, reverse) + parsed = parsed.pack_characters(info) + + # Get the required string. + req_offset, req_chars, req_flags = _get_required_string(parsed, + info.flags) + + # Check the features of the groups. 
+ _check_group_features(info, parsed) + + # Complain if there are any group calls. They are not supported by the + # Scanner class. + if info.call_refs: + raise error("recursive regex not supported by Scanner", + source.string, source.pos) + + reverse = bool(info.flags & REVERSE) + + # Compile the compound pattern. The result is a list of tuples. + code = parsed.compile(reverse) + [(OP.SUCCESS, )] + + # Flatten the code into a list of ints. + code = _flatten_code(code) + + if not parsed.has_simple_start(): + # Get the first set, if possible. + try: + fs_code = _compile_firstset(info, parsed.get_firstset(reverse)) + fs_code = _flatten_code(fs_code) + code = fs_code + code + except _FirstSetError: + pass + + # Check the global flags for conflicts. + version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION + if version not in (0, VERSION0, VERSION1): + raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible") + + # Create the PatternObject. + # + # Local flags like IGNORECASE affect the code generation, but aren't + # needed by the PatternObject itself. Conversely, global flags like + # LOCALE _don't_ affect the code generation but _are_ needed by the + # PatternObject. + self.scanner = _regex.compile(None, (flags & GLOBAL_FLAGS) | version, + code, {}, {}, {}, [], req_offset, req_chars, req_flags, + len(patterns)) + + def scan(self, string): + result = [] + append = result.append + match = self.scanner.scanner(string).match + i = 0 + while True: + m = match() + if not m: + break + j = m.end() + if i == j: + break + action = self.lexicon[m.lastindex - 1][1] + if hasattr(action, '__call__'): + self.match = m + action = action(self, m.group()) + if action is not None: + append(action) + i = j + + return result, string[i : ] + +# Get the known properties dict. +PROPERTIES = _regex.get_properties() + +# Build the inverse of the properties dict. +PROPERTY_NAMES = {} +for prop_name, (prop_id, values) in PROPERTIES.items(): + name, prop_values = PROPERTY_NAMES.get(prop_id, ("", {})) + name = max(name, prop_name, key=len) + PROPERTY_NAMES[prop_id] = name, prop_values + + for val_name, val_id in values.items(): + prop_values[val_id] = max(prop_values.get(val_id, ""), val_name, + key=len) + +# Character escape sequences. +CHARACTER_ESCAPES = { + "a": "\a", + "b": "\b", + "f": "\f", + "n": "\n", + "r": "\r", + "t": "\t", + "v": "\v", +} + +# Predefined character set escape sequences. +CHARSET_ESCAPES = { + "d": lookup_property(None, "Digit", True), + "D": lookup_property(None, "Digit", False), + "h": lookup_property(None, "Blank", True), + "s": lookup_property(None, "Space", True), + "S": lookup_property(None, "Space", False), + "w": lookup_property(None, "Word", True), + "W": lookup_property(None, "Word", False), +} + +# Positional escape sequences. +POSITION_ESCAPES = { + "A": StartOfString(), + "b": Boundary(), + "B": Boundary(False), + "K": Keep(), + "m": StartOfWord(), + "M": EndOfWord(), + "Z": EndOfString(), +} + +# Positional escape sequences when WORD flag set. +WORD_POSITION_ESCAPES = dict(POSITION_ESCAPES) +WORD_POSITION_ESCAPES.update({ + "b": DefaultBoundary(), + "B": DefaultBoundary(False), + "m": DefaultStartOfWord(), + "M": DefaultEndOfWord(), +}) + +# Regex control verbs. 
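+# (FAIL, abbreviated F, forces the current match attempt to fail; PRUNE
+# discards the backtracking positions gathered so far; SKIP additionally
+# moves the restart point of a failed search past the text already matched.)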
+VERBS = {
+    "FAIL": Failure(),
+    "F": Failure(),
+    "PRUNE": Prune(),
+    "SKIP": Skip(),
+}
diff --git a/infer_4_33_0/lib/python3.10/site-packages/regex/regex.py b/infer_4_33_0/lib/python3.10/site-packages/regex/regex.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fdb4da983c495d15db51e593ec880c8513ed55f
--- /dev/null
+++ b/infer_4_33_0/lib/python3.10/site-packages/regex/regex.py
@@ -0,0 +1,746 @@
+#
+# Secret Labs' Regular Expression Engine
+#
+# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
+#
+# This version of the SRE library can be redistributed under CNRI's
+# Python 1.6 license. For any other use, please contact Secret Labs
+# AB (info@pythonware.com).
+#
+# Portions of this engine have been developed in cooperation with
+# CNRI. Hewlett-Packard provided funding for 1.6 integration and
+# other compatibility work.
+#
+# 2010-01-16 mrab Python front-end re-written and extended
+
+r"""Support for regular expressions (RE).
+
+This module provides regular expression matching operations similar to those
+found in Perl. It supports both 8-bit and Unicode strings; both the pattern and
+the strings being processed can contain null bytes and characters outside the
+US ASCII range.
+
+Regular expressions can contain both special and ordinary characters. Most
+ordinary characters, like "A", "a", or "0", are the simplest regular
+expressions; they simply match themselves. You can concatenate ordinary
+characters, so last matches the string 'last'.
+
+There are a few differences between the old (legacy) behaviour and the new
+(enhanced) behaviour, which are indicated by VERSION0 or VERSION1.
+
+The special characters are:
+    "."                 Matches any character except a newline.
+    "^"                 Matches the start of the string.
+    "$"                 Matches the end of the string or just before the
+                        newline at the end of the string.
+    "*"                 Matches 0 or more (greedy) repetitions of the preceding
+                        RE. Greedy means that it will match as many repetitions
+                        as possible.
+    "+"                 Matches 1 or more (greedy) repetitions of the preceding
+                        RE.
+    "?"                 Matches 0 or 1 (greedy) of the preceding RE.
+    *?,+?,??            Non-greedy versions of the previous three special
+                        characters.
+    *+,++,?+            Possessive versions of the previous three special
+                        characters.
+    {m,n}               Matches from m to n repetitions of the preceding RE.
+    {m,n}?              Non-greedy version of the above.
+    {m,n}+              Possessive version of the above.
+    {...}               Fuzzy matching constraints.
+    "\\"                Either escapes special characters or signals a special
+                        sequence.
+    [...]               Indicates a set of characters. A "^" as the first
+                        character indicates a complementing set.
+    "|"                 A|B, creates an RE that will match either A or B.
+    (...)               Matches the RE inside the parentheses. The contents are
+                        captured and can be retrieved or matched later in the
+                        string.
+    (?flags-flags)      VERSION1: Sets/clears the flags for the remainder of
+                        the group or pattern; VERSION0: Sets the flags for the
+                        entire pattern.
+    (?:...)             Non-capturing version of regular parentheses.
+    (?>...)             Atomic non-capturing version of regular parentheses.
+    (?flags-flags:...)  Non-capturing version of regular parentheses with local
+                        flags.
+    (?P<name>...)       The substring matched by the group is accessible by
+                        name.
+    (?<name>...)        The substring matched by the group is accessible by
+                        name.
+    (?P=name)           Matches the text matched earlier by the group named
+                        name.
+    (?#...)             A comment; ignored.
+    (?=...)             Matches if ... matches next, but doesn't consume the
+                        string.
+    (?!...)             Matches if ... doesn't match next.
+    (?<=...)            Matches if preceded by ....
+    (?<!...)            Matches if not preceded by ....
+    (?(id/name)yes|no)  Matches yes pattern if group id/name matched, the
+                        (optional) no pattern otherwise.
+
+The special sequences consist of "\\" and a character from the list below.
+If the ordinary character is not on the list, then the resulting RE will
+match the second character.
+    \number             Matches the contents of the group of the same number if
+                        number is no more than 2 digits, otherwise the character
+                        with the 3-digit octal code.
+    \a                  Matches the bell character.
+    \A                  Matches only at the start of the string.
+    \b                  Matches the empty string, but only at the start or end
+                        of a word.
+    \B                  Matches the empty string, but not at the start or end
+                        of a word.
+    \d                  Matches any decimal digit; equivalent to [0-9] when
+                        matching a bytestring or a Unicode string with the
+                        ASCII flag, or the whole range of Unicode digits when
+                        matching a Unicode string.
+    \D                  Matches the complement of \d; equivalent to [^\d].
+    \f                  Matches the formfeed character.
+    \g<name>            Matches the text matched by the group named name.
+    \G                  Matches the empty string, but only at the position where
+                        the search started.
+    \h                  Matches horizontal whitespace.
+    \K                  Keeps only what follows for the entire match.
+    \L<name>            Named list. The list is provided as a keyword argument.
+    \m                  Matches the empty string, but only at the start of a word.
+    \M                  Matches the empty string, but only at the end of a word.
+    \n                  Matches the newline character.
+    \N{name}            Matches the named character.
+    \p{name=value}      Matches the character if its property has the specified
+                        value.
+    \P{name=value}      Matches the character if its property hasn't the specified
+                        value.
+    \r                  Matches the carriage-return character.
+    \s                  Matches any whitespace character; equivalent to
+                        [ \t\n\r\f\v].
+    \S                  Matches any non-whitespace character; equivalent to [^\s].
+    \t                  Matches the tab character.
+    \uXXXX              Matches the Unicode codepoint with 4-digit hex code XXXX.
+    \UXXXXXXXX          Matches the Unicode codepoint with 8-digit hex code
+                        XXXXXXXX.
+    \v                  Matches the vertical tab character.
+    \w                  Matches any alphanumeric character; equivalent to
+                        [a-zA-Z0-9_] when matching a bytestring or a Unicode string
+                        with the ASCII flag, or the whole range of Unicode
+                        alphanumeric characters (letters plus digits plus
+                        underscore) when matching a Unicode string. With LOCALE, it
+                        will match the set [0-9_] plus characters defined as
+                        letters for the current locale.
+    \W                  Matches the complement of \w; equivalent to [^\w].
+    \xXX                Matches the character with 2-digit hex code XX.
+    \X                  Matches a grapheme.
+    \Z                  Matches only at the end of the string.
+    \\                  Matches a literal backslash.
+
+This module exports the following functions:
+    match      Match a regular expression pattern at the beginning of a string.
+    fullmatch  Match a regular expression pattern against all of a string.
+    search     Search a string for the presence of a pattern.
+    sub        Substitute occurrences of a pattern found in a string using a
+               template string.
+    subf       Substitute occurrences of a pattern found in a string using a
+               format string.
+    subn       Same as sub, but also return the number of substitutions made.
+    subfn      Same as subf, but also return the number of substitutions made.
+    split      Split a string by the occurrences of a pattern. VERSION1: will
+               split at zero-width match; VERSION0: won't split at zero-width
+               match.
+    splititer  Return an iterator yielding the parts of a split string.
+    findall    Find all occurrences of a pattern in a string.
+    finditer   Return an iterator yielding a match object for each match.
+    compile    Compile a pattern into a Pattern object.
+    purge      Clear the regular expression cache.
+    escape     Backslash all non-alphanumerics or special characters in a
+               string.
+
+Most of the functions support a concurrent parameter: if True, the GIL will be
+released during matching, allowing other Python threads to run concurrently. If
+the string changes during matching, the behaviour is undefined. This parameter
+is not needed when working on the builtin (immutable) string classes.
+
+Some of the functions in this module take flags as optional parameters. Most of
+these flags can also be set within an RE:
+    A   a   ASCII         Make \w, \W, \b, \B, \d, and \D match the
+                          corresponding ASCII character categories. Default
+                          when matching a bytestring.
+    B   b   BESTMATCH     Find the best fuzzy match (default is first).
+    D       DEBUG         Print the parsed pattern.
+    E   e   ENHANCEMATCH  Attempt to improve the fit after finding the first
+                          fuzzy match.
+ F f FULLCASE Use full case-folding when performing + case-insensitive matching in Unicode. + I i IGNORECASE Perform case-insensitive matching. + L L LOCALE Make \w, \W, \b, \B, \d, and \D dependent on the + current locale. (One byte per character only.) + M m MULTILINE "^" matches the beginning of lines (after a newline) + as well as the string. "$" matches the end of lines + (before a newline) as well as the end of the string. + P p POSIX Perform POSIX-standard matching (leftmost longest). + R r REVERSE Searches backwards. + S s DOTALL "." matches any character at all, including the + newline. + U u UNICODE Make \w, \W, \b, \B, \d, and \D dependent on the + Unicode locale. Default when matching a Unicode + string. + V0 V0 VERSION0 Turn on the old legacy behaviour. + V1 V1 VERSION1 Turn on the new enhanced behaviour. This flag + includes the FULLCASE flag. + W w WORD Make \b and \B work with default Unicode word breaks + and make ".", "^" and "$" work with Unicode line + breaks. + X x VERBOSE Ignore whitespace and comments for nicer looking REs. + +This module also defines an exception 'error'. + +""" + +# Public symbols. +__all__ = ["cache_all", "compile", "DEFAULT_VERSION", "escape", "findall", + "finditer", "fullmatch", "match", "purge", "search", "split", "splititer", + "sub", "subf", "subfn", "subn", "template", "Scanner", "A", "ASCII", "B", + "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH", "S", "DOTALL", "F", + "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "P", "POSIX", + "R", "REVERSE", "T", "TEMPLATE", "U", "UNICODE", "V0", "VERSION0", "V1", + "VERSION1", "X", "VERBOSE", "W", "WORD", "error", "Regex", "__version__", + "__doc__", "RegexFlag"] + +__version__ = "2.5.148" + +# -------------------------------------------------------------------- +# Public interface. + +def match(pattern, string, flags=0, pos=None, endpos=None, partial=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Try to apply the pattern at the start of the string, returning a match + object, or None if no match was found.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.match(string, pos, endpos, concurrent, partial, timeout) + +def fullmatch(pattern, string, flags=0, pos=None, endpos=None, partial=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Try to apply the pattern against all of the string, returning a match + object, or None if no match was found.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.fullmatch(string, pos, endpos, concurrent, partial, timeout) + +def search(pattern, string, flags=0, pos=None, endpos=None, partial=False, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Search through string looking for a match to the pattern, returning a + match object, or None if no match was found.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.search(string, pos, endpos, concurrent, partial, timeout) + +def sub(pattern, repl, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return the string obtained by replacing the leftmost (or rightmost with a + reverse pattern) non-overlapping occurrences of the pattern in string by the + replacement repl. 
repl can be either a string or a callable; if a string, + backslash escapes in it are processed; if a callable, it's passed the match + object and must return a replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.sub(repl, string, count, pos, endpos, concurrent, timeout) + +def subf(pattern, format, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return the string obtained by replacing the leftmost (or rightmost with a + reverse pattern) non-overlapping occurrences of the pattern in string by the + replacement format. format can be either a string or a callable; if a string, + it's treated as a format string; if a callable, it's passed the match object + and must return a replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.subf(format, string, count, pos, endpos, concurrent, timeout) + +def subn(pattern, repl, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return a 2-tuple containing (new_string, number). new_string is the string + obtained by replacing the leftmost (or rightmost with a reverse pattern) + non-overlapping occurrences of the pattern in the source string by the + replacement repl. number is the number of substitutions that were made. repl + can be either a string or a callable; if a string, backslash escapes in it + are processed; if a callable, it's passed the match object and must return a + replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.subn(repl, string, count, pos, endpos, concurrent, timeout) + +def subfn(pattern, format, string, count=0, flags=0, pos=None, endpos=None, + concurrent=None, timeout=None, ignore_unused=False, **kwargs): + """Return a 2-tuple containing (new_string, number). new_string is the string + obtained by replacing the leftmost (or rightmost with a reverse pattern) + non-overlapping occurrences of the pattern in the source string by the + replacement format. number is the number of substitutions that were made. format + can be either a string or a callable; if a string, it's treated as a format + string; if a callable, it's passed the match object and must return a + replacement string to be used.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.subfn(format, string, count, pos, endpos, concurrent, timeout) + +def split(pattern, string, maxsplit=0, flags=0, concurrent=None, timeout=None, + ignore_unused=False, **kwargs): + """Split the source string by the occurrences of the pattern, returning a + list containing the resulting substrings. If capturing parentheses are used + in pattern, then the text of all groups in the pattern are also returned as + part of the resulting list. If maxsplit is nonzero, at most maxsplit splits + occur, and the remainder of the string is returned as the final element of + the list.""" + pat = _compile(pattern, flags, ignore_unused, kwargs, True) + return pat.split(string, maxsplit, concurrent, timeout) + +def splititer(pattern, string, maxsplit=0, flags=0, concurrent=None, + timeout=None, ignore_unused=False, **kwargs): + "Return an iterator yielding the parts of a split string." 
+    pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+    return pat.splititer(string, maxsplit, concurrent, timeout)
+
+def findall(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
+  concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+    """Return a list of all matches in the string. The matches may be overlapped
+    if overlapped is True. If one or more groups are present in the pattern,
+    return a list of groups; this will be a list of tuples if the pattern has
+    more than one group. Empty matches are included in the result."""
+    pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+    return pat.findall(string, pos, endpos, overlapped, concurrent, timeout)
+
+def finditer(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
+  partial=False, concurrent=None, timeout=None, ignore_unused=False, **kwargs):
+    """Return an iterator over all matches in the string. The matches may be
+    overlapped if overlapped is True. For each match, the iterator returns a
+    match object. Empty matches are included in the result."""
+    pat = _compile(pattern, flags, ignore_unused, kwargs, True)
+    return pat.finditer(string, pos, endpos, overlapped, concurrent, partial,
+      timeout)
+
+def compile(pattern, flags=0, ignore_unused=False, cache_pattern=None, **kwargs):
+    "Compile a regular expression pattern, returning a pattern object."
+    if cache_pattern is None:
+        cache_pattern = _cache_all
+    return _compile(pattern, flags, ignore_unused, kwargs, cache_pattern)
+
+def purge():
+    "Clear the regular expression cache"
+    _cache.clear()
+    _locale_sensitive.clear()
+
+# Whether to cache all patterns.
+_cache_all = True
+
+def cache_all(value=True):
+    """Sets whether to cache all patterns, even those that are compiled
+    explicitly. Passing None has no effect, but returns the current setting."""
+    global _cache_all
+
+    if value is None:
+        return _cache_all
+
+    _cache_all = value
+
+def template(pattern, flags=0):
+    "Compile a template pattern, returning a pattern object."
+    return _compile(pattern, flags | TEMPLATE, False, {}, False)
+
+def escape(pattern, special_only=True, literal_spaces=False):
+    """Escape a string for use as a literal in a pattern. If special_only is
+    True, escape only special characters, else escape all non-alphanumeric
+    characters. If literal_spaces is True, don't escape spaces."""
+    # Convert it to Unicode.
+    if isinstance(pattern, bytes):
+        p = pattern.decode("latin-1")
+    else:
+        p = pattern
+
+    s = []
+    if special_only:
+        for c in p:
+            if c == " " and literal_spaces:
+                s.append(c)
+            elif c in _METACHARS or c.isspace():
+                s.append("\\")
+                s.append(c)
+            else:
+                s.append(c)
+    else:
+        for c in p:
+            if c == " " and literal_spaces:
+                s.append(c)
+            elif c in _ALNUM:
+                s.append(c)
+            else:
+                s.append("\\")
+                s.append(c)
+
+    r = "".join(s)
+    # Convert it back to bytes if necessary.
+    if isinstance(pattern, bytes):
+        r = r.encode("latin-1")
+
+    return r
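+
+# A short usage sketch of the interface above (illustrative only; it relies
+# solely on behaviour described in the module docstring: named lists via
+# \L<name> and fuzzy matching):
+#
+#     import regex
+#
+#     # Named list: \L<words> matches any literal string in the 'words'
+#     # keyword argument.
+#     m = regex.search(r'\L<words>', 'Hello world', words=['planet', 'world'])
+#     assert m.group() == 'world'
+#
+#     # Fuzzy matching: allow at most one error when matching 'colour'.
+#     assert regex.search(r'(?:colour){e<=1}', 'color me surprised')
+
+# --------------------------------------------------------------------
+# Internals.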
+ +import regex._regex_core as _regex_core +import regex._regex as _regex +from threading import RLock as _RLock +from locale import getpreferredencoding as _getpreferredencoding +from regex._regex_core import * +from regex._regex_core import (_ALL_VERSIONS, _ALL_ENCODINGS, _FirstSetError, + _UnscopedFlagSet, _check_group_features, _compile_firstset, + _compile_replacement, _flatten_code, _fold_case, _get_required_string, + _parse_pattern, _shrink_cache) +from regex._regex_core import (ALNUM as _ALNUM, Info as _Info, OP as _OP, Source + as _Source, Fuzzy as _Fuzzy) + +# Version 0 is the old behaviour, compatible with the original 're' module. +# Version 1 is the new behaviour, which differs slightly. + +DEFAULT_VERSION = VERSION0 + +_METACHARS = frozenset("()[]{}?*+|^$\\.-#&~") + +_regex_core.DEFAULT_VERSION = DEFAULT_VERSION + +# Caches for the patterns and replacements. +_cache = {} +_cache_lock = _RLock() +_named_args = {} +_replacement_cache = {} +_locale_sensitive = {} + +# Maximum size of the cache. +_MAXCACHE = 500 +_MAXREPCACHE = 500 + +def _compile(pattern, flags, ignore_unused, kwargs, cache_it): + "Compiles a regular expression to a PatternObject." + + global DEFAULT_VERSION + try: + from regex import DEFAULT_VERSION + except ImportError: + pass + + # We won't bother to cache the pattern if we're debugging. + if (flags & DEBUG) != 0: + cache_it = False + + # What locale is this pattern using? + locale_key = (type(pattern), pattern) + if _locale_sensitive.get(locale_key, True) or (flags & LOCALE) != 0: + # This pattern is, or might be, locale-sensitive. + pattern_locale = _getpreferredencoding() + else: + # This pattern is definitely not locale-sensitive. + pattern_locale = None + + def complain_unused_args(): + if ignore_unused: + return + + # Complain about any unused keyword arguments, possibly resulting from a typo. + unused_kwargs = set(kwargs) - {k for k, v in args_needed} + if unused_kwargs: + any_one = next(iter(unused_kwargs)) + raise ValueError('unused keyword argument {!a}'.format(any_one)) + + if cache_it: + try: + # Do we know what keyword arguments are needed? + args_key = pattern, type(pattern), flags + args_needed = _named_args[args_key] + + # Are we being provided with its required keyword arguments? + args_supplied = set() + if args_needed: + for k, v in args_needed: + try: + args_supplied.add((k, frozenset(kwargs[k]))) + except KeyError: + raise error("missing named list: {!r}".format(k)) + + complain_unused_args() + + args_supplied = frozenset(args_supplied) + + # Have we already seen this regular expression and named list? + pattern_key = (pattern, type(pattern), flags, args_supplied, + DEFAULT_VERSION, pattern_locale) + return _cache[pattern_key] + except KeyError: + # It's a new pattern, or new named list for a known pattern. + pass + + # Guess the encoding from the class of the pattern string. + if isinstance(pattern, str): + guess_encoding = UNICODE + elif isinstance(pattern, bytes): + guess_encoding = ASCII + elif isinstance(pattern, Pattern): + if flags: + raise ValueError("cannot process flags argument with a compiled pattern") + + return pattern + else: + raise TypeError("first argument must be a string or compiled pattern") + + # Set the default version in the core code in case it has been changed. 
+    _regex_core.DEFAULT_VERSION = DEFAULT_VERSION
+
+    global_flags = flags
+
+    while True:
+        caught_exception = None
+        try:
+            source = _Source(pattern)
+            info = _Info(global_flags, source.char_type, kwargs)
+            info.guess_encoding = guess_encoding
+            source.ignore_space = bool(info.flags & VERBOSE)
+            parsed = _parse_pattern(source, info)
+            break
+        except _UnscopedFlagSet:
+            # Remember the global flags for the next attempt.
+            global_flags = info.global_flags
+        except error as e:
+            caught_exception = e
+
+        if caught_exception:
+            raise error(caught_exception.msg, caught_exception.pattern,
+              caught_exception.pos)
+
+    if not source.at_end():
+        raise error("unbalanced parenthesis", pattern, source.pos)
+
+    # Check the global flags for conflicts.
+    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
+    if version not in (0, VERSION0, VERSION1):
+        raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")
+
+    if (info.flags & _ALL_ENCODINGS) not in (0, ASCII, LOCALE, UNICODE):
+        raise ValueError("ASCII, LOCALE and UNICODE flags are mutually incompatible")
+
+    if isinstance(pattern, bytes) and (info.flags & UNICODE):
+        raise ValueError("cannot use UNICODE flag with a bytes pattern")
+
+    if not (info.flags & _ALL_ENCODINGS):
+        if isinstance(pattern, str):
+            info.flags |= UNICODE
+        else:
+            info.flags |= ASCII
+
+    reverse = bool(info.flags & REVERSE)
+    fuzzy = isinstance(parsed, _Fuzzy)
+
+    # Remember whether this pattern has an inline locale flag.
+    _locale_sensitive[locale_key] = info.inline_locale
+
+    # Fix the group references.
+    caught_exception = None
+    try:
+        parsed.fix_groups(pattern, reverse, False)
+    except error as e:
+        caught_exception = e
+
+    if caught_exception:
+        raise error(caught_exception.msg, caught_exception.pattern,
+          caught_exception.pos)
+
+    # Should we print the parsed pattern?
+    if flags & DEBUG:
+        parsed.dump(indent=0, reverse=reverse)
+
+    # Optimise the parsed pattern.
+    parsed = parsed.optimise(info, reverse)
+    parsed = parsed.pack_characters(info)
+
+    # Get the required string.
+    req_offset, req_chars, req_flags = _get_required_string(parsed, info.flags)
+
+    # Build the named lists.
+    named_lists = {}
+    named_list_indexes = [None] * len(info.named_lists_used)
+    args_needed = set()
+    for key, index in info.named_lists_used.items():
+        name, case_flags = key
+        values = frozenset(kwargs[name])
+        if case_flags:
+            items = frozenset(_fold_case(info, v) for v in values)
+        else:
+            items = values
+        named_lists[name] = values
+        named_list_indexes[index] = items
+        args_needed.add((name, values))
+
+    complain_unused_args()
+
+    # Check the features of the groups.
+    _check_group_features(info, parsed)
+
+    # Compile the parsed pattern. The result is a list of tuples.
+    code = parsed.compile(reverse)
+
+    # Is there a group call to the pattern as a whole?
+    key = (0, reverse, fuzzy)
+    ref = info.call_refs.get(key)
+    if ref is not None:
+        code = [(_OP.CALL_REF, ref)] + code + [(_OP.END, )]
+
+    # Add the final 'success' opcode.
+    code += [(_OP.SUCCESS, )]
+
+    # Compile the additional copies of the groups that we need.
+    for group, rev, fuz in info.additional_groups:
+        code += group.compile(rev, fuz)
+
+    # Flatten the code into a list of ints.
+    code = _flatten_code(code)
+
+    if not parsed.has_simple_start():
+        # Get the first set, if possible.
+        try:
+            fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
+            fs_code = _flatten_code(fs_code)
+            code = fs_code + code
+        except _FirstSetError:
+            pass
+
+    # The named capture groups.
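+    # index_group is the inverse mapping (group index back to group name); the
+    # compiled pattern object needs both directions.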
+ index_group = dict((v, n) for n, v in info.group_index.items()) + + # Create the PatternObject. + # + # Local flags like IGNORECASE affect the code generation, but aren't needed + # by the PatternObject itself. Conversely, global flags like LOCALE _don't_ + # affect the code generation but _are_ needed by the PatternObject. + compiled_pattern = _regex.compile(pattern, info.flags | version, code, + info.group_index, index_group, named_lists, named_list_indexes, + req_offset, req_chars, req_flags, info.group_count) + + # Do we need to reduce the size of the cache? + if len(_cache) >= _MAXCACHE: + with _cache_lock: + _shrink_cache(_cache, _named_args, _locale_sensitive, _MAXCACHE) + + if cache_it: + if (info.flags & LOCALE) == 0: + pattern_locale = None + + args_needed = frozenset(args_needed) + + # Store this regular expression and named list. + pattern_key = (pattern, type(pattern), flags, args_needed, + DEFAULT_VERSION, pattern_locale) + _cache[pattern_key] = compiled_pattern + + # Store what keyword arguments are needed. + _named_args[args_key] = args_needed + + return compiled_pattern + +def _compile_replacement_helper(pattern, template): + "Compiles a replacement template." + # This function is called by the _regex module. + + # Have we seen this before? + key = pattern.pattern, pattern.flags, template + compiled = _replacement_cache.get(key) + if compiled is not None: + return compiled + + if len(_replacement_cache) >= _MAXREPCACHE: + _replacement_cache.clear() + + is_unicode = isinstance(template, str) + source = _Source(template) + if is_unicode: + def make_string(char_codes): + return "".join(chr(c) for c in char_codes) + else: + def make_string(char_codes): + return bytes(char_codes) + + compiled = [] + literal = [] + while True: + ch = source.get() + if not ch: + break + if ch == "\\": + # '_compile_replacement' will return either an int group reference + # or a string literal. It returns items (plural) in order to handle + # a 2-character literal (an invalid escape sequence). + is_group, items = _compile_replacement(source, pattern, is_unicode) + if is_group: + # It's a group, so first flush the literal. + if literal: + compiled.append(make_string(literal)) + literal = [] + compiled.extend(items) + else: + literal.extend(items) + else: + literal.append(ord(ch)) + + # Flush the literal. + if literal: + compiled.append(make_string(literal)) + + _replacement_cache[key] = compiled + + return compiled + +# We define Pattern here after all the support objects have been defined. +_pat = _compile('', 0, False, {}, False) +Pattern = type(_pat) +Match = type(_pat.match('')) +del _pat + +# Make Pattern public for typing annotations. +__all__.append("Pattern") +__all__.append("Match") + +# We'll define an alias for the 'compile' function so that the repr of a +# pattern object is eval-able. +Regex = compile + +# Register myself for pickling. +import copyreg as _copy_reg + +def _pickle(pattern): + return _regex.compile, pattern._pickled_data + +_copy_reg.pickle(Pattern, _pickle) diff --git a/infer_4_33_0/lib/python3.10/site-packages/regex/test_regex.py b/infer_4_33_0/lib/python3.10/site-packages/regex/test_regex.py new file mode 100644 index 0000000000000000000000000000000000000000..bce5a871164919ca910c2b02b9cfd3c58e5912b8 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/regex/test_regex.py @@ -0,0 +1,4488 @@ +from weakref import proxy +import copy +import pickle +import regex +import string +import sys +import unittest + +# String subclasses for issue 18468. 
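+# The tests below use these to check that str and bytes subclasses are
+# accepted wherever plain strings and bytes are.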
+class StrSubclass(str):
+    def __getitem__(self, index):
+        return StrSubclass(super().__getitem__(index))
+
+class BytesSubclass(bytes):
+    def __getitem__(self, index):
+        return BytesSubclass(super().__getitem__(index))
+
+class RegexTests(unittest.TestCase):
+    PATTERN_CLASS = "<class '_regex.Pattern'>"
+    FLAGS_WITH_COMPILED_PAT = "cannot process flags argument with a compiled pattern"
+    INVALID_GROUP_REF = "invalid group reference"
+    MISSING_GT = "missing >"
+    BAD_GROUP_NAME = "bad character in group name"
+    MISSING_GROUP_NAME = "missing group name"
+    MISSING_LT = "missing <"
+    UNKNOWN_GROUP_I = "unknown group"
+    UNKNOWN_GROUP = "unknown group"
+    BAD_ESCAPE = r"bad escape \(end of pattern\)"
+    BAD_OCTAL_ESCAPE = r"bad escape \\"
+    BAD_SET = "unterminated character set"
+    STR_PAT_ON_BYTES = "cannot use a string pattern on a bytes-like object"
+    BYTES_PAT_ON_STR = "cannot use a bytes pattern on a string-like object"
+    STR_PAT_BYTES_TEMPL = "expected str instance, bytes found"
+    BYTES_PAT_STR_TEMPL = "expected a bytes-like object, str found"
+    BYTES_PAT_UNI_FLAG = "cannot use UNICODE flag with a bytes pattern"
+    MIXED_FLAGS = "ASCII, LOCALE and UNICODE flags are mutually incompatible"
+    MISSING_RPAREN = "missing \\)"
+    TRAILING_CHARS = "unbalanced parenthesis"
+    BAD_CHAR_RANGE = "bad character range"
+    NOTHING_TO_REPEAT = "nothing to repeat"
+    MULTIPLE_REPEAT = "multiple repeat"
+    OPEN_GROUP = "cannot refer to an open group"
+    DUPLICATE_GROUP = "duplicate group"
+    CANT_TURN_OFF = "bad inline flags: cannot turn flags off"
+    UNDEF_CHAR_NAME = "undefined character name"
+
+    def assertTypedEqual(self, actual, expect, msg=None):
+        self.assertEqual(actual, expect, msg)
+
+        def recurse(actual, expect):
+            if isinstance(expect, (tuple, list)):
+                for x, y in zip(actual, expect):
+                    recurse(x, y)
+            else:
+                self.assertIs(type(actual), type(expect), msg)
+
+        recurse(actual, expect)
+
+    def test_weakref(self):
+        s = 'QabbbcR'
+        x = regex.compile('ab+c')
+        y = proxy(x)
+        if x.findall('QabbbcR') != y.findall('QabbbcR'):
+            self.fail()
+
+    def test_search_star_plus(self):
+        self.assertEqual(regex.search('a*', 'xxx').span(0), (0, 0))
+        self.assertEqual(regex.search('x*', 'axx').span(), (0, 0))
+        self.assertEqual(regex.search('x+', 'axx').span(0), (1, 3))
+        self.assertEqual(regex.search('x+', 'axx').span(), (1, 3))
+        self.assertEqual(regex.search('x', 'aaa'), None)
+        self.assertEqual(regex.match('a*', 'xxx').span(0), (0, 0))
+        self.assertEqual(regex.match('a*', 'xxx').span(), (0, 0))
+        self.assertEqual(regex.match('x*', 'xxxa').span(0), (0, 3))
+        self.assertEqual(regex.match('x*', 'xxxa').span(), (0, 3))
+        self.assertEqual(regex.match('a+', 'xxx'), None)
+
+    def bump_num(self, matchobj):
+        int_value = int(matchobj[0])
+        return str(int_value + 1)
+
+    def test_basic_regex_sub(self):
+        self.assertEqual(regex.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
+        self.assertEqual(regex.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
+          '9.3 -3 24x100y')
+        self.assertEqual(regex.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
+          '9.3 -3 23x99y')
+
+        self.assertEqual(regex.sub('.', lambda m: r"\n", 'x'), "\\n")
+        self.assertEqual(regex.sub('.', r"\n", 'x'), "\n")
+
+        self.assertEqual(regex.sub('(?P<a>x)', r'\g<a>\g<a>', 'xx'), 'xxxx')
+        self.assertEqual(regex.sub('(?P<a>x)', r'\g<a>\g<1>', 'xx'), 'xxxx')
+        self.assertEqual(regex.sub('(?P<unk>x)', r'\g<unk>\g<unk>', 'xx'),
+          'xxxx')
+        self.assertEqual(regex.sub('(?P<unk>x)', r'\g<1>\g<1>', 'xx'), 'xxxx')
+
+        self.assertEqual(regex.sub('a', r'\t\n\v\r\f\a\b', 'a'), "\t\n\v\r\f\a\b")
+        self.assertEqual(regex.sub('a', '\t\n\v\r\f\a', 'a'),
+          "\t\n\v\r\f\a")
+        self.assertEqual(regex.sub('a', '\t\n\v\r\f\a', 'a'), chr(9) + chr(10) +
+          chr(11) + chr(13) + chr(12) + chr(7))
+
+        self.assertEqual(regex.sub(r'^\s*', 'X', 'test'), 'Xtest')
+
+        self.assertEqual(regex.sub(r"x", r"\x0A", "x"), "\n")
+        self.assertEqual(regex.sub(r"x", r"\u000A", "x"), "\n")
+        self.assertEqual(regex.sub(r"x", r"\U0000000A", "x"), "\n")
+        self.assertEqual(regex.sub(r"x", r"\N{LATIN CAPITAL LETTER A}",
+          "x"), "A")
+
+        self.assertEqual(regex.sub(br"x", br"\x0A", b"x"), b"\n")
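A minimal standalone sketch of the replacement-template group references exercised by test_basic_regex_sub above, assuming only that the third-party regex module is importable (the group name "a" is arbitrary, not taken from the tests):

    import regex

    # A named group can be referenced in the template by name or by number;
    # each 'x' in the subject is replaced by two copies of what it captured.
    assert regex.sub('(?P<a>x)', r'\g<a>\g<a>', 'xx') == 'xxxx'
    assert regex.sub('(?P<a>x)', r'\g<1>\g<1>', 'xx') == 'xxxx'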
"\t\n\v\r\f\a") + self.assertEqual(regex.sub('a', '\t\n\v\r\f\a', 'a'), chr(9) + chr(10) + + chr(11) + chr(13) + chr(12) + chr(7)) + + self.assertEqual(regex.sub(r'^\s*', 'X', 'test'), 'Xtest') + + self.assertEqual(regex.sub(r"x", r"\x0A", "x"), "\n") + self.assertEqual(regex.sub(r"x", r"\u000A", "x"), "\n") + self.assertEqual(regex.sub(r"x", r"\U0000000A", "x"), "\n") + self.assertEqual(regex.sub(r"x", r"\N{LATIN CAPITAL LETTER A}", + "x"), "A") + + self.assertEqual(regex.sub(br"x", br"\x0A", b"x"), b"\n") + + def test_bug_449964(self): + # Fails for group followed by other escape. + self.assertEqual(regex.sub(r'(?Px)', r'\g<1>\g<1>\b', 'xx'), + "xx\bxx\b") + + def test_bug_449000(self): + # Test for sub() on escaped characters. + self.assertEqual(regex.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'), + "abc\ndef\n") + self.assertEqual(regex.sub('\r\n', r'\n', 'abc\r\ndef\r\n'), + "abc\ndef\n") + self.assertEqual(regex.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'), + "abc\ndef\n") + self.assertEqual(regex.sub('\r\n', '\n', 'abc\r\ndef\r\n'), + "abc\ndef\n") + + def test_bug_1661(self): + # Verify that flags do not get silently ignored with compiled patterns + pattern = regex.compile('.') + self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT, + lambda: regex.match(pattern, 'A', regex.I)) + self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT, + lambda: regex.search(pattern, 'A', regex.I)) + self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT, + lambda: regex.findall(pattern, 'A', regex.I)) + self.assertRaisesRegex(ValueError, self.FLAGS_WITH_COMPILED_PAT, + lambda: regex.compile(pattern, regex.I)) + + def test_bug_3629(self): + # A regex that triggered a bug in the sre-code validator + self.assertEqual(repr(type(regex.compile("(?P)(?(quote))"))), + self.PATTERN_CLASS) + + def test_sub_template_numeric_escape(self): + # Bug 776311 and friends. 
+ self.assertEqual(regex.sub('x', r'\0', 'x'), "\0") + self.assertEqual(regex.sub('x', r'\000', 'x'), "\000") + self.assertEqual(regex.sub('x', r'\001', 'x'), "\001") + self.assertEqual(regex.sub('x', r'\008', 'x'), "\0" + "8") + self.assertEqual(regex.sub('x', r'\009', 'x'), "\0" + "9") + self.assertEqual(regex.sub('x', r'\111', 'x'), "\111") + self.assertEqual(regex.sub('x', r'\117', 'x'), "\117") + + self.assertEqual(regex.sub('x', r'\1111', 'x'), "\1111") + self.assertEqual(regex.sub('x', r'\1111', 'x'), "\111" + "1") + + self.assertEqual(regex.sub('x', r'\00', 'x'), '\x00') + self.assertEqual(regex.sub('x', r'\07', 'x'), '\x07') + self.assertEqual(regex.sub('x', r'\08', 'x'), "\0" + "8") + self.assertEqual(regex.sub('x', r'\09', 'x'), "\0" + "9") + self.assertEqual(regex.sub('x', r'\0a', 'x'), "\0" + "a") + + self.assertEqual(regex.sub('x', r'\400', 'x'), "\u0100") + self.assertEqual(regex.sub('x', r'\777', 'x'), "\u01FF") + self.assertEqual(regex.sub(b'x', br'\400', b'x'), b"\x00") + self.assertEqual(regex.sub(b'x', br'\777', b'x'), b"\xFF") + + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\1', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\8', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\9', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\11', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\18', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\1a', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\90', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\99', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\118', 'x')) # r'\11' + '8' + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\11a', 'x')) + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\181', 'x')) # r'\18' + '1' + self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda: + regex.sub('x', r'\800', 'x')) # r'\80' + '0' + + # In Python 2.3 (etc), these loop endlessly in sre_parser.py. + self.assertEqual(regex.sub('(((((((((((x)))))))))))', r'\11', 'x'), + 'x') + self.assertEqual(regex.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'), + 'xz8') + self.assertEqual(regex.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'), + 'xza') + + def test_qualified_re_sub(self): + self.assertEqual(regex.sub('a', 'b', 'aaaaa'), 'bbbbb') + self.assertEqual(regex.sub('a', 'b', 'aaaaa', 1), 'baaaa') + + def test_bug_114660(self): + self.assertEqual(regex.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'), + 'hello there') + + def test_bug_462270(self): + # Test for empty sub() behaviour, see SF bug #462270 + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub('(?V0)x*', '-', 'abxd'), '-a-b--d-') + else: + self.assertEqual(regex.sub('(?V0)x*', '-', 'abxd'), '-a-b-d-') + self.assertEqual(regex.sub('(?V1)x*', '-', 'abxd'), '-a-b--d-') + self.assertEqual(regex.sub('x+', '-', 'abxd'), 'ab-d') + + def test_bug_14462(self): + # chr(255) is a valid identifier in Python 3. 
+        group_name = '\xFF'
+        self.assertEqual(regex.search(r'(?P<' + group_name + '>a)',
+          'abc').group(group_name), 'a')
+
+    def test_symbolic_refs(self):
+        self.assertRaisesRegex(regex.error, self.MISSING_GT, lambda:
+          regex.sub('(?P<a>x)', r'\g<a', 'xx'))
+        self.assertRaisesRegex(regex.error, self.MISSING_GROUP_NAME, lambda:
+          regex.sub('(?P<a>x)', r'\g<', 'xx'))
+        self.assertRaisesRegex(regex.error, self.MISSING_LT, lambda:
+          regex.sub('(?P<a>x)', r'\g', 'xx'))
+        self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda:
+          regex.sub('(?P<a>x)', r'\g<a a>', 'xx'))
+        self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda:
+          regex.sub('(?P<a>x)', r'\g<1a1>', 'xx'))
+        self.assertRaisesRegex(IndexError, self.UNKNOWN_GROUP_I, lambda:
+          regex.sub('(?P<a>x)', r'\g<ab>', 'xx'))
+
+        # The new behaviour of unmatched but valid groups is to treat them like
+        # empty matches in the replacement template, like in Perl.
+        self.assertEqual(regex.sub('(?P<a>x)|(?P<b>y)', r'\g<b>', 'xx'), '')
+        self.assertEqual(regex.sub('(?P<a>x)|(?P<b>y)', r'\2', 'xx'), '')
+
+        # The old behaviour was to raise it as an IndexError.
+        self.assertRaisesRegex(regex.error, self.BAD_GROUP_NAME, lambda:
+          regex.sub('(?P<a>x)', r'\g<-1>', 'xx'))
+
+    def test_re_subn(self):
+        self.assertEqual(regex.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
+        self.assertEqual(regex.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
+        self.assertEqual(regex.subn("b+", "x", "xyz"), ('xyz', 0))
+        self.assertEqual(regex.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
+        self.assertEqual(regex.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
+
+    def test_re_split(self):
+        self.assertEqual(regex.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
+        if sys.version_info >= (3, 7, 0):
+            self.assertEqual(regex.split(":*", ":a:b::c"), ['', '', 'a', '',
+              'b', '', 'c', ''])
+            self.assertEqual(regex.split("(:*)", ":a:b::c"), ['', ':', '', '',
+              'a', ':', '', '', 'b', '::', '', '', 'c', '', ''])
+            self.assertEqual(regex.split("(?::*)", ":a:b::c"), ['', '', 'a',
+              '', 'b', '', 'c', ''])
+            self.assertEqual(regex.split("(:)*", ":a:b::c"), ['', ':', '',
+              None, 'a', ':', '', None, 'b', ':', '', None, 'c', None, ''])
+        else:
+            self.assertEqual(regex.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
+            self.assertEqual(regex.split("(:*)", ":a:b::c"), ['', ':', 'a',
+              ':', 'b', '::', 'c'])
+            self.assertEqual(regex.split("(?::*)", ":a:b::c"), ['', 'a', 'b',
+              'c'])
+            self.assertEqual(regex.split("(:)*", ":a:b::c"), ['', ':', 'a',
+              ':', 'b', ':', 'c'])
+        self.assertEqual(regex.split("([b:]+)", ":a:b::c"), ['', ':', 'a',
+          ':b::', 'c'])
+        self.assertEqual(regex.split("(b)|(:+)", ":a:b::c"), ['', None, ':',
+          'a', None, ':', '', 'b', None, '', None, '::', 'c'])
+        self.assertEqual(regex.split("(?:b)|(?::+)", ":a:b::c"), ['', 'a', '',
+          '', 'c'])
+
+        self.assertEqual(regex.split("x", "xaxbxc"), ['', 'a', 'b', 'c'])
+        self.assertEqual([m for m in regex.splititer("x", "xaxbxc")], ['', 'a',
+          'b', 'c'])
+
+        self.assertEqual(regex.split("(?r)x", "xaxbxc"), ['c', 'b', 'a', ''])
+        self.assertEqual([m for m in regex.splititer("(?r)x", "xaxbxc")], ['c',
+          'b', 'a', ''])
+
+        self.assertEqual(regex.split("(x)|(y)", "xaxbxc"), ['', 'x', None, 'a',
+          'x', None, 'b', 'x', None, 'c'])
+        self.assertEqual([m for m in regex.splititer("(x)|(y)", "xaxbxc")],
+          ['', 'x', None, 'a', 'x', None, 'b', 'x', None, 'c'])
+
+        self.assertEqual(regex.split("(?r)(x)|(y)", "xaxbxc"), ['c', 'x', None,
+          'b', 'x', None, 'a', 'x', None, ''])
+        self.assertEqual([m for m in regex.splititer("(?r)(x)|(y)", "xaxbxc")],
+          ['c', 'x', None, 'b', 'x', None, 'a', 'x', None, ''])
+
+        self.assertEqual(regex.split(r"(?V1)\b", "a b c"), ['', 'a', ' ', 'b',
+          ' ', 'c', ''])
+        self.assertEqual(regex.split(r"(?V1)\m", "a b c"), ['', 'a ', 'b ',
+          'c'])
+        self.assertEqual(regex.split(r"(?V1)\M", "a b c"), ['a', ' b', ' c',
+          ''])
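A small standalone sketch of the V1 splitting behaviour the assertions above exercise (assuming the regex module is importable):

    import regex

    # Under V1 behaviour, zero-width matches are valid split points, so \b
    # yields the words plus the separators between them.
    assert regex.split(r'(?V1)\b', 'a b') == ['', 'a', ' ', 'b', '']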
+
+    def test_qualified_re_split(self):
+        self.assertEqual(regex.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
+        self.assertEqual(regex.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
+        self.assertEqual(regex.split("(:)", ":a:b::c", 2), ['', ':', 'a', ':',
+          'b::c'])
+
+        if sys.version_info >= (3, 7, 0):
+            self.assertEqual(regex.split("(:*)", ":a:b::c", 2), ['', ':', '',
+              '', 'a:b::c'])
+        else:
+            self.assertEqual(regex.split("(:*)", ":a:b::c", 2), ['', ':', 'a',
+              ':', 'b::c'])
+
+    def test_re_findall(self):
+        self.assertEqual(regex.findall(":+", "abc"), [])
+        self.assertEqual(regex.findall(":+", "a:b::c:::d"), [':', '::', ':::'])
+        self.assertEqual(regex.findall("(:+)", "a:b::c:::d"), [':', '::',
+          ':::'])
+        self.assertEqual(regex.findall("(:)(:*)", "a:b::c:::d"), [(':', ''),
+          (':', ':'), (':', '::')])
+
+        self.assertEqual(regex.findall(r"\((?P<test>.{0,5}?TEST)\)",
+          "(MY TEST)"), ["MY TEST"])
+        self.assertEqual(regex.findall(r"\((?P<test>.{0,3}?TEST)\)",
+          "(MY TEST)"), ["MY TEST"])
+        self.assertEqual(regex.findall(r"\((?P<test>.{0,3}?T)\)", "(MY T)"),
+          ["MY T"])
+
+        self.assertEqual(regex.findall(r"[^a]{2}[A-Z]", "\n  S"), ['  S'])
+        self.assertEqual(regex.findall(r"[^a]{2,3}[A-Z]", "\n  S"), ['\n  S'])
+        self.assertEqual(regex.findall(r"[^a]{2,3}[A-Z]", "\n   S"), ['   S'])
+
+        self.assertEqual(regex.findall(r"X(Y[^Y]+?){1,2}( |Q)+DEF",
+          "XYABCYPPQ\nQ DEF"), [('YPPQ\n', ' ')])
+
+        self.assertEqual(regex.findall(r"(\nTest(\n+.+?){0,2}?)?\n+End",
+          "\nTest\nxyz\nxyz\nEnd"), [('\nTest\nxyz\nxyz', '\nxyz')])
+
+    def test_bug_117612(self):
+        self.assertEqual(regex.findall(r"(a|(b))", "aba"), [('a', ''), ('b',
+          'b'), ('a', '')])
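A standalone sketch of the findall behaviour exercised above (assuming the regex module is importable): with two or more capture groups, each match is reported as a tuple of the groups rather than as the whole matched text.

    import regex

    # One tuple per match: the single ':' yields (':', ''); the '::' run
    # yields (':', ':').
    assert regex.findall('(:)(:*)', 'a:b::c') == [(':', ''), (':', ':')]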
+
+    def test_re_match(self):
+        self.assertEqual(regex.match('a', 'a')[:], ('a',))
+        self.assertEqual(regex.match('(a)', 'a')[:], ('a', 'a'))
+        self.assertEqual(regex.match(r'(a)', 'a')[0], 'a')
+        self.assertEqual(regex.match(r'(a)', 'a')[1], 'a')
+        self.assertEqual(regex.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
+
+        pat = regex.compile('((a)|(b))(c)?')
+        self.assertEqual(pat.match('a')[:], ('a', 'a', 'a', None, None))
+        self.assertEqual(pat.match('b')[:], ('b', 'b', None, 'b', None))
+        self.assertEqual(pat.match('ac')[:], ('ac', 'a', 'a', None, 'c'))
+        self.assertEqual(pat.match('bc')[:], ('bc', 'b', None, 'b', 'c'))
+        self.assertEqual(pat.match('bc')[:], ('bc', 'b', None, 'b', 'c'))
+
+        # A single group.
+        m = regex.match('(a)', 'a')
+        self.assertEqual(m.group(), 'a')
+        self.assertEqual(m.group(0), 'a')
+        self.assertEqual(m.group(1), 'a')
+        self.assertEqual(m.group(1, 1), ('a', 'a'))
+
+        pat = regex.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
+        self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
+        self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'), (None, 'b',
+          None))
+        self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
+
+    def test_re_groupref_exists(self):
+        self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', '(a)')[:],
+          ('(a)', '(', 'a'))
+        self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', 'a')[:], ('a',
+          None, 'a'))
+        self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', 'a)'), None)
+        self.assertEqual(regex.match(r'^(\()?([^()]+)(?(1)\))$', '(a'), None)
+        self.assertEqual(regex.match('^(?:(a)|c)((?(1)b|d))$', 'ab')[:], ('ab',
+          'a', 'b'))
+        self.assertEqual(regex.match('^(?:(a)|c)((?(1)b|d))$', 'cd')[:], ('cd',
+          None, 'd'))
+        self.assertEqual(regex.match('^(?:(a)|c)((?(1)|d))$', 'cd')[:], ('cd',
+          None, 'd'))
+        self.assertEqual(regex.match('^(?:(a)|c)((?(1)|d))$', 'a')[:], ('a',
+          'a', ''))
+
+        # Tests for bug #1177831: exercise groups other than the first group.
+        p = regex.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
+        self.assertEqual(p.match('abc')[:], ('abc', 'a', 'b', 'c'))
+        self.assertEqual(p.match('ad')[:], ('ad', 'a', None, 'd'))
+        self.assertEqual(p.match('abd'), None)
+        self.assertEqual(p.match('ac'), None)
+
+    def test_re_groupref(self):
+        self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', '|a|')[:], ('|a|',
+          '|', 'a'))
+        self.assertEqual(regex.match(r'^(\|)?([^()]+)\1?$', 'a')[:], ('a',
+          None, 'a'))
+        self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
+        self.assertEqual(regex.match(r'^(\|)?([^()]+)\1$', '|a'), None)
+        self.assertEqual(regex.match(r'^(?:(a)|c)(\1)$', 'aa')[:], ('aa', 'a',
+          'a'))
+        self.assertEqual(regex.match(r'^(?:(a)|c)(\1)?$', 'c')[:], ('c', None,
+          None))
+
+        self.assertEqual(regex.findall(r"(?i)(.{1,40}?),(.{1,40}?)(?:;)+(.{1,80}).{1,40}?\3(\ |;)+(.{1,80}?)\1",
+          "TEST, BEST; LEST ; Lest 123 Test, Best"), [('TEST', ' BEST',
+          ' LEST', ' ', '123 ')])
+
+    def test_groupdict(self):
+        self.assertEqual(regex.match('(?P<first>first) (?P<second>second)',
+          'first second').groupdict(), {'first': 'first', 'second': 'second'})
+
+    def test_expand(self):
+        self.assertEqual(regex.match("(?P<first>first) (?P<second>second)",
+          "first second").expand(r"\2 \1 \g<second> \g<first>"),
+          'second first second first')
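A short sketch of Match.expand as tested above (assuming the regex module is importable; the subject string is arbitrary):

    import regex

    m = regex.match(r'(?P<first>\w+) (?P<second>\w+)', 'hello world')
    # expand() fills a replacement template from an existing match; groups
    # can be referenced by number or by name.
    assert m.expand(r'\2 \1') == 'world hello'
    assert m.expand(r'\g<second> \g<first>') == 'world hello'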
+
+    def test_repeat_minmax(self):
+        self.assertEqual(regex.match(r"^(\w){1}$", "abc"), None)
+        self.assertEqual(regex.match(r"^(\w){1}?$", "abc"), None)
+        self.assertEqual(regex.match(r"^(\w){1,2}$", "abc"), None)
+        self.assertEqual(regex.match(r"^(\w){1,2}?$", "abc"), None)
+
+        self.assertEqual(regex.match(r"^(\w){3}$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){1,3}$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){1,4}$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){3,4}?$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){3}?$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){1,3}?$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){1,4}?$", "abc")[1], 'c')
+        self.assertEqual(regex.match(r"^(\w){3,4}?$", "abc")[1], 'c')
+
+        self.assertEqual(regex.match("^x{1}$", "xxx"), None)
+        self.assertEqual(regex.match("^x{1}?$", "xxx"), None)
+        self.assertEqual(regex.match("^x{1,2}$", "xxx"), None)
+        self.assertEqual(regex.match("^x{1,2}?$", "xxx"), None)
+
+        self.assertEqual(regex.match("^x{1}", "xxx")[0], 'x')
+        self.assertEqual(regex.match("^x{1}?", "xxx")[0], 'x')
+        self.assertEqual(regex.match("^x{0,1}", "xxx")[0], 'x')
+        self.assertEqual(regex.match("^x{0,1}?", "xxx")[0], '')
+
+        self.assertEqual(bool(regex.match("^x{3}$", "xxx")), True)
+        self.assertEqual(bool(regex.match("^x{1,3}$", "xxx")), True)
+        self.assertEqual(bool(regex.match("^x{1,4}$", "xxx")), True)
+        self.assertEqual(bool(regex.match("^x{3,4}?$", "xxx")), True)
+        self.assertEqual(bool(regex.match("^x{3}?$", "xxx")), True)
+        self.assertEqual(bool(regex.match("^x{1,3}?$", "xxx")), True)
+        self.assertEqual(bool(regex.match("^x{1,4}?$", "xxx")), True)
+        self.assertEqual(bool(regex.match("^x{3,4}?$", "xxx")), True)
+
+        self.assertEqual(regex.match("^x{}$", "xxx"), None)
+        self.assertEqual(bool(regex.match("^x{}$", "x{}")), True)
+
+    def test_getattr(self):
+        self.assertEqual(regex.compile("(?i)(a)(b)").pattern, '(?i)(a)(b)')
+        self.assertEqual(regex.compile("(?i)(a)(b)").flags, regex.I | regex.U |
+          regex.DEFAULT_VERSION)
+        self.assertEqual(regex.compile(b"(?i)(a)(b)").flags, regex.A | regex.I
+          | regex.DEFAULT_VERSION)
+        self.assertEqual(regex.compile("(?i)(a)(b)").groups, 2)
+        self.assertEqual(regex.compile("(?i)(a)(b)").groupindex, {})
+
+        self.assertEqual(regex.compile("(?i)(?P<first>a)(?P<other>b)").groupindex,
+          {'first': 1, 'other': 2})
+
+        self.assertEqual(regex.match("(a)", "a").pos, 0)
+        self.assertEqual(regex.match("(a)", "a").endpos, 1)
+
+        self.assertEqual(regex.search("b(c)", "abcdef").pos, 0)
+        self.assertEqual(regex.search("b(c)", "abcdef").endpos, 6)
+        self.assertEqual(regex.search("b(c)", "abcdef").span(), (1, 3))
+        self.assertEqual(regex.search("b(c)", "abcdef").span(1), (2, 3))
+
+        self.assertEqual(regex.match("(a)", "a").string, 'a')
+        self.assertEqual(regex.match("(a)", "a").regs, ((0, 1), (0, 1)))
+        self.assertEqual(repr(type(regex.match("(a)", "a").re)),
+          self.PATTERN_CLASS)
+
+        # Issue 14260.
+        p = regex.compile(r'abc(?P<n>def)')
+        p.groupindex["n"] = 0
+        self.assertEqual(p.groupindex["n"], 1)
+
+    def test_special_escapes(self):
+        self.assertEqual(regex.search(r"\b(b.)\b", "abcd abc bcd bx")[1], 'bx')
+        self.assertEqual(regex.search(r"\B(b.)\B", "abc bcd bc abxd")[1], 'bx')
+        self.assertEqual(regex.search(br"\b(b.)\b", b"abcd abc bcd bx",
+          regex.LOCALE)[1], b'bx')
+        self.assertEqual(regex.search(br"\B(b.)\B", b"abc bcd bc abxd",
+          regex.LOCALE)[1], b'bx')
+        self.assertEqual(regex.search(r"\b(b.)\b", "abcd abc bcd bx",
+          regex.UNICODE)[1], 'bx')
+        self.assertEqual(regex.search(r"\B(b.)\B", "abc bcd bc abxd",
+          regex.UNICODE)[1], 'bx')
+
+        self.assertEqual(regex.search(r"^abc$", "\nabc\n", regex.M)[0], 'abc')
+        self.assertEqual(regex.search(r"^\Aabc\Z$", "abc", regex.M)[0], 'abc')
+        self.assertEqual(regex.search(r"^\Aabc\Z$", "\nabc\n", regex.M), None)
+
+        self.assertEqual(regex.search(br"\b(b.)\b", b"abcd abc bcd bx")[1],
+          b'bx')
+        self.assertEqual(regex.search(br"\B(b.)\B", b"abc bcd bc abxd")[1],
+          b'bx')
+        self.assertEqual(regex.search(br"^abc$", b"\nabc\n", regex.M)[0],
+          b'abc')
+        self.assertEqual(regex.search(br"^\Aabc\Z$", b"abc", regex.M)[0],
+          b'abc')
+        self.assertEqual(regex.search(br"^\Aabc\Z$", b"\nabc\n", regex.M),
+          None)
+
+        self.assertEqual(regex.search(r"\d\D\w\W\s\S", "1aa! a")[0], '1aa! a')
+        self.assertEqual(regex.search(br"\d\D\w\W\s\S", b"1aa! a",
+          regex.LOCALE)[0], b'1aa! a')
+        self.assertEqual(regex.search(r"\d\D\w\W\s\S", "1aa! a",
+          regex.UNICODE)[0], '1aa! 
a') + + def test_bigcharset(self): + self.assertEqual(regex.match(r"([\u2222\u2223])", "\u2222")[1], + '\u2222') + self.assertEqual(regex.match(r"([\u2222\u2223])", "\u2222", + regex.UNICODE)[1], '\u2222') + self.assertEqual("".join(regex.findall(".", + "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)), + 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117') + self.assertEqual("".join(regex.findall(r"[e\xe8\xe9\xea\xeb\u0113\u011b\u0117]", + "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)), + 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117') + self.assertEqual("".join(regex.findall(r"e|\xe8|\xe9|\xea|\xeb|\u0113|\u011b|\u0117", + "e\xe8\xe9\xea\xeb\u0113\u011b\u0117", flags=regex.UNICODE)), + 'e\xe8\xe9\xea\xeb\u0113\u011b\u0117') + + def test_anyall(self): + self.assertEqual(regex.match("a.b", "a\nb", regex.DOTALL)[0], "a\nb") + self.assertEqual(regex.match("a.*b", "a\n\nb", regex.DOTALL)[0], + "a\n\nb") + + def test_non_consuming(self): + self.assertEqual(regex.match(r"(a(?=\s[^a]))", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a(?=\s[^a]*))", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a(?=\s[abc]))", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a(?=\s[abc]*))", "a bc")[1], 'a') + self.assertEqual(regex.match(r"(a)(?=\s\1)", "a a")[1], 'a') + self.assertEqual(regex.match(r"(a)(?=\s\1*)", "a aa")[1], 'a') + self.assertEqual(regex.match(r"(a)(?=\s(abc|a))", "a a")[1], 'a') + + self.assertEqual(regex.match(r"(a(?!\s[^a]))", "a a")[1], 'a') + self.assertEqual(regex.match(r"(a(?!\s[abc]))", "a d")[1], 'a') + self.assertEqual(regex.match(r"(a)(?!\s\1)", "a b")[1], 'a') + self.assertEqual(regex.match(r"(a)(?!\s(abc|a))", "a b")[1], 'a') + + def test_ignore_case(self): + self.assertEqual(regex.match("abc", "ABC", regex.I)[0], 'ABC') + self.assertEqual(regex.match(b"abc", b"ABC", regex.I)[0], b'ABC') + + self.assertEqual(regex.match(r"(a\s[^a]*)", "a bb", regex.I)[1], + 'a bb') + self.assertEqual(regex.match(r"(a\s[abc])", "a b", regex.I)[1], 'a b') + self.assertEqual(regex.match(r"(a\s[abc]*)", "a bb", regex.I)[1], + 'a bb') + self.assertEqual(regex.match(r"((a)\s\2)", "a a", regex.I)[1], 'a a') + self.assertEqual(regex.match(r"((a)\s\2*)", "a aa", regex.I)[1], + 'a aa') + self.assertEqual(regex.match(r"((a)\s(abc|a))", "a a", regex.I)[1], + 'a a') + self.assertEqual(regex.match(r"((a)\s(abc|a)*)", "a aa", regex.I)[1], + 'a aa') + + # Issue 3511. 
+        self.assertEqual(regex.match(r"[Z-a]", "_").span(), (0, 1))
+        self.assertEqual(regex.match(r"(?i)[Z-a]", "_").span(), (0, 1))
+
+        self.assertEqual(bool(regex.match(r"(?i)nao", "nAo")), True)
+        self.assertEqual(bool(regex.match(r"(?i)n\xE3o", "n\xC3o")), True)
+        self.assertEqual(bool(regex.match(r"(?i)n\xE3o", "N\xC3O")), True)
+        self.assertEqual(bool(regex.match(r"(?i)s", "\u017F")), True)
+
+    def test_case_folding(self):
+        self.assertEqual(regex.search(r"(?fi)ss", "SS").span(), (0, 2))
+        self.assertEqual(regex.search(r"(?fi)SS", "ss").span(), (0, 2))
+        self.assertEqual(regex.search(r"(?fi)SS",
+          "\N{LATIN SMALL LETTER SHARP S}").span(), (0, 1))
+        self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LETTER SHARP S}",
+          "SS").span(), (0, 2))
+
+        self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE ST}",
+          "ST").span(), (0, 2))
+        self.assertEqual(regex.search(r"(?fi)ST",
+          "\N{LATIN SMALL LIGATURE ST}").span(), (0, 1))
+        self.assertEqual(regex.search(r"(?fi)ST",
+          "\N{LATIN SMALL LIGATURE LONG S T}").span(), (0, 1))
+
+        self.assertEqual(regex.search(r"(?fi)SST",
+          "\N{LATIN SMALL LETTER SHARP S}t").span(), (0, 2))
+        self.assertEqual(regex.search(r"(?fi)SST",
+          "s\N{LATIN SMALL LIGATURE LONG S T}").span(), (0, 2))
+        self.assertEqual(regex.search(r"(?fi)SST",
+          "s\N{LATIN SMALL LIGATURE ST}").span(), (0, 2))
+        self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE ST}",
+          "SST").span(), (1, 3))
+        self.assertEqual(regex.search(r"(?fi)SST",
+          "s\N{LATIN SMALL LIGATURE ST}").span(), (0, 2))
+
+        self.assertEqual(regex.search(r"(?fi)FFI",
+          "\N{LATIN SMALL LIGATURE FFI}").span(), (0, 1))
+        self.assertEqual(regex.search(r"(?fi)FFI",
+          "\N{LATIN SMALL LIGATURE FF}i").span(), (0, 2))
+        self.assertEqual(regex.search(r"(?fi)FFI",
+          "f\N{LATIN SMALL LIGATURE FI}").span(), (0, 2))
+        self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE FFI}",
+          "FFI").span(), (0, 3))
+        self.assertEqual(regex.search(r"(?fi)\N{LATIN SMALL LIGATURE FF}i",
+          "FFI").span(), (0, 3))
+        self.assertEqual(regex.search(r"(?fi)f\N{LATIN SMALL LIGATURE FI}",
+          "FFI").span(), (0, 3))
+
+        sigma = "\u03A3\u03C3\u03C2"
+        for ch1 in sigma:
+            for ch2 in sigma:
+                if not regex.match(r"(?fi)" + ch1, ch2):
+                    self.fail()
+
+        self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB00\uFB01")),
+          True)
+        self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB01\uFB00")),
+          True)
+        self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB00\uFB01")),
+          True)
+        self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB01\uFB00")),
+          True)
+        self.assertEqual(bool(regex.search(r"(?iV1)fffi", "\uFB00\uFB01")),
+          True)
+        self.assertEqual(bool(regex.search(r"(?iV1)f\uFB03",
+          "\uFB00\uFB01")), True)
+        self.assertEqual(bool(regex.search(r"(?iV1)ff", "\uFB00\uFB01")),
+          True)
+        self.assertEqual(bool(regex.search(r"(?iV1)fi", "\uFB00\uFB01")),
+          True)
+        self.assertEqual(bool(regex.search(r"(?iV1)fffi", "\uFB00\uFB01")),
+          True)
+        self.assertEqual(bool(regex.search(r"(?iV1)f\uFB03",
+          "\uFB00\uFB01")), True)
+        self.assertEqual(bool(regex.search(r"(?iV1)f\uFB01", "\uFB00i")),
+          True)
+        self.assertEqual(bool(regex.search(r"(?iV1)f\uFB01", "\uFB00i")),
+          True)
+
+        self.assertEqual(regex.search(r"(?fi)a\L<options>ne", "affine",
+          options=["\N{LATIN SMALL LIGATURE FFI}"]).span(), (0, 6))
+        self.assertEqual(regex.search(r"(?fi)a\L<options>ne",
+          "a\N{LATIN SMALL LIGATURE FFI}ne", options=["ffi"]).span(), (0, 4))
+
+    def test_category(self):
+        self.assertEqual(regex.match(r"(\s)", " ")[1], ' ')
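A minimal sketch of the named-list syntax restored above (assuming the regex module is importable; the list name "options" is arbitrary): \L<name> matches any string from the list passed as the same-named keyword argument, and with (?fi) full case folding a ligature matches its expanded letters.

    import regex

    m = regex.search(r'(?fi)a\L<options>ne', 'affine',
                     options=['\N{LATIN SMALL LIGATURE FFI}'])
    assert m.span() == (0, 6)  # 'ffi' in 'affine' matches the ligature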
+
+    def test_not_literal(self):
+        self.assertEqual(regex.search(r"\s([^a])", " b")[1], 'b')
+        self.assertEqual(regex.search(r"\s([^a]*)", " bb")[1], 'bb')
+
+    def test_search_coverage(self):
+        self.assertEqual(regex.search(r"\s(b)", " b")[1], 'b')
+        self.assertEqual(regex.search(r"a\s", "a ")[0], 'a ')
+
+    def test_re_escape(self):
+        p = ""
+        self.assertEqual(regex.escape(p), p)
+        for i in range(0, 256):
+            p += chr(i)
+            self.assertEqual(bool(regex.match(regex.escape(chr(i)), chr(i))),
+              True)
+            self.assertEqual(regex.match(regex.escape(chr(i)), chr(i)).span(),
+              (0, 1))
+
+        pat = regex.compile(regex.escape(p))
+        self.assertEqual(pat.match(p).span(), (0, 256))
+
+    def test_re_escape_byte(self):
+        p = b""
+        self.assertEqual(regex.escape(p), p)
+        for i in range(0, 256):
+            b = bytes([i])
+            p += b
+            self.assertEqual(bool(regex.match(regex.escape(b), b)), True)
+            self.assertEqual(regex.match(regex.escape(b), b).span(), (0, 1))
+
+        pat = regex.compile(regex.escape(p))
+        self.assertEqual(pat.match(p).span(), (0, 256))
+
+    def test_constants(self):
+        if regex.I != regex.IGNORECASE:
+            self.fail()
+        if regex.L != regex.LOCALE:
+            self.fail()
+        if regex.M != regex.MULTILINE:
+            self.fail()
+        if regex.S != regex.DOTALL:
+            self.fail()
+        if regex.X != regex.VERBOSE:
+            self.fail()
+
+    def test_flags(self):
+        for flag in [regex.I, regex.M, regex.X, regex.S, regex.L]:
+            self.assertEqual(repr(type(regex.compile('^pattern$', flag))),
+              self.PATTERN_CLASS)
+
+    def test_sre_character_literals(self):
+        for i in [0, 8, 16, 32, 64, 127, 128, 255]:
+            self.assertEqual(bool(regex.match(r"\%03o" % i, chr(i))), True)
+            self.assertEqual(bool(regex.match(r"\%03o0" % i, chr(i) + "0")),
+              True)
+            self.assertEqual(bool(regex.match(r"\%03o8" % i, chr(i) + "8")),
+              True)
+            self.assertEqual(bool(regex.match(r"\x%02x" % i, chr(i))), True)
+            self.assertEqual(bool(regex.match(r"\x%02x0" % i, chr(i) + "0")),
+              True)
+            self.assertEqual(bool(regex.match(r"\x%02xz" % i, chr(i) + "z")),
+              True)
+
+        self.assertRaisesRegex(regex.error, self.INVALID_GROUP_REF, lambda:
+          regex.match(r"\911", ""))
+
+    def test_sre_character_class_literals(self):
+        for i in [0, 8, 16, 32, 64, 127, 128, 255]:
+            self.assertEqual(bool(regex.match(r"[\%03o]" % i, chr(i))), True)
+            self.assertEqual(bool(regex.match(r"[\%03o0]" % i, chr(i))), True)
+            self.assertEqual(bool(regex.match(r"[\%03o8]" % i, chr(i))), True)
+            self.assertEqual(bool(regex.match(r"[\x%02x]" % i, chr(i))), True)
+            self.assertEqual(bool(regex.match(r"[\x%02x0]" % i, chr(i))), True)
+            self.assertEqual(bool(regex.match(r"[\x%02xz]" % i, chr(i))), True)
+
+        self.assertRaisesRegex(regex.error, self.BAD_OCTAL_ESCAPE, lambda:
+          regex.match(r"[\911]", ""))
+
+    def test_bug_113254(self):
+        self.assertEqual(regex.match(r'(a)|(b)', 'b').start(1), -1)
+        self.assertEqual(regex.match(r'(a)|(b)', 'b').end(1), -1)
+        self.assertEqual(regex.match(r'(a)|(b)', 'b').span(1), (-1, -1))
+
+    def test_bug_527371(self):
+        # Bug described in patches 527371/672491.
+        self.assertEqual(regex.match(r'(a)?a','a').lastindex, None)
+        self.assertEqual(regex.match(r'(a)(b)?b','ab').lastindex, 1)
+        self.assertEqual(regex.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup,
+          'a')
+        self.assertEqual(regex.match("(?P<a>a(b))", "ab").lastgroup, 'a')
+        self.assertEqual(regex.match("((a))", "a").lastindex, 1)
+
+    def test_bug_545855(self):
+        # Bug 545855 -- This pattern failed to cause a compile error as it
+        # should, instead provoking a TypeError.
+ self.assertRaisesRegex(regex.error, self.BAD_SET, lambda: + regex.compile('foo[a-')) + + def test_bug_418626(self): + # Bugs 418626 at al. -- Testing Greg Chapman's addition of op code + # SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of + # pattern '*?' on a long string. + self.assertEqual(regex.match('.*?c', 10000 * 'ab' + 'cd').end(0), + 20001) + self.assertEqual(regex.match('.*?cd', 5000 * 'ab' + 'c' + 5000 * 'ab' + + 'cde').end(0), 20003) + self.assertEqual(regex.match('.*?cd', 20000 * 'abc' + 'de').end(0), + 60001) + # Non-simple '*?' still used to hit the recursion limit, before the + # non-recursive scheme was implemented. + self.assertEqual(regex.search('(a|b)*?c', 10000 * 'ab' + 'cd').end(0), + 20001) + + def test_bug_612074(self): + pat = "[" + regex.escape("\u2039") + "]" + self.assertEqual(regex.compile(pat) and 1, 1) + + def test_stack_overflow(self): + # Nasty cases that used to overflow the straightforward recursive + # implementation of repeated groups. + self.assertEqual(regex.match('(x)*', 50000 * 'x')[1], 'x') + self.assertEqual(regex.match('(x)*y', 50000 * 'x' + 'y')[1], 'x') + self.assertEqual(regex.match('(x)*?y', 50000 * 'x' + 'y')[1], 'x') + + def test_scanner(self): + def s_ident(scanner, token): return token + def s_operator(scanner, token): return "op%s" % token + def s_float(scanner, token): return float(token) + def s_int(scanner, token): return int(token) + + scanner = regex.Scanner([(r"[a-zA-Z_]\w*", s_ident), (r"\d+\.\d*", + s_float), (r"\d+", s_int), (r"=|\+|-|\*|/", s_operator), (r"\s+", + None), ]) + + self.assertEqual(repr(type(scanner.scanner.scanner("").pattern)), + self.PATTERN_CLASS) + + self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), (['sum', + 'op=', 3, 'op*', 'foo', 'op+', 312.5, 'op+', 'bar'], '')) + + def test_bug_448951(self): + # Bug 448951 (similar to 429357, but with single char match). + # (Also test greedy matches.) + for op in '', '?', '*': + self.assertEqual(regex.match(r'((.%s):)?z' % op, 'z')[:], ('z', + None, None)) + self.assertEqual(regex.match(r'((.%s):)?z' % op, 'a:z')[:], ('a:z', + 'a:', 'a')) + + def test_bug_725106(self): + # Capturing groups in alternatives in repeats. + self.assertEqual(regex.match('^((a)|b)*', 'abc')[:], ('ab', 'b', 'a')) + self.assertEqual(regex.match('^(([ab])|c)*', 'abc')[:], ('abc', 'c', + 'b')) + self.assertEqual(regex.match('^((d)|[ab])*', 'abc')[:], ('ab', 'b', + None)) + self.assertEqual(regex.match('^((a)c|[ab])*', 'abc')[:], ('ab', 'b', + None)) + self.assertEqual(regex.match('^((a)|b)*?c', 'abc')[:], ('abc', 'b', + 'a')) + self.assertEqual(regex.match('^(([ab])|c)*?d', 'abcd')[:], ('abcd', + 'c', 'b')) + self.assertEqual(regex.match('^((d)|[ab])*?c', 'abc')[:], ('abc', 'b', + None)) + self.assertEqual(regex.match('^((a)c|[ab])*?c', 'abc')[:], ('abc', 'b', + None)) + + def test_bug_725149(self): + # Mark_stack_base restoring before restoring marks. + self.assertEqual(regex.match('(a)(?:(?=(b)*)c)*', 'abb')[:], ('a', 'a', + None)) + self.assertEqual(regex.match('(a)((?!(b)*))*', 'abb')[:], ('a', 'a', + None, None)) + + def test_bug_764548(self): + # Bug 764548, regex.compile() barfs on str/unicode subclasses. 
+ class my_unicode(str): pass + pat = regex.compile(my_unicode("abc")) + self.assertEqual(pat.match("xyz"), None) + + def test_finditer(self): + it = regex.finditer(r":+", "a:b::c:::d") + self.assertEqual([item[0] for item in it], [':', '::', ':::']) + + def test_bug_926075(self): + if regex.compile('bug_926075') is regex.compile(b'bug_926075'): + self.fail() + + def test_bug_931848(self): + pattern = "[\u002E\u3002\uFF0E\uFF61]" + self.assertEqual(regex.compile(pattern).split("a.b.c"), ['a', 'b', + 'c']) + + def test_bug_581080(self): + it = regex.finditer(r"\s", "a b") + self.assertEqual(next(it).span(), (1, 2)) + self.assertRaises(StopIteration, lambda: next(it)) + + scanner = regex.compile(r"\s").scanner("a b") + self.assertEqual(scanner.search().span(), (1, 2)) + self.assertEqual(scanner.search(), None) + + def test_bug_817234(self): + it = regex.finditer(r".*", "asdf") + self.assertEqual(next(it).span(), (0, 4)) + self.assertEqual(next(it).span(), (4, 4)) + self.assertRaises(StopIteration, lambda: next(it)) + + def test_empty_array(self): + # SF buf 1647541. + import array + for typecode in 'bBuhHiIlLfd': + a = array.array(typecode) + self.assertEqual(regex.compile(b"bla").match(a), None) + self.assertEqual(regex.compile(b"").match(a)[1 : ], ()) + + def test_inline_flags(self): + # Bug #1700. + upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Below + lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Below + + p = regex.compile(upper_char, regex.I | regex.U) + self.assertEqual(bool(p.match(lower_char)), True) + + p = regex.compile(lower_char, regex.I | regex.U) + self.assertEqual(bool(p.match(upper_char)), True) + + p = regex.compile('(?i)' + upper_char, regex.U) + self.assertEqual(bool(p.match(lower_char)), True) + + p = regex.compile('(?i)' + lower_char, regex.U) + self.assertEqual(bool(p.match(upper_char)), True) + + p = regex.compile('(?iu)' + upper_char) + self.assertEqual(bool(p.match(lower_char)), True) + + p = regex.compile('(?iu)' + lower_char) + self.assertEqual(bool(p.match(upper_char)), True) + + # Changed to positional flags in regex 2023.12.23. + self.assertEqual(bool(regex.match(r"(?i)a", "A")), True) + self.assertEqual(regex.match(r"a(?i)", "A"), None) + + def test_dollar_matches_twice(self): + # $ matches the end of string, and just before the terminating \n. + pattern = regex.compile('$') + self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#') + self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#') + self.assertEqual(pattern.sub('#', '\n'), '#\n#') + + pattern = regex.compile('$', regex.MULTILINE) + self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#') + self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#') + self.assertEqual(pattern.sub('#', '\n'), '#\n#') + + def test_bytes_str_mixing(self): + # Mixing str and bytes is disallowed. 
+        pat = regex.compile('.')
+        bpat = regex.compile(b'.')
+        self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda:
+          pat.match(b'b'))
+        self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda:
+          bpat.match('b'))
+        self.assertRaisesRegex(TypeError, self.STR_PAT_BYTES_TEMPL, lambda:
+          pat.sub(b'b', 'c'))
+        self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda:
+          pat.sub('b', b'c'))
+        self.assertRaisesRegex(TypeError, self.STR_PAT_ON_BYTES, lambda:
+          pat.sub(b'b', b'c'))
+        self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda:
+          bpat.sub(b'b', 'c'))
+        self.assertRaisesRegex(TypeError, self.BYTES_PAT_STR_TEMPL, lambda:
+          bpat.sub('b', b'c'))
+        self.assertRaisesRegex(TypeError, self.BYTES_PAT_ON_STR, lambda:
+          bpat.sub('b', 'c'))
+
+        self.assertRaisesRegex(ValueError, self.BYTES_PAT_UNI_FLAG, lambda:
+          regex.compile(br'\w', regex.UNICODE))
+        self.assertRaisesRegex(ValueError, self.BYTES_PAT_UNI_FLAG, lambda:
+          regex.compile(br'(?u)\w'))
+        self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+          regex.compile(r'\w', regex.UNICODE | regex.ASCII))
+        self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+          regex.compile(r'(?u)\w', regex.ASCII))
+        self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+          regex.compile(r'(?a)\w', regex.UNICODE))
+        self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+          regex.compile(r'(?au)\w'))
+
+    def test_ascii_and_unicode_flag(self):
+        # String patterns.
+        for flags in (0, regex.UNICODE):
+            pat = regex.compile('\xc0', flags | regex.IGNORECASE)
+            self.assertEqual(bool(pat.match('\xe0')), True)
+            pat = regex.compile(r'\w', flags)
+            self.assertEqual(bool(pat.match('\xe0')), True)
+
+        pat = regex.compile('\xc0', regex.ASCII | regex.IGNORECASE)
+        self.assertEqual(pat.match('\xe0'), None)
+        pat = regex.compile('(?a)\xc0', regex.IGNORECASE)
+        self.assertEqual(pat.match('\xe0'), None)
+        pat = regex.compile(r'\w', regex.ASCII)
+        self.assertEqual(pat.match('\xe0'), None)
+        pat = regex.compile(r'(?a)\w')
+        self.assertEqual(pat.match('\xe0'), None)
+
+        # Bytes patterns.
+        for flags in (0, regex.ASCII):
+            pat = regex.compile(b'\xc0', flags | regex.IGNORECASE)
+            self.assertEqual(pat.match(b'\xe0'), None)
+            pat = regex.compile(br'\w')
+            self.assertEqual(pat.match(b'\xe0'), None)
+
+        self.assertRaisesRegex(ValueError, self.MIXED_FLAGS, lambda:
+          regex.compile(r'(?au)\w'))
+
+    def test_subscripting_match(self):
+        m = regex.match(r'(?<a>\w)', 'xy')
+        if not m:
+            self.fail("Failed: expected match but returned None")
+        elif not m or m[0] != m.group(0) or m[1] != m.group(1):
+            self.fail("Failed")
+        if not m:
+            self.fail("Failed: expected match but returned None")
+        elif m[:] != ('x', 'x'):
+            self.fail("Failed: expected \"('x', 'x')\" but got {} instead".format(ascii(m[:])))
+
+    def test_new_named_groups(self):
+        m0 = regex.match(r'(?P<a>\w)', 'x')
+        m1 = regex.match(r'(?<a>\w)', 'x')
+        if not (m0 and m1 and m0[:] == m1[:]):
+            self.fail("Failed")
+
+    def test_properties(self):
+        self.assertEqual(regex.match(b'(?ai)\xC0', b'\xE0'), None)
+        self.assertEqual(regex.match(br'(?ai)\xC0', b'\xE0'), None)
+        self.assertEqual(regex.match(br'(?a)\w', b'\xE0'), None)
+        self.assertEqual(bool(regex.match(r'\w', '\xE0')), True)
+
+        # Dropped the following test. It's not possible to determine what the
+        # correct result should be in the general case.
+# self.assertEqual(bool(regex.match(br'(?L)\w', b'\xE0')), +# b'\xE0'.isalnum()) + + self.assertEqual(bool(regex.match(br'(?L)\d', b'0')), True) + self.assertEqual(bool(regex.match(br'(?L)\s', b' ')), True) + self.assertEqual(bool(regex.match(br'(?L)\w', b'a')), True) + self.assertEqual(regex.match(br'(?L)\d', b'?'), None) + self.assertEqual(regex.match(br'(?L)\s', b'?'), None) + self.assertEqual(regex.match(br'(?L)\w', b'?'), None) + + self.assertEqual(regex.match(br'(?L)\D', b'0'), None) + self.assertEqual(regex.match(br'(?L)\S', b' '), None) + self.assertEqual(regex.match(br'(?L)\W', b'a'), None) + self.assertEqual(bool(regex.match(br'(?L)\D', b'?')), True) + self.assertEqual(bool(regex.match(br'(?L)\S', b'?')), True) + self.assertEqual(bool(regex.match(br'(?L)\W', b'?')), True) + + self.assertEqual(bool(regex.match(r'\p{Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'(?i)\p{Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{IsCyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{Script=Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{InCyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{Block=Cyrillic}', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:Cyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:IsCyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:Script=Cyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:InCyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:Block=Cyrillic:]]', + '\N{CYRILLIC CAPITAL LETTER A}')), True) + + self.assertEqual(bool(regex.match(r'\P{Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{IsCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{Script=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{InCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\P{Block=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^IsCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^Script=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^InCyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'\p{^Block=Cyrillic}', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^Cyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^IsCyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^Script=Cyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^InCyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + self.assertEqual(bool(regex.match(r'[[:^Block=Cyrillic:]]', + '\N{LATIN CAPITAL LETTER A}')), True) + + self.assertEqual(bool(regex.match(r'\d', '0')), True) + self.assertEqual(bool(regex.match(r'\s', ' ')), True) + self.assertEqual(bool(regex.match(r'\w', 'A')), True) + 
self.assertEqual(regex.match(r"\d", "?"), None) + self.assertEqual(regex.match(r"\s", "?"), None) + self.assertEqual(regex.match(r"\w", "?"), None) + self.assertEqual(regex.match(r"\D", "0"), None) + self.assertEqual(regex.match(r"\S", " "), None) + self.assertEqual(regex.match(r"\W", "A"), None) + self.assertEqual(bool(regex.match(r'\D', '?')), True) + self.assertEqual(bool(regex.match(r'\S', '?')), True) + self.assertEqual(bool(regex.match(r'\W', '?')), True) + + self.assertEqual(bool(regex.match(r'\p{L}', 'A')), True) + self.assertEqual(bool(regex.match(r'\p{L}', 'a')), True) + self.assertEqual(bool(regex.match(r'\p{Lu}', 'A')), True) + self.assertEqual(bool(regex.match(r'\p{Ll}', 'a')), True) + + self.assertEqual(bool(regex.match(r'(?i)a', 'a')), True) + self.assertEqual(bool(regex.match(r'(?i)a', 'A')), True) + + self.assertEqual(bool(regex.match(r'\w', '0')), True) + self.assertEqual(bool(regex.match(r'\w', 'a')), True) + self.assertEqual(bool(regex.match(r'\w', '_')), True) + + self.assertEqual(regex.match(r"\X", "\xE0").span(), (0, 1)) + self.assertEqual(regex.match(r"\X", "a\u0300").span(), (0, 2)) + self.assertEqual(regex.findall(r"\X", + "a\xE0a\u0300e\xE9e\u0301"), ['a', '\xe0', 'a\u0300', 'e', + '\xe9', 'e\u0301']) + self.assertEqual(regex.findall(r"\X{3}", + "a\xE0a\u0300e\xE9e\u0301"), ['a\xe0a\u0300', 'e\xe9e\u0301']) + self.assertEqual(regex.findall(r"\X", "\r\r\n\u0301A\u0301"), + ['\r', '\r\n', '\u0301', 'A\u0301']) + + self.assertEqual(bool(regex.match(r'\p{Ll}', 'a')), True) + + chars_u = "-09AZaz_\u0393\u03b3" + chars_b = b"-09AZaz_" + word_set = set("Ll Lm Lo Lt Lu Mc Me Mn Nd Nl No Pc".split()) + + tests = [ + (r"\w", chars_u, "09AZaz_\u0393\u03b3"), + (r"[[:word:]]", chars_u, "09AZaz_\u0393\u03b3"), + (r"\W", chars_u, "-"), + (r"[[:^word:]]", chars_u, "-"), + (r"\d", chars_u, "09"), + (r"[[:digit:]]", chars_u, "09"), + (r"\D", chars_u, "-AZaz_\u0393\u03b3"), + (r"[[:^digit:]]", chars_u, "-AZaz_\u0393\u03b3"), + (r"[[:alpha:]]", chars_u, "AZaz\u0393\u03b3"), + (r"[[:^alpha:]]", chars_u, "-09_"), + (r"[[:alnum:]]", chars_u, "09AZaz\u0393\u03b3"), + (r"[[:^alnum:]]", chars_u, "-_"), + (r"[[:xdigit:]]", chars_u, "09Aa"), + (r"[[:^xdigit:]]", chars_u, "-Zz_\u0393\u03b3"), + (r"\p{InBasicLatin}", "a\xE1", "a"), + (r"\P{InBasicLatin}", "a\xE1", "\xE1"), + (r"(?i)\p{InBasicLatin}", "a\xE1", "a"), + (r"(?i)\P{InBasicLatin}", "a\xE1", "\xE1"), + + (br"(?L)\w", chars_b, b"09AZaz_"), + (br"(?L)[[:word:]]", chars_b, b"09AZaz_"), + (br"(?L)\W", chars_b, b"-"), + (br"(?L)[[:^word:]]", chars_b, b"-"), + (br"(?L)\d", chars_b, b"09"), + (br"(?L)[[:digit:]]", chars_b, b"09"), + (br"(?L)\D", chars_b, b"-AZaz_"), + (br"(?L)[[:^digit:]]", chars_b, b"-AZaz_"), + (br"(?L)[[:alpha:]]", chars_b, b"AZaz"), + (br"(?L)[[:^alpha:]]", chars_b, b"-09_"), + (br"(?L)[[:alnum:]]", chars_b, b"09AZaz"), + (br"(?L)[[:^alnum:]]", chars_b, b"-_"), + (br"(?L)[[:xdigit:]]", chars_b, b"09Aa"), + (br"(?L)[[:^xdigit:]]", chars_b, b"-Zz_"), + + (br"(?a)\w", chars_b, b"09AZaz_"), + (br"(?a)[[:word:]]", chars_b, b"09AZaz_"), + (br"(?a)\W", chars_b, b"-"), + (br"(?a)[[:^word:]]", chars_b, b"-"), + (br"(?a)\d", chars_b, b"09"), + (br"(?a)[[:digit:]]", chars_b, b"09"), + (br"(?a)\D", chars_b, b"-AZaz_"), + (br"(?a)[[:^digit:]]", chars_b, b"-AZaz_"), + (br"(?a)[[:alpha:]]", chars_b, b"AZaz"), + (br"(?a)[[:^alpha:]]", chars_b, b"-09_"), + (br"(?a)[[:alnum:]]", chars_b, b"09AZaz"), + (br"(?a)[[:^alnum:]]", chars_b, b"-_"), + (br"(?a)[[:xdigit:]]", chars_b, b"09Aa"), + (br"(?a)[[:^xdigit:]]", chars_b, b"-Zz_"), + ] + 
for pattern, chars, expected in tests: + try: + if chars[ : 0].join(regex.findall(pattern, chars)) != expected: + self.fail("Failed: {}".format(pattern)) + except Exception as e: + self.fail("Failed: {} raised {}".format(pattern, ascii(e))) + + self.assertEqual(bool(regex.match(r"\p{NumericValue=0}", "0")), + True) + self.assertEqual(bool(regex.match(r"\p{NumericValue=1/2}", + "\N{VULGAR FRACTION ONE HALF}")), True) + self.assertEqual(bool(regex.match(r"\p{NumericValue=0.5}", + "\N{VULGAR FRACTION ONE HALF}")), True) + + def test_word_class(self): + self.assertEqual(regex.findall(r"\w+", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), + ['\u0939\u093f\u0928\u094d\u0926\u0940']) + self.assertEqual(regex.findall(r"\W+", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), [' ', ',']) + self.assertEqual(regex.split(r"(?V1)\b", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), [' ', + '\u0939\u093f\u0928\u094d\u0926\u0940', ',']) + self.assertEqual(regex.split(r"(?V1)\B", + " \u0939\u093f\u0928\u094d\u0926\u0940,"), ['', ' \u0939', + '\u093f', '\u0928', '\u094d', '\u0926', '\u0940,', '']) + + def test_search_anchor(self): + self.assertEqual(regex.findall(r"\G\w{2}", "abcd ef"), ['ab', 'cd']) + + def test_search_reverse(self): + self.assertEqual(regex.findall(r"(?r).", "abc"), ['c', 'b', 'a']) + self.assertEqual(regex.findall(r"(?r).", "abc", overlapped=True), ['c', + 'b', 'a']) + self.assertEqual(regex.findall(r"(?r)..", "abcde"), ['de', 'bc']) + self.assertEqual(regex.findall(r"(?r)..", "abcde", overlapped=True), + ['de', 'cd', 'bc', 'ab']) + self.assertEqual(regex.findall(r"(?r)(.)(-)(.)", "a-b-c", + overlapped=True), [("b", "-", "c"), ("a", "-", "b")]) + + self.assertEqual([m[0] for m in regex.finditer(r"(?r).", "abc")], ['c', + 'b', 'a']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde", + overlapped=True)], ['de', 'cd', 'bc', 'ab']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r).", "abc")], ['c', + 'b', 'a']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde", + overlapped=True)], ['de', 'cd', 'bc', 'ab']) + + self.assertEqual(regex.findall(r"^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual(regex.findall(r"(?V1)^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual(regex.findall(r"(?r)^|\w+", "foo bar"), ['bar', 'foo', + '']) + self.assertEqual(regex.findall(r"(?rV1)^|\w+", "foo bar"), ['bar', + 'foo', '']) + + self.assertEqual([m[0] for m in regex.finditer(r"^|\w+", "foo bar")], + ['', 'foo', 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"(?V1)^|\w+", + "foo bar")], ['', 'foo', 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)^|\w+", + "foo bar")], ['bar', 'foo', '']) + self.assertEqual([m[0] for m in regex.finditer(r"(?rV1)^|\w+", + "foo bar")], ['bar', 'foo', '']) + + self.assertEqual(regex.findall(r"\G\w{2}", "abcd ef"), ['ab', 'cd']) + self.assertEqual(regex.findall(r".{2}(?<=\G.*)", "abcd"), ['ab', 'cd']) + self.assertEqual(regex.findall(r"(?r)\G\w{2}", "abcd ef"), []) + self.assertEqual(regex.findall(r"(?r)\w{2}\G", "abcd ef"), ['ef']) + + self.assertEqual(regex.findall(r"q*", "qqwe"), ['qq', '', '', '']) + self.assertEqual(regex.findall(r"(?V1)q*", "qqwe"), ['qq', '', '', '']) + self.assertEqual(regex.findall(r"(?r)q*", "qqwe"), ['', '', 'qq', '']) + self.assertEqual(regex.findall(r"(?rV1)q*", "qqwe"), ['', '', 'qq', + '']) + + self.assertEqual(regex.findall(".", "abcd", pos=1, endpos=3), ['b', + 'c']) + self.assertEqual(regex.findall(".", "abcd", pos=1, endpos=-1), ['b', + 'c']) + self.assertEqual([m[0] for 
m in regex.finditer(".", "abcd", pos=1, + endpos=3)], ['b', 'c']) + self.assertEqual([m[0] for m in regex.finditer(".", "abcd", pos=1, + endpos=-1)], ['b', 'c']) + + self.assertEqual([m[0] for m in regex.finditer("(?r).", "abcd", pos=1, + endpos=3)], ['c', 'b']) + self.assertEqual([m[0] for m in regex.finditer("(?r).", "abcd", pos=1, + endpos=-1)], ['c', 'b']) + self.assertEqual(regex.findall("(?r).", "abcd", pos=1, endpos=3), ['c', + 'b']) + self.assertEqual(regex.findall("(?r).", "abcd", pos=1, endpos=-1), + ['c', 'b']) + + self.assertEqual(regex.findall(r"[ab]", "aB", regex.I), ['a', 'B']) + self.assertEqual(regex.findall(r"(?r)[ab]", "aB", regex.I), ['B', 'a']) + + self.assertEqual(regex.findall(r"(?r).{2}", "abc"), ['bc']) + self.assertEqual(regex.findall(r"(?r).{2}", "abc", overlapped=True), + ['bc', 'ab']) + self.assertEqual(regex.findall(r"(\w+) (\w+)", + "first second third fourth fifth"), [('first', 'second'), ('third', + 'fourth')]) + self.assertEqual(regex.findall(r"(?r)(\w+) (\w+)", + "first second third fourth fifth"), [('fourth', 'fifth'), ('second', + 'third')]) + + self.assertEqual([m[0] for m in regex.finditer(r"(?r).{2}", "abc")], + ['bc']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r).{2}", "abc", + overlapped=True)], ['bc', 'ab']) + self.assertEqual([m[0] for m in regex.finditer(r"(\w+) (\w+)", + "first second third fourth fifth")], ['first second', + 'third fourth']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)(\w+) (\w+)", + "first second third fourth fifth")], ['fourth fifth', + 'second third']) + + self.assertEqual(regex.search("abcdef", "abcdef").span(), (0, 6)) + self.assertEqual(regex.search("(?r)abcdef", "abcdef").span(), (0, 6)) + self.assertEqual(regex.search("(?i)abcdef", "ABCDEF").span(), (0, 6)) + self.assertEqual(regex.search("(?ir)abcdef", "ABCDEF").span(), (0, 6)) + + self.assertEqual(regex.sub(r"(.)", r"\1", "abc"), 'abc') + self.assertEqual(regex.sub(r"(?r)(.)", r"\1", "abc"), 'abc') + + def test_atomic(self): + # Issue 433030. + self.assertEqual(regex.search(r"(?>a*)a", "aa"), None) + + def test_possessive(self): + # Single-character non-possessive. + self.assertEqual(regex.search(r"a?a", "a").span(), (0, 1)) + self.assertEqual(regex.search(r"a*a", "aaa").span(), (0, 3)) + self.assertEqual(regex.search(r"a+a", "aaa").span(), (0, 3)) + self.assertEqual(regex.search(r"a{1,3}a", "aaa").span(), (0, 3)) + + # Multiple-character non-possessive. + self.assertEqual(regex.search(r"(?:ab)?ab", "ab").span(), (0, 2)) + self.assertEqual(regex.search(r"(?:ab)*ab", "ababab").span(), (0, 6)) + self.assertEqual(regex.search(r"(?:ab)+ab", "ababab").span(), (0, 6)) + self.assertEqual(regex.search(r"(?:ab){1,3}ab", "ababab").span(), (0, + 6)) + + # Single-character possessive. + self.assertEqual(regex.search(r"a?+a", "a"), None) + self.assertEqual(regex.search(r"a*+a", "aaa"), None) + self.assertEqual(regex.search(r"a++a", "aaa"), None) + self.assertEqual(regex.search(r"a{1,3}+a", "aaa"), None) + + # Multiple-character possessive. + self.assertEqual(regex.search(r"(?:ab)?+ab", "ab"), None) + self.assertEqual(regex.search(r"(?:ab)*+ab", "ababab"), None) + self.assertEqual(regex.search(r"(?:ab)++ab", "ababab"), None) + self.assertEqual(regex.search(r"(?:ab){1,3}+ab", "ababab"), None) + + def test_zerowidth(self): + # Issue 3262. 
+ if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split(r"\b", "a b"), ['', 'a', ' ', 'b', + '']) + else: + self.assertEqual(regex.split(r"\b", "a b"), ['a b']) + self.assertEqual(regex.split(r"(?V1)\b", "a b"), ['', 'a', ' ', 'b', + '']) + + # Issue 1647489. + self.assertEqual(regex.findall(r"^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"^|\w+", "foo bar")], + ['', 'foo', 'bar']) + self.assertEqual(regex.findall(r"(?r)^|\w+", "foo bar"), ['bar', + 'foo', '']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)^|\w+", + "foo bar")], ['bar', 'foo', '']) + self.assertEqual(regex.findall(r"(?V1)^|\w+", "foo bar"), ['', 'foo', + 'bar']) + self.assertEqual([m[0] for m in regex.finditer(r"(?V1)^|\w+", + "foo bar")], ['', 'foo', 'bar']) + self.assertEqual(regex.findall(r"(?rV1)^|\w+", "foo bar"), ['bar', + 'foo', '']) + self.assertEqual([m[0] for m in regex.finditer(r"(?rV1)^|\w+", + "foo bar")], ['bar', 'foo', '']) + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split("", "xaxbxc"), ['', 'x', 'a', 'x', + 'b', 'x', 'c', '']) + self.assertEqual([m for m in regex.splititer("", "xaxbxc")], ['', + 'x', 'a', 'x', 'b', 'x', 'c', '']) + else: + self.assertEqual(regex.split("", "xaxbxc"), ['xaxbxc']) + self.assertEqual([m for m in regex.splititer("", "xaxbxc")], + ['xaxbxc']) + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.split("(?r)", "xaxbxc"), ['', 'c', 'x', 'b', + 'x', 'a', 'x', '']) + self.assertEqual([m for m in regex.splititer("(?r)", "xaxbxc")], + ['', 'c', 'x', 'b', 'x', 'a', 'x', '']) + else: + self.assertEqual(regex.split("(?r)", "xaxbxc"), ['xaxbxc']) + self.assertEqual([m for m in regex.splititer("(?r)", "xaxbxc")], + ['xaxbxc']) + + self.assertEqual(regex.split("(?V1)", "xaxbxc"), ['', 'x', 'a', 'x', + 'b', 'x', 'c', '']) + self.assertEqual([m for m in regex.splititer("(?V1)", "xaxbxc")], ['', + 'x', 'a', 'x', 'b', 'x', 'c', '']) + + self.assertEqual(regex.split("(?rV1)", "xaxbxc"), ['', 'c', 'x', 'b', + 'x', 'a', 'x', '']) + self.assertEqual([m for m in regex.splititer("(?rV1)", "xaxbxc")], ['', + 'c', 'x', 'b', 'x', 'a', 'x', '']) + + def test_scoped_and_inline_flags(self): + # Issues 433028, 433024, 433027. + self.assertEqual(regex.search(r"(?i)Ab", "ab").span(), (0, 2)) + self.assertEqual(regex.search(r"(?i:A)b", "ab").span(), (0, 2)) + # Changed to positional flags in regex 2023.12.23. + self.assertEqual(regex.search(r"A(?i)b", "ab"), None) + + self.assertEqual(regex.search(r"(?V0)Ab", "ab"), None) + self.assertEqual(regex.search(r"(?V1)Ab", "ab"), None) + self.assertEqual(regex.search(r"(?-i)Ab", "ab", flags=regex.I), None) + self.assertEqual(regex.search(r"(?-i:A)b", "ab", flags=regex.I), None) + self.assertEqual(regex.search(r"A(?-i)b", "ab", flags=regex.I).span(), + (0, 2)) + + def test_repeated_repeats(self): + # Issue 2537. + self.assertEqual(regex.search(r"(?:a+)+", "aaa").span(), (0, 3)) + self.assertEqual(regex.search(r"(?:(?:ab)+c)+", "abcabc").span(), (0, + 6)) + + # Hg issue 286. 
+ self.assertEqual(regex.search(r"(?:a+){2,}", "aaa").span(), (0, 3)) + + def test_lookbehind(self): + self.assertEqual(regex.search(r"123(?<=a\d+)", "a123").span(), (1, 4)) + self.assertEqual(regex.search(r"123(?<=a\d+)", "b123"), None) + self.assertEqual(regex.search(r"123(?= (3, 7, 0): + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "xy"), + 'y-x-') + else: + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "xy"), + 'y-x') + self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "xy"), 'y-x-') + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "x"), '-x-') + else: + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "x"), '-x') + self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "x"), '-x-') + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "y"), 'y--') + else: + self.assertEqual(regex.sub(r"(?V0)(x)?(y)?", r"\2-\1", "y"), 'y-') + self.assertEqual(regex.sub(r"(?V1)(x)?(y)?", r"\2-\1", "y"), 'y--') + + def test_bug_10328 (self): + # Issue 10328. + pat = regex.compile(r'(?mV0)(?P[ \t]+\r*$)|(?P(?<=[^\n])\Z)') + if sys.version_info >= (3, 7, 0): + self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>', + 'foobar '), ('foobar', 2)) + else: + self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>', + 'foobar '), ('foobar', 1)) + self.assertEqual([m.group() for m in pat.finditer('foobar ')], [' ', + '']) + pat = regex.compile(r'(?mV1)(?P[ \t]+\r*$)|(?P(?<=[^\n])\Z)') + self.assertEqual(pat.subn(lambda m: '<' + m.lastgroup + '>', + 'foobar '), ('foobar', 2)) + self.assertEqual([m.group() for m in pat.finditer('foobar ')], [' ', + '']) + + def test_overlapped(self): + self.assertEqual(regex.findall(r"..", "abcde"), ['ab', 'cd']) + self.assertEqual(regex.findall(r"..", "abcde", overlapped=True), ['ab', + 'bc', 'cd', 'de']) + self.assertEqual(regex.findall(r"(?r)..", "abcde"), ['de', 'bc']) + self.assertEqual(regex.findall(r"(?r)..", "abcde", overlapped=True), + ['de', 'cd', 'bc', 'ab']) + self.assertEqual(regex.findall(r"(.)(-)(.)", "a-b-c", overlapped=True), + [("a", "-", "b"), ("b", "-", "c")]) + + self.assertEqual([m[0] for m in regex.finditer(r"..", "abcde")], ['ab', + 'cd']) + self.assertEqual([m[0] for m in regex.finditer(r"..", "abcde", + overlapped=True)], ['ab', 'bc', 'cd', 'de']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde")], + ['de', 'bc']) + self.assertEqual([m[0] for m in regex.finditer(r"(?r)..", "abcde", + overlapped=True)], ['de', 'cd', 'bc', 'ab']) + + self.assertEqual([m.groups() for m in regex.finditer(r"(.)(-)(.)", + "a-b-c", overlapped=True)], [("a", "-", "b"), ("b", "-", "c")]) + self.assertEqual([m.groups() for m in regex.finditer(r"(?r)(.)(-)(.)", + "a-b-c", overlapped=True)], [("b", "-", "c"), ("a", "-", "b")]) + + def test_splititer(self): + self.assertEqual(regex.split(r",", "a,b,,c,"), ['a', 'b', '', 'c', '']) + self.assertEqual([m for m in regex.splititer(r",", "a,b,,c,")], ['a', + 'b', '', 'c', '']) + + def test_grapheme(self): + self.assertEqual(regex.match(r"\X", "\xE0").span(), (0, 1)) + self.assertEqual(regex.match(r"\X", "a\u0300").span(), (0, 2)) + + self.assertEqual(regex.findall(r"\X", + "a\xE0a\u0300e\xE9e\u0301"), ['a', '\xe0', 'a\u0300', 'e', + '\xe9', 'e\u0301']) + self.assertEqual(regex.findall(r"\X{3}", + "a\xE0a\u0300e\xE9e\u0301"), ['a\xe0a\u0300', 'e\xe9e\u0301']) + self.assertEqual(regex.findall(r"\X", "\r\r\n\u0301A\u0301"), + ['\r', '\r\n', '\u0301', 'A\u0301']) + + def test_word_boundary(self): + text = 
'The quick ("brown") fox can\'t jump 32.3 feet, right?' + self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'The', ' ', + 'quick', ' ("', 'brown', '") ', 'fox', ' ', 'can', "'", 't', + ' ', 'jump', ' ', '32', '.', '3', ' ', 'feet', ', ', + 'right', '?']) + self.assertEqual(regex.split(r'(?V1w)\b', text), ['', 'The', ' ', + 'quick', ' ', '(', '"', 'brown', '"', ')', ' ', 'fox', ' ', + "can't", ' ', 'jump', ' ', '32.3', ' ', 'feet', ',', ' ', + 'right', '?', '']) + + text = "The fox" + self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'The', ' ', + 'fox', '']) + self.assertEqual(regex.split(r'(?V1w)\b', text), ['', 'The', ' ', + 'fox', '']) + + text = "can't aujourd'hui l'objectif" + self.assertEqual(regex.split(r'(?V1)\b', text), ['', 'can', "'", + 't', ' ', 'aujourd', "'", 'hui', ' ', 'l', "'", 'objectif', + '']) + self.assertEqual(regex.split(r'(?V1w)\b', text), ['', "can't", ' ', + "aujourd'hui", ' ', "l'objectif", '']) + + def test_line_boundary(self): + self.assertEqual(regex.findall(r".+", "Line 1\nLine 2\n"), ["Line 1", + "Line 2"]) + self.assertEqual(regex.findall(r".+", "Line 1\rLine 2\r"), + ["Line 1\rLine 2\r"]) + self.assertEqual(regex.findall(r".+", "Line 1\r\nLine 2\r\n"), + ["Line 1\r", "Line 2\r"]) + self.assertEqual(regex.findall(r"(?w).+", "Line 1\nLine 2\n"), + ["Line 1", "Line 2"]) + self.assertEqual(regex.findall(r"(?w).+", "Line 1\rLine 2\r"), + ["Line 1", "Line 2"]) + self.assertEqual(regex.findall(r"(?w).+", "Line 1\r\nLine 2\r\n"), + ["Line 1", "Line 2"]) + + self.assertEqual(regex.search(r"^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"^abc", "\nabc"), None) + self.assertEqual(regex.search(r"^abc", "\rabc"), None) + self.assertEqual(regex.search(r"(?w)^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"(?w)^abc", "\nabc"), None) + self.assertEqual(regex.search(r"(?w)^abc", "\rabc"), None) + + self.assertEqual(regex.search(r"abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"abc$", "abc\r"), None) + self.assertEqual(regex.search(r"(?w)abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"(?w)abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"(?w)abc$", "abc\r").start(), 0) + + self.assertEqual(regex.search(r"(?m)^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"(?m)^abc", "\nabc").start(), 1) + self.assertEqual(regex.search(r"(?m)^abc", "\rabc"), None) + self.assertEqual(regex.search(r"(?mw)^abc", "abc").start(), 0) + self.assertEqual(regex.search(r"(?mw)^abc", "\nabc").start(), 1) + self.assertEqual(regex.search(r"(?mw)^abc", "\rabc").start(), 1) + + self.assertEqual(regex.search(r"(?m)abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"(?m)abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"(?m)abc$", "abc\r"), None) + self.assertEqual(regex.search(r"(?mw)abc$", "abc").start(), 0) + self.assertEqual(regex.search(r"(?mw)abc$", "abc\n").start(), 0) + self.assertEqual(regex.search(r"(?mw)abc$", "abc\r").start(), 0) + + def test_branch_reset(self): + self.assertEqual(regex.match(r"(?:(a)|(b))(c)", "ac").groups(), ('a', + None, 'c')) + self.assertEqual(regex.match(r"(?:(a)|(b))(c)", "bc").groups(), (None, + 'b', 'c')) + self.assertEqual(regex.match(r"(?:(?a)|(?b))(?c)", + "ac").groups(), ('a', None, 'c')) + self.assertEqual(regex.match(r"(?:(?a)|(?b))(?c)", + "bc").groups(), (None, 'b', 'c')) + + self.assertEqual(regex.match(r"(?a)(?:(?b)|(?c))(?d)", + "abd").groups(), ('a', 'b', None, 'd')) + 
self.assertEqual(regex.match(r"(?a)(?:(?b)|(?c))(?d)", + "acd").groups(), ('a', None, 'c', 'd')) + self.assertEqual(regex.match(r"(a)(?:(b)|(c))(d)", "abd").groups(), + ('a', 'b', None, 'd')) + + self.assertEqual(regex.match(r"(a)(?:(b)|(c))(d)", "acd").groups(), + ('a', None, 'c', 'd')) + self.assertEqual(regex.match(r"(a)(?|(b)|(b))(d)", "abd").groups(), + ('a', 'b', 'd')) + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "ac").groups(), + ('a', None, 'c')) + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "bc").groups(), + (None, 'b', 'c')) + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "ac").groups(), + ('a', 'c')) + + self.assertEqual(regex.match(r"(?|(?a)|(?b))(c)", "bc").groups(), + ('b', 'c')) + + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(?d))(e)", + "abe").groups(), ('a', 'b', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(?d))(e)", + "cde").groups(), ('d', 'c', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(d))(e)", + "abe").groups(), ('a', 'b', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(?c)(d))(e)", + "cde").groups(), ('d', 'c', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(d))(e)", + "abe").groups(), ('a', 'b', 'e')) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(d))(e)", + "cde").groups(), ('c', 'd', 'e')) + + # Hg issue 87: Allow duplicate names of groups + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "abe").groups(), ("a", "b", "e")) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "abe").capturesdict(), {"a": ["a"], "b": ["b"]}) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "cde").groups(), ("d", None, "e")) + self.assertEqual(regex.match(r"(?|(?a)(?b)|(c)(?d))(e)", + "cde").capturesdict(), {"a": ["c", "d"], "b": []}) + + def test_set(self): + self.assertEqual(regex.match(r"[a]", "a").span(), (0, 1)) + self.assertEqual(regex.match(r"(?i)[a]", "A").span(), (0, 1)) + self.assertEqual(regex.match(r"[a-b]", r"a").span(), (0, 1)) + self.assertEqual(regex.match(r"(?i)[a-b]", r"A").span(), (0, 1)) + + self.assertEqual(regex.sub(r"(?V0)([][])", r"-", "a[b]c"), "a-b-c") + + self.assertEqual(regex.findall(r"[\p{Alpha}]", "a0"), ["a"]) + self.assertEqual(regex.findall(r"(?i)[\p{Alpha}]", "A0"), ["A"]) + + self.assertEqual(regex.findall(r"[a\p{Alpha}]", "ab0"), ["a", "b"]) + self.assertEqual(regex.findall(r"[a\P{Alpha}]", "ab0"), ["a", "0"]) + self.assertEqual(regex.findall(r"(?i)[a\p{Alpha}]", "ab0"), ["a", + "b"]) + self.assertEqual(regex.findall(r"(?i)[a\P{Alpha}]", "ab0"), ["a", + "0"]) + + self.assertEqual(regex.findall(r"[a-b\p{Alpha}]", "abC0"), ["a", + "b", "C"]) + self.assertEqual(regex.findall(r"(?i)[a-b\p{Alpha}]", "AbC0"), ["A", + "b", "C"]) + + self.assertEqual(regex.findall(r"[\p{Alpha}]", "a0"), ["a"]) + self.assertEqual(regex.findall(r"[\P{Alpha}]", "a0"), ["0"]) + self.assertEqual(regex.findall(r"[^\p{Alpha}]", "a0"), ["0"]) + self.assertEqual(regex.findall(r"[^\P{Alpha}]", "a0"), ["a"]) + + self.assertEqual("".join(regex.findall(r"[^\d-h]", "a^b12c-h")), + 'a^bc') + self.assertEqual("".join(regex.findall(r"[^\dh]", "a^b12c-h")), + 'a^bc-') + self.assertEqual("".join(regex.findall(r"[^h\s\db]", "a^b 12c-h")), + 'a^c-') + self.assertEqual("".join(regex.findall(r"[^b\w]", "a b")), ' ') + self.assertEqual("".join(regex.findall(r"[^b\S]", "a b")), ' ') + self.assertEqual("".join(regex.findall(r"[^8\d]", "a 1b2")), 'a b') + + all_chars = "".join(chr(c) for c in range(0x100)) + self.assertEqual(len(regex.findall(r"\p{ASCII}", all_chars)), 128) + 
self.assertEqual(len(regex.findall(r"\p{Letter}", all_chars)), + 117) + self.assertEqual(len(regex.findall(r"\p{Digit}", all_chars)), 10) + + # Set operators + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Letter}]", + all_chars)), 52) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Alnum}&&\p{Letter}]", + all_chars)), 52) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Alnum}&&\p{Digit}]", + all_chars)), 10) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Cc}]", + all_chars)), 33) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}&&\p{Graph}]", + all_chars)), 94) + self.assertEqual(len(regex.findall(r"(?V1)[\p{ASCII}--\p{Cc}]", + all_chars)), 95) + self.assertEqual(len(regex.findall(r"[\p{Letter}\p{Digit}]", + all_chars)), 127) + self.assertEqual(len(regex.findall(r"(?V1)[\p{Letter}||\p{Digit}]", + all_chars)), 127) + self.assertEqual(len(regex.findall(r"\p{HexDigit}", all_chars)), + 22) + self.assertEqual(len(regex.findall(r"(?V1)[\p{HexDigit}~~\p{Digit}]", + all_chars)), 12) + self.assertEqual(len(regex.findall(r"(?V1)[\p{Digit}~~\p{HexDigit}]", + all_chars)), 12) + + self.assertEqual(repr(type(regex.compile(r"(?V0)([][-])"))), + self.PATTERN_CLASS) + self.assertEqual(regex.findall(r"(?V1)[[a-z]--[aei]]", "abc"), ["b", + "c"]) + self.assertEqual(regex.findall(r"(?iV1)[[a-z]--[aei]]", "abc"), ["b", + "c"]) + self.assertEqual(regex.findall(r"(?V1)[\w--a]","abc"), ["b", "c"]) + self.assertEqual(regex.findall(r"(?iV1)[\w--a]","abc"), ["b", "c"]) + + def test_various(self): + tests = [ + # Test ?P< and ?P= extensions. + ('(?Pa)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with a digit. + ('(?Pa)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with an illegal char. + ('(?Pa)', '', '', regex.error, self.BAD_GROUP_NAME), # Begins with an illegal char. + + # Same tests, for the ?P= form. + ('(?Pa)(?P=foo_123', 'aa', '', regex.error, + self.MISSING_RPAREN), + ('(?Pa)(?P=1)', 'aa', '1', ascii('a')), + ('(?Pa)(?P=0)', 'aa', '', regex.error, + self.BAD_GROUP_NAME), + ('(?Pa)(?P=-1)', 'aa', '', regex.error, + self.BAD_GROUP_NAME), + ('(?Pa)(?P=!)', 'aa', '', regex.error, + self.BAD_GROUP_NAME), + ('(?Pa)(?P=foo_124)', 'aa', '', regex.error, + self.UNKNOWN_GROUP), # Backref to undefined group. + + ('(?Pa)', 'a', '1', ascii('a')), + ('(?Pa)(?P=foo_123)', 'aa', '1', ascii('a')), + + # Mal-formed \g in pattern treated as literal for compatibility. + (r'(?a)\ga)\g<1>', 'aa', '1', ascii('a')), + (r'(?a)\g', 'aa', '', ascii(None)), + (r'(?a)\g', 'aa', '', regex.error, + self.UNKNOWN_GROUP), # Backref to undefined group. + + ('(?a)', 'a', '1', ascii('a')), + (r'(?a)\g', 'aa', '1', ascii('a')), + + # Test octal escapes. + ('\\1', 'a', '', regex.error, self.INVALID_GROUP_REF), # Backreference. + ('[\\1]', '\1', '0', "'\\x01'"), # Character. + ('\\09', chr(0) + '9', '0', ascii(chr(0) + '9')), + ('\\141', 'a', '0', ascii('a')), + ('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', + '0,11', ascii(('abcdefghijklk9', 'k'))), + + # Test \0 is handled everywhere. + (r'\0', '\0', '0', ascii('\0')), + (r'[\0a]', '\0', '0', ascii('\0')), + (r'[a\0]', '\0', '0', ascii('\0')), + (r'[^a\0]', '\0', '', ascii(None)), + + # Test various letter escapes. + (r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', '0', + ascii('\a\b\f\n\r\t\v')), + (r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', '0', + ascii('\a\b\f\n\r\t\v')), + (r'\xff', '\377', '0', ascii(chr(255))), + + # New \x semantics. 
+ (r'\x00ffffffffffffff', '\377', '', ascii(None)), + (r'\x00f', '\017', '', ascii(None)), + (r'\x00fe', '\376', '', ascii(None)), + + (r'\x00ff', '\377', '', ascii(None)), + (r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', '0', ascii('\t\n\v\r\f\ag')), + ('\t\n\v\r\f\a\\g', '\t\n\v\r\f\ag', '0', ascii('\t\n\v\r\f\ag')), + (r'\t\n\v\r\f\a', '\t\n\v\r\f\a', '0', ascii(chr(9) + chr(10) + + chr(11) + chr(13) + chr(12) + chr(7))), + (r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', '0', + ascii('\t\n\v\r\f\b')), + + (r"^\w+=(\\[\000-\277]|[^\n\\])*", + "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c", '0', + ascii("SRC=eval.c g.c blah blah blah \\\\")), + + # Test that . only matches \n in DOTALL mode. + ('a.b', 'acb', '0', ascii('acb')), + ('a.b', 'a\nb', '', ascii(None)), + ('a.*b', 'acc\nccb', '', ascii(None)), + ('a.{4,5}b', 'acc\nccb', '', ascii(None)), + ('a.b', 'a\rb', '0', ascii('a\rb')), + # Changed to positional flags in regex 2023.12.23. + ('a.b(?s)', 'a\nb', '', ascii(None)), + ('(?s)a.b', 'a\nb', '0', ascii('a\nb')), + ('a.*(?s)b', 'acc\nccb', '', ascii(None)), + ('(?s)a.*b', 'acc\nccb', '0', ascii('acc\nccb')), + ('(?s)a.{4,5}b', 'acc\nccb', '0', ascii('acc\nccb')), + + (')', '', '', regex.error, self.TRAILING_CHARS), # Unmatched right bracket. + ('', '', '0', "''"), # Empty pattern. + ('abc', 'abc', '0', ascii('abc')), + ('abc', 'xbc', '', ascii(None)), + ('abc', 'axc', '', ascii(None)), + ('abc', 'abx', '', ascii(None)), + ('abc', 'xabcy', '0', ascii('abc')), + ('abc', 'ababc', '0', ascii('abc')), + ('ab*c', 'abc', '0', ascii('abc')), + ('ab*bc', 'abc', '0', ascii('abc')), + + ('ab*bc', 'abbc', '0', ascii('abbc')), + ('ab*bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab+bc', 'abbc', '0', ascii('abbc')), + ('ab+bc', 'abc', '', ascii(None)), + ('ab+bc', 'abq', '', ascii(None)), + ('ab+bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab?bc', 'abbc', '0', ascii('abbc')), + ('ab?bc', 'abc', '0', ascii('abc')), + ('ab?bc', 'abbbbc', '', ascii(None)), + ('ab?c', 'abc', '0', ascii('abc')), + + ('^abc$', 'abc', '0', ascii('abc')), + ('^abc$', 'abcc', '', ascii(None)), + ('^abc', 'abcc', '0', ascii('abc')), + ('^abc$', 'aabc', '', ascii(None)), + ('abc$', 'aabc', '0', ascii('abc')), + ('^', 'abc', '0', ascii('')), + ('$', 'abc', '0', ascii('')), + ('a.c', 'abc', '0', ascii('abc')), + ('a.c', 'axc', '0', ascii('axc')), + ('a.*c', 'axyzc', '0', ascii('axyzc')), + + ('a.*c', 'axyzd', '', ascii(None)), + ('a[bc]d', 'abc', '', ascii(None)), + ('a[bc]d', 'abd', '0', ascii('abd')), + ('a[b-d]e', 'abd', '', ascii(None)), + ('a[b-d]e', 'ace', '0', ascii('ace')), + ('a[b-d]', 'aac', '0', ascii('ac')), + ('a[-b]', 'a-', '0', ascii('a-')), + ('a[\\-b]', 'a-', '0', ascii('a-')), + ('a[b-]', 'a-', '0', ascii('a-')), + ('a[]b', '-', '', regex.error, self.BAD_SET), + + ('a[', '-', '', regex.error, self.BAD_SET), + ('a\\', '-', '', regex.error, self.BAD_ESCAPE), + ('abc)', '-', '', regex.error, self.TRAILING_CHARS), + ('(abc', '-', '', regex.error, self.MISSING_RPAREN), + ('a]', 'a]', '0', ascii('a]')), + ('a[]]b', 'a]b', '0', ascii('a]b')), + ('a[]]b', 'a]b', '0', ascii('a]b')), + ('a[^bc]d', 'aed', '0', ascii('aed')), + ('a[^bc]d', 'abd', '', ascii(None)), + ('a[^-b]c', 'adc', '0', ascii('adc')), + + ('a[^-b]c', 'a-c', '', ascii(None)), + ('a[^]b]c', 'a]c', '', ascii(None)), + ('a[^]b]c', 'adc', '0', ascii('adc')), + ('\\ba\\b', 'a-', '0', ascii('a')), + ('\\ba\\b', '-a', '0', ascii('a')), + ('\\ba\\b', '-a-', '0', ascii('a')), + ('\\by\\b', 'xy', '', ascii(None)), + ('\\by\\b', 'yz', '', ascii(None)), + ('\\by\\b', 'xyz', '', 
ascii(None)), + ('x\\b', 'xyz', '', ascii(None)), + + ('x\\B', 'xyz', '0', ascii('x')), + ('\\Bz', 'xyz', '0', ascii('z')), + ('z\\B', 'xyz', '', ascii(None)), + ('\\Bx', 'xyz', '', ascii(None)), + ('\\Ba\\B', 'a-', '', ascii(None)), + ('\\Ba\\B', '-a', '', ascii(None)), + ('\\Ba\\B', '-a-', '', ascii(None)), + ('\\By\\B', 'xy', '', ascii(None)), + ('\\By\\B', 'yz', '', ascii(None)), + ('\\By\\b', 'xy', '0', ascii('y')), + + ('\\by\\B', 'yz', '0', ascii('y')), + ('\\By\\B', 'xyz', '0', ascii('y')), + ('ab|cd', 'abc', '0', ascii('ab')), + ('ab|cd', 'abcd', '0', ascii('ab')), + ('()ef', 'def', '0,1', ascii(('ef', ''))), + ('$b', 'b', '', ascii(None)), + ('a\\(b', 'a(b', '', ascii(('a(b',))), + ('a\\(*b', 'ab', '0', ascii('ab')), + ('a\\(*b', 'a((b', '0', ascii('a((b')), + ('a\\\\b', 'a\\b', '0', ascii('a\\b')), + + ('((a))', 'abc', '0,1,2', ascii(('a', 'a', 'a'))), + ('(a)b(c)', 'abc', '0,1,2', ascii(('abc', 'a', 'c'))), + ('a+b+c', 'aabbabc', '0', ascii('abc')), + ('(a+|b)*', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b)+', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b)?', 'ab', '0,1', ascii(('a', 'a'))), + (')(', '-', '', regex.error, self.TRAILING_CHARS), + ('[^ab]*', 'cde', '0', ascii('cde')), + ('abc', '', '', ascii(None)), + ('a*', '', '0', ascii('')), + + ('a|b|c|d|e', 'e', '0', ascii('e')), + ('(a|b|c|d|e)f', 'ef', '0,1', ascii(('ef', 'e'))), + ('abcd*efg', 'abcdefg', '0', ascii('abcdefg')), + ('ab*', 'xabyabbbz', '0', ascii('ab')), + ('ab*', 'xayabbbz', '0', ascii('a')), + ('(ab|cd)e', 'abcde', '0,1', ascii(('cde', 'cd'))), + ('[abhgefdc]ij', 'hij', '0', ascii('hij')), + ('^(ab|cd)e', 'abcde', '', ascii(None)), + ('(abc|)ef', 'abcdef', '0,1', ascii(('ef', ''))), + ('(a|b)c*d', 'abcd', '0,1', ascii(('bcd', 'b'))), + + ('(ab|ab*)bc', 'abc', '0,1', ascii(('abc', 'a'))), + ('a([bc]*)c*', 'abc', '0,1', ascii(('abc', 'bc'))), + ('a([bc]*)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]+)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]*)(c+d)', 'abcd', '0,1,2', ascii(('abcd', 'b', 'cd'))), + ('a[bcd]*dcdcde', 'adcdcde', '0', ascii('adcdcde')), + ('a[bcd]+dcdcde', 'adcdcde', '', ascii(None)), + ('(ab|a)b*c', 'abc', '0,1', ascii(('abc', 'ab'))), + ('((a)(b)c)(d)', 'abcd', '1,2,3,4', ascii(('abc', 'a', 'b', 'd'))), + ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', '0', ascii('alpha')), + + ('^a(bc+|b[eh])g|.h$', 'abh', '0,1', ascii(('bh', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'ij', '0,1,2', ascii(('ij', 'ij', + 'j'))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effg', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('(((((((((a)))))))))', 'a', '0', ascii('a')), + ('multiple words of text', 'uh-uh', '', ascii(None)), + ('multiple words', 'multiple words, yeah', '0', + ascii('multiple words')), + ('(.*)c(.*)', 'abcde', '0,1,2', ascii(('abcde', 'ab', 'de'))), + + ('\\((.*), (.*)\\)', '(a, b)', '2,1', ascii(('b', 'a'))), + ('[k]', 'ab', '', ascii(None)), + ('a[-]?c', 'ac', '0', ascii('ac')), + ('(abc)\\1', 'abcabc', '1', ascii('abc')), + ('([a-c]*)\\1', 'abcabc', '1', ascii('abc')), + ('^(.+)?B', 'AB', '1', ascii('A')), + ('(a+).\\1$', 'aaaaa', '0,1', ascii(('aaaaa', 'aa'))), + ('^(a+).\\1$', 'aaaa', '', ascii(None)), + ('(abc)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))), + ('([a-c]+)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))), + + ('(a)\\1', 'aa', '0,1', ascii(('aa', 'a'))), + ('(a+)\\1', 
'aa', '0,1', ascii(('aa', 'a'))), + ('(a+)+\\1', 'aa', '0,1', ascii(('aa', 'a'))), + ('(a).+\\1', 'aba', '0,1', ascii(('aba', 'a'))), + ('(a)ba*\\1', 'aba', '0,1', ascii(('aba', 'a'))), + ('(aa|a)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))), + ('(a|aa)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))), + ('(a+)a\\1$', 'aaa', '0,1', ascii(('aaa', 'a'))), + ('([abc]*)\\1', 'abcabc', '0,1', ascii(('abcabc', 'abc'))), + ('(a)(b)c|ab', 'ab', '0,1,2', ascii(('ab', None, None))), + + ('(a)+x', 'aaax', '0,1', ascii(('aaax', 'a'))), + ('([ac])+x', 'aacx', '0,1', ascii(('aacx', 'c'))), + ('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', '0,1', + ascii(('d:msgs/tdir/sub1/', 'tdir/'))), + ('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', + '0,1,2,3', ascii(('track1.title:TBlah blah blah', 'track1', + 'title', 'Blah blah blah'))), + ('([^N]*N)+', 'abNNxyzN', '0,1', ascii(('abNNxyzN', 'xyzN'))), + ('([^N]*N)+', 'abNNxyz', '0,1', ascii(('abNN', 'N'))), + ('([abc]*)x', 'abcx', '0,1', ascii(('abcx', 'abc'))), + ('([abc]*)x', 'abc', '', ascii(None)), + ('([xyz]*)x', 'abcx', '0,1', ascii(('x', ''))), + ('(a)+b|aac', 'aac', '0,1', ascii(('aac', None))), + + # Test symbolic groups. + ('(?Paaa)a', 'aaaa', '', regex.error, self.BAD_GROUP_NAME), + ('(?Paaa)a', 'aaaa', '0,id', ascii(('aaaa', 'aaa'))), + ('(?Paa)(?P=id)', 'aaaa', '0,id', ascii(('aaaa', 'aa'))), + ('(?Paa)(?P=xd)', 'aaaa', '', regex.error, self.UNKNOWN_GROUP), + + # Character properties. + (r"\g", "g", '0', ascii('g')), + (r"\g<1>", "g", '', regex.error, self.INVALID_GROUP_REF), + (r"(.)\g<1>", "gg", '0', ascii('gg')), + (r"(.)\g<1>", "gg", '', ascii(('gg', 'g'))), + (r"\N", "N", '0', ascii('N')), + (r"\N{LATIN SMALL LETTER A}", "a", '0', ascii('a')), + (r"\p", "p", '0', ascii('p')), + (r"\p{Ll}", "a", '0', ascii('a')), + (r"\P", "P", '0', ascii('P')), + (r"\P{Lu}", "p", '0', ascii('p')), + + # All tests from Perl. 
+ ('abc', 'abc', '0', ascii('abc')), + ('abc', 'xbc', '', ascii(None)), + ('abc', 'axc', '', ascii(None)), + ('abc', 'abx', '', ascii(None)), + ('abc', 'xabcy', '0', ascii('abc')), + ('abc', 'ababc', '0', ascii('abc')), + + ('ab*c', 'abc', '0', ascii('abc')), + ('ab*bc', 'abc', '0', ascii('abc')), + ('ab*bc', 'abbc', '0', ascii('abbc')), + ('ab*bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{0,}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab+bc', 'abbc', '0', ascii('abbc')), + ('ab+bc', 'abc', '', ascii(None)), + ('ab+bc', 'abq', '', ascii(None)), + ('ab{1,}bc', 'abq', '', ascii(None)), + ('ab+bc', 'abbbbc', '0', ascii('abbbbc')), + + ('ab{1,}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{1,3}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{3,4}bc', 'abbbbc', '0', ascii('abbbbc')), + ('ab{4,5}bc', 'abbbbc', '', ascii(None)), + ('ab?bc', 'abbc', '0', ascii('abbc')), + ('ab?bc', 'abc', '0', ascii('abc')), + ('ab{0,1}bc', 'abc', '0', ascii('abc')), + ('ab?bc', 'abbbbc', '', ascii(None)), + ('ab?c', 'abc', '0', ascii('abc')), + ('ab{0,1}c', 'abc', '0', ascii('abc')), + + ('^abc$', 'abc', '0', ascii('abc')), + ('^abc$', 'abcc', '', ascii(None)), + ('^abc', 'abcc', '0', ascii('abc')), + ('^abc$', 'aabc', '', ascii(None)), + ('abc$', 'aabc', '0', ascii('abc')), + ('^', 'abc', '0', ascii('')), + ('$', 'abc', '0', ascii('')), + ('a.c', 'abc', '0', ascii('abc')), + ('a.c', 'axc', '0', ascii('axc')), + ('a.*c', 'axyzc', '0', ascii('axyzc')), + + ('a.*c', 'axyzd', '', ascii(None)), + ('a[bc]d', 'abc', '', ascii(None)), + ('a[bc]d', 'abd', '0', ascii('abd')), + ('a[b-d]e', 'abd', '', ascii(None)), + ('a[b-d]e', 'ace', '0', ascii('ace')), + ('a[b-d]', 'aac', '0', ascii('ac')), + ('a[-b]', 'a-', '0', ascii('a-')), + ('a[b-]', 'a-', '0', ascii('a-')), + ('a[b-a]', '-', '', regex.error, self.BAD_CHAR_RANGE), + ('a[]b', '-', '', regex.error, self.BAD_SET), + + ('a[', '-', '', regex.error, self.BAD_SET), + ('a]', 'a]', '0', ascii('a]')), + ('a[]]b', 'a]b', '0', ascii('a]b')), + ('a[^bc]d', 'aed', '0', ascii('aed')), + ('a[^bc]d', 'abd', '', ascii(None)), + ('a[^-b]c', 'adc', '0', ascii('adc')), + ('a[^-b]c', 'a-c', '', ascii(None)), + ('a[^]b]c', 'a]c', '', ascii(None)), + ('a[^]b]c', 'adc', '0', ascii('adc')), + ('ab|cd', 'abc', '0', ascii('ab')), + + ('ab|cd', 'abcd', '0', ascii('ab')), + ('()ef', 'def', '0,1', ascii(('ef', ''))), + ('*a', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('(*)b', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('$b', 'b', '', ascii(None)), + ('a\\', '-', '', regex.error, self.BAD_ESCAPE), + ('a\\(b', 'a(b', '', ascii(('a(b',))), + ('a\\(*b', 'ab', '0', ascii('ab')), + ('a\\(*b', 'a((b', '0', ascii('a((b')), + ('a\\\\b', 'a\\b', '0', ascii('a\\b')), + + ('abc)', '-', '', regex.error, self.TRAILING_CHARS), + ('(abc', '-', '', regex.error, self.MISSING_RPAREN), + ('((a))', 'abc', '0,1,2', ascii(('a', 'a', 'a'))), + ('(a)b(c)', 'abc', '0,1,2', ascii(('abc', 'a', 'c'))), + ('a+b+c', 'aabbabc', '0', ascii('abc')), + ('a{1,}b{1,}c', 'aabbabc', '0', ascii('abc')), + ('a**', '-', '', regex.error, self.MULTIPLE_REPEAT), + ('a.+?c', 'abcabc', '0', ascii('abc')), + ('(a+|b)*', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b){0,}', 'ab', '0,1', ascii(('ab', 'b'))), + + ('(a+|b)+', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b){1,}', 'ab', '0,1', ascii(('ab', 'b'))), + ('(a+|b)?', 'ab', '0,1', ascii(('a', 'a'))), + ('(a+|b){0,1}', 'ab', '0,1', ascii(('a', 'a'))), + (')(', '-', '', regex.error, self.TRAILING_CHARS), + ('[^ab]*', 'cde', '0', ascii('cde')), + ('abc', '', '', ascii(None)), + ('a*', '', '0', 
ascii('')), + ('([abc])*d', 'abbbcd', '0,1', ascii(('abbbcd', 'c'))), + ('([abc])*bcd', 'abcd', '0,1', ascii(('abcd', 'a'))), + + ('a|b|c|d|e', 'e', '0', ascii('e')), + ('(a|b|c|d|e)f', 'ef', '0,1', ascii(('ef', 'e'))), + ('abcd*efg', 'abcdefg', '0', ascii('abcdefg')), + ('ab*', 'xabyabbbz', '0', ascii('ab')), + ('ab*', 'xayabbbz', '0', ascii('a')), + ('(ab|cd)e', 'abcde', '0,1', ascii(('cde', 'cd'))), + ('[abhgefdc]ij', 'hij', '0', ascii('hij')), + ('^(ab|cd)e', 'abcde', '', ascii(None)), + ('(abc|)ef', 'abcdef', '0,1', ascii(('ef', ''))), + ('(a|b)c*d', 'abcd', '0,1', ascii(('bcd', 'b'))), + + ('(ab|ab*)bc', 'abc', '0,1', ascii(('abc', 'a'))), + ('a([bc]*)c*', 'abc', '0,1', ascii(('abc', 'bc'))), + ('a([bc]*)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]+)(c*d)', 'abcd', '0,1,2', ascii(('abcd', 'bc', 'd'))), + ('a([bc]*)(c+d)', 'abcd', '0,1,2', ascii(('abcd', 'b', 'cd'))), + ('a[bcd]*dcdcde', 'adcdcde', '0', ascii('adcdcde')), + ('a[bcd]+dcdcde', 'adcdcde', '', ascii(None)), + ('(ab|a)b*c', 'abc', '0,1', ascii(('abc', 'ab'))), + ('((a)(b)c)(d)', 'abcd', '1,2,3,4', ascii(('abc', 'a', 'b', 'd'))), + ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', '0', ascii('alpha')), + + ('^a(bc+|b[eh])g|.h$', 'abh', '0,1', ascii(('bh', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('(bc+d$|ef*g.|h?i(j|k))', 'ij', '0,1,2', ascii(('ij', 'ij', + 'j'))), + ('(bc+d$|ef*g.|h?i(j|k))', 'effg', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', '', ascii(None)), + ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', '0,1,2', ascii(('effgz', + 'effgz', None))), + ('((((((((((a))))))))))', 'a', '10', ascii('a')), + ('((((((((((a))))))))))\\10', 'aa', '0', ascii('aa')), + + # Python does not have the same rules for \\41 so this is a syntax error + # ('((((((((((a))))))))))\\41', 'aa', '', ascii(None)), + # ('((((((((((a))))))))))\\41', 'a!', '0', ascii('a!')), + ('((((((((((a))))))))))\\41', '', '', regex.error, + self.INVALID_GROUP_REF), + ('(?i)((((((((((a))))))))))\\41', '', '', regex.error, + self.INVALID_GROUP_REF), + + ('(((((((((a)))))))))', 'a', '0', ascii('a')), + ('multiple words of text', 'uh-uh', '', ascii(None)), + ('multiple words', 'multiple words, yeah', '0', + ascii('multiple words')), + ('(.*)c(.*)', 'abcde', '0,1,2', ascii(('abcde', 'ab', 'de'))), + ('\\((.*), (.*)\\)', '(a, b)', '2,1', ascii(('b', 'a'))), + ('[k]', 'ab', '', ascii(None)), + ('a[-]?c', 'ac', '0', ascii('ac')), + ('(abc)\\1', 'abcabc', '1', ascii('abc')), + ('([a-c]*)\\1', 'abcabc', '1', ascii('abc')), + ('(?i)abc', 'ABC', '0', ascii('ABC')), + + ('(?i)abc', 'XBC', '', ascii(None)), + ('(?i)abc', 'AXC', '', ascii(None)), + ('(?i)abc', 'ABX', '', ascii(None)), + ('(?i)abc', 'XABCY', '0', ascii('ABC')), + ('(?i)abc', 'ABABC', '0', ascii('ABC')), + ('(?i)ab*c', 'ABC', '0', ascii('ABC')), + ('(?i)ab*bc', 'ABC', '0', ascii('ABC')), + ('(?i)ab*bc', 'ABBC', '0', ascii('ABBC')), + ('(?i)ab*?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{0,}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + + ('(?i)ab+?bc', 'ABBC', '0', ascii('ABBC')), + ('(?i)ab+bc', 'ABC', '', ascii(None)), + ('(?i)ab+bc', 'ABQ', '', ascii(None)), + ('(?i)ab{1,}bc', 'ABQ', '', ascii(None)), + ('(?i)ab+bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{1,}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{1,3}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{3,4}?bc', 'ABBBBC', '0', ascii('ABBBBC')), + ('(?i)ab{4,5}?bc', 'ABBBBC', '', ascii(None)), + ('(?i)ab??bc', 'ABBC', '0', ascii('ABBC')), + + ('(?i)ab??bc', 'ABC', '0', ascii('ABC')), + 
('(?i)ab{0,1}?bc', 'ABC', '0', ascii('ABC')), + ('(?i)ab??bc', 'ABBBBC', '', ascii(None)), + ('(?i)ab??c', 'ABC', '0', ascii('ABC')), + ('(?i)ab{0,1}?c', 'ABC', '0', ascii('ABC')), + ('(?i)^abc$', 'ABC', '0', ascii('ABC')), + ('(?i)^abc$', 'ABCC', '', ascii(None)), + ('(?i)^abc', 'ABCC', '0', ascii('ABC')), + ('(?i)^abc$', 'AABC', '', ascii(None)), + ('(?i)abc$', 'AABC', '0', ascii('ABC')), + + ('(?i)^', 'ABC', '0', ascii('')), + ('(?i)$', 'ABC', '0', ascii('')), + ('(?i)a.c', 'ABC', '0', ascii('ABC')), + ('(?i)a.c', 'AXC', '0', ascii('AXC')), + ('(?i)a.*?c', 'AXYZC', '0', ascii('AXYZC')), + ('(?i)a.*c', 'AXYZD', '', ascii(None)), + ('(?i)a[bc]d', 'ABC', '', ascii(None)), + ('(?i)a[bc]d', 'ABD', '0', ascii('ABD')), + ('(?i)a[b-d]e', 'ABD', '', ascii(None)), + ('(?i)a[b-d]e', 'ACE', '0', ascii('ACE')), + + ('(?i)a[b-d]', 'AAC', '0', ascii('AC')), + ('(?i)a[-b]', 'A-', '0', ascii('A-')), + ('(?i)a[b-]', 'A-', '0', ascii('A-')), + ('(?i)a[b-a]', '-', '', regex.error, self.BAD_CHAR_RANGE), + ('(?i)a[]b', '-', '', regex.error, self.BAD_SET), + ('(?i)a[', '-', '', regex.error, self.BAD_SET), + ('(?i)a]', 'A]', '0', ascii('A]')), + ('(?i)a[]]b', 'A]B', '0', ascii('A]B')), + ('(?i)a[^bc]d', 'AED', '0', ascii('AED')), + ('(?i)a[^bc]d', 'ABD', '', ascii(None)), + + ('(?i)a[^-b]c', 'ADC', '0', ascii('ADC')), + ('(?i)a[^-b]c', 'A-C', '', ascii(None)), + ('(?i)a[^]b]c', 'A]C', '', ascii(None)), + ('(?i)a[^]b]c', 'ADC', '0', ascii('ADC')), + ('(?i)ab|cd', 'ABC', '0', ascii('AB')), + ('(?i)ab|cd', 'ABCD', '0', ascii('AB')), + ('(?i)()ef', 'DEF', '0,1', ascii(('EF', ''))), + ('(?i)*a', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('(?i)(*)b', '-', '', regex.error, self.NOTHING_TO_REPEAT), + ('(?i)$b', 'B', '', ascii(None)), + + ('(?i)a\\', '-', '', regex.error, self.BAD_ESCAPE), + ('(?i)a\\(b', 'A(B', '', ascii(('A(B',))), + ('(?i)a\\(*b', 'AB', '0', ascii('AB')), + ('(?i)a\\(*b', 'A((B', '0', ascii('A((B')), + ('(?i)a\\\\b', 'A\\B', '0', ascii('A\\B')), + ('(?i)abc)', '-', '', regex.error, self.TRAILING_CHARS), + ('(?i)(abc', '-', '', regex.error, self.MISSING_RPAREN), + ('(?i)((a))', 'ABC', '0,1,2', ascii(('A', 'A', 'A'))), + ('(?i)(a)b(c)', 'ABC', '0,1,2', ascii(('ABC', 'A', 'C'))), + ('(?i)a+b+c', 'AABBABC', '0', ascii('ABC')), + + ('(?i)a{1,}b{1,}c', 'AABBABC', '0', ascii('ABC')), + ('(?i)a**', '-', '', regex.error, self.MULTIPLE_REPEAT), + ('(?i)a.+?c', 'ABCABC', '0', ascii('ABC')), + ('(?i)a.*?c', 'ABCABC', '0', ascii('ABC')), + ('(?i)a.{0,5}?c', 'ABCABC', '0', ascii('ABC')), + ('(?i)(a+|b)*', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b){0,}', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b)+', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b){1,}', 'AB', '0,1', ascii(('AB', 'B'))), + ('(?i)(a+|b)?', 'AB', '0,1', ascii(('A', 'A'))), + + ('(?i)(a+|b){0,1}', 'AB', '0,1', ascii(('A', 'A'))), + ('(?i)(a+|b){0,1}?', 'AB', '0,1', ascii(('', None))), + ('(?i))(', '-', '', regex.error, self.TRAILING_CHARS), + ('(?i)[^ab]*', 'CDE', '0', ascii('CDE')), + ('(?i)abc', '', '', ascii(None)), + ('(?i)a*', '', '0', ascii('')), + ('(?i)([abc])*d', 'ABBBCD', '0,1', ascii(('ABBBCD', 'C'))), + ('(?i)([abc])*bcd', 'ABCD', '0,1', ascii(('ABCD', 'A'))), + ('(?i)a|b|c|d|e', 'E', '0', ascii('E')), + ('(?i)(a|b|c|d|e)f', 'EF', '0,1', ascii(('EF', 'E'))), + + ('(?i)abcd*efg', 'ABCDEFG', '0', ascii('ABCDEFG')), + ('(?i)ab*', 'XABYABBBZ', '0', ascii('AB')), + ('(?i)ab*', 'XAYABBBZ', '0', ascii('A')), + ('(?i)(ab|cd)e', 'ABCDE', '0,1', ascii(('CDE', 'CD'))), + ('(?i)[abhgefdc]ij', 'HIJ', '0', ascii('HIJ')), + 
('(?i)^(ab|cd)e', 'ABCDE', '', ascii(None)), + ('(?i)(abc|)ef', 'ABCDEF', '0,1', ascii(('EF', ''))), + ('(?i)(a|b)c*d', 'ABCD', '0,1', ascii(('BCD', 'B'))), + ('(?i)(ab|ab*)bc', 'ABC', '0,1', ascii(('ABC', 'A'))), + ('(?i)a([bc]*)c*', 'ABC', '0,1', ascii(('ABC', 'BC'))), + + ('(?i)a([bc]*)(c*d)', 'ABCD', '0,1,2', ascii(('ABCD', 'BC', 'D'))), + ('(?i)a([bc]+)(c*d)', 'ABCD', '0,1,2', ascii(('ABCD', 'BC', 'D'))), + ('(?i)a([bc]*)(c+d)', 'ABCD', '0,1,2', ascii(('ABCD', 'B', 'CD'))), + ('(?i)a[bcd]*dcdcde', 'ADCDCDE', '0', ascii('ADCDCDE')), + ('(?i)a[bcd]+dcdcde', 'ADCDCDE', '', ascii(None)), + ('(?i)(ab|a)b*c', 'ABC', '0,1', ascii(('ABC', 'AB'))), + ('(?i)((a)(b)c)(d)', 'ABCD', '1,2,3,4', ascii(('ABC', 'A', 'B', + 'D'))), + ('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', '0', ascii('ALPHA')), + ('(?i)^a(bc+|b[eh])g|.h$', 'ABH', '0,1', ascii(('BH', None))), + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', '0,1,2', ascii(('EFFGZ', + 'EFFGZ', None))), + + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', '0,1,2', ascii(('IJ', 'IJ', + 'J'))), + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', '', ascii(None)), + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', '', ascii(None)), + ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', '0,1,2', ascii(('EFFGZ', + 'EFFGZ', None))), + ('(?i)((((((((((a))))))))))', 'A', '10', ascii('A')), + ('(?i)((((((((((a))))))))))\\10', 'AA', '0', ascii('AA')), + #('(?i)((((((((((a))))))))))\\41', 'AA', '', ascii(None)), + #('(?i)((((((((((a))))))))))\\41', 'A!', '0', ascii('A!')), + ('(?i)(((((((((a)))))))))', 'A', '0', ascii('A')), + ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', '1', + ascii('A')), + ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', '1', + ascii('C')), + ('(?i)multiple words of text', 'UH-UH', '', ascii(None)), + + ('(?i)multiple words', 'MULTIPLE WORDS, YEAH', '0', + ascii('MULTIPLE WORDS')), + ('(?i)(.*)c(.*)', 'ABCDE', '0,1,2', ascii(('ABCDE', 'AB', 'DE'))), + ('(?i)\\((.*), (.*)\\)', '(A, B)', '2,1', ascii(('B', 'A'))), + ('(?i)[k]', 'AB', '', ascii(None)), + # ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', ascii(ABCD-$&-\\ABCD)), + # ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', ascii(BC-$1-\\BC)), + ('(?i)a[-]?c', 'AC', '0', ascii('AC')), + ('(?i)(abc)\\1', 'ABCABC', '1', ascii('ABC')), + ('(?i)([a-c]*)\\1', 'ABCABC', '1', ascii('ABC')), + ('a(?!b).', 'abad', '0', ascii('ad')), + ('a(?=d).', 'abad', '0', ascii('ad')), + ('a(?=c|d).', 'abad', '0', ascii('ad')), + + ('a(?:b|c|d)(.)', 'ace', '1', ascii('e')), + ('a(?:b|c|d)*(.)', 'ace', '1', ascii('e')), + ('a(?:b|c|d)+?(.)', 'ace', '1', ascii('e')), + ('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', '1,2', ascii(('c', 'e'))), + + # Lookbehind: split by : but not if it is escaped by -. + ('(?]*?b', 'a>b', '', ascii(None)), + # Bug 490573: minimizing repeat problem. + (r'^a*?$', 'foo', '', ascii(None)), + # Bug 470582: nested groups problem. + (r'^((a)c)?(ab)$', 'ab', '1,2,3', ascii((None, None, 'ab'))), + # Another minimizing repeat problem (capturing groups in assertions). 
+            ('^([ab]*?)(?=(b)?)c', 'abc', '1,2', ascii(('ab', None))),
+            ('^([ab]*?)(?!(b))c', 'abc', '1,2', ascii(('ab', None))),
+            ('^([ab]*?)(?<!(b))c', 'abc', '1,2', ascii(('ab', None))),
+        self.assertEqual(regex.search(r"(?>(.){0,2})d", "abcd").captures(1),
+          ['b', 'c'])
+        self.assertEqual(regex.search(r"(.)+", "a").captures(1), ['a'])
+
+    def test_guards(self):
+        m = regex.search(r"(X.*?Y\s*){3}(X\s*)+AB:",
+          "XY\nX Y\nX Y\nXY\nXX AB:")
+        self.assertEqual(m.span(0, 1, 2), ((3, 21), (12, 15), (16, 18)))
+
+        m = regex.search(r"(X.*?Y\s*){3,}(X\s*)+AB:",
+          "XY\nX Y\nX Y\nXY\nXX AB:")
+        self.assertEqual(m.span(0, 1, 2), ((0, 21), (12, 15), (16, 18)))
+
+        m = regex.search(r'\d{4}(\s*\w)?\W*((?!\d)\w){2}', "9999XX")
+        self.assertEqual(m.span(0, 1, 2), ((0, 6), (-1, -1), (5, 6)))
+
+        m = regex.search(r'A\s*?.*?(\n+.*?\s*?){0,2}\(X', 'A\n1\nS\n1 (X')
+        self.assertEqual(m.span(0, 1), ((0, 10), (5, 8)))
+
+        m = regex.search(r'Derde\s*:', 'aaaaaa:\nDerde:')
+        self.assertEqual(m.span(), (8, 14))
+        m = regex.search(r'Derde\s*:', 'aaaaa:\nDerde:')
+        self.assertEqual(m.span(), (7, 13))
+
+    def test_turkic(self):
+        # Turkish has dotted and dotless I/i.
+        pairs = "I=i;I=\u0131;i=\u0130"
+
+        all_chars = set()
+        matching = set()
+        for pair in pairs.split(";"):
+            ch1, ch2 = pair.split("=")
+            all_chars.update((ch1, ch2))
+            matching.add((ch1, ch1))
+            matching.add((ch1, ch2))
+            matching.add((ch2, ch1))
+            matching.add((ch2, ch2))
+
+        for ch1 in all_chars:
+            for ch2 in all_chars:
+                m = regex.match(r"(?i)\A" + ch1 + r"\Z", ch2)
+                if m:
+                    if (ch1, ch2) not in matching:
+                        self.fail("{} matching {}".format(ascii(ch1),
+                          ascii(ch2)))
+                else:
+                    if (ch1, ch2) in matching:
+                        self.fail("{} not matching {}".format(ascii(ch1),
+                          ascii(ch2)))
+
+    def test_named_lists(self):
+        options = ["one", "two", "three"]
+        self.assertEqual(regex.match(r"333\L<bar>444", "333one444",
+          bar=options).group(), "333one444")
+        self.assertEqual(regex.match(r"(?i)333\L<bar>444", "333TWO444",
+          bar=options).group(), "333TWO444")
+        self.assertEqual(regex.match(r"333\L<bar>444", "333four444",
+          bar=options), None)
+
+        options = [b"one", b"two", b"three"]
+        self.assertEqual(regex.match(br"333\L<bar>444", b"333one444",
+          bar=options).group(), b"333one444")
+        self.assertEqual(regex.match(br"(?i)333\L<bar>444", b"333TWO444",
+          bar=options).group(), b"333TWO444")
+        self.assertEqual(regex.match(br"333\L<bar>444", b"333four444",
+          bar=options), None)
+
+        self.assertEqual(repr(type(regex.compile(r"3\L<bar>4\L<bar>+5",
+          bar=["one", "two", "three"]))), self.PATTERN_CLASS)
+
+        self.assertEqual(regex.findall(r"^\L<options>", "solid QWERT",
+          options=set(['good', 'brilliant', '+s\\ol[i}d'])), [])
+        self.assertEqual(regex.findall(r"^\L<options>", "+solid QWERT",
+          options=set(['good', 'brilliant', '+solid'])), ['+solid'])
+
+        options = ["STRASSE"]
+        self.assertEqual(regex.match(r"(?fi)\L<words>",
+          "stra\N{LATIN SMALL LETTER SHARP S}e", words=options).span(), (0,
+          6))
+
+        options = ["STRASSE", "stress"]
+        self.assertEqual(regex.match(r"(?fi)\L<words>",
+          "stra\N{LATIN SMALL LETTER SHARP S}e", words=options).span(), (0,
+          6))
+
+        options = ["stra\N{LATIN SMALL LETTER SHARP S}e"]
+        self.assertEqual(regex.match(r"(?fi)\L<words>", "STRASSE",
+          words=options).span(), (0, 7))
+
+        options = ["kit"]
+        self.assertEqual(regex.search(r"(?i)\L<words>", "SKITS",
+          words=options).span(), (1, 4))
+        self.assertEqual(regex.search(r"(?i)\L<words>",
+          "SK\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}TS",
+          words=options).span(), (1, 4))
+
+        self.assertEqual(regex.search(r"(?fi)\b(\w+) +\1\b",
+          " stra\N{LATIN SMALL LETTER SHARP S}e STRASSE ").span(), (1, 15))
+        self.assertEqual(regex.search(r"(?fi)\b(\w+) +\1\b",
+          " STRASSE stra\N{LATIN SMALL LETTER SHARP S}e 
").span(), (1, 15)) + + self.assertEqual(regex.search(r"^\L$", "", options=[]).span(), + (0, 0)) + + def test_fuzzy(self): + # Some tests borrowed from TRE library tests. + self.assertEqual(repr(type(regex.compile('(fou){s,e<=1}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(fuu){s}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(fuu){s,e}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(anaconda){1i+1d<1,s<=1}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(anaconda){1i+1d<1,s<=1,e<=10}'))), + self.PATTERN_CLASS) + self.assertEqual(repr(type(regex.compile('(anaconda){s<=1,e<=1,1i+1d<1}'))), + self.PATTERN_CLASS) + + text = 'molasses anaconda foo bar baz smith anderson ' + self.assertEqual(regex.search('(znacnda){s<=1,e<=3,1i+1d<1}', text), + None) + self.assertEqual(regex.search('(znacnda){s<=1,e<=3,1i+1d<2}', + text).span(0, 1), ((9, 17), (9, 17))) + self.assertEqual(regex.search('(ananda){1i+1d<2}', text), None) + self.assertEqual(regex.search(r"(?:\bznacnda){e<=2}", text)[0], + "anaconda") + self.assertEqual(regex.search(r"(?:\bnacnda){e<=2}", text)[0], + "anaconda") + + text = 'anaconda foo bar baz smith anderson' + self.assertEqual(regex.search('(fuu){i<=3,d<=3,e<=5}', text).span(0, + 1), ((0, 0), (0, 0))) + self.assertEqual(regex.search('(?b)(fuu){i<=3,d<=3,e<=5}', + text).span(0, 1), ((9, 10), (9, 10))) + self.assertEqual(regex.search('(fuu){i<=2,d<=2,e<=5}', text).span(0, + 1), ((7, 10), (7, 10))) + self.assertEqual(regex.search('(?e)(fuu){i<=2,d<=2,e<=5}', + text).span(0, 1), ((9, 10), (9, 10))) + self.assertEqual(regex.search('(fuu){i<=3,d<=3,e}', text).span(0, 1), + ((0, 0), (0, 0))) + self.assertEqual(regex.search('(?b)(fuu){i<=3,d<=3,e}', text).span(0, + 1), ((9, 10), (9, 10))) + + self.assertEqual(repr(type(regex.compile('(approximate){s<=3,1i+1d<3}'))), + self.PATTERN_CLASS) + + # No cost limit. + self.assertEqual(regex.search('(foobar){e}', + 'xirefoabralfobarxie').span(0, 1), ((0, 6), (0, 6))) + self.assertEqual(regex.search('(?e)(foobar){e}', + 'xirefoabralfobarxie').span(0, 1), ((0, 3), (0, 3))) + self.assertEqual(regex.search('(?b)(foobar){e}', + 'xirefoabralfobarxie').span(0, 1), ((11, 16), (11, 16))) + + # At most two errors. + self.assertEqual(regex.search('(foobar){e<=2}', + 'xirefoabrzlfd').span(0, 1), ((4, 9), (4, 9))) + self.assertEqual(regex.search('(foobar){e<=2}', 'xirefoabzlfd'), None) + + # At most two inserts or substitutions and max two errors total. + self.assertEqual(regex.search('(foobar){i<=2,s<=2,e<=2}', + 'oobargoobaploowap').span(0, 1), ((5, 11), (5, 11))) + + # Find best whole word match for "foobar". + self.assertEqual(regex.search('\\b(foobar){e}\\b', 'zfoobarz').span(0, + 1), ((0, 8), (0, 8))) + self.assertEqual(regex.search('\\b(foobar){e}\\b', + 'boing zfoobarz goobar woop').span(0, 1), ((0, 6), (0, 6))) + self.assertEqual(regex.search('(?b)\\b(foobar){e}\\b', + 'boing zfoobarz goobar woop').span(0, 1), ((15, 21), (15, 21))) + + # Match whole string, allow only 1 error. 
+ self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobar').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoobar').span(0, + 1), ((0, 7), (0, 7))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobarx').span(0, + 1), ((0, 7), (0, 7))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'fooxbar').span(0, + 1), ((0, 7), (0, 7))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foxbar').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xoobar').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobax').span(0, 1), + ((0, 6), (0, 6))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'oobar').span(0, 1), + ((0, 5), (0, 5))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'fobar').span(0, 1), + ((0, 5), (0, 5))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'fooba').span(0, 1), + ((0, 5), (0, 5))) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoobarx'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foobarxx'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xxfoobar'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'xfoxbar'), None) + self.assertEqual(regex.search('^(foobar){e<=1}$', 'foxbarx'), None) + + # At most one insert, two deletes, and three substitutions. + # Additionally, deletes cost two and substitutes one, and total + # cost must be less than 4. + self.assertEqual(regex.search('(foobar){i<=1,d<=2,s<=3,2d+1s<4}', + '3oifaowefbaoraofuiebofasebfaobfaorfeoaro').span(0, 1), ((6, 13), (6, + 13))) + self.assertEqual(regex.search('(?b)(foobar){i<=1,d<=2,s<=3,2d+1s<4}', + '3oifaowefbaoraofuiebofasebfaobfaorfeoaro').span(0, 1), ((34, 39), + (34, 39))) + + # Partially fuzzy matches. + self.assertEqual(regex.search('foo(bar){e<=1}zap', 'foobarzap').span(0, + 1), ((0, 9), (3, 6))) + self.assertEqual(regex.search('foo(bar){e<=1}zap', 'fobarzap'), None) + self.assertEqual(regex.search('foo(bar){e<=1}zap', 'foobrzap').span(0, + 1), ((0, 8), (3, 5))) + + text = ('www.cnn.com 64.236.16.20\nwww.slashdot.org 66.35.250.150\n' + 'For useful information, use www.slashdot.org\nthis is demo data!\n') + self.assertEqual(regex.search(r'(?s)^.*(dot.org){e}.*$', text).span(0, + 1), ((0, 120), (120, 120))) + self.assertEqual(regex.search(r'(?es)^.*(dot.org){e}.*$', text).span(0, + 1), ((0, 120), (93, 100))) + self.assertEqual(regex.search(r'^.*(dot.org){e}.*$', text).span(0, 1), + ((0, 119), (24, 101))) + + # Behaviour is unexpected, but arguably not wrong. It first finds the + # best match, then the best in what follows, etc. 
+ self.assertEqual(regex.findall(r"\b\L{e<=1}\b", + " book cot dog desk ", words="cat dog".split()), ["cot", "dog"]) + self.assertEqual(regex.findall(r"\b\L{e<=1}\b", + " book dog cot desk ", words="cat dog".split()), [" dog", "cot"]) + self.assertEqual(regex.findall(r"(?e)\b\L{e<=1}\b", + " book dog cot desk ", words="cat dog".split()), ["dog", "cot"]) + self.assertEqual(regex.findall(r"(?r)\b\L{e<=1}\b", + " book cot dog desk ", words="cat dog".split()), ["dog ", "cot"]) + self.assertEqual(regex.findall(r"(?er)\b\L{e<=1}\b", + " book cot dog desk ", words="cat dog".split()), ["dog", "cot"]) + self.assertEqual(regex.findall(r"(?r)\b\L{e<=1}\b", + " book dog cot desk ", words="cat dog".split()), ["cot", "dog"]) + self.assertEqual(regex.findall(br"\b\L{e<=1}\b", + b" book cot dog desk ", words=b"cat dog".split()), [b"cot", b"dog"]) + self.assertEqual(regex.findall(br"\b\L{e<=1}\b", + b" book dog cot desk ", words=b"cat dog".split()), [b" dog", b"cot"]) + self.assertEqual(regex.findall(br"(?e)\b\L{e<=1}\b", + b" book dog cot desk ", words=b"cat dog".split()), [b"dog", b"cot"]) + self.assertEqual(regex.findall(br"(?r)\b\L{e<=1}\b", + b" book cot dog desk ", words=b"cat dog".split()), [b"dog ", b"cot"]) + self.assertEqual(regex.findall(br"(?er)\b\L{e<=1}\b", + b" book cot dog desk ", words=b"cat dog".split()), [b"dog", b"cot"]) + self.assertEqual(regex.findall(br"(?r)\b\L{e<=1}\b", + b" book dog cot desk ", words=b"cat dog".split()), [b"cot", b"dog"]) + + self.assertEqual(regex.search(r"(\w+) (\1{e<=1})", "foo fou").groups(), + ("foo", "fou")) + self.assertEqual(regex.search(r"(?r)(\2{e<=1}) (\w+)", + "foo fou").groups(), ("foo", "fou")) + self.assertEqual(regex.search(br"(\w+) (\1{e<=1})", + b"foo fou").groups(), (b"foo", b"fou")) + + self.assertEqual(regex.findall(r"(?:(?:QR)+){e}", "abcde"), ["abcde", + ""]) + self.assertEqual(regex.findall(r"(?:Q+){e}", "abc"), ["abc", ""]) + + # Hg issue 41: = for fuzzy matches + self.assertEqual(regex.match(r"(?:service detection){0[^()]+)|(?R))*\)", "(ab(cd)ef)")[ + : ], ("(ab(cd)ef)", "ef")) + self.assertEqual(regex.search(r"\(((?>[^()]+)|(?R))*\)", + "(ab(cd)ef)").captures(1), ["ab", "cd", "(cd)", "ef"]) + + self.assertEqual(regex.search(r"(?r)\(((?R)|(?>[^()]+))*\)", + "(ab(cd)ef)")[ : ], ("(ab(cd)ef)", "ab")) + self.assertEqual(regex.search(r"(?r)\(((?R)|(?>[^()]+))*\)", + "(ab(cd)ef)").captures(1), ["ef", "cd", "(cd)", "ab"]) + + self.assertEqual(regex.search(r"\(([^()]+|(?R))*\)", + "some text (a(b(c)d)e) more text")[ : ], ("(a(b(c)d)e)", "e")) + + self.assertEqual(regex.search(r"(?r)\(((?R)|[^()]+)*\)", + "some text (a(b(c)d)e) more text")[ : ], ("(a(b(c)d)e)", "a")) + + self.assertEqual(regex.search(r"(foo(\(((?:(?>[^()]+)|(?2))*)\)))", + "foo(bar(baz)+baz(bop))")[ : ], ("foo(bar(baz)+baz(bop))", + "foo(bar(baz)+baz(bop))", "(bar(baz)+baz(bop))", + "bar(baz)+baz(bop)")) + + self.assertEqual(regex.search(r"(?r)(foo(\(((?:(?2)|(?>[^()]+))*)\)))", + "foo(bar(baz)+baz(bop))")[ : ], ("foo(bar(baz)+baz(bop))", + "foo(bar(baz)+baz(bop))", "(bar(baz)+baz(bop))", + "bar(baz)+baz(bop)")) + + rgx = regex.compile(r"""^\s*(<\s*([a-zA-Z:]+)(?:\s*[a-zA-Z:]*\s*=\s*(?:'[^']*'|"[^"]*"))*\s*(/\s*)?>(?:[^<>]*|(?1))*(?(3)|<\s*/\s*\2\s*>))\s*$""") + self.assertEqual(bool(rgx.search('')), True) + self.assertEqual(bool(rgx.search('')), False) + self.assertEqual(bool(rgx.search('')), True) + self.assertEqual(bool(rgx.search('')), False) + self.assertEqual(bool(rgx.search('')), False) + + self.assertEqual(bool(rgx.search('')), False) + 
self.assertEqual(bool(rgx.search('')), True) + self.assertEqual(bool(rgx.search('< fooo / >')), True) + # The next regex should and does match. Perl 5.14 agrees. + #self.assertEqual(bool(rgx.search('foo')), False) + self.assertEqual(bool(rgx.search('foo')), False) + + self.assertEqual(bool(rgx.search('foo')), True) + self.assertEqual(bool(rgx.search('foo')), True) + self.assertEqual(bool(rgx.search('')), True) + + def test_copy(self): + # PatternObjects are immutable, therefore there's no need to clone them. + r = regex.compile("a") + self.assertTrue(copy.copy(r) is r) + self.assertTrue(copy.deepcopy(r) is r) + + # MatchObjects are normally mutable because the target string can be + # detached. However, after the target string has been detached, a + # MatchObject becomes immutable, so there's no need to clone it. + m = r.match("a") + self.assertTrue(copy.copy(m) is not m) + self.assertTrue(copy.deepcopy(m) is not m) + + self.assertTrue(m.string is not None) + m2 = copy.copy(m) + m2.detach_string() + self.assertTrue(m.string is not None) + self.assertTrue(m2.string is None) + + # The following behaviour matches that of the re module. + it = regex.finditer(".", "ab") + it2 = copy.copy(it) + self.assertEqual(next(it).group(), "a") + self.assertEqual(next(it2).group(), "b") + + # The following behaviour matches that of the re module. + it = regex.finditer(".", "ab") + it2 = copy.deepcopy(it) + self.assertEqual(next(it).group(), "a") + self.assertEqual(next(it2).group(), "b") + + # The following behaviour is designed to match that of copying 'finditer'. + it = regex.splititer(" ", "a b") + it2 = copy.copy(it) + self.assertEqual(next(it), "a") + self.assertEqual(next(it2), "b") + + # The following behaviour is designed to match that of copying 'finditer'. 
+ it = regex.splititer(" ", "a b") + it2 = copy.deepcopy(it) + self.assertEqual(next(it), "a") + self.assertEqual(next(it2), "b") + + def test_format(self): + self.assertEqual(regex.subf(r"(\w+) (\w+)", "{0} => {2} {1}", + "foo bar"), "foo bar => bar foo") + self.assertEqual(regex.subf(r"(?\w+) (?\w+)", + "{word2} {word1}", "foo bar"), "bar foo") + + self.assertEqual(regex.subfn(r"(\w+) (\w+)", "{0} => {2} {1}", + "foo bar"), ("foo bar => bar foo", 1)) + self.assertEqual(regex.subfn(r"(?\w+) (?\w+)", + "{word2} {word1}", "foo bar"), ("bar foo", 1)) + + self.assertEqual(regex.match(r"(\w+) (\w+)", + "foo bar").expandf("{0} => {2} {1}"), "foo bar => bar foo") + + def test_fullmatch(self): + self.assertEqual(bool(regex.fullmatch(r"abc", "abc")), True) + self.assertEqual(bool(regex.fullmatch(r"abc", "abcx")), False) + self.assertEqual(bool(regex.fullmatch(r"abc", "abcx", endpos=3)), True) + + self.assertEqual(bool(regex.fullmatch(r"abc", "xabc", pos=1)), True) + self.assertEqual(bool(regex.fullmatch(r"abc", "xabcy", pos=1)), False) + self.assertEqual(bool(regex.fullmatch(r"abc", "xabcy", pos=1, + endpos=4)), True) + + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abc")), True) + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abcx")), False) + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "abcx", endpos=3)), + True) + + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabc", pos=1)), + True) + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabcy", pos=1)), + False) + self.assertEqual(bool(regex.fullmatch(r"(?r)abc", "xabcy", pos=1, + endpos=4)), True) + + def test_issue_18468(self): + self.assertTypedEqual(regex.sub('y', 'a', 'xyz'), 'xaz') + self.assertTypedEqual(regex.sub('y', StrSubclass('a'), + StrSubclass('xyz')), 'xaz') + self.assertTypedEqual(regex.sub(b'y', b'a', b'xyz'), b'xaz') + self.assertTypedEqual(regex.sub(b'y', BytesSubclass(b'a'), + BytesSubclass(b'xyz')), b'xaz') + self.assertTypedEqual(regex.sub(b'y', bytearray(b'a'), + bytearray(b'xyz')), b'xaz') + self.assertTypedEqual(regex.sub(b'y', memoryview(b'a'), + memoryview(b'xyz')), b'xaz') + + for string in ":a:b::c", StrSubclass(":a:b::c"): + self.assertTypedEqual(regex.split(":", string), ['', 'a', 'b', '', + 'c']) + if sys.version_info >= (3, 7, 0): + self.assertTypedEqual(regex.split(":*", string), ['', '', 'a', + '', 'b', '', 'c', '']) + self.assertTypedEqual(regex.split("(:*)", string), ['', ':', + '', '', 'a', ':', '', '', 'b', '::', '', '', 'c', '', '']) + else: + self.assertTypedEqual(regex.split(":*", string), ['', 'a', 'b', + 'c']) + self.assertTypedEqual(regex.split("(:*)", string), ['', ':', + 'a', ':', 'b', '::', 'c']) + + for string in (b":a:b::c", BytesSubclass(b":a:b::c"), + bytearray(b":a:b::c"), memoryview(b":a:b::c")): + self.assertTypedEqual(regex.split(b":", string), [b'', b'a', b'b', + b'', b'c']) + if sys.version_info >= (3, 7, 0): + self.assertTypedEqual(regex.split(b":*", string), [b'', b'', + b'a', b'', b'b', b'', b'c', b'']) + self.assertTypedEqual(regex.split(b"(:*)", string), [b'', b':', + b'', b'', b'a', b':', b'', b'', b'b', b'::', b'', b'', b'c', + b'', b'']) + else: + self.assertTypedEqual(regex.split(b":*", string), [b'', b'a', + b'b', b'c']) + self.assertTypedEqual(regex.split(b"(:*)", string), [b'', b':', + b'a', b':', b'b', b'::', b'c']) + + for string in "a:b::c:::d", StrSubclass("a:b::c:::d"): + self.assertTypedEqual(regex.findall(":+", string), [":", "::", + ":::"]) + self.assertTypedEqual(regex.findall("(:+)", string), [":", "::", + ":::"]) + 
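The test_format assertions above rely on subf()/subfn(), which treat the replacement as a str.format template: {0} is the whole match and named groups become named fields. A one-line sketch (the group names here are illustrative, not the ones from the test):

import regex

print(regex.subf(r"(?P<first>\w+) (?P<second>\w+)", "{second} {first}",
                 "foo bar"))   # 'bar foo'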
self.assertTypedEqual(regex.findall("(:)(:*)", string), [(":", ""), + (":", ":"), (":", "::")]) + + for string in (b"a:b::c:::d", BytesSubclass(b"a:b::c:::d"), + bytearray(b"a:b::c:::d"), memoryview(b"a:b::c:::d")): + self.assertTypedEqual(regex.findall(b":+", string), [b":", b"::", + b":::"]) + self.assertTypedEqual(regex.findall(b"(:+)", string), [b":", b"::", + b":::"]) + self.assertTypedEqual(regex.findall(b"(:)(:*)", string), [(b":", + b""), (b":", b":"), (b":", b"::")]) + + for string in 'a', StrSubclass('a'): + self.assertEqual(regex.match('a', string).groups(), ()) + self.assertEqual(regex.match('(a)', string).groups(), ('a',)) + self.assertEqual(regex.match('(a)', string).group(0), 'a') + self.assertEqual(regex.match('(a)', string).group(1), 'a') + self.assertEqual(regex.match('(a)', string).group(1, 1), ('a', + 'a')) + + for string in (b'a', BytesSubclass(b'a'), bytearray(b'a'), + memoryview(b'a')): + self.assertEqual(regex.match(b'a', string).groups(), ()) + self.assertEqual(regex.match(b'(a)', string).groups(), (b'a',)) + self.assertEqual(regex.match(b'(a)', string).group(0), b'a') + self.assertEqual(regex.match(b'(a)', string).group(1), b'a') + self.assertEqual(regex.match(b'(a)', string).group(1, 1), (b'a', + b'a')) + + def test_partial(self): + self.assertEqual(regex.match('ab', 'a', partial=True).partial, True) + self.assertEqual(regex.match('ab', 'a', partial=True).span(), (0, 1)) + self.assertEqual(regex.match(r'cats', 'cat', partial=True).partial, + True) + self.assertEqual(regex.match(r'cats', 'cat', partial=True).span(), (0, + 3)) + self.assertEqual(regex.match(r'cats', 'catch', partial=True), None) + self.assertEqual(regex.match(r'abc\w{3}', 'abcdef', + partial=True).partial, False) + self.assertEqual(regex.match(r'abc\w{3}', 'abcdef', + partial=True).span(), (0, 6)) + self.assertEqual(regex.match(r'abc\w{3}', 'abcde', + partial=True).partial, True) + self.assertEqual(regex.match(r'abc\w{3}', 'abcde', + partial=True).span(), (0, 5)) + + self.assertEqual(regex.match(r'\d{4}$', '1234', partial=True).partial, + False) + + self.assertEqual(regex.match(r'\L', 'post', partial=True, + words=['post']).partial, False) + self.assertEqual(regex.match(r'\L', 'post', partial=True, + words=['post']).span(), (0, 4)) + self.assertEqual(regex.match(r'\L', 'pos', partial=True, + words=['post']).partial, True) + self.assertEqual(regex.match(r'\L', 'pos', partial=True, + words=['post']).span(), (0, 3)) + + self.assertEqual(regex.match(r'(?fi)\L', 'POST', partial=True, + words=['po\uFB06']).partial, False) + self.assertEqual(regex.match(r'(?fi)\L', 'POST', partial=True, + words=['po\uFB06']).span(), (0, 4)) + self.assertEqual(regex.match(r'(?fi)\L', 'POS', partial=True, + words=['po\uFB06']).partial, True) + self.assertEqual(regex.match(r'(?fi)\L', 'POS', partial=True, + words=['po\uFB06']).span(), (0, 3)) + self.assertEqual(regex.match(r'(?fi)\L', 'po\uFB06', + partial=True, words=['POS']), None) + + self.assertEqual(regex.match(r'[a-z]*4R$', 'a', partial=True).span(), + (0, 1)) + self.assertEqual(regex.match(r'[a-z]*4R$', 'ab', partial=True).span(), + (0, 2)) + self.assertEqual(regex.match(r'[a-z]*4R$', 'ab4', partial=True).span(), + (0, 3)) + self.assertEqual(regex.match(r'[a-z]*4R$', 'a4', partial=True).span(), + (0, 2)) + self.assertEqual(regex.match(r'[a-z]*4R$', 'a4R', partial=True).span(), + (0, 3)) + self.assertEqual(regex.match(r'[a-z]*4R$', '4a', partial=True), None) + self.assertEqual(regex.match(r'[a-z]*4R$', 'a44', partial=True), None) + + def test_hg_bugs(self): + # Hg 
issue 28: regex.compile("(?>b)") causes "TypeError: 'Character' + # object is not subscriptable" + self.assertEqual(bool(regex.compile("(?>b)", flags=regex.V1)), True) + + # Hg issue 29: regex.compile("^((?>\w+)|(?>\s+))*$") causes + # "TypeError: 'GreedyRepeat' object is not iterable" + self.assertEqual(bool(regex.compile(r"^((?>\w+)|(?>\s+))*$", + flags=regex.V1)), True) + + # Hg issue 31: atomic and normal groups in recursive patterns + self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)", + "a(bcd(e)f)g(h)"), ['(bcd(e)f)', '(h)']) + self.assertEqual(regex.findall(r"\((?:(?:[^()]+)|(?R))*\)", + "a(bcd(e)f)g(h)"), ['(bcd(e)f)', '(h)']) + self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)", + "a(b(cd)e)f)g)h"), ['(b(cd)e)']) + self.assertEqual(regex.findall(r"\((?:(?>[^()]+)|(?R))*\)", + "a(bc(d(e)f)gh"), ['(d(e)f)']) + self.assertEqual(regex.findall(r"(?r)\((?:(?>[^()]+)|(?R))*\)", + "a(bc(d(e)f)gh"), ['(d(e)f)']) + self.assertEqual([m.group() for m in + regex.finditer(r"\((?:[^()]*+|(?0))*\)", "a(b(c(de)fg)h")], + ['(c(de)fg)']) + + # Hg issue 32: regex.search("a(bc)d", "abcd", regex.I|regex.V1) returns + # None + self.assertEqual(regex.search("a(bc)d", "abcd", regex.I | + regex.V1).group(0), "abcd") + + # Hg issue 33: regex.search("([\da-f:]+)$", "E", regex.I|regex.V1) + # returns None + self.assertEqual(regex.search(r"([\da-f:]+)$", "E", regex.I | + regex.V1).group(0), "E") + self.assertEqual(regex.search(r"([\da-f:]+)$", "e", regex.I | + regex.V1).group(0), "e") + + # Hg issue 34: regex.search("^(?=ab(de))(abd)(e)", "abde").groups() + # returns (None, 'abd', 'e') instead of ('de', 'abd', 'e') + self.assertEqual(regex.search("^(?=ab(de))(abd)(e)", "abde").groups(), + ('de', 'abd', 'e')) + + # Hg issue 35: regex.compile("\ ", regex.X) causes "_regex_core.error: + # bad escape" + self.assertEqual(bool(regex.match(r"\ ", " ", flags=regex.X)), True) + + # Hg issue 36: regex.search("^(a|)\1{2}b", "b") returns None + self.assertEqual(regex.search(r"^(a|)\1{2}b", "b").group(0, 1), ('b', + '')) + + # Hg issue 37: regex.search("^(a){0,0}", "abc").group(0,1) returns + # ('a', 'a') instead of ('', None) + self.assertEqual(regex.search("^(a){0,0}", "abc").group(0, 1), ('', + None)) + + # Hg issue 38: regex.search("(?>.*/)b", "a/b") returns None + self.assertEqual(regex.search("(?>.*/)b", "a/b").group(0), "a/b") + + # Hg issue 39: regex.search("((?i)blah)\\s+\\1", "blah BLAH") doesn't + # return None + # Changed to positional flags in regex 2023.12.23. 
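+        # Illustrative aside (behaviour under the positional-flags change
+        # noted above): an inline flag such as (?i) now applies only from the
+        # point where it appears, so case-insensitivity can be scoped to one
+        # part of the pattern while the rest stays case-sensitive.
+        self.assertEqual(bool(regex.search(r"(?i:blah)\s+blah", "BLAH blah")), True)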
+ self.assertEqual(regex.search(r"((?i)blah)\s+\1", "blah BLAH"), None) + + # Hg issue 40: regex.search("(\()?[^()]+(?(1)\)|)", "(abcd").group(0) + # returns "bcd" instead of "abcd" + self.assertEqual(regex.search(r"(\()?[^()]+(?(1)\)|)", + "(abcd").group(0), "abcd") + + # Hg issue 42: regex.search("(a*)*", "a", flags=regex.V1).span(1) + # returns (0, 1) instead of (1, 1) + self.assertEqual(regex.search("(a*)*", "a").span(1), (1, 1)) + self.assertEqual(regex.search("(a*)*", "aa").span(1), (2, 2)) + self.assertEqual(regex.search("(a*)*", "aaa").span(1), (3, 3)) + + # Hg issue 43: regex.compile("a(?#xxx)*") causes "_regex_core.error: + # nothing to repeat" + self.assertEqual(regex.search("a(?#xxx)*", "aaa").group(), "aaa") + + # Hg issue 44: regex.compile("(?=abc){3}abc") causes + # "_regex_core.error: nothing to repeat" + self.assertEqual(regex.search("(?=abc){3}abc", "abcabcabc").span(), (0, + 3)) + + # Hg issue 45: regex.compile("^(?:a(?:(?:))+)+") causes + # "_regex_core.error: nothing to repeat" + self.assertEqual(regex.search("^(?:a(?:(?:))+)+", "a").span(), (0, 1)) + self.assertEqual(regex.search("^(?:a(?:(?:))+)+", "aa").span(), (0, 2)) + + # Hg issue 46: regex.compile("a(?x: b c )d") causes + # "_regex_core.error: missing )" + self.assertEqual(regex.search("a(?x: b c )d", "abcd").group(0), "abcd") + + # Hg issue 47: regex.compile("a#comment\n*", flags=regex.X) causes + # "_regex_core.error: nothing to repeat" + self.assertEqual(regex.search("a#comment\n*", "aaa", + flags=regex.X).group(0), "aaa") + + # Hg issue 48: regex.search("(a(?(1)\\1)){4}", "a"*10, + # flags=regex.V1).group(0,1) returns ('aaaaa', 'a') instead of ('aaaaaaaaaa', 'aaaa') + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){1}", + "aaaaaaaaaa").span(0, 1), ((0, 1), (0, 1))) + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){2}", + "aaaaaaaaaa").span(0, 1), ((0, 3), (1, 3))) + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){3}", + "aaaaaaaaaa").span(0, 1), ((0, 6), (3, 6))) + self.assertEqual(regex.search(r"(?V1)(a(?(1)\1)){4}", + "aaaaaaaaaa").span(0, 1), ((0, 10), (6, 10))) + + # Hg issue 49: regex.search("(a)(?<=b(?1))", "baz", regex.V1) returns + # None incorrectly + self.assertEqual(regex.search("(?V1)(a)(?<=b(?1))", "baz").group(0), + "a") + + # Hg issue 50: not all keywords are found by named list with + # overlapping keywords when full Unicode casefolding is required + self.assertEqual(regex.findall(r'(?fi)\L', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05', + keywords=['post','pos']), ['POST', 'Post', 'post', 'po\u017Ft', + 'po\uFB06', 'po\uFB05']) + self.assertEqual(regex.findall(r'(?fi)pos|post', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POS', + 'Pos', 'pos', 'po\u017F', 'po\uFB06', 'po\uFB05']) + self.assertEqual(regex.findall(r'(?fi)post|pos', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POST', + 'Post', 'post', 'po\u017Ft', 'po\uFB06', 'po\uFB05']) + self.assertEqual(regex.findall(r'(?fi)post|another', + 'POST, Post, post, po\u017Ft, po\uFB06, and po\uFB05'), ['POST', + 'Post', 'post', 'po\u017Ft', 'po\uFB06', 'po\uFB05']) + + # Hg issue 51: regex.search("((a)(?1)|(?2))", "a", flags=regex.V1) + # returns None incorrectly + self.assertEqual(regex.search("(?V1)((a)(?1)|(?2))", "a").group(0, 1, + 2), ('a', 'a', None)) + + # Hg issue 52: regex.search("(\\1xx|){6}", "xx", + # flags=regex.V1).span(0,1) returns incorrect value + self.assertEqual(regex.search(r"(?V1)(\1xx|){6}", "xx").span(0, 1), + ((0, 2), (2, 2))) + + # Hg issue 53: regex.search("(a|)+", "a") causes 
MemoryError + self.assertEqual(regex.search("(a|)+", "a").group(0, 1), ("a", "")) + + # Hg issue 54: regex.search("(a|)*\\d", "a"*80) causes MemoryError + self.assertEqual(regex.search(r"(a|)*\d", "a" * 80), None) + + # Hg issue 55: regex.search("^(?:a?b?)*$", "ac") take a very long time. + self.assertEqual(regex.search("^(?:a?b?)*$", "ac"), None) + + # Hg issue 58: bad named character escape sequences like "\\N{1}" + # treats as "N" + self.assertRaisesRegex(regex.error, self.UNDEF_CHAR_NAME, lambda: + regex.compile("\\N{1}")) + + # Hg issue 59: regex.search("\\Z", "a\na\n") returns None incorrectly + self.assertEqual(regex.search("\\Z", "a\na\n").span(0), (4, 4)) + + # Hg issue 60: regex.search("(q1|.)*(q2|.)*(x(a|bc)*y){2,}", "xayxay") + # returns None incorrectly + self.assertEqual(regex.search("(q1|.)*(q2|.)*(x(a|bc)*y){2,}", + "xayxay").group(0), "xayxay") + + # Hg issue 61: regex.search("[^a]", "A", regex.I).group(0) returns '' + # incorrectly + self.assertEqual(regex.search("(?i)[^a]", "A"), None) + + # Hg issue 63: regex.search("[[:ascii:]]", "\N{KELVIN SIGN}", + # flags=regex.I|regex.V1) doesn't return None + self.assertEqual(regex.search("(?i)[[:ascii:]]", "\N{KELVIN SIGN}"), + None) + + # Hg issue 66: regex.search("((a|b(?1)c){3,5})", "baaaaca", + # flags=regex.V1).groups() returns ('baaaac', 'baaaac') instead of ('aaaa', 'a') + self.assertEqual(regex.search("((a|b(?1)c){3,5})", "baaaaca").group(0, + 1, 2), ('aaaa', 'aaaa', 'a')) + + # Hg issue 71: non-greedy quantifier in lookbehind + self.assertEqual(regex.findall(r"(?<=:\S+ )\w+", ":9 abc :10 def"), + ['abc', 'def']) + self.assertEqual(regex.findall(r"(?<=:\S* )\w+", ":9 abc :10 def"), + ['abc', 'def']) + self.assertEqual(regex.findall(r"(?<=:\S+? )\w+", ":9 abc :10 def"), + ['abc', 'def']) + self.assertEqual(regex.findall(r"(?<=:\S*? )\w+", ":9 abc :10 def"), + ['abc', 'def']) + + # Hg issue 73: conditional patterns + self.assertEqual(regex.search(r"(?:fe)?male", "female").group(), + "female") + self.assertEqual([m.group() for m in + regex.finditer(r"(fe)?male: h(?(1)(er)|(is)) (\w+)", + "female: her dog; male: his cat. asdsasda")], ['female: her dog', + 'male: his cat']) + + # Hg issue 78: "Captures" doesn't work for recursive calls + self.assertEqual(regex.search(r'(?\((?:[^()]++|(?&rec))*\))', + 'aaa(((1+0)+1)+1)bbb').captures('rec'), ['(1+0)', '((1+0)+1)', + '(((1+0)+1)+1)']) + + # Hg issue 80: Escape characters throws an exception + self.assertRaisesRegex(regex.error, self.BAD_ESCAPE, lambda: + regex.sub('x', '\\', 'x'), ) + + # Hg issue 82: error range does not work + fz = "(CAGCCTCCCATTTCAGAATATACATCC){1a(?b))', "ab").spans("x"), [(1, + 2), (0, 2)]) + + # Hg issue 91: match.expand is extremely slow + # Check that the replacement cache works. 
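+        # Illustrative aside: Match.expand() applies a replacement template
+        # to an existing match, like re.Match.expand(); repeated calls with
+        # the same template should reuse the parsed-template cache checked
+        # below.
+        self.assertEqual(regex.match(r'(\w+) (\w+)', 'foo bar').expand(r'\2 \1'), 'bar foo')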
+ self.assertEqual(regex.sub(r'(-)', lambda m: m.expand(r'x'), 'a-b-c'), + 'axbxc') + + # Hg issue 94: Python crashes when executing regex updates + # pattern.findall + rx = regex.compile(r'\bt(est){i<2}', flags=regex.V1) + self.assertEqual(rx.search("Some text"), None) + self.assertEqual(rx.findall("Some text"), []) + + # Hg issue 95: 'pos' for regex.error + self.assertRaisesRegex(regex.error, self.MULTIPLE_REPEAT, lambda: + regex.compile(r'.???')) + + # Hg issue 97: behaviour of regex.escape's special_only is wrong + # + # Hg issue 244: Make `special_only=True` the default in + # `regex.escape()` + self.assertEqual(regex.escape('foo!?', special_only=False), 'foo\\!\\?') + self.assertEqual(regex.escape('foo!?', special_only=True), 'foo!\\?') + self.assertEqual(regex.escape('foo!?'), 'foo!\\?') + + self.assertEqual(regex.escape(b'foo!?', special_only=False), b'foo\\!\\?') + self.assertEqual(regex.escape(b'foo!?', special_only=True), + b'foo!\\?') + self.assertEqual(regex.escape(b'foo!?'), b'foo!\\?') + + # Hg issue 100: strange results from regex.search + self.assertEqual(regex.search('^([^z]*(?:WWWi|W))?$', + 'WWWi').groups(), ('WWWi', )) + self.assertEqual(regex.search('^([^z]*(?:WWWi|w))?$', + 'WWWi').groups(), ('WWWi', )) + self.assertEqual(regex.search('^([^z]*?(?:WWWi|W))?$', + 'WWWi').groups(), ('WWWi', )) + + # Hg issue 101: findall() broken (seems like memory corruption) + pat = regex.compile(r'xxx', flags=regex.FULLCASE | regex.UNICODE) + self.assertEqual([x.group() for x in pat.finditer('yxxx')], ['xxx']) + self.assertEqual(pat.findall('yxxx'), ['xxx']) + + raw = 'yxxx' + self.assertEqual([x.group() for x in pat.finditer(raw)], ['xxx']) + self.assertEqual(pat.findall(raw), ['xxx']) + + pat = regex.compile(r'xxx', flags=regex.FULLCASE | regex.IGNORECASE | + regex.UNICODE) + self.assertEqual([x.group() for x in pat.finditer('yxxx')], ['xxx']) + self.assertEqual(pat.findall('yxxx'), ['xxx']) + + raw = 'yxxx' + self.assertEqual([x.group() for x in pat.finditer(raw)], ['xxx']) + self.assertEqual(pat.findall(raw), ['xxx']) + + # Hg issue 106: * operator not working correctly with sub() + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub('(?V0).*', 'x', 'test'), 'xx') + else: + self.assertEqual(regex.sub('(?V0).*', 'x', 'test'), 'x') + self.assertEqual(regex.sub('(?V1).*', 'x', 'test'), 'xx') + + if sys.version_info >= (3, 7, 0): + self.assertEqual(regex.sub('(?V0).*?', '|', 'test'), '|||||||||') + else: + self.assertEqual(regex.sub('(?V0).*?', '|', 'test'), '|t|e|s|t|') + self.assertEqual(regex.sub('(?V1).*?', '|', 'test'), '|||||||||') + + # Hg issue 112: re: OK, but regex: SystemError + self.assertEqual(regex.sub(r'^(@)\n(?!.*?@)(.*)', + r'\1\n==========\n\2', '@\n', flags=regex.DOTALL), '@\n==========\n') + + # Hg issue 109: Edit distance of fuzzy match + self.assertEqual(regex.match(r'(?:cats|cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats|cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match(r'(?b)(?:cats|cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + + self.assertEqual(regex.match(r'(?:cat){e<=1}', 'caz').fuzzy_counts, + (1, 0, 0)) + self.assertEqual(regex.match(r'(?e)(?:cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match(r'(?b)(?:cat){e<=1}', + 'caz').fuzzy_counts, (1, 0, 0)) + + self.assertEqual(regex.match(r'(?:cats){e<=2}', 'c ats').fuzzy_counts, + (1, 1, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats){e<=2}', + 'c ats').fuzzy_counts, (0, 1, 0)) + 
self.assertEqual(regex.match(r'(?b)(?:cats){e<=2}', + 'c ats').fuzzy_counts, (0, 1, 0)) + + self.assertEqual(regex.match(r'(?:cats){e<=2}', + 'c a ts').fuzzy_counts, (0, 2, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats){e<=2}', + 'c a ts').fuzzy_counts, (0, 2, 0)) + self.assertEqual(regex.match(r'(?b)(?:cats){e<=2}', + 'c a ts').fuzzy_counts, (0, 2, 0)) + + self.assertEqual(regex.match(r'(?:cats){e<=1}', 'c ats').fuzzy_counts, + (0, 1, 0)) + self.assertEqual(regex.match(r'(?e)(?:cats){e<=1}', + 'c ats').fuzzy_counts, (0, 1, 0)) + self.assertEqual(regex.match(r'(?b)(?:cats){e<=1}', + 'c ats').fuzzy_counts, (0, 1, 0)) + + # Hg issue 115: Infinite loop when processing backreferences + self.assertEqual(regex.findall(r'\bof ([a-z]+) of \1\b', + 'To make use of one of these modules'), []) + + # Hg issue 125: Reference to entire match (\g<0>) in + # Pattern.sub() doesn't work as of 2014.09.22 release. + self.assertEqual(regex.sub(r'x', r'\g<0>', 'x'), 'x') + + # Unreported issue: no such builtin as 'ascii' in Python 2. + self.assertEqual(bool(regex.match(r'a', 'a', regex.DEBUG)), True) + + # Hg issue 131: nested sets behaviour + self.assertEqual(regex.findall(r'(?V1)[[b-e]--cd]', 'abcdef'), ['b', + 'e']) + self.assertEqual(regex.findall(r'(?V1)[b-e--cd]', 'abcdef'), ['b', + 'e']) + self.assertEqual(regex.findall(r'(?V1)[[bcde]--cd]', 'abcdef'), ['b', + 'e']) + self.assertEqual(regex.findall(r'(?V1)[bcde--cd]', 'abcdef'), ['b', + 'e']) + + # Hg issue 132: index out of range on null property \p{} + self.assertRaisesRegex(regex.error, '^unknown property at position 4$', + lambda: regex.compile(r'\p{}')) + + # Issue 23692. + self.assertEqual(regex.match('(?:()|(?(1)()|z)){2}(?(2)a|z)', + 'a').group(0, 1, 2), ('a', '', '')) + self.assertEqual(regex.match('(?:()|(?(1)()|z)){0,2}(?(2)a|z)', + 'a').group(0, 1, 2), ('a', '', '')) + + # Hg issue 137: Posix character class :punct: does not seem to be + # supported. + + # Posix compatibility as recommended here: + # http://www.unicode.org/reports/tr18/#Compatibility_Properties + + # Posix in Unicode. 
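+        # Illustrative aside: [[:alpha:]] is the POSIX spelling of \p{Alpha};
+        # the equivalences below verify each POSIX class against its Unicode
+        # property across the BMP.
+        self.assertEqual(regex.findall(r'[[:alpha:]]+', 'ab1cd'), ['ab', 'cd'])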
+ chars = ''.join(chr(c) for c in range(0x10000)) + + self.assertEqual(ascii(''.join(regex.findall(r'''[[:alnum:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{Alpha}\p{PosixDigit}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:alpha:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Alpha}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:ascii:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{InBasicLatin}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:blank:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{gc=Space_Separator}\t]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:cntrl:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{gc=Control}+''', chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:digit:]]+''', + chars))), ascii(''.join(regex.findall(r'''[0-9]+''', chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:graph:]]+''', + chars))), ascii(''.join(regex.findall(r'''[^\p{Space}\p{gc=Control}\p{gc=Surrogate}\p{gc=Unassigned}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:lower:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Lower}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:print:]]+''', + chars))), ascii(''.join(regex.findall(r'''(?V1)[\p{Graph}\p{Blank}--\p{Cntrl}]+''', chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:punct:]]+''', + chars))), + ascii(''.join(regex.findall(r'''(?V1)[\p{gc=Punctuation}\p{gc=Symbol}--\p{Alpha}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:space:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Whitespace}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:upper:]]+''', + chars))), ascii(''.join(regex.findall(r'''\p{Upper}+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:word:]]+''', + chars))), ascii(''.join(regex.findall(r'''[\p{Alpha}\p{gc=Mark}\p{Digit}\p{gc=Connector_Punctuation}\p{Join_Control}]+''', + chars)))) + self.assertEqual(ascii(''.join(regex.findall(r'''[[:xdigit:]]+''', + chars))), ascii(''.join(regex.findall(r'''[0-9A-Fa-f]+''', + chars)))) + + # Posix in ASCII. 
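+        # Illustrative aside: with bytes patterns, (?a) restricts \p{...} and
+        # the POSIX classes to the ASCII range, as exercised below.
+        self.assertEqual(regex.findall(br'(?a)[[:digit:]]+', b'a12b'), [b'12'])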
+ chars = bytes(range(0x100)) + + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:alnum:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{Alpha}\p{PosixDigit}]+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:alpha:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Alpha}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:ascii:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\x00-\x7F]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:blank:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{gc=Space_Separator}\t]+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:cntrl:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{gc=Control}+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:digit:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[0-9]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:graph:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[^\p{Space}\p{gc=Control}\p{gc=Surrogate}\p{gc=Unassigned}]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:lower:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Lower}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:print:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?aV1)[\p{Graph}\p{Blank}--\p{Cntrl}]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:punct:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?aV1)[\p{gc=Punctuation}\p{gc=Symbol}--\p{Alpha}]+''', + chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:space:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Whitespace}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:upper:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)\p{Upper}+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:word:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[\p{Alpha}\p{gc=Mark}\p{Digit}\p{gc=Connector_Punctuation}\p{Join_Control}]+''', chars)))) + self.assertEqual(ascii(b''.join(regex.findall(br'''(?a)[[:xdigit:]]+''', + chars))), ascii(b''.join(regex.findall(br'''(?a)[0-9A-Fa-f]+''', chars)))) + + # Hg issue 138: grapheme anchored search not working properly. + self.assertEqual(ascii(regex.search(r'\X$', 'ab\u2103').group()), + ascii('\u2103')) + + # Hg issue 139: Regular expression with multiple wildcards where first + # should match empty string does not always work. + self.assertEqual(regex.search("([^L]*)([^R]*R)", "LtR").groups(), ('', + 'LtR')) + + # Hg issue 140: Replace with REVERSE and groups has unexpected + # behavior. + self.assertEqual(regex.sub(r'(.)', r'x\1y', 'ab'), 'xayxby') + self.assertEqual(regex.sub(r'(?r)(.)', r'x\1y', 'ab'), 'xayxby') + self.assertEqual(regex.subf(r'(.)', 'x{1}y', 'ab'), 'xayxby') + self.assertEqual(regex.subf(r'(?r)(.)', 'x{1}y', 'ab'), 'xayxby') + + # Hg issue 141: Crash on a certain partial match. + self.assertEqual(regex.fullmatch('(a)*abc', 'ab', + partial=True).span(), (0, 2)) + self.assertEqual(regex.fullmatch('(a)*abc', 'ab', + partial=True).partial, True) + + # Hg issue 143: Partial matches have incorrect span if prefix is '.' + # wildcard. 
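+        # Illustrative aside: a partial match may stop at the end of the
+        # available text; Match.partial reports whether it did.
+        self.assertEqual(regex.search('OXRG', 'OOGOX', partial=True).partial, True)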
+        self.assertEqual(regex.search('OXRG', 'OOGOX', partial=True).span(), (3, 5))
+        self.assertEqual(regex.search('.XRG', 'OOGOX', partial=True).span(), (3, 5))
+        self.assertEqual(regex.search('.{1,3}XRG', 'OOGOX', partial=True).span(), (1, 5))
+
+        # Hg issue 144: Latest version problem with matching 'R|R'.
+        self.assertEqual(regex.match('R|R', 'R').span(), (0, 1))
+
+        # Hg issue 146: Forced-fail (?!) works improperly in conditional.
+        self.assertEqual(regex.match(r'(.)(?(1)(?!))', 'xy'), None)
+
+        # Groups cleared after failure.
+        self.assertEqual(regex.findall(r'(y)?(\d)(?(1)\b\B)', 'ax1y2z3b'), [('', '1'), ('', '2'), ('', '3')])
+        self.assertEqual(regex.findall(r'(y)?+(\d)(?(1)\b\B)', 'ax1y2z3b'), [('', '1'), ('', '2'), ('', '3')])
+
+        # Hg issue 147: Fuzzy match can return match points beyond buffer end.
+        self.assertEqual([m.span() for m in regex.finditer(r'(?i)(?:error){e}', 'regex failure')], [(0, 5), (5, 10), (10, 13), (13, 13)])
+        self.assertEqual([m.span() for m in regex.finditer(r'(?fi)(?:error){e}', 'regex failure')], [(0, 5), (5, 10), (10, 13), (13, 13)])
+
+        # Hg issue 150: Have an option for POSIX-compatible longest match of
+        # alternates.
+        self.assertEqual(regex.search(r'(?p)\d+(\w(\d*)?|[eE]([+-]\d+))', '10b12')[0], '10b12')
+        self.assertEqual(regex.search(r'(?p)\d+(\w(\d*)?|[eE]([+-]\d+))', '10E+12')[0], '10E+12')
+
+        self.assertEqual(regex.search(r'(?p)(\w|ae|oe|ue|ss)', 'ae')[0], 'ae')
+        self.assertEqual(regex.search(r'(?p)one(self)?(selfsufficient)?', 'oneselfsufficient')[0], 'oneselfsufficient')
+
+        # Hg issue 151: Request: \K.
+        self.assertEqual(regex.search(r'(ab\Kcd)', 'abcd').group(0, 1), ('cd', 'abcd'))
+        self.assertEqual(regex.findall(r'\w\w\K\w\w', 'abcdefgh'), ['cd', 'gh'])
+        self.assertEqual(regex.findall(r'(\w\w\K\w\w)', 'abcdefgh'), ['abcd', 'efgh'])
+
+        self.assertEqual(regex.search(r'(?r)(ab\Kcd)', 'abcd').group(0, 1), ('ab', 'abcd'))
+        self.assertEqual(regex.findall(r'(?r)\w\w\K\w\w', 'abcdefgh'), ['ef', 'ab'])
+        self.assertEqual(regex.findall(r'(?r)(\w\w\K\w\w)', 'abcdefgh'), ['efgh', 'abcd'])
+
+        # Hg issue 152: Request: (?(DEFINE)...).
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<quant>\d+)(?<item>\w+))(?&quant) (?&item)', '5 elephants')[0], '5 elephants')
+
+        self.assertEqual(regex.search(r'(?&routine)(?(DEFINE)(?<routine>.))', 'a').group('routine'), None)
+        self.assertEqual(regex.search(r'(?&routine)(?(DEFINE)(?<routine>.))', 'a').captures('routine'), ['a'])
+
+        # Hg issue 153: Request: (*SKIP).
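+        # Illustrative aside: (*FAIL) always fails and forces backtracking,
+        # while (*PRUNE) and (*SKIP) discard backtracking positions
+        # (PCRE-style backtracking control verbs), as the tests below show.
+        self.assertEqual(regex.search(r'a(*FAIL)|b', 'ab')[0], 'b')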
+ self.assertEqual(regex.search(r'12(*FAIL)|3', '123')[0], '3') + self.assertEqual(regex.search(r'(?r)12(*FAIL)|3', '123')[0], '3') + + self.assertEqual(regex.search(r'\d+(*PRUNE)\d', '123'), None) + self.assertEqual(regex.search(r'\d+(?=(*PRUNE))\d', '123')[0], '123') + self.assertEqual(regex.search(r'\d+(*PRUNE)bcd|[3d]', '123bcd')[0], + '123bcd') + self.assertEqual(regex.search(r'\d+(*PRUNE)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'\d+?(*PRUNE)bcd|[3d]', '123bcd')[0], + '3bcd') + self.assertEqual(regex.search(r'\d+?(*PRUNE)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'\d++(?<=3(*PRUNE))zzd|[4d]$', + '123zzd')[0], '123zzd') + self.assertEqual(regex.search(r'\d++(?<=3(*PRUNE))zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'\d++(?<=(*PRUNE)3)zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'\d++(?<=2(*PRUNE)3)zzd|[3d]$', + '124zzd')[0], 'd') + + self.assertEqual(regex.search(r'(?r)\d(*PRUNE)\d+', '123'), None) + self.assertEqual(regex.search(r'(?r)\d(?<=(*PRUNE))\d+', '123')[0], + '123') + self.assertEqual(regex.search(r'(?r)\d+(*PRUNE)bcd|[3d]', + '123bcd')[0], '123bcd') + self.assertEqual(regex.search(r'(?r)\d+(*PRUNE)bcd|[3d]', + '123zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=3(*PRUNE))zzd|[4d]$', + '123zzd')[0], '123zzd') + self.assertEqual(regex.search(r'(?r)\d++(?<=3(*PRUNE))zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=(*PRUNE)3)zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=2(*PRUNE)3)zzd|[3d]$', + '124zzd')[0], 'd') + + self.assertEqual(regex.search(r'\d+(*SKIP)bcd|[3d]', '123bcd')[0], + '123bcd') + self.assertEqual(regex.search(r'\d+(*SKIP)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'\d+?(*SKIP)bcd|[3d]', '123bcd')[0], + '3bcd') + self.assertEqual(regex.search(r'\d+?(*SKIP)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'\d++(?<=3(*SKIP))zzd|[4d]$', + '123zzd')[0], '123zzd') + self.assertEqual(regex.search(r'\d++(?<=3(*SKIP))zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'\d++(?<=(*SKIP)3)zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'\d++(?<=2(*SKIP)3)zzd|[3d]$', + '124zzd')[0], 'd') + + self.assertEqual(regex.search(r'(?r)\d+(*SKIP)bcd|[3d]', '123bcd')[0], + '123bcd') + self.assertEqual(regex.search(r'(?r)\d+(*SKIP)bcd|[3d]', '123zzd')[0], + 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=3(*SKIP))zzd|[4d]$', + '123zzd')[0], '123zzd') + self.assertEqual(regex.search(r'(?r)\d++(?<=3(*SKIP))zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=(*SKIP)3)zzd|[4d]$', + '124zzd')[0], 'd') + self.assertEqual(regex.search(r'(?r)\d++(?<=2(*SKIP)3)zzd|[3d]$', + '124zzd')[0], 'd') + + # Hg issue 154: Segmentation fault 11 when working with an atomic group + text = """June 30, December 31, 2013 2012 +some words follow: +more words and numbers 1,234,567 9,876,542 +more words and numbers 1,234,567 9,876,542""" + self.assertEqual(len(regex.findall(r'(?2014|2013 ?2012)', text)), 1) + + # Hg issue 156: regression on atomic grouping + self.assertEqual(regex.match('1(?>2)', '12').span(), (0, 2)) + + # Hg issue 157: regression: segfault on complex lookaround + self.assertEqual(regex.match(r'(?V1w)(?=(?=[^A-Z]*+[A-Z])(?=[^a-z]*+[a-z]))(?=\D*+\d)(?=\p{Alphanumeric}*+\P{Alphanumeric})\A(?s:.){8,255}+\Z', + 'AAaa11!!')[0], 'AAaa11!!') + + # Hg issue 158: Group issue with (?(DEFINE)...) 
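+        # Illustrative aside: a (?(DEFINE)...) group is never matched where
+        # it appears; it only defines named subpatterns for (?&name) calls.
+        self.assertEqual(regex.search(r'(?(DEFINE)(?<num>\d+))(?&num)', 'ab12')[0], '12')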
+        TEST_REGEX = regex.compile(r'''(?smx)
+(?(DEFINE)
+    (?<subcat>
+    ^,[^,]+,
+    )
+)
+
+# Group 2 is defined on this line
+^,([^,]+),
+
+(?:(?!(?&subcat)[\r\n]+(?&subcat)).)+
+''')
+
+        TEST_DATA = '''
+,Cat 1,
+,Brand 1,
+some
+thing
+,Brand 2,
+other
+things
+,Cat 2,
+,Brand,
+Some
+thing
+'''
+
+        self.assertEqual([m.span(1, 2) for m in TEST_REGEX.finditer(TEST_DATA)], [((-1, -1), (2, 7)), ((-1, -1), (54, 59))])
+
+        # Hg issue 161: Unexpected fuzzy match results
+        self.assertEqual(regex.search('(abcdefgh){e}', '******abcdefghijklmnopqrtuvwxyz', regex.BESTMATCH).span(), (6, 14))
+        self.assertEqual(regex.search('(abcdefghi){e}', '******abcdefghijklmnopqrtuvwxyz', regex.BESTMATCH).span(), (6, 15))
+
+        # Hg issue 163: allow lookarounds in conditionals.
+        self.assertEqual(regex.match(r'(?:(?=\d)\d+\b|\w+)', '123abc').span(), (0, 6))
+        self.assertEqual(regex.match(r'(?(?=\d)\d+\b|\w+)', '123abc'), None)
+        self.assertEqual(regex.search(r'(?(?<=love\s)you|(?<=hate\s)her)', "I love you").span(), (7, 10))
+        self.assertEqual(regex.findall(r'(?(?<=love\s)you|(?<=hate\s)her)', "I love you but I don't hate her either"), ['you', 'her'])
+
+        # Hg issue 180: bug of POSIX matching.
+        self.assertEqual(regex.search(r'(?p)a*(.*?)', 'aaabbb').group(0, 1), ('aaabbb', 'bbb'))
+        self.assertEqual(regex.search(r'(?p)a*(.*)', 'aaabbb').group(0, 1), ('aaabbb', 'bbb'))
+        self.assertEqual(regex.sub(r'(?p)a*(.*?)', r'\1', 'aaabbb'), 'bbb')
+        self.assertEqual(regex.sub(r'(?p)a*(.*)', r'\1', 'aaabbb'), 'bbb')
+
+        # Hg issue 192: Named lists reverse matching doesn't work with
+        # IGNORECASE and V1
+        self.assertEqual(regex.match(r'(?irV0)\L<kw>', '21', kw=['1']).span(), (1, 2))
+        self.assertEqual(regex.match(r'(?irV1)\L<kw>', '21', kw=['1']).span(), (1, 2))
+
+        # Hg issue 193: Alternation and .REVERSE flag.
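+        # Illustrative aside: (?r) searches backwards from the end of the
+        # string, so matches are found (and listed) right to left.
+        self.assertEqual(regex.findall(r'(?r)\d+', 'a12b34'), ['34', '12'])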
+ self.assertEqual(regex.search('a|b', '111a222').span(), (3, 4)) + self.assertEqual(regex.search('(?r)a|b', '111a222').span(), (3, 4)) + + # Hg issue 194: .FULLCASE and Backreference + self.assertEqual(regex.search(r'(?if)<(CLI)><\1>', + '').span(), (0, 10)) + self.assertEqual(regex.search(r'(?if)<(CLI)><\1>', + '').span(), (0, 10)) + self.assertEqual(regex.search(r'(?ifr)<\1><(CLI)>', + '').span(), (0, 10)) + + # Hg issue 195: Pickle (or otherwise serial) the compiled regex + r = regex.compile(r'\L', options=['foo', 'bar']) + p = pickle.dumps(r) + r = pickle.loads(p) + self.assertEqual(r.match('foo').span(), (0, 3)) + + # Hg issue 196: Fuzzy matching on repeated regex not working as + # expected + self.assertEqual(regex.match('(x{6}){e<=1}', 'xxxxxx', + flags=regex.BESTMATCH).span(), (0, 6)) + self.assertEqual(regex.match('(x{6}){e<=1}', 'xxxxx', + flags=regex.BESTMATCH).span(), (0, 5)) + self.assertEqual(regex.match('(x{6}){e<=1}', 'x', + flags=regex.BESTMATCH), None) + self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'xxxxxx', + flags=regex.BESTMATCH).span(), (0, 6)) + self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'xxxxx', + flags=regex.BESTMATCH).span(), (0, 5)) + self.assertEqual(regex.match('(?r)(x{6}){e<=1}', 'x', + flags=regex.BESTMATCH), None) + + # Hg issue 197: ValueError in regex.compile + self.assertRaises(regex.error, lambda: + regex.compile(b'00000\\0\\00\\^\50\\00\\U05000000')) + + # Hg issue 198: ValueError in regex.compile + self.assertRaises(regex.error, lambda: regex.compile(b"{e', '22', aa=['121', + '22'])), True) + self.assertEqual(bool(regex.search(r'(?ri)\L', '22', aa=['121', + '22'])), True) + self.assertEqual(bool(regex.search(r'(?fi)\L', '22', aa=['121', + '22'])), True) + self.assertEqual(bool(regex.search(r'(?fri)\L', '22', aa=['121', + '22'])), True) + + # Hg issue 208: Named list, (?ri) flags, Backreference + self.assertEqual(regex.search(r'(?r)\1dog..(?<=(\L))$', 'ccdogcc', + aa=['bcb', 'cc']). span(), (0, 7)) + self.assertEqual(regex.search(r'(?ir)\1dog..(?<=(\L))$', + 'ccdogcc', aa=['bcb', 'cc']). span(), (0, 7)) + + # Hg issue 210: Fuzzy matching and Backreference + self.assertEqual(regex.search(r'(2)(?:\1{5}){e<=1}', + '3222212').span(), (1, 7)) + self.assertEqual(regex.search(r'(\d)(?:\1{5}){e<=1}', + '3222212').span(), (1, 7)) + + # Hg issue 211: Segmentation fault with recursive matches and atomic + # groups + self.assertEqual(regex.match(r'''\A(?P(?>\((?&whole)\)|[+\-]))\Z''', + '((-))').span(), (0, 5)) + self.assertEqual(regex.match(r'''\A(?P(?>\((?&whole)\)|[+\-]))\Z''', + '((-)+)'), None) + + # Hg issue 212: Unexpected matching difference with .*? between re and + # regex + self.assertEqual(regex.match(r"x.*? (.).*\1(.*)\1", + 'x |y| z|').span(), (0, 9)) + self.assertEqual(regex.match(r"\.sr (.*?) 
(.)(.*)\2(.*)\2(.*)", + r'.sr h |||').span(), (0, 35)) + + # Hg issue 213: Segmentation Fault + a = '"\\xF9\\x80\\xAEqdz\\x95L\\xA7\\x89[\\xFE \\x91)\\xF9]\\xDB\'\\x99\\x09=\\x00\\xFD\\x98\\x22\\xDD\\xF1\\xB6\\xC3 Z\\xB6gv\\xA5x\\x93P\\xE1r\\x14\\x8Cv\\x0C\\xC0w\\x15r\\xFFc%" ' + py_regex_pattern = r'''(?P((?>(?"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``)))) (?P((?>(?"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``))))''' + self.assertEqual(bool(regex.search(py_regex_pattern, a)), False) + + # Hg Issue 216: Invalid match when using negative lookbehind and pipe + self.assertEqual(bool(regex.match('foo(?<=foo)', 'foo')), True) + self.assertEqual(bool(regex.match('foo(?.*\!\w*\:.*)|(?P.*))', + '!')), False) + + # Hg issue 220: Misbehavior of group capture with OR operand + self.assertEqual(regex.match(r'\w*(ea)\w*|\w*e(?!a)\w*', + 'easier').groups(), ('ea', )) + + # Hg issue 225: BESTMATCH in fuzzy match not working + self.assertEqual(regex.search('(^1234$){i,d}', '12234', + regex.BESTMATCH).span(), (0, 5)) + self.assertEqual(regex.search('(^1234$){i,d}', '12234', + regex.BESTMATCH).fuzzy_counts, (0, 1, 0)) + + self.assertEqual(regex.search('(^1234$){s,i,d}', '12234', + regex.BESTMATCH).span(), (0, 5)) + self.assertEqual(regex.search('(^1234$){s,i,d}', '12234', + regex.BESTMATCH).fuzzy_counts, (0, 1, 0)) + + # Hg issue 226: Error matching at start of string + self.assertEqual(regex.search('(^123$){s,i,d}', 'xxxxxxxx123', + regex.BESTMATCH).span(), (0, 11)) + self.assertEqual(regex.search('(^123$){s,i,d}', 'xxxxxxxx123', + regex.BESTMATCH).fuzzy_counts, (0, 8, 0)) + + # Hg issue 227: Incorrect behavior for ? operator with UNICODE + + # IGNORECASE + self.assertEqual(regex.search(r'a?yz', 'xxxxyz', flags=regex.FULLCASE | + regex.IGNORECASE).span(), (4, 6)) + + # Hg issue 230: Is it a bug of (?(DEFINE)...) + self.assertEqual(regex.findall(r'(?:(?![a-d]).)+', 'abcdefgh'), + ['efgh']) + self.assertEqual(regex.findall(r'''(?(DEFINE)(?P(?:(?![a-d]).)))(?&mydef)+''', + 'abcdefgh'), ['efgh']) + + # Hg issue 238: Not fully re backward compatible + self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){1,3}', + '"Erm....yes. T..T...Thank you for that."'), [('Erm....', 'Erm', + '....'), ('T...', 'T', '...')]) + self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){3}', + '"Erm....yes. T..T...Thank you for that."'), []) + self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){2}', + '"Erm....yes. T..T...Thank you for that."'), [('T...', 'T', '...')]) + self.assertEqual(regex.findall(r'((\w{1,3})(\.{2,10})){1}', + '"Erm....yes. 
T..T...Thank you for that."'), [('Erm....', 'Erm', + '....'), ('T..', 'T', '..'), ('T...', 'T', '...')]) + + # Hg issue 247: Unexpected result with fuzzy matching and lookahead + # expression + self.assertEqual(regex.search(r'(?:ESTONIA(?!\w)){e<=1}', + 'ESTONIAN WORKERS').group(), 'ESTONIAN') + self.assertEqual(regex.search(r'(?:ESTONIA(?=\W)){e<=1}', + 'ESTONIAN WORKERS').group(), 'ESTONIAN') + + self.assertEqual(regex.search(r'(?:(?.))(?&func)', + 'abc').groups(), (None, )) + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?&func)', + 'abc').groupdict(), {'func': None}) + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?&func)', + 'abc').capturesdict(), {'func': ['a']}) + + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?=(?&func))', + 'abc').groups(), (None, )) + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?=(?&func))', + 'abc').groupdict(), {'func': None}) + self.assertEqual(regex.search(r'(?(DEFINE)(?.))(?=(?&func))', + 'abc').capturesdict(), {'func': ['a']}) + + self.assertEqual(regex.search(r'(?(DEFINE)(?.)).(?<=(?&func))', + 'abc').groups(), (None, )) + self.assertEqual(regex.search(r'(?(DEFINE)(?.)).(?<=(?&func))', + 'abc').groupdict(), {'func': None}) + self.assertEqual(regex.search(r'(?(DEFINE)(?.)).(?<=(?&func))', + 'abc').capturesdict(), {'func': ['a']}) + + # Hg issue 271: Comment logic different between Re and Regex + self.assertEqual(bool(regex.match(r'ab(?#comment\))cd', 'abcd')), True) + + # Hg issue 276: Partial Matches yield incorrect matches and bounds + self.assertEqual(regex.search(r'[a-z]+ [a-z]*?:', 'foo bar', + partial=True).span(), (0, 7)) + self.assertEqual(regex.search(r'(?r):[a-z]*? [a-z]+', 'foo bar', + partial=True).span(), (0, 7)) + + # Hg issue 291: Include Script Extensions as a supported Unicode property + self.assertEqual(bool(regex.match(r'(?u)\p{Script:Beng}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script:Bengali}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Bengali}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Beng}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Cakm}', + '\u09EF')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{Script_Extensions:Sylo}', + '\u09EF')), True) + + # Hg issue #293: scx (Script Extensions) property currently matches + # incorrectly + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Latin}', 'P')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Ahom}', 'P')), False) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Common}', '4')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Caucasian_Albanian}', '4')), + False) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Arabic}', '\u062A')), True) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Balinese}', '\u062A')), + False) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Devanagari}', '\u091C')), + True) + self.assertEqual(bool(regex.match(r'(?u)\p{scx:Batak}', '\u091C')), False) + + # Hg issue 296: Group references are not taken into account when group is reporting the last match + self.assertEqual(regex.fullmatch('(?P.)*(?&x)', 'abc').captures('x'), + ['a', 'b', 'c']) + self.assertEqual(regex.fullmatch('(?P.)*(?&x)', 'abc').group('x'), + 'b') + + self.assertEqual(regex.fullmatch('(?P.)(?P.)(?P.)', + 'abc').captures('x'), ['a', 'b', 'c']) + self.assertEqual(regex.fullmatch('(?P.)(?P.)(?P.)', + 'abc').group('x'), 'c') + + # Hg issue 299: Partial gives misleading results with "open ended" regexp + 
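+        # Illustrative aside: Match.partial is True only when the match ran
+        # out of input while more characters were still required; optional
+        # repetitions such as (?:ab)* are already complete, hence the False
+        # results below.
+        self.assertEqual(regex.match('abc', 'ab', partial=True).partial, True)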
self.assertEqual(regex.match('(?:ab)*', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*?', '', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)*+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)+?', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)++', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?:ab)++', 'abab', partial=True).partial, + False) + + self.assertEqual(regex.match('(?r)(?:ab)*', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*?', '', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)*+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)+', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)+', 'abab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)+?', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)++', 'ab', partial=True).partial, + False) + self.assertEqual(regex.match('(?r)(?:ab)++', 'abab', partial=True).partial, + False) + + self.assertEqual(regex.match('a*', '', partial=True).partial, False) + self.assertEqual(regex.match('a*?', '', partial=True).partial, False) + self.assertEqual(regex.match('a*+', '', partial=True).partial, False) + self.assertEqual(regex.match('a+', '', partial=True).partial, True) + self.assertEqual(regex.match('a+?', '', partial=True).partial, True) + self.assertEqual(regex.match('a++', '', partial=True).partial, True) + self.assertEqual(regex.match('a+', 'a', partial=True).partial, False) + self.assertEqual(regex.match('a+?', 'a', partial=True).partial, False) + self.assertEqual(regex.match('a++', 'a', partial=True).partial, False) + + self.assertEqual(regex.match('(?r)a*', '', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a*?', '', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a*+', '', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a+', '', partial=True).partial, True) + self.assertEqual(regex.match('(?r)a+?', '', partial=True).partial, True) + self.assertEqual(regex.match('(?r)a++', '', partial=True).partial, True) + self.assertEqual(regex.match('(?r)a+', 'a', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a+?', 'a', partial=True).partial, False) + self.assertEqual(regex.match('(?r)a++', 'a', partial=True).partial, False) + + self.assertEqual(regex.match(r"(?:\s*\w+'*)+", 'whatever', partial=True).partial, + False) + + # Hg issue 300: segmentation fault + pattern = ('(?PGGCGTCACACTTTGCTATGCCATAGCAT[AG]TTTATCCATAAGA' + 'TTAGCGGATCCTACCTGACGCTTTTTATCGCAACTCTCTACTGTTTCTCCATAACAGAACATATTGA' + 'CTATCCGGTATTACCCGGCATGACAGGAGTAAAA){e<=1}' + '(?P[ACGT]{1059}){e<=2}' + '(?PTAATCGTCTTGTTTGATACACAAGGGTCGCATCTGCGGCCCTTTTGCTTTTTTAAG' + 'TTGTAAGGATATGCCATTCTAGA){e<=0}' + '(?P[ACGT]{18}){e<=0}' + '(?PAGATCGG[CT]AGAGCGTCGTGTAGGGAAAGAGTGTGG){e<=1}') + + text = 
('GCACGGCGTCACACTTTGCTATGCCATAGCATATTTATCCATAAGATTAGCGGATCCTACC' + 'TGACGCTTTTTATCGCAACTCTCTACTGTTTCTCCATAACAGAACATATTGACTATCCGGTATTACC' + 'CGGCATGACAGGAGTAAAAATGGCTATCGACGAAAACAAACAGAAAGCGTTGGCGGCAGCACTGGGC' + 'CAGATTGAGAAACAATTTGGTAAAGGCTCCATCATGCGCCTGGGTGAAGACCGTTCCATGGATGTGG' + 'AAACCATCTCTACCGGTTCGCTTTCACTGGATATCGCGCTTGGGGCAGGTGGTCTGCCGATGGGCCG' + 'TATCGTCGAAATCTACGGACCGGAATCTTCCGGTAAAACCACGCTGACGCTGCAGGTGATCGCCGCA' + 'GCGCAGCGTGAAGGTAAAACCTGTGCGTTTATCGATGCTGAACACGCGCTGGACCCAATCTACGCAC' + 'GTAAACTGGGCGTCGATATCGACAACCTGCTGTGCTCCCAGCCGGACACCGGCGAGCAGGCACTGGA' + 'AATCTGTGACGCCCTGGCGCGTTCTGGCGCAGTAGACGTTATCGTCGTTGACTCCGTGGCGGCACTG' + 'ACGCCGAAAGCGGAAATCGAAGGCGAAATCGGCGACTCTCATATGGGCCTTGCGGCACGTATGATGA' + 'GCCAGGCGATGCGTAAGCTGGCGGGTAACCTGAAGCAGTCCAACACGCTGCTGATCTTCATCAACCC' + 'CATCCGTATGAAAATTGGTGTGATGTTCGGCAACCCGGAAACCACTTACCGGTGGTAACGCGCTGAA' + 'ATTCTACGCCTCTGTTCGTCTCGACATCCGTTAAATCGGCGCGGTGAAAGAGGGCGAAAACGTGGTG' + 'GGTAGCGAAACCCGCGTGAAAGTGGTGAAGAACAAAATCGCTGCGCCGTTTAAACAGGCTGAATTCC' + 'AGATCCTCTACGGCGAAGGTATCAACTTCTACCCCGAACTGGTTGACCTGGGCGTAAAAGAGAAGCT' + 'GATCGAGAAAGCAGGCGCGTGGTACAGCTACAAAGGTGAGAAGATCGGTCAGGGTAAAGCGAATGCG' + 'ACTGCCTGGCTGAAATTTAACCCGGAAACCGCGAAAGAGATCGAGTGAAAAGTACGTGAGTTGCTGC' + 'TGAGCAACCCGAACTCAACGCCGGATTTCTCTGTAGATGATAGCGAAGGCGTAGCAGAAACTAACGA' + 'AGATTTTTAATCGTCTTGTTTGATACACAAGGGTCGCATCTGCGGCCCTTTTGCTTTTTTAAGTTGT' + 'AAGGATATGCCATTCTAGACAGTTAACACACCAACAAAGATCGGTAGAGCGTCGTGTAGGGAAAGAG' + 'TGTGGTACC') + + m = regex.search(pattern, text, flags=regex.BESTMATCH) + self.assertEqual(m.fuzzy_counts, (0, 1, 0)) + self.assertEqual(m.fuzzy_changes, ([], [1206], [])) + + # Hg issue 306: Fuzzy match parameters not respecting quantifier scope + self.assertEqual(regex.search(r'(?e)(dogf(((oo){e<1})|((00){e<1}))d){e<2}', + 'dogfood').fuzzy_counts, (0, 0, 0)) + self.assertEqual(regex.search(r'(?e)(dogf(((oo){e<1})|((00){e<1}))d){e<2}', + 'dogfoot').fuzzy_counts, (1, 0, 0)) + + # Hg issue 312: \X not matching graphemes with zero-width-joins + self.assertEqual(regex.findall(r'\X', + '\U0001F468\u200D\U0001F469\u200D\U0001F467\u200D\U0001F466'), + ['\U0001F468\u200D\U0001F469\u200D\U0001F467\u200D\U0001F466']) + + # Hg issue 320: Abnormal performance + self.assertEqual(bool(regex.search(r'(?=a)a', 'a')), True) + self.assertEqual(bool(regex.search(r'(?!b)a', 'a')), True) + + # Hg issue 327: .fullmatch() causes MemoryError + self.assertEqual(regex.fullmatch(r'((\d)*?)*?', '123').span(), (0, 3)) + + # Hg issue 329: Wrong group matches when question mark quantifier is used within a look behind + self.assertEqual(regex.search(r'''(?(DEFINE)(?(?THIS_SHOULD_NOT_MATCHx?)|(?right))).*(?<=(?&mydef).*)''', + 'x right').capturesdict(), {'mydef': ['right'], 'wrong': [], 'right': + ['right']}) + + # Hg issue 338: specifying allowed characters when fuzzy-matching + self.assertEqual(bool(regex.match(r'(?:cat){e<=1:[u]}', 'cut')), True) + self.assertEqual(bool(regex.match(r'(?:cat){e<=1:u}', 'cut')), True) + + # Hg issue 353: fuzzy changes negative indexes + self.assertEqual(regex.search(r'(?be)(AGTGTTCCCCGCGCCAGCGGGGATAAACCG){s<=5,i<=5,d<=5,s+i+d<=10}', + 'TTCCCCGCGCCAGCGGGGATAAACCG').fuzzy_changes, ([], [], [0, 1, 3, 5])) + + # Git issue 364: Contradictory values in fuzzy_counts and fuzzy_changes + self.assertEqual(regex.match(r'(?:bc){e}', 'c').fuzzy_counts, (1, 0, + 1)) + self.assertEqual(regex.match(r'(?:bc){e}', 'c').fuzzy_changes, ([0], + [], [1])) + self.assertEqual(regex.match(r'(?e)(?:bc){e}', 'c').fuzzy_counts, (0, + 0, 1)) + self.assertEqual(regex.match(r'(?e)(?:bc){e}', 
'c').fuzzy_changes, + ([], [], [0])) + self.assertEqual(regex.match(r'(?b)(?:bc){e}', 'c').fuzzy_counts, (0, + 0, 1)) + self.assertEqual(regex.match(r'(?b)(?:bc){e}', 'c').fuzzy_changes, + ([], [], [0])) + + # Git issue 370: Confusions about Fuzzy matching behavior + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){e}', + '$ 10,112.111.12').fuzzy_counts, (6, 0, 5)) + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=1}', + '$ 10,112.111.12').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=1,i<=1,d<=1}', + '$ 10,112.111.12').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=3}', + '$ 10,1a2.111.12').fuzzy_counts, (2, 0, 0)) + self.assertEqual(regex.match('(?e)(?:^(\\$ )?\\d{1,3}(,\\d{3})*(\\.\\d{2})$){s<=2}', + '$ 10,1a2.111.12').fuzzy_counts, (2, 0, 0)) + + self.assertEqual(regex.fullmatch(r'(?e)(?:0?,0(?:,0)?){s<=1,d<=1}', + ',0;0').fuzzy_counts, (1, 0, 0)) + self.assertEqual(regex.fullmatch(r'(?e)(?:0??,0(?:,0)?){s<=1,d<=1}', + ',0;0').fuzzy_counts, (1, 0, 0)) + + # Git issue 371: Specifying character set when fuzzy-matching allows characters not in the set + self.assertEqual(regex.search(r"\b(?e)(?:\d{6,20}){i<=5:[\-\\\/]}\b", + "cat dog starting at 00:01132.000. hello world"), None) + + # Git issue 385: Comments in expressions + self.assertEqual(bool(regex.compile('(?#)')), True) + self.assertEqual(bool(regex.compile('(?x)(?#)')), True) + + # Git issue 394: Unexpected behaviour in fuzzy matching with limited character set with IGNORECASE flag + self.assertEqual(regex.findall(r'(\d+){i<=2:[ab]}', '123X4Y5'), + ['123', '4', '5']) + self.assertEqual(regex.findall(r'(?i)(\d+){i<=2:[ab]}', '123X4Y5'), + ['123', '4', '5']) + + # Git issue 403: Fuzzy matching with wrong distance (unnecessary substitutions) + self.assertEqual(regex.match(r'^(test){e<=5}$', 'terstin', + flags=regex.B).fuzzy_counts, (0, 3, 0)) + + # Git issue 408: regex fails with a quantified backreference but succeeds with repeated backref + self.assertEqual(bool(regex.match(r"(?:(x*)\1\1\1)*x$", "x" * 5)), True) + self.assertEqual(bool(regex.match(r"(?:(x*)\1{3})*x$", "x" * 5)), True) + + # Git issue 415: Fuzzy character restrictions don't apply to insertions at "right edge" + self.assertEqual(regex.match(r't(?:es){s<=1:\d}t', 'te5t').group(), + 'te5t') + self.assertEqual(regex.match(r't(?:es){s<=1:\d}t', 'tezt'), None) + self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', 'tes5t').group(), + 'tes5t') + self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', 'teszt'), None) + self.assertEqual(regex.match(r't(?:es){i<=1:\d}t', + 'tes5t').fuzzy_changes, ([], [3], [])) + self.assertEqual(regex.match(r't(es){i<=1,0.*)(?PCTTCC){e<=1}(?P([ACGT]){4,6})(?PCAATACCGACTCCTCACTGTGT){e<=2}(?P([ACGT]){0,6}$)' + + m = regex.match(pattern, sequence, flags=regex.BESTMATCH) + self.assertEqual(m.span(), (0, 50)) + self.assertEqual(m.groupdict(), {'insert': 'TTCAGACGTGTGCT', 'anchor': 'CTTCC', 'umi': 'GATCT', 'sid': 'CAATACCGACTCCTCACTGTGT', 'end': 'GTCT'}) + + m = regex.match(pattern, sequence, flags=regex.ENHANCEMATCH) + self.assertEqual(m.span(), (0, 50)) + self.assertEqual(m.groupdict(), {'insert': 'TTCAGACGTGTGCT', 'anchor': 'CTTCC', 'umi': 'GATCT', 'sid': 'CAATACCGACTCCTCACTGTGT', 'end': 'GTCT'}) + + # Git issue 433: Disagreement between fuzzy_counts and fuzzy_changes + pattern = r'(?P.*)(?PAACACTGG){e<=1}(?P([AT][CG]){5}){e<=2}(?PGTAACCGAAG){e<=2}(?P([ACGT]){0,6}$)' + + 
sequence = 'GGAAAACACTGGTCTCAGTCTCGTAACCGAAGTGGTCG' + m = regex.match(pattern, sequence, flags=regex.BESTMATCH) + self.assertEqual(m.fuzzy_counts, (0, 0, 0)) + self.assertEqual(m.fuzzy_changes, ([], [], [])) + + sequence = 'GGAAAACACTGGTCTCAGTCTCGTCCCCGAAGTGGTCG' + m = regex.match(pattern, sequence, flags=regex.BESTMATCH) + self.assertEqual(m.fuzzy_counts, (2, 0, 0)) + self.assertEqual(m.fuzzy_changes, ([24, 25], [], [])) + + # Git issue 439: Unmatched groups: sub vs subf + self.assertEqual(regex.sub(r'(test1)|(test2)', r'matched: \1\2', 'test1'), 'matched: test1') + self.assertEqual(regex.subf(r'(test1)|(test2)', r'matched: {1}{2}', 'test1'), 'matched: test1') + self.assertEqual(regex.search(r'(test1)|(test2)', 'matched: test1').expand(r'matched: \1\2'), 'matched: test1'), + self.assertEqual(regex.search(r'(test1)|(test2)', 'matched: test1').expandf(r'matched: {1}{2}'), 'matched: test1') + + # Git issue 442: Fuzzy regex matching doesn't seem to test insertions correctly + self.assertEqual(regex.search(r"(?:\bha\b){i:[ ]}", "having"), None) + self.assertEqual(regex.search(r"(?:\bha\b){i:[ ]}", "having", flags=regex.I), None) + + # Git issue 467: Scoped inline flags 'a', 'u' and 'L' affect global flags + self.assertEqual(regex.match(r'(?a:\w)\w', 'd\N{CYRILLIC SMALL LETTER ZHE}').span(), (0, 2)) + self.assertEqual(regex.match(r'(?a:\w)(?u:\w)', 'd\N{CYRILLIC SMALL LETTER ZHE}').span(), (0, 2)) + + # Git issue 473: Emoji classified as letter + self.assertEqual(regex.match(r'^\p{LC}+$', '\N{SMILING CAT FACE WITH OPEN MOUTH}'), None) + self.assertEqual(regex.match(r'^\p{So}+$', '\N{SMILING CAT FACE WITH OPEN MOUTH}').span(), (0, 1)) + + # Git issue 474: regex has no equivalent to `re.Match.groups()` for captures + self.assertEqual(regex.match(r'(.)+', 'abc').allcaptures(), (['abc'], ['a', 'b', 'c'])) + self.assertEqual(regex.match(r'(.)+', 'abc').allspans(), ([(0, 3)], [(0, 1), (1, 2), (2, 3)])) + + # Git issue 477: \v for vertical spacing + self.assertEqual(bool(regex.fullmatch(r'\p{HorizSpace}+', '\t \xA0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000')), True) + self.assertEqual(bool(regex.fullmatch(r'\p{VertSpace}+', '\n\v\f\r\x85\u2028\u2029')), True) + + # Git issue 479: Segmentation fault when using conditional pattern + self.assertEqual(regex.match(r'(?(?<=A)|(?(?![^B])C|D))', 'A'), None) + self.assertEqual(regex.search(r'(?(?<=A)|(?(?![^B])C|D))', 'A').span(), (1, 1)) + + # Git issue 494: Backtracking failure matching regex ^a?(a?)b?c\1$ against string abca + self.assertEqual(regex.search(r"^a?(a?)b?c\1$", "abca").span(), (0, 4)) + + # Git issue 498: Conditional negative lookahead inside positive lookahead fails to match + self.assertEqual(regex.match(r'(?(?=a).|..)', 'ab').span(), (0, 1)) + self.assertEqual(regex.match(r'(?(?=b).|..)', 'ab').span(), (0, 2)) + self.assertEqual(regex.match(r'(?(?!a).|..)', 'ab').span(), (0, 2)) + self.assertEqual(regex.match(r'(?(?!b).|..)', 'ab').span(), (0, 1)) + + # Git issue 525: segfault when fuzzy matching empty list + self.assertEqual(regex.match(r"(\L){e<=5}", "blah", foo=[]).span(), (0, 0)) + + # Git issue 527: `VERBOSE`/`X` flag breaks `\N` escapes + self.assertEqual(regex.compile(r'\N{LATIN SMALL LETTER A}').match('a').span(), (0, 1)) + self.assertEqual(regex.compile(r'\N{LATIN SMALL LETTER A}', flags=regex.X).match('a').span(), (0, 1)) + + # Git issue 539: Bug: Partial matching fails on a simple example + self.assertEqual(regex.match(r"[^/]*b/ccc", "b/ccc", partial=True).span(), (0, 5)) + 
self.assertEqual(regex.match(r"[^/]*b/ccc", "b/ccb", partial=True), None) + self.assertEqual(regex.match(r"[^/]*b/ccc", "b/cc", partial=True).span(), (0, 4)) + self.assertEqual(regex.match(r"[^/]*b/xyz", "b/xy", partial=True).span(), (0, 4)) + self.assertEqual(regex.match(r"[^/]*b/xyz", "b/yz", partial=True), None) + + self.assertEqual(regex.match(r"(?i)[^/]*b/ccc", "b/ccc", partial=True).span(), (0, 5)) + self.assertEqual(regex.match(r"(?i)[^/]*b/ccc", "b/ccb", partial=True), None) + self.assertEqual(regex.match(r"(?i)[^/]*b/ccc", "b/cc", partial=True).span(), (0, 4)) + self.assertEqual(regex.match(r"(?i)[^/]*b/xyz", "b/xy", partial=True).span(), (0, 4)) + self.assertEqual(regex.match(r"(?i)[^/]*b/xyz", "b/yz", partial=True), None) + + # Git issue 546: Partial match not working in some instances with non-greedy capture + self.assertEqual(bool(regex.match(r'.*?', '<', partial=True)), True) + self.assertEqual(bool(regex.match(r'.*?', '.*?', '', partial=True)), True) + self.assertEqual(bool(regex.match(r'.*?', 'x', partial=True)), True) + self.assertEqual(bool(regex.match(r'.*?', 'xyz abc', partial=True)), True) + self.assertEqual(bool(regex.match(r'.*?', 'xyz abc foo', partial=True)), True) + self.assertEqual(bool(regex.match(r'.*?', 'xyz abc foo ', partial=True)), True) + self.assertEqual(bool(regex.match(r'.*?', 'xyz abc foo bar', partial=True)), True) + + def test_fuzzy_ext(self): + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', 'e')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'e')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', '-')), + False) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', '-')), + False) + + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'ae')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', + 'ae')), True) + self.assertEqual(bool(regex.fullmatch(r'(?:a){e<=1:[a-z]}', 'a-')), + False) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:a){e<=1:[a-z]}', + 'a-')), False) + + self.assertEqual(bool(regex.fullmatch(r'(?:ab){e<=1:[a-z]}', 'ae')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:ab){e<=1:[a-z]}', + 'ae')), True) + self.assertEqual(bool(regex.fullmatch(r'(?:ab){e<=1:[a-z]}', 'a-')), + False) + self.assertEqual(bool(regex.fullmatch(r'(?r)(?:ab){e<=1:[a-z]}', + 'a-')), False) + + self.assertEqual(bool(regex.fullmatch(r'(a)\1{e<=1:[a-z]}', 'ae')), + True) + self.assertEqual(bool(regex.fullmatch(r'(?r)\1{e<=1:[a-z]}(a)', + 'ea')), True) + self.assertEqual(bool(regex.fullmatch(r'(a)\1{e<=1:[a-z]}', 'a-')), + False) + self.assertEqual(bool(regex.fullmatch(r'(?r)\1{e<=1:[a-z]}(a)', + '-a')), False) + + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 'ts')), True) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 'st')), True) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 'st')), True) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 'ts')), True) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + '-s')), False) + self.assertEqual(bool(regex.fullmatch(r'(?fiu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 's-')), False) + self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', + 's-')), False) + 
self.assertEqual(bool(regex.fullmatch(r'(?firu)(?:\N{LATIN SMALL LETTER SHARP S}){e<=1:[a-z]}', '-s')), False)
+
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', 'ssst')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', 'ssts')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(\N{LATIN SMALL LETTER SHARP S})', 'stss')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(\N{LATIN SMALL LETTER SHARP S})', 'tsss')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', 'ss-s')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', 'sss-')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', '-s')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(\N{LATIN SMALL LETTER SHARP S})\1{e<=1:[a-z]}', 's-')), False)
+
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}', '\N{LATIN SMALL LETTER SHARP S}ts')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}', '\N{LATIN SMALL LETTER SHARP S}st')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(ss)', 'st\N{LATIN SMALL LETTER SHARP S}')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)\1{e<=1:[a-z]}(ss)', 'ts\N{LATIN SMALL LETTER SHARP S}')), True)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}', '\N{LATIN SMALL LETTER SHARP S}-s')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?fiu)(ss)\1{e<=1:[a-z]}', '\N{LATIN SMALL LETTER SHARP S}s-')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(ss)\1{e<=1:[a-z]}', 's-\N{LATIN SMALL LETTER SHARP S}')), False)
+        self.assertEqual(bool(regex.fullmatch(r'(?firu)(ss)\1{e<=1:[a-z]}', '-s\N{LATIN SMALL LETTER SHARP S}')), False)
+
+    def test_subscripted_captures(self):
+        self.assertEqual(regex.match(r'(?P<x>.)+', 'abc').expandf('{0} {0[0]} {0[-1]}'), 'abc abc abc')
+        self.assertEqual(regex.match(r'(?P<x>.)+', 'abc').expandf('{1} {1[0]} {1[1]} {1[2]} {1[-1]} {1[-2]} {1[-3]}'), 'c a b c c b a')
+        self.assertEqual(regex.match(r'(?P<x>.)+', 'abc').expandf('{x} {x[0]} {x[1]} {x[2]} {x[-1]} {x[-2]} {x[-3]}'), 'c a b c c b a')
+
+        self.assertEqual(regex.subf(r'(?P<x>.)+', r'{0} {0[0]} {0[-1]}', 'abc'), 'abc abc abc')
+        self.assertEqual(regex.subf(r'(?P<x>.)+', '{1} {1[0]} {1[1]} {1[2]} {1[-1]} {1[-2]} {1[-3]}', 'abc'), 'c a b c c b a')
+        self.assertEqual(regex.subf(r'(?P<x>.)+', '{x} {x[0]} {x[1]} {x[2]} {x[-1]} {x[-2]} {x[-3]}', 'abc'), 'c a b c c b a')
+
+    def test_more_zerowidth(self):
+        if sys.version_info >= (3, 7, 0):
+            self.assertEqual(regex.split(r'\b|:+', 'a::bc'), ['', 'a', '', '', 'bc', ''])
+            self.assertEqual(regex.sub(r'\b|:+', '-', 'a::bc'), '-a---bc-')
+            self.assertEqual(regex.findall(r'\b|:+', 'a::bc'), ['', '', '::', '', ''])
+            self.assertEqual([m.span() for m in regex.finditer(r'\b|:+', 'a::bc')], [(0, 0), (1, 1), (1, 3), (3, 3), (5, 5)])
+            self.assertEqual([m.span() for m in regex.finditer(r'(?m)^\s*?$', 'foo\n\n\nbar')], [(4, 4), (4, 5), (5, 5)])
+
+    def test_line_ending(self):
+        self.assertEqual(regex.findall(r'\R', '\r\n\n\x0B\f\r\x85\u2028\u2029'), ['\r\n', '\n', '\x0B', '\f', '\r', '\x85', '\u2028', '\u2029'])
+        self.assertEqual(regex.findall(br'\R', b'\r\n\n\x0B\f\r\x85'), [b'\r\n', b'\n', b'\x0B', b'\f', b'\r'])
+
+def test_main():
unittest.main(verbosity=2) + +if __name__ == "__main__": + test_main() diff --git a/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/INSTALLER b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/LICENSE b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..1cc22a5aa7679ebaa10934212f356823931bdc3e --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2010-2024 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/METADATA b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..cfde03c2631c5c1d5cdc0949d0ee3379e7110f0e --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/METADATA @@ -0,0 +1,43 @@ +Metadata-Version: 2.1 +Name: six +Version: 1.17.0 +Summary: Python 2 and 3 compatibility utilities +Home-page: https://github.com/benjaminp/six +Author: Benjamin Peterson +Author-email: benjamin@python.org +License: MIT +Classifier: Development Status :: 5 - Production/Stable +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.* +License-File: LICENSE + +.. image:: https://img.shields.io/pypi/v/six.svg + :target: https://pypi.org/project/six/ + :alt: six on PyPI + +.. image:: https://readthedocs.org/projects/six/badge/?version=latest + :target: https://six.readthedocs.io/ + :alt: six's documentation on Read the Docs + +.. image:: https://img.shields.io/badge/license-MIT-green.svg + :target: https://github.com/benjaminp/six/blob/master/LICENSE + :alt: MIT License badge + +Six is a Python 2 and 3 compatibility library. 
It provides utility functions +for smoothing over the differences between the Python versions with the goal of +writing Python code that is compatible on both Python versions. See the +documentation for more information on what is provided. + +Six supports Python 2.7 and 3.3+. It is contained in only one Python +file, so it can be easily copied into your project. (The copyright and license +notice must be retained.) + +Online documentation is at https://six.readthedocs.io/. + +Bugs can be reported to https://github.com/benjaminp/six. The code can also +be found there. diff --git a/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/RECORD b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..5bd23ffc51041dcb525cf04410e05a21ba51cdf0 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/RECORD @@ -0,0 +1,9 @@ +__pycache__/six.cpython-310.pyc,, +six-1.17.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +six-1.17.0.dist-info/LICENSE,sha256=Q3W6IOK5xsTnytKUCmKP2Q6VzD1Q7pKq51VxXYuh-9A,1066 +six-1.17.0.dist-info/METADATA,sha256=ViBCB4wnUlSfbYp8htvF3XCAiKe-bYBnLsewcQC3JGg,1658 +six-1.17.0.dist-info/RECORD,, +six-1.17.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +six-1.17.0.dist-info/WHEEL,sha256=pxeNX5JdtCe58PUSYP9upmc7jdRPgvT0Gm9kb1SHlVw,109 +six-1.17.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4 +six.py,sha256=xRyR9wPT1LNpbJI8tf7CE-BeddkhU5O--sfy-mo5BN8,34703 diff --git a/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/REQUESTED b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/WHEEL b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..104f3874635f24f0d2918dfeaf6a59652274460c --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.6.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/top_level.txt b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..ffe2fce498955b628014618b28c6bcf152466a4a --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/six-1.17.0.dist-info/top_level.txt @@ -0,0 +1 @@ +six diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/__init__.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3fe782c8a45bbabcf240f3cac4303ac12b0ec274 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/__init__.py @@ -0,0 +1,211 @@ +""" +Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more +""" + +from __future__ import annotations + +# Set default logging handler to avoid "No handler found" warnings. +import logging +import sys +import typing +import warnings +from logging import NullHandler + +from . 
import exceptions +from ._base_connection import _TYPE_BODY +from ._collections import HTTPHeaderDict +from ._version import __version__ +from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url +from .filepost import _TYPE_FIELDS, encode_multipart_formdata +from .poolmanager import PoolManager, ProxyManager, proxy_from_url +from .response import BaseHTTPResponse, HTTPResponse +from .util.request import make_headers +from .util.retry import Retry +from .util.timeout import Timeout + +# Ensure that Python is compiled with OpenSSL 1.1.1+ +# If the 'ssl' module isn't available at all that's +# fine, we only care if the module is available. +try: + import ssl +except ImportError: + pass +else: + if not ssl.OPENSSL_VERSION.startswith("OpenSSL "): # Defensive: + warnings.warn( + "urllib3 v2 only supports OpenSSL 1.1.1+, currently " + f"the 'ssl' module is compiled with {ssl.OPENSSL_VERSION!r}. " + "See: https://github.com/urllib3/urllib3/issues/3020", + exceptions.NotOpenSSLWarning, + ) + elif ssl.OPENSSL_VERSION_INFO < (1, 1, 1): # Defensive: + raise ImportError( + "urllib3 v2 only supports OpenSSL 1.1.1+, currently " + f"the 'ssl' module is compiled with {ssl.OPENSSL_VERSION!r}. " + "See: https://github.com/urllib3/urllib3/issues/2168" + ) + +__author__ = "Andrey Petrov (andrey.petrov@shazow.net)" +__license__ = "MIT" +__version__ = __version__ + +__all__ = ( + "HTTPConnectionPool", + "HTTPHeaderDict", + "HTTPSConnectionPool", + "PoolManager", + "ProxyManager", + "HTTPResponse", + "Retry", + "Timeout", + "add_stderr_logger", + "connection_from_url", + "disable_warnings", + "encode_multipart_formdata", + "make_headers", + "proxy_from_url", + "request", + "BaseHTTPResponse", +) + +logging.getLogger(__name__).addHandler(NullHandler()) + + +def add_stderr_logger( + level: int = logging.DEBUG, +) -> logging.StreamHandler[typing.TextIO]: + """ + Helper for quickly adding a StreamHandler to the logger. Useful for + debugging. + + Returns the handler after adding it. + """ + # This method needs to be in this __init__.py to get the __name__ correct + # even if urllib3 is vendored within another package. + logger = logging.getLogger(__name__) + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s")) + logger.addHandler(handler) + logger.setLevel(level) + logger.debug("Added a stderr logging handler to logger: %s", __name__) + return handler + + +# ... Clean up. +del NullHandler + + +# All warning filters *must* be appended unless you're really certain that they +# shouldn't be: otherwise, it's very hard for users to use most Python +# mechanisms to silence them. +# SecurityWarning's always go off by default. +warnings.simplefilter("always", exceptions.SecurityWarning, append=True) +# InsecurePlatformWarning's don't vary between requests, so we keep it default. +warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True) + + +def disable_warnings(category: type[Warning] = exceptions.HTTPWarning) -> None: + """ + Helper for quickly disabling all urllib3 warnings. 
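+
+    A short sketch (``InsecureRequestWarning`` is one ``HTTPWarning`` subclass
+    commonly silenced this way)::
+
+        import urllib3
+
+        urllib3.disable_warnings()  # ignore every urllib3 HTTPWarning
+        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)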
+ """ + warnings.simplefilter("ignore", category) + + +_DEFAULT_POOL = PoolManager() + + +def request( + method: str, + url: str, + *, + body: _TYPE_BODY | None = None, + fields: _TYPE_FIELDS | None = None, + headers: typing.Mapping[str, str] | None = None, + preload_content: bool | None = True, + decode_content: bool | None = True, + redirect: bool | None = True, + retries: Retry | bool | int | None = None, + timeout: Timeout | float | int | None = 3, + json: typing.Any | None = None, +) -> BaseHTTPResponse: + """ + A convenience, top-level request method. It uses a module-global ``PoolManager`` instance. + Therefore, its side effects could be shared across dependencies relying on it. + To avoid side effects create a new ``PoolManager`` instance and use it instead. + The method does not accept low-level ``**urlopen_kw`` keyword arguments. + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param body: + Data to send in the request body, either :class:`str`, :class:`bytes`, + an iterable of :class:`str`/:class:`bytes`, or a file-like object. + + :param fields: + Data to encode and send in the request body. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. + + :param bool preload_content: + If True, the response's body will be preloaded into memory. + + :param bool decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param redirect: + If True, automatically handle redirects (status codes 301, 302, + 303, 307, 308). Each redirect counts as a retry. Disabling retries + will disable redirect, too. + + :param retries: + Configure the number of retries to allow before raising a + :class:`~urllib3.exceptions.MaxRetryError` exception. + + If ``None`` (default) will retry 3 times, see ``Retry.DEFAULT``. Pass a + :class:`~urllib3.util.retry.Retry` object for fine-grained control + over different types of retries. + Pass an integer number to retry connection errors that many times, + but no other types of errors. Pass zero to never retry. + + If ``False``, then retries are disabled and any exception is raised + immediately. Also, instead of raising a MaxRetryError on redirects, + the redirect response will be returned. + + :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. + + :param timeout: + If specified, overrides the default timeout for this one + request. It may be a float (in seconds) or an instance of + :class:`urllib3.util.Timeout`. + + :param json: + Data to encode and send as JSON with UTF-encoded in the request body. + The ``"Content-Type"`` header will be set to ``"application/json"`` + unless specified otherwise. 
+ """ + + return _DEFAULT_POOL.request( + method, + url, + body=body, + fields=fields, + headers=headers, + preload_content=preload_content, + decode_content=decode_content, + redirect=redirect, + retries=retries, + timeout=timeout, + json=json, + ) + + +if sys.platform == "emscripten": + from .contrib.emscripten import inject_into_urllib3 # noqa: 401 + + inject_into_urllib3() diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/_base_connection.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/_base_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..dc0f318c0b380926eed0f4209d395c79963eaf9e --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/_base_connection.py @@ -0,0 +1,165 @@ +from __future__ import annotations + +import typing + +from .util.connection import _TYPE_SOCKET_OPTIONS +from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT +from .util.url import Url + +_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str] + + +class ProxyConfig(typing.NamedTuple): + ssl_context: ssl.SSLContext | None + use_forwarding_for_https: bool + assert_hostname: None | str | typing.Literal[False] + assert_fingerprint: str | None + + +class _ResponseOptions(typing.NamedTuple): + # TODO: Remove this in favor of a better + # HTTP request/response lifecycle tracking. + request_method: str + request_url: str + preload_content: bool + decode_content: bool + enforce_content_length: bool + + +if typing.TYPE_CHECKING: + import ssl + from typing import Protocol + + from .response import BaseHTTPResponse + + class BaseHTTPConnection(Protocol): + default_port: typing.ClassVar[int] + default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS] + + host: str + port: int + timeout: None | ( + float + ) # Instance doesn't store _DEFAULT_TIMEOUT, must be resolved. + blocksize: int + source_address: tuple[str, int] | None + socket_options: _TYPE_SOCKET_OPTIONS | None + + proxy: Url | None + proxy_config: ProxyConfig | None + + is_verified: bool + proxy_is_verified: bool | None + + def __init__( + self, + host: str, + port: int | None = None, + *, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + source_address: tuple[str, int] | None = None, + blocksize: int = 8192, + socket_options: _TYPE_SOCKET_OPTIONS | None = ..., + proxy: Url | None = None, + proxy_config: ProxyConfig | None = None, + ) -> None: ... + + def set_tunnel( + self, + host: str, + port: int | None = None, + headers: typing.Mapping[str, str] | None = None, + scheme: str = "http", + ) -> None: ... + + def connect(self) -> None: ... + + def request( + self, + method: str, + url: str, + body: _TYPE_BODY | None = None, + headers: typing.Mapping[str, str] | None = None, + # We know *at least* botocore is depending on the order of the + # first 3 parameters so to be safe we only mark the later ones + # as keyword-only to ensure we have space to extend. + *, + chunked: bool = False, + preload_content: bool = True, + decode_content: bool = True, + enforce_content_length: bool = True, + ) -> None: ... + + def getresponse(self) -> BaseHTTPResponse: ... + + def close(self) -> None: ... + + @property + def is_closed(self) -> bool: + """Whether the connection either is brand new or has been previously closed. + If this property is True then both ``is_connected`` and ``has_connected_to_proxy`` + properties must be False. 
+ """ + + @property + def is_connected(self) -> bool: + """Whether the connection is actively connected to any origin (proxy or target)""" + + @property + def has_connected_to_proxy(self) -> bool: + """Whether the connection has successfully connected to its proxy. + This returns False if no proxy is in use. Used to determine whether + errors are coming from the proxy layer or from tunnelling to the target origin. + """ + + class BaseHTTPSConnection(BaseHTTPConnection, Protocol): + default_port: typing.ClassVar[int] + default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS] + + # Certificate verification methods + cert_reqs: int | str | None + assert_hostname: None | str | typing.Literal[False] + assert_fingerprint: str | None + ssl_context: ssl.SSLContext | None + + # Trusted CAs + ca_certs: str | None + ca_cert_dir: str | None + ca_cert_data: None | str | bytes + + # TLS version + ssl_minimum_version: int | None + ssl_maximum_version: int | None + ssl_version: int | str | None # Deprecated + + # Client certificates + cert_file: str | None + key_file: str | None + key_password: str | None + + def __init__( + self, + host: str, + port: int | None = None, + *, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + source_address: tuple[str, int] | None = None, + blocksize: int = 16384, + socket_options: _TYPE_SOCKET_OPTIONS | None = ..., + proxy: Url | None = None, + proxy_config: ProxyConfig | None = None, + cert_reqs: int | str | None = None, + assert_hostname: None | str | typing.Literal[False] = None, + assert_fingerprint: str | None = None, + server_hostname: str | None = None, + ssl_context: ssl.SSLContext | None = None, + ca_certs: str | None = None, + ca_cert_dir: str | None = None, + ca_cert_data: None | str | bytes = None, + ssl_minimum_version: int | None = None, + ssl_maximum_version: int | None = None, + ssl_version: int | str | None = None, # Deprecated + cert_file: str | None = None, + key_file: str | None = None, + key_password: str | None = None, + ) -> None: ... diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/_collections.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/_collections.py new file mode 100644 index 0000000000000000000000000000000000000000..1b6c1364213b990c716c39d7cc6427cb319cb948 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/_collections.py @@ -0,0 +1,479 @@ +from __future__ import annotations + +import typing +from collections import OrderedDict +from enum import Enum, auto +from threading import RLock + +if typing.TYPE_CHECKING: + # We can only import Protocol if TYPE_CHECKING because it's a development + # dependency, and is not available at runtime. + from typing import Protocol + + from typing_extensions import Self + + class HasGettableStringKeys(Protocol): + def keys(self) -> typing.Iterator[str]: ... + + def __getitem__(self, key: str) -> str: ... 
+
+
+__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
+
+
+# Key type
+_KT = typing.TypeVar("_KT")
+# Value type
+_VT = typing.TypeVar("_VT")
+# Default type
+_DT = typing.TypeVar("_DT")
+
+ValidHTTPHeaderSource = typing.Union[
+    "HTTPHeaderDict",
+    typing.Mapping[str, str],
+    typing.Iterable[tuple[str, str]],
+    "HasGettableStringKeys",
+]
+
+
+class _Sentinel(Enum):
+    not_passed = auto()
+
+
+def ensure_can_construct_http_header_dict(
+    potential: object,
+) -> ValidHTTPHeaderSource | None:
+    if isinstance(potential, HTTPHeaderDict):
+        return potential
+    elif isinstance(potential, typing.Mapping):
+        # Full runtime checking of the contents of a Mapping is expensive, so for the
+        # purposes of typechecking, we assume that any Mapping is the right shape.
+        return typing.cast(typing.Mapping[str, str], potential)
+    elif isinstance(potential, typing.Iterable):
+        # Similarly to Mapping, full runtime checking of the contents of an Iterable is
+        # expensive, so for the purposes of typechecking, we assume that any Iterable
+        # is the right shape.
+        return typing.cast(typing.Iterable[tuple[str, str]], potential)
+    elif hasattr(potential, "keys") and hasattr(potential, "__getitem__"):
+        return typing.cast("HasGettableStringKeys", potential)
+    else:
+        return None
+
+
+class RecentlyUsedContainer(typing.Generic[_KT, _VT], typing.MutableMapping[_KT, _VT]):
+    """
+    Provides a thread-safe dict-like container which maintains up to
+    ``maxsize`` keys while throwing away the least-recently-used keys beyond
+    ``maxsize``.
+
+    :param maxsize:
+        Maximum number of recent elements to retain.
+
+    :param dispose_func:
+        Every time an item is evicted from the container,
+        ``dispose_func(value)`` is called.
+    """
+
+    _container: typing.OrderedDict[_KT, _VT]
+    _maxsize: int
+    dispose_func: typing.Callable[[_VT], None] | None
+    lock: RLock
+
+    def __init__(
+        self,
+        maxsize: int = 10,
+        dispose_func: typing.Callable[[_VT], None] | None = None,
+    ) -> None:
+        super().__init__()
+        self._maxsize = maxsize
+        self.dispose_func = dispose_func
+        self._container = OrderedDict()
+        self.lock = RLock()
+
+    def __getitem__(self, key: _KT) -> _VT:
+        # Re-insert the item, moving it to the end of the eviction line.
+        with self.lock:
+            item = self._container.pop(key)
+            self._container[key] = item
+            return item
+
+    def __setitem__(self, key: _KT, value: _VT) -> None:
+        evicted_item = None
+        with self.lock:
+            # Possibly evict the existing value of 'key'
+            try:
+                # If the key exists, we'll overwrite it, which won't change the
+                # size of the pool. Because accessing a key should move it to
+                # the end of the eviction line, we pop it out first.
+                evicted_item = key, self._container.pop(key)
+                self._container[key] = value
+            except KeyError:
+                # When the key does not exist, we insert the value first so that
+                # evicting works in all cases, including when self._maxsize is 0
+                self._container[key] = value
+                if len(self._container) > self._maxsize:
+                    # If we didn't evict an existing value, and we've hit our maximum
+                    # size, then we have to evict the least recently used item from
+                    # the beginning of the container.
+                    evicted_item = self._container.popitem(last=False)
+
+        # After releasing the lock on the pool, dispose of any evicted value.
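+        # Running dispose_func outside of the critical section keeps the lock
+        # hold time short and ensures a user-supplied callback can never block
+        # other threads that are using the container.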
+ if evicted_item is not None and self.dispose_func: + _, evicted_value = evicted_item + self.dispose_func(evicted_value) + + def __delitem__(self, key: _KT) -> None: + with self.lock: + value = self._container.pop(key) + + if self.dispose_func: + self.dispose_func(value) + + def __len__(self) -> int: + with self.lock: + return len(self._container) + + def __iter__(self) -> typing.NoReturn: + raise NotImplementedError( + "Iteration over this class is unlikely to be threadsafe." + ) + + def clear(self) -> None: + with self.lock: + # Copy pointers to all values, then wipe the mapping + values = list(self._container.values()) + self._container.clear() + + if self.dispose_func: + for value in values: + self.dispose_func(value) + + def keys(self) -> set[_KT]: # type: ignore[override] + with self.lock: + return set(self._container.keys()) + + +class HTTPHeaderDictItemView(set[tuple[str, str]]): + """ + HTTPHeaderDict is unusual for a Mapping[str, str] in that it has two modes of + address. + + If we directly try to get an item with a particular name, we will get a string + back that is the concatenated version of all the values: + + >>> d['X-Header-Name'] + 'Value1, Value2, Value3' + + However, if we iterate over an HTTPHeaderDict's items, we will optionally combine + these values based on whether combine=True was called when building up the dictionary + + >>> d = HTTPHeaderDict({"A": "1", "B": "foo"}) + >>> d.add("A", "2", combine=True) + >>> d.add("B", "bar") + >>> list(d.items()) + [ + ('A', '1, 2'), + ('B', 'foo'), + ('B', 'bar'), + ] + + This class conforms to the interface required by the MutableMapping ABC while + also giving us the nonstandard iteration behavior we want; items with duplicate + keys, ordered by time of first insertion. + """ + + _headers: HTTPHeaderDict + + def __init__(self, headers: HTTPHeaderDict) -> None: + self._headers = headers + + def __len__(self) -> int: + return len(list(self._headers.iteritems())) + + def __iter__(self) -> typing.Iterator[tuple[str, str]]: + return self._headers.iteritems() + + def __contains__(self, item: object) -> bool: + if isinstance(item, tuple) and len(item) == 2: + passed_key, passed_val = item + if isinstance(passed_key, str) and isinstance(passed_val, str): + return self._headers._has_value_for_header(passed_key, passed_val) + return False + + +class HTTPHeaderDict(typing.MutableMapping[str, str]): + """ + :param headers: + An iterable of field-value pairs. Must not contain multiple field names + when compared case-insensitively. + + :param kwargs: + Additional field-value pairs to pass in to ``dict.update``. + + A ``dict`` like container for storing HTTP Headers. + + Field names are stored and compared case-insensitively in compliance with + RFC 7230. Iteration provides the first case-sensitive key seen for each + case-insensitive pair. + + Using ``__setitem__`` syntax overwrites fields that compare equal + case-insensitively in order to maintain ``dict``'s api. For fields that + compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` + in a loop. + + If multiple fields that are equal case-insensitively are passed to the + constructor or ``.update``, the behavior is undefined and some will be + lost. 
+ + >>> headers = HTTPHeaderDict() + >>> headers.add('Set-Cookie', 'foo=bar') + >>> headers.add('set-cookie', 'baz=quxx') + >>> headers['content-length'] = '7' + >>> headers['SET-cookie'] + 'foo=bar, baz=quxx' + >>> headers['Content-Length'] + '7' + """ + + _container: typing.MutableMapping[str, list[str]] + + def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str): + super().__init__() + self._container = {} # 'dict' is insert-ordered + if headers is not None: + if isinstance(headers, HTTPHeaderDict): + self._copy_from(headers) + else: + self.extend(headers) + if kwargs: + self.extend(kwargs) + + def __setitem__(self, key: str, val: str) -> None: + # avoid a bytes/str comparison by decoding before httplib + if isinstance(key, bytes): + key = key.decode("latin-1") + self._container[key.lower()] = [key, val] + + def __getitem__(self, key: str) -> str: + val = self._container[key.lower()] + return ", ".join(val[1:]) + + def __delitem__(self, key: str) -> None: + del self._container[key.lower()] + + def __contains__(self, key: object) -> bool: + if isinstance(key, str): + return key.lower() in self._container + return False + + def setdefault(self, key: str, default: str = "") -> str: + return super().setdefault(key, default) + + def __eq__(self, other: object) -> bool: + maybe_constructable = ensure_can_construct_http_header_dict(other) + if maybe_constructable is None: + return False + else: + other_as_http_header_dict = type(self)(maybe_constructable) + + return {k.lower(): v for k, v in self.itermerged()} == { + k.lower(): v for k, v in other_as_http_header_dict.itermerged() + } + + def __ne__(self, other: object) -> bool: + return not self.__eq__(other) + + def __len__(self) -> int: + return len(self._container) + + def __iter__(self) -> typing.Iterator[str]: + # Only provide the originally cased names + for vals in self._container.values(): + yield vals[0] + + def discard(self, key: str) -> None: + try: + del self[key] + except KeyError: + pass + + def add(self, key: str, val: str, *, combine: bool = False) -> None: + """Adds a (name, value) pair, doesn't overwrite the value if it already + exists. + + If this is called with combine=True, instead of adding a new header value + as a distinct item during iteration, this will instead append the value to + any existing header value with a comma. If no existing header value exists + for the key, then the value will simply be added, ignoring the combine parameter. + + >>> headers = HTTPHeaderDict(foo='bar') + >>> headers.add('Foo', 'baz') + >>> headers['foo'] + 'bar, baz' + >>> list(headers.items()) + [('foo', 'bar'), ('foo', 'baz')] + >>> headers.add('foo', 'quz', combine=True) + >>> list(headers.items()) + [('foo', 'bar, baz, quz')] + """ + # avoid a bytes/str comparison by decoding before httplib + if isinstance(key, bytes): + key = key.decode("latin-1") + key_lower = key.lower() + new_vals = [key, val] + # Keep the common case aka no item present as fast as possible + vals = self._container.setdefault(key_lower, new_vals) + if new_vals is not vals: + # if there are values here, then there is at least the initial + # key/value pair + assert len(vals) >= 2 + if combine: + vals[-1] = vals[-1] + ", " + val + else: + vals.append(val) + + def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None: + """Generic import function for any type of header-like object. 
+ Adapted version of MutableMapping.update in order to insert items + with self.add instead of self.__setitem__ + """ + if len(args) > 1: + raise TypeError( + f"extend() takes at most 1 positional arguments ({len(args)} given)" + ) + other = args[0] if len(args) >= 1 else () + + if isinstance(other, HTTPHeaderDict): + for key, val in other.iteritems(): + self.add(key, val) + elif isinstance(other, typing.Mapping): + for key, val in other.items(): + self.add(key, val) + elif isinstance(other, typing.Iterable): + other = typing.cast(typing.Iterable[tuple[str, str]], other) + for key, value in other: + self.add(key, value) + elif hasattr(other, "keys") and hasattr(other, "__getitem__"): + # THIS IS NOT A TYPESAFE BRANCH + # In this branch, the object has a `keys` attr but is not a Mapping or any of + # the other types indicated in the method signature. We do some stuff with + # it as though it partially implements the Mapping interface, but we're not + # doing that stuff safely AT ALL. + for key in other.keys(): + self.add(key, other[key]) + + for key, value in kwargs.items(): + self.add(key, value) + + @typing.overload + def getlist(self, key: str) -> list[str]: ... + + @typing.overload + def getlist(self, key: str, default: _DT) -> list[str] | _DT: ... + + def getlist( + self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed + ) -> list[str] | _DT: + """Returns a list of all the values for the named field. Returns an + empty list if the key doesn't exist.""" + try: + vals = self._container[key.lower()] + except KeyError: + if default is _Sentinel.not_passed: + # _DT is unbound; empty list is instance of List[str] + return [] + # _DT is bound; default is instance of _DT + return default + else: + # _DT may or may not be bound; vals[1:] is instance of List[str], which + # meets our external interface requirement of `Union[List[str], _DT]`. + return vals[1:] + + def _prepare_for_method_change(self) -> Self: + """ + Remove content-specific header fields before changing the request + method to GET or HEAD according to RFC 9110, Section 15.4. 
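+
+        A quick illustration (hypothetical header values):
+
+        >>> h = HTTPHeaderDict({"Content-Type": "text/plain", "Accept": "*/*"})
+        >>> _ = h._prepare_for_method_change()
+        >>> "Content-Type" in h, "Accept" in h
+        (False, True)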
+ """ + content_specific_headers = [ + "Content-Encoding", + "Content-Language", + "Content-Location", + "Content-Type", + "Content-Length", + "Digest", + "Last-Modified", + ] + for header in content_specific_headers: + self.discard(header) + return self + + # Backwards compatibility for httplib + getheaders = getlist + getallmatchingheaders = getlist + iget = getlist + + # Backwards compatibility for http.cookiejar + get_all = getlist + + def __repr__(self) -> str: + return f"{type(self).__name__}({dict(self.itermerged())})" + + def _copy_from(self, other: HTTPHeaderDict) -> None: + for key in other: + val = other.getlist(key) + self._container[key.lower()] = [key, *val] + + def copy(self) -> Self: + clone = type(self)() + clone._copy_from(self) + return clone + + def iteritems(self) -> typing.Iterator[tuple[str, str]]: + """Iterate over all header lines, including duplicate ones.""" + for key in self: + vals = self._container[key.lower()] + for val in vals[1:]: + yield vals[0], val + + def itermerged(self) -> typing.Iterator[tuple[str, str]]: + """Iterate over all headers, merging duplicate ones together.""" + for key in self: + val = self._container[key.lower()] + yield val[0], ", ".join(val[1:]) + + def items(self) -> HTTPHeaderDictItemView: # type: ignore[override] + return HTTPHeaderDictItemView(self) + + def _has_value_for_header(self, header_name: str, potential_value: str) -> bool: + if header_name in self: + return potential_value in self._container[header_name.lower()][1:] + return False + + def __ior__(self, other: object) -> HTTPHeaderDict: + # Supports extending a header dict in-place using operator |= + # combining items with add instead of __setitem__ + maybe_constructable = ensure_can_construct_http_header_dict(other) + if maybe_constructable is None: + return NotImplemented + self.extend(maybe_constructable) + return self + + def __or__(self, other: object) -> Self: + # Supports merging header dicts using operator | + # combining items with add instead of __setitem__ + maybe_constructable = ensure_can_construct_http_header_dict(other) + if maybe_constructable is None: + return NotImplemented + result = self.copy() + result.extend(maybe_constructable) + return result + + def __ror__(self, other: object) -> Self: + # Supports merging header dicts using operator | when other is on left side + # combining items with add instead of __setitem__ + maybe_constructable = ensure_can_construct_http_header_dict(other) + if maybe_constructable is None: + return NotImplemented + result = type(self)(maybe_constructable) + result.extend(self) + return result diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/_request_methods.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/_request_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..297c271bf401c1cb48c6225f8822e78f58c3ca56 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/_request_methods.py @@ -0,0 +1,278 @@ +from __future__ import annotations + +import json as _json +import typing +from urllib.parse import urlencode + +from ._base_connection import _TYPE_BODY +from ._collections import HTTPHeaderDict +from .filepost import _TYPE_FIELDS, encode_multipart_formdata +from .response import BaseHTTPResponse + +__all__ = ["RequestMethods"] + +_TYPE_ENCODE_URL_FIELDS = typing.Union[ + typing.Sequence[tuple[str, typing.Union[str, bytes]]], + typing.Mapping[str, typing.Union[str, bytes]], +] + + +class RequestMethods: + """ + Convenience mixin for classes who implement a 
:meth:`urlopen` method, such + as :class:`urllib3.HTTPConnectionPool` and + :class:`urllib3.PoolManager`. + + Provides behavior for making common types of HTTP request methods and + decides which type of request field encoding to use. + + Specifically, + + :meth:`.request_encode_url` is for sending requests whose fields are + encoded in the URL (such as GET, HEAD, DELETE). + + :meth:`.request_encode_body` is for sending requests whose fields are + encoded in the *body* of the request using multipart or www-form-urlencoded + (such as for POST, PUT, PATCH). + + :meth:`.request` is for making any kind of request, it will look up the + appropriate encoding format and use one of the above two methods to make + the request. + + Initializer parameters: + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + """ + + _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"} + + def __init__(self, headers: typing.Mapping[str, str] | None = None) -> None: + self.headers = headers or {} + + def urlopen( + self, + method: str, + url: str, + body: _TYPE_BODY | None = None, + headers: typing.Mapping[str, str] | None = None, + encode_multipart: bool = True, + multipart_boundary: str | None = None, + **kw: typing.Any, + ) -> BaseHTTPResponse: # Abstract + raise NotImplementedError( + "Classes extending RequestMethods must implement " + "their own ``urlopen`` method." + ) + + def request( + self, + method: str, + url: str, + body: _TYPE_BODY | None = None, + fields: _TYPE_FIELDS | None = None, + headers: typing.Mapping[str, str] | None = None, + json: typing.Any | None = None, + **urlopen_kw: typing.Any, + ) -> BaseHTTPResponse: + """ + Make a request using :meth:`urlopen` with the appropriate encoding of + ``fields`` based on the ``method`` used. + + This is a convenience method that requires the least amount of manual + effort. It can be used in most situations, while still having the + option to drop down to more specific methods when necessary, such as + :meth:`request_encode_url`, :meth:`request_encode_body`, + or even the lowest level :meth:`urlopen`. + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param body: + Data to send in the request body, either :class:`str`, :class:`bytes`, + an iterable of :class:`str`/:class:`bytes`, or a file-like object. + + :param fields: + Data to encode and send in the URL or request body, depending on ``method``. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + + :param json: + Data to encode and send as JSON with UTF-encoded in the request body. + The ``"Content-Type"`` header will be set to ``"application/json"`` + unless specified otherwise. 
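+
+        Example (sketch; any ``RequestMethods`` subclass such as
+        :class:`~urllib3.PoolManager` behaves the same way)::
+
+            import urllib3
+
+            http = urllib3.PoolManager()
+            r = http.request("POST", "https://example.com/api", json={"k": "v"})
+            print(r.status)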
+ """ + method = method.upper() + + if json is not None and body is not None: + raise TypeError( + "request got values for both 'body' and 'json' parameters which are mutually exclusive" + ) + + if json is not None: + if headers is None: + headers = self.headers + + if not ("content-type" in map(str.lower, headers.keys())): + headers = HTTPHeaderDict(headers) + headers["Content-Type"] = "application/json" + + body = _json.dumps(json, separators=(",", ":"), ensure_ascii=False).encode( + "utf-8" + ) + + if body is not None: + urlopen_kw["body"] = body + + if method in self._encode_url_methods: + return self.request_encode_url( + method, + url, + fields=fields, # type: ignore[arg-type] + headers=headers, + **urlopen_kw, + ) + else: + return self.request_encode_body( + method, url, fields=fields, headers=headers, **urlopen_kw + ) + + def request_encode_url( + self, + method: str, + url: str, + fields: _TYPE_ENCODE_URL_FIELDS | None = None, + headers: typing.Mapping[str, str] | None = None, + **urlopen_kw: str, + ) -> BaseHTTPResponse: + """ + Make a request using :meth:`urlopen` with the ``fields`` encoded in + the url. This is useful for request methods like GET, HEAD, DELETE, etc. + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param fields: + Data to encode and send in the URL. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + """ + if headers is None: + headers = self.headers + + extra_kw: dict[str, typing.Any] = {"headers": headers} + extra_kw.update(urlopen_kw) + + if fields: + url += "?" + urlencode(fields) + + return self.urlopen(method, url, **extra_kw) + + def request_encode_body( + self, + method: str, + url: str, + fields: _TYPE_FIELDS | None = None, + headers: typing.Mapping[str, str] | None = None, + encode_multipart: bool = True, + multipart_boundary: str | None = None, + **urlopen_kw: str, + ) -> BaseHTTPResponse: + """ + Make a request using :meth:`urlopen` with the ``fields`` encoded in + the body. This is useful for request methods like POST, PUT, PATCH, etc. + + When ``encode_multipart=True`` (default), then + :func:`urllib3.encode_multipart_formdata` is used to encode + the payload with the appropriate content type. Otherwise + :func:`urllib.parse.urlencode` is used with the + 'application/x-www-form-urlencoded' content type. + + Multipart encoding must be used when posting files, and it's reasonably + safe to use it in other times too. However, it may break request + signing, such as with OAuth. + + Supports an optional ``fields`` parameter of key/value strings AND + key/filetuple. A filetuple is a (filename, data, MIME type) tuple where + the MIME type is optional. For example:: + + fields = { + 'foo': 'bar', + 'fakefile': ('foofile.txt', 'contents of foofile'), + 'realfile': ('barfile.txt', open('realfile').read()), + 'typedfile': ('bazfile.bin', open('bazfile').read(), + 'image/jpeg'), + 'nonamefile': 'contents of nonamefile field', + } + + When uploading a file, providing a filename (the first parameter of the + tuple) is optional but recommended to best mimic behavior of browsers. + + Note that if ``headers`` are supplied, the 'Content-Type' header will + be overwritten because it depends on the dynamic random boundary string + which is used to compose the body of the request. 
The random boundary + string can be explicitly set with the ``multipart_boundary`` parameter. + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param fields: + Data to encode and send in the request body. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + + :param encode_multipart: + If True, encode the ``fields`` using the multipart/form-data MIME + format. + + :param multipart_boundary: + If not specified, then a random boundary will be generated using + :func:`urllib3.filepost.choose_boundary`. + """ + if headers is None: + headers = self.headers + + extra_kw: dict[str, typing.Any] = {"headers": HTTPHeaderDict(headers)} + body: bytes | str + + if fields: + if "body" in urlopen_kw: + raise TypeError( + "request got values for both 'fields' and 'body', can only specify one." + ) + + if encode_multipart: + body, content_type = encode_multipart_formdata( + fields, boundary=multipart_boundary + ) + else: + body, content_type = ( + urlencode(fields), # type: ignore[arg-type] + "application/x-www-form-urlencoded", + ) + + extra_kw["body"] = body + extra_kw["headers"].setdefault("Content-Type", content_type) + + extra_kw.update(urlopen_kw) + + return self.urlopen(method, url, **extra_kw) diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/_version.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..af6144bb25db9215fcc4687203d19dba9106842d --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/_version.py @@ -0,0 +1,16 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple, Union + VERSION_TUPLE = Tuple[Union[int, str], ...] +else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '2.3.0' +__version_tuple__ = version_tuple = (2, 3, 0) diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/connection.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..591ac407bcf655610ca59e4aab4f87a9e32022bf --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/connection.py @@ -0,0 +1,1044 @@ +from __future__ import annotations + +import datetime +import http.client +import logging +import os +import re +import socket +import sys +import threading +import typing +import warnings +from http.client import HTTPConnection as _HTTPConnection +from http.client import HTTPException as HTTPException # noqa: F401 +from http.client import ResponseNotReady +from socket import timeout as SocketTimeout + +if typing.TYPE_CHECKING: + from .response import HTTPResponse + from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT + from .util.ssltransport import SSLTransport + +from ._collections import HTTPHeaderDict +from .http2 import probe as http2_probe +from .util.response import assert_header_parsing +from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout +from .util.util import to_str +from .util.wait import wait_for_read + +try: # Compiled with SSL? 
+ import ssl + + BaseSSLError = ssl.SSLError +except (ImportError, AttributeError): + ssl = None # type: ignore[assignment] + + class BaseSSLError(BaseException): # type: ignore[no-redef] + pass + + +from ._base_connection import _TYPE_BODY +from ._base_connection import ProxyConfig as ProxyConfig +from ._base_connection import _ResponseOptions as _ResponseOptions +from ._version import __version__ +from .exceptions import ( + ConnectTimeoutError, + HeaderParsingError, + NameResolutionError, + NewConnectionError, + ProxyError, + SystemTimeWarning, +) +from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_ +from .util.request import body_to_chunks +from .util.ssl_ import assert_fingerprint as _assert_fingerprint +from .util.ssl_ import ( + create_urllib3_context, + is_ipaddress, + resolve_cert_reqs, + resolve_ssl_version, + ssl_wrap_socket, +) +from .util.ssl_match_hostname import CertificateError, match_hostname +from .util.url import Url + +# Not a no-op, we're adding this to the namespace so it can be imported. +ConnectionError = ConnectionError +BrokenPipeError = BrokenPipeError + + +log = logging.getLogger(__name__) + +port_by_scheme = {"http": 80, "https": 443} + +# When it comes time to update this value as a part of regular maintenance +# (ie test_recent_date is failing) update it to ~6 months before the current date. +RECENT_DATE = datetime.date(2023, 6, 1) + +_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") + + +class HTTPConnection(_HTTPConnection): + """ + Based on :class:`http.client.HTTPConnection` but provides an extra constructor + backwards-compatibility layer between older and newer Pythons. + + Additional keyword parameters are used to configure attributes of the connection. + Accepted parameters include: + + - ``source_address``: Set the source address for the current connection. + - ``socket_options``: Set specific options on the underlying socket. If not specified, then + defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling + Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. + + For example, if you wish to enable TCP Keep Alive in addition to the defaults, + you might pass: + + .. code-block:: python + + HTTPConnection.default_socket_options + [ + (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), + ] + + Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). + """ + + default_port: typing.ClassVar[int] = port_by_scheme["http"] # type: ignore[misc] + + #: Disable Nagle's algorithm by default. + #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` + default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [ + (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + ] + + #: Whether this connection verifies the host's certificate. + is_verified: bool = False + + #: Whether this proxy connection verified the proxy host's certificate. + # If no proxy is currently connected to the value will be ``None``. 
+ proxy_is_verified: bool | None = None + + blocksize: int + source_address: tuple[str, int] | None + socket_options: connection._TYPE_SOCKET_OPTIONS | None + + _has_connected_to_proxy: bool + _response_options: _ResponseOptions | None + _tunnel_host: str | None + _tunnel_port: int | None + _tunnel_scheme: str | None + + def __init__( + self, + host: str, + port: int | None = None, + *, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + source_address: tuple[str, int] | None = None, + blocksize: int = 16384, + socket_options: None | ( + connection._TYPE_SOCKET_OPTIONS + ) = default_socket_options, + proxy: Url | None = None, + proxy_config: ProxyConfig | None = None, + ) -> None: + super().__init__( + host=host, + port=port, + timeout=Timeout.resolve_default_timeout(timeout), + source_address=source_address, + blocksize=blocksize, + ) + self.socket_options = socket_options + self.proxy = proxy + self.proxy_config = proxy_config + + self._has_connected_to_proxy = False + self._response_options = None + self._tunnel_host: str | None = None + self._tunnel_port: int | None = None + self._tunnel_scheme: str | None = None + + @property + def host(self) -> str: + """ + Getter method to remove any trailing dots that indicate the hostname is an FQDN. + + In general, SSL certificates don't include the trailing dot indicating a + fully-qualified domain name, and thus, they don't validate properly when + checked against a domain name that includes the dot. In addition, some + servers may not expect to receive the trailing dot when provided. + + However, the hostname with trailing dot is critical to DNS resolution; doing a + lookup with the trailing dot will properly only resolve the appropriate FQDN, + whereas a lookup without a trailing dot will search the system's search domain + list. Thus, it's important to keep the original host around for use only in + those cases where it's appropriate (i.e., when doing DNS lookup to establish the + actual TCP connection across which we're going to send HTTP requests). + """ + return self._dns_host.rstrip(".") + + @host.setter + def host(self, value: str) -> None: + """ + Setter for the `host` property. + + We assume that only urllib3 uses the _dns_host attribute; httplib itself + only uses `host`, and it seems reasonable that other libraries follow suit. + """ + self._dns_host = value + + def _new_conn(self) -> socket.socket: + """Establish a socket connection and set nodelay settings on it. + + :return: New socket connection. + """ + try: + sock = connection.create_connection( + (self._dns_host, self.port), + self.timeout, + source_address=self.source_address, + socket_options=self.socket_options, + ) + except socket.gaierror as e: + raise NameResolutionError(self.host, self, e) from e + except SocketTimeout as e: + raise ConnectTimeoutError( + self, + f"Connection to {self.host} timed out. 
(connect timeout={self.timeout})", + ) from e + + except OSError as e: + raise NewConnectionError( + self, f"Failed to establish a new connection: {e}" + ) from e + + sys.audit("http.client.connect", self, self.host, self.port) + + return sock + + def set_tunnel( + self, + host: str, + port: int | None = None, + headers: typing.Mapping[str, str] | None = None, + scheme: str = "http", + ) -> None: + if scheme not in ("http", "https"): + raise ValueError( + f"Invalid proxy scheme for tunneling: {scheme!r}, must be either 'http' or 'https'" + ) + super().set_tunnel(host, port=port, headers=headers) + self._tunnel_scheme = scheme + + if sys.version_info < (3, 11, 4): + + def _tunnel(self) -> None: + _MAXLINE = http.client._MAXLINE # type: ignore[attr-defined] + connect = b"CONNECT %s:%d HTTP/1.0\r\n" % ( # type: ignore[str-format] + self._tunnel_host.encode("ascii"), # type: ignore[union-attr] + self._tunnel_port, + ) + headers = [connect] + for header, value in self._tunnel_headers.items(): # type: ignore[attr-defined] + headers.append(f"{header}: {value}\r\n".encode("latin-1")) + headers.append(b"\r\n") + # Making a single send() call instead of one per line encourages + # the host OS to use a more optimal packet size instead of + # potentially emitting a series of small packets. + self.send(b"".join(headers)) + del headers + + response = self.response_class(self.sock, method=self._method) # type: ignore[attr-defined] + try: + (version, code, message) = response._read_status() # type: ignore[attr-defined] + + if code != http.HTTPStatus.OK: + self.close() + raise OSError(f"Tunnel connection failed: {code} {message.strip()}") + while True: + line = response.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise http.client.LineTooLong("header line") + if not line: + # for sites which EOF without sending a trailer + break + if line in (b"\r\n", b"\n", b""): + break + + if self.debuglevel > 0: + print("header:", line.decode()) + finally: + response.close() + + def connect(self) -> None: + self.sock = self._new_conn() + if self._tunnel_host: + # If we're tunneling it means we're connected to our proxy. + self._has_connected_to_proxy = True + + # TODO: Fix tunnel so it doesn't depend on self.sock state. + self._tunnel() + + # If there's a proxy to be connected to we are fully connected. + # This is set twice (once above and here) due to forwarding proxies + # not using tunnelling. + self._has_connected_to_proxy = bool(self.proxy) + + if self._has_connected_to_proxy: + self.proxy_is_verified = False + + @property + def is_closed(self) -> bool: + return self.sock is None + + @property + def is_connected(self) -> bool: + if self.sock is None: + return False + return not wait_for_read(self.sock, timeout=0.0) + + @property + def has_connected_to_proxy(self) -> bool: + return self._has_connected_to_proxy + + @property + def proxy_is_forwarding(self) -> bool: + """ + Return True if a forwarding proxy is configured, else return False + """ + return bool(self.proxy) and self._tunnel_host is None + + @property + def proxy_is_tunneling(self) -> bool: + """ + Return True if a tunneling proxy is configured, else return False + """ + return self._tunnel_host is not None + + def close(self) -> None: + try: + super().close() + finally: + # Reset all stateful properties so connection + # can be re-used without leaking prior configs. 
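+            # (is_verified, the tunnel settings, and the proxy flags below
+            # would otherwise leak into the next request on this object.)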
+ self.sock = None + self.is_verified = False + self.proxy_is_verified = None + self._has_connected_to_proxy = False + self._response_options = None + self._tunnel_host = None + self._tunnel_port = None + self._tunnel_scheme = None + + def putrequest( + self, + method: str, + url: str, + skip_host: bool = False, + skip_accept_encoding: bool = False, + ) -> None: + """""" + # Empty docstring because the indentation of CPython's implementation + # is broken but we don't want this method in our documentation. + match = _CONTAINS_CONTROL_CHAR_RE.search(method) + if match: + raise ValueError( + f"Method cannot contain non-token characters {method!r} (found at least {match.group()!r})" + ) + + return super().putrequest( + method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding + ) + + def putheader(self, header: str, *values: str) -> None: # type: ignore[override] + """""" + if not any(isinstance(v, str) and v == SKIP_HEADER for v in values): + super().putheader(header, *values) + elif to_str(header.lower()) not in SKIPPABLE_HEADERS: + skippable_headers = "', '".join( + [str.title(header) for header in sorted(SKIPPABLE_HEADERS)] + ) + raise ValueError( + f"urllib3.util.SKIP_HEADER only supports '{skippable_headers}'" + ) + + # `request` method's signature intentionally violates LSP. + # urllib3's API is different from `http.client.HTTPConnection` and the subclassing is only incidental. + def request( # type: ignore[override] + self, + method: str, + url: str, + body: _TYPE_BODY | None = None, + headers: typing.Mapping[str, str] | None = None, + *, + chunked: bool = False, + preload_content: bool = True, + decode_content: bool = True, + enforce_content_length: bool = True, + ) -> None: + # Update the inner socket's timeout value to send the request. + # This only triggers if the connection is re-used. + if self.sock is not None: + self.sock.settimeout(self.timeout) + + # Store these values to be fed into the HTTPResponse + # object later. TODO: Remove this in favor of a real + # HTTP lifecycle mechanism. + + # We have to store these before we call .request() + # because sometimes we can still salvage a response + # off the wire even if we aren't able to completely + # send the request body. + self._response_options = _ResponseOptions( + request_method=method, + request_url=url, + preload_content=preload_content, + decode_content=decode_content, + enforce_content_length=enforce_content_length, + ) + + if headers is None: + headers = {} + header_keys = frozenset(to_str(k.lower()) for k in headers) + skip_accept_encoding = "accept-encoding" in header_keys + skip_host = "host" in header_keys + self.putrequest( + method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host + ) + + # Transform the body into an iterable of sendall()-able chunks + # and detect if an explicit Content-Length is doable. + chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize) + chunks = chunks_and_cl.chunks + content_length = chunks_and_cl.content_length + + # When chunked is explicit set to 'True' we respect that. + if chunked: + if "transfer-encoding" not in header_keys: + self.putheader("Transfer-Encoding", "chunked") + else: + # Detect whether a framing mechanism is already in use. If so + # we respect that value, otherwise we pick chunked vs content-length + # depending on the type of 'body'. 
+            if "content-length" in header_keys:
+                chunked = False
+            elif "transfer-encoding" in header_keys:
+                chunked = True
+
+            # Otherwise we go off the recommendation of 'body_to_chunks()'.
+            else:
+                chunked = False
+                if content_length is None:
+                    if chunks is not None:
+                        chunked = True
+                        self.putheader("Transfer-Encoding", "chunked")
+                else:
+                    self.putheader("Content-Length", str(content_length))
+
+        # Now that framing headers are out of the way we send all the other headers.
+        if "user-agent" not in header_keys:
+            self.putheader("User-Agent", _get_default_user_agent())
+        for header, value in headers.items():
+            self.putheader(header, value)
+        self.endheaders()
+
+        # If we're given a body we start sending that in chunks.
+        if chunks is not None:
+            for chunk in chunks:
+                # Sending empty chunks isn't allowed for TE: chunked
+                # as it indicates the end of the body.
+                if not chunk:
+                    continue
+                if isinstance(chunk, str):
+                    chunk = chunk.encode("utf-8")
+                if chunked:
+                    self.send(b"%x\r\n%b\r\n" % (len(chunk), chunk))
+                else:
+                    self.send(chunk)
+
+        # Regardless of whether we have a body or not, if we're in
+        # chunked mode we want to send an explicit empty chunk.
+        if chunked:
+            self.send(b"0\r\n\r\n")
+
+    def request_chunked(
+        self,
+        method: str,
+        url: str,
+        body: _TYPE_BODY | None = None,
+        headers: typing.Mapping[str, str] | None = None,
+    ) -> None:
+        """
+        Alternative to the common request method, which sends the
+        body with chunked encoding instead of as one block.
+        """
+        warnings.warn(
+            "HTTPConnection.request_chunked() is deprecated and will be removed "
+            "in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).",
+            category=DeprecationWarning,
+            stacklevel=2,
+        )
+        self.request(method, url, body=body, headers=headers, chunked=True)
+
+    def getresponse(  # type: ignore[override]
+        self,
+    ) -> HTTPResponse:
+        """
+        Get the response from the server.
+
+        If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable.
+
+        If a request has not been sent or if a previous response has not been handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed.
+        """
+        # Raise the same error as http.client.HTTPConnection
+        if self._response_options is None:
+            raise ResponseNotReady()
+
+        # Reset this attribute so it can be used again.
+        resp_options = self._response_options
+        self._response_options = None
+
+        # Since the connection's timeout value may have been updated
+        # we need to set the timeout on the socket.
+        self.sock.settimeout(self.timeout)
+
+        # This is needed here to avoid circular import errors
+        from .response import HTTPResponse
+
+        # Save a reference to the shutdown function before ownership is passed
+        # to httplib_response
+        # TODO should we implement it everywhere?
+ _shutdown = getattr(self.sock, "shutdown", None) + + # Get the response from http.client.HTTPConnection + httplib_response = super().getresponse() + + try: + assert_header_parsing(httplib_response.msg) + except (HeaderParsingError, TypeError) as hpe: + log.warning( + "Failed to parse headers (url=%s): %s", + _url_from_connection(self, resp_options.request_url), + hpe, + exc_info=True, + ) + + headers = HTTPHeaderDict(httplib_response.msg.items()) + + response = HTTPResponse( + body=httplib_response, + headers=headers, + status=httplib_response.status, + version=httplib_response.version, + version_string=getattr(self, "_http_vsn_str", "HTTP/?"), + reason=httplib_response.reason, + preload_content=resp_options.preload_content, + decode_content=resp_options.decode_content, + original_response=httplib_response, + enforce_content_length=resp_options.enforce_content_length, + request_method=resp_options.request_method, + request_url=resp_options.request_url, + sock_shutdown=_shutdown, + ) + return response + + +class HTTPSConnection(HTTPConnection): + """ + Many of the parameters to this constructor are passed to the underlying SSL + socket by means of :py:func:`urllib3.util.ssl_wrap_socket`. + """ + + default_port = port_by_scheme["https"] # type: ignore[misc] + + cert_reqs: int | str | None = None + ca_certs: str | None = None + ca_cert_dir: str | None = None + ca_cert_data: None | str | bytes = None + ssl_version: int | str | None = None + ssl_minimum_version: int | None = None + ssl_maximum_version: int | None = None + assert_fingerprint: str | None = None + _connect_callback: typing.Callable[..., None] | None = None + + def __init__( + self, + host: str, + port: int | None = None, + *, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + source_address: tuple[str, int] | None = None, + blocksize: int = 16384, + socket_options: None | ( + connection._TYPE_SOCKET_OPTIONS + ) = HTTPConnection.default_socket_options, + proxy: Url | None = None, + proxy_config: ProxyConfig | None = None, + cert_reqs: int | str | None = None, + assert_hostname: None | str | typing.Literal[False] = None, + assert_fingerprint: str | None = None, + server_hostname: str | None = None, + ssl_context: ssl.SSLContext | None = None, + ca_certs: str | None = None, + ca_cert_dir: str | None = None, + ca_cert_data: None | str | bytes = None, + ssl_minimum_version: int | None = None, + ssl_maximum_version: int | None = None, + ssl_version: int | str | None = None, # Deprecated + cert_file: str | None = None, + key_file: str | None = None, + key_password: str | None = None, + ) -> None: + super().__init__( + host, + port=port, + timeout=timeout, + source_address=source_address, + blocksize=blocksize, + socket_options=socket_options, + proxy=proxy, + proxy_config=proxy_config, + ) + + self.key_file = key_file + self.cert_file = cert_file + self.key_password = key_password + self.ssl_context = ssl_context + self.server_hostname = server_hostname + self.assert_hostname = assert_hostname + self.assert_fingerprint = assert_fingerprint + self.ssl_version = ssl_version + self.ssl_minimum_version = ssl_minimum_version + self.ssl_maximum_version = ssl_maximum_version + self.ca_certs = ca_certs and os.path.expanduser(ca_certs) + self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) + self.ca_cert_data = ca_cert_data + + # cert_reqs depends on ssl_context so calculate last. 
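+        # If neither an explicit cert_reqs nor an ssl_context was given,
+        # resolve_cert_reqs(None) below falls back to ssl.CERT_REQUIRED,
+        # so certificate verification stays on by default.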
+ if cert_reqs is None: + if self.ssl_context is not None: + cert_reqs = self.ssl_context.verify_mode + else: + cert_reqs = resolve_cert_reqs(None) + self.cert_reqs = cert_reqs + self._connect_callback = None + + def set_cert( + self, + key_file: str | None = None, + cert_file: str | None = None, + cert_reqs: int | str | None = None, + key_password: str | None = None, + ca_certs: str | None = None, + assert_hostname: None | str | typing.Literal[False] = None, + assert_fingerprint: str | None = None, + ca_cert_dir: str | None = None, + ca_cert_data: None | str | bytes = None, + ) -> None: + """ + This method should only be called once, before the connection is used. + """ + warnings.warn( + "HTTPSConnection.set_cert() is deprecated and will be removed " + "in urllib3 v2.1.0. Instead provide the parameters to the " + "HTTPSConnection constructor.", + category=DeprecationWarning, + stacklevel=2, + ) + + # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also + # have an SSLContext object in which case we'll use its verify_mode. + if cert_reqs is None: + if self.ssl_context is not None: + cert_reqs = self.ssl_context.verify_mode + else: + cert_reqs = resolve_cert_reqs(None) + + self.key_file = key_file + self.cert_file = cert_file + self.cert_reqs = cert_reqs + self.key_password = key_password + self.assert_hostname = assert_hostname + self.assert_fingerprint = assert_fingerprint + self.ca_certs = ca_certs and os.path.expanduser(ca_certs) + self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) + self.ca_cert_data = ca_cert_data + + def connect(self) -> None: + # Today we don't need to be doing this step before the /actual/ socket + # connection, however in the future we'll need to decide whether to + # create a new socket or re-use an existing "shared" socket as a part + # of the HTTP/2 handshake dance. + if self._tunnel_host is not None and self._tunnel_port is not None: + probe_http2_host = self._tunnel_host + probe_http2_port = self._tunnel_port + else: + probe_http2_host = self.host + probe_http2_port = self.port + + # Check if the target origin supports HTTP/2. + # If the value comes back as 'None' it means that the current thread + # is probing for HTTP/2 support. Otherwise, we're waiting for another + # probe to complete, or we get a value right away. + target_supports_http2: bool | None + if "h2" in ssl_.ALPN_PROTOCOLS: + target_supports_http2 = http2_probe.acquire_and_get( + host=probe_http2_host, port=probe_http2_port + ) + else: + # If HTTP/2 isn't going to be offered it doesn't matter if + # the target supports HTTP/2. Don't want to make a probe. + target_supports_http2 = False + + if self._connect_callback is not None: + self._connect_callback( + "before connect", + thread_id=threading.get_ident(), + target_supports_http2=target_supports_http2, + ) + + try: + sock: socket.socket | ssl.SSLSocket + self.sock = sock = self._new_conn() + server_hostname: str = self.host + tls_in_tls = False + + # Do we need to establish a tunnel? + if self.proxy_is_tunneling: + # We're tunneling to an HTTPS origin so need to do TLS-in-TLS. + if self._tunnel_scheme == "https": + # _connect_tls_proxy will verify and assign proxy_is_verified + self.sock = sock = self._connect_tls_proxy(self.host, sock) + tls_in_tls = True + elif self._tunnel_scheme == "http": + self.proxy_is_verified = False + + # If we're tunneling it means we're connected to our proxy. + self._has_connected_to_proxy = True + + self._tunnel() + # Override the host with the one we're requesting data from. 
+ server_hostname = typing.cast(str, self._tunnel_host) + + if self.server_hostname is not None: + server_hostname = self.server_hostname + + is_time_off = datetime.date.today() < RECENT_DATE + if is_time_off: + warnings.warn( + ( + f"System time is way off (before {RECENT_DATE}). This will probably " + "lead to SSL verification errors" + ), + SystemTimeWarning, + ) + + # Remove trailing '.' from fqdn hostnames to allow certificate validation + server_hostname_rm_dot = server_hostname.rstrip(".") + + sock_and_verified = _ssl_wrap_socket_and_match_hostname( + sock=sock, + cert_reqs=self.cert_reqs, + ssl_version=self.ssl_version, + ssl_minimum_version=self.ssl_minimum_version, + ssl_maximum_version=self.ssl_maximum_version, + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + ca_cert_data=self.ca_cert_data, + cert_file=self.cert_file, + key_file=self.key_file, + key_password=self.key_password, + server_hostname=server_hostname_rm_dot, + ssl_context=self.ssl_context, + tls_in_tls=tls_in_tls, + assert_hostname=self.assert_hostname, + assert_fingerprint=self.assert_fingerprint, + ) + self.sock = sock_and_verified.socket + + # If an error occurs during connection/handshake we may need to release + # our lock so another connection can probe the origin. + except BaseException: + if self._connect_callback is not None: + self._connect_callback( + "after connect failure", + thread_id=threading.get_ident(), + target_supports_http2=target_supports_http2, + ) + + if target_supports_http2 is None: + http2_probe.set_and_release( + host=probe_http2_host, port=probe_http2_port, supports_http2=None + ) + raise + + # If this connection doesn't know if the origin supports HTTP/2 + # we report back to the HTTP/2 probe our result. + if target_supports_http2 is None: + supports_http2 = sock_and_verified.socket.selected_alpn_protocol() == "h2" + http2_probe.set_and_release( + host=probe_http2_host, + port=probe_http2_port, + supports_http2=supports_http2, + ) + + # Forwarding proxies can never have a verified target since + # the proxy is the one doing the verification. Should instead + # use a CONNECT tunnel in order to verify the target. + # See: https://github.com/urllib3/urllib3/issues/3267. + if self.proxy_is_forwarding: + self.is_verified = False + else: + self.is_verified = sock_and_verified.is_verified + + # If there's a proxy to be connected to we are fully connected. + # This is set twice (once above and here) due to forwarding proxies + # not using tunnelling. + self._has_connected_to_proxy = bool(self.proxy) + + # Set `self.proxy_is_verified` unless it's already set while + # establishing a tunnel. + if self._has_connected_to_proxy and self.proxy_is_verified is None: + self.proxy_is_verified = sock_and_verified.is_verified + + def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket: + """ + Establish a TLS connection to the proxy using the provided SSL context. + """ + # `_connect_tls_proxy` is called when self._tunnel_host is truthy. 
+ proxy_config = typing.cast(ProxyConfig, self.proxy_config) + ssl_context = proxy_config.ssl_context + sock_and_verified = _ssl_wrap_socket_and_match_hostname( + sock, + cert_reqs=self.cert_reqs, + ssl_version=self.ssl_version, + ssl_minimum_version=self.ssl_minimum_version, + ssl_maximum_version=self.ssl_maximum_version, + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + ca_cert_data=self.ca_cert_data, + server_hostname=hostname, + ssl_context=ssl_context, + assert_hostname=proxy_config.assert_hostname, + assert_fingerprint=proxy_config.assert_fingerprint, + # Features that aren't implemented for proxies yet: + cert_file=None, + key_file=None, + key_password=None, + tls_in_tls=False, + ) + self.proxy_is_verified = sock_and_verified.is_verified + return sock_and_verified.socket # type: ignore[return-value] + + +class _WrappedAndVerifiedSocket(typing.NamedTuple): + """ + Wrapped socket and whether the connection is + verified after the TLS handshake + """ + + socket: ssl.SSLSocket | SSLTransport + is_verified: bool + + +def _ssl_wrap_socket_and_match_hostname( + sock: socket.socket, + *, + cert_reqs: None | str | int, + ssl_version: None | str | int, + ssl_minimum_version: int | None, + ssl_maximum_version: int | None, + cert_file: str | None, + key_file: str | None, + key_password: str | None, + ca_certs: str | None, + ca_cert_dir: str | None, + ca_cert_data: None | str | bytes, + assert_hostname: None | str | typing.Literal[False], + assert_fingerprint: str | None, + server_hostname: str | None, + ssl_context: ssl.SSLContext | None, + tls_in_tls: bool = False, +) -> _WrappedAndVerifiedSocket: + """Logic for constructing an SSLContext from all TLS parameters, passing + that down into ssl_wrap_socket, and then doing certificate verification + either via hostname or fingerprint. This function exists to guarantee + that both proxies and targets have the same behavior when connecting via TLS. + """ + default_ssl_context = False + if ssl_context is None: + default_ssl_context = True + context = create_urllib3_context( + ssl_version=resolve_ssl_version(ssl_version), + ssl_minimum_version=ssl_minimum_version, + ssl_maximum_version=ssl_maximum_version, + cert_reqs=resolve_cert_reqs(cert_reqs), + ) + else: + context = ssl_context + + context.verify_mode = resolve_cert_reqs(cert_reqs) + + # In some cases, we want to verify hostnames ourselves + if ( + # `ssl` can't verify fingerprints or alternate hostnames + assert_fingerprint + or assert_hostname + # assert_hostname can be set to False to disable hostname checking + or assert_hostname is False + # We still support OpenSSL 1.0.2, which prevents us from verifying + # hostnames easily: https://github.com/pyca/pyopenssl/pull/933 + or ssl_.IS_PYOPENSSL + or not ssl_.HAS_NEVER_CHECK_COMMON_NAME + ): + context.check_hostname = False + + # Try to load OS default certs if none are given. We need to do the hasattr() check + # for custom pyOpenSSL SSLContext objects because they don't support + # load_default_certs(). + if ( + not ca_certs + and not ca_cert_dir + and not ca_cert_data + and default_ssl_context + and hasattr(context, "load_default_certs") + ): + context.load_default_certs() + + # Ensure that IPv6 addresses are in the proper format and don't have a + # scope ID. Python's SSL module fails to recognize scoped IPv6 addresses + # and interprets them as DNS hostnames. 
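+    # For example, a scoped address passed as "[fe80::1%eth0]" is reduced to
+    # "fe80::1" below so the ssl module treats it as an IP address.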
+ if server_hostname is not None: + normalized = server_hostname.strip("[]") + if "%" in normalized: + normalized = normalized[: normalized.rfind("%")] + if is_ipaddress(normalized): + server_hostname = normalized + + ssl_sock = ssl_wrap_socket( + sock=sock, + keyfile=key_file, + certfile=cert_file, + key_password=key_password, + ca_certs=ca_certs, + ca_cert_dir=ca_cert_dir, + ca_cert_data=ca_cert_data, + server_hostname=server_hostname, + ssl_context=context, + tls_in_tls=tls_in_tls, + ) + + try: + if assert_fingerprint: + _assert_fingerprint( + ssl_sock.getpeercert(binary_form=True), assert_fingerprint + ) + elif ( + context.verify_mode != ssl.CERT_NONE + and not context.check_hostname + and assert_hostname is not False + ): + cert: _TYPE_PEER_CERT_RET_DICT = ssl_sock.getpeercert() # type: ignore[assignment] + + # Need to signal to our match_hostname whether to use 'commonName' or not. + # If we're using our own constructed SSLContext we explicitly set 'False' + # because PyPy hard-codes 'True' from SSLContext.hostname_checks_common_name. + if default_ssl_context: + hostname_checks_common_name = False + else: + hostname_checks_common_name = ( + getattr(context, "hostname_checks_common_name", False) or False + ) + + _match_hostname( + cert, + assert_hostname or server_hostname, # type: ignore[arg-type] + hostname_checks_common_name, + ) + + return _WrappedAndVerifiedSocket( + socket=ssl_sock, + is_verified=context.verify_mode == ssl.CERT_REQUIRED + or bool(assert_fingerprint), + ) + except BaseException: + ssl_sock.close() + raise + + +def _match_hostname( + cert: _TYPE_PEER_CERT_RET_DICT | None, + asserted_hostname: str, + hostname_checks_common_name: bool = False, +) -> None: + # Our upstream implementation of ssl.match_hostname() + # only applies this normalization to IP addresses so it doesn't + # match DNS SANs so we do the same thing! + stripped_hostname = asserted_hostname.strip("[]") + if is_ipaddress(stripped_hostname): + asserted_hostname = stripped_hostname + + try: + match_hostname(cert, asserted_hostname, hostname_checks_common_name) + except CertificateError as e: + log.warning( + "Certificate did not match expected hostname: %s. Certificate: %s", + asserted_hostname, + cert, + ) + # Add cert to exception and reraise so client code can inspect + # the cert when catching the exception, if they want to + e._peer_cert = cert # type: ignore[attr-defined] + raise + + +def _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError: + # Look for the phrase 'wrong version number', if found + # then we should warn the user that we're very sure that + # this proxy is HTTP-only and they have a configuration issue. + error_normalized = " ".join(re.split("[^a-z]", str(err).lower())) + is_likely_http_proxy = ( + "wrong version number" in error_normalized + or "unknown protocol" in error_normalized + or "record layer failure" in error_normalized + ) + http_proxy_warning = ( + ". Your proxy appears to only use HTTP and not HTTPS, " + "try changing your proxy URL to be HTTP. 
See: " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#https-proxy-error-http-proxy" + ) + new_err = ProxyError( + f"Unable to connect to proxy" + f"{http_proxy_warning if is_likely_http_proxy and proxy_scheme == 'https' else ''}", + err, + ) + new_err.__cause__ = err + return new_err + + +def _get_default_user_agent() -> str: + return f"python-urllib3/{__version__}" + + +class DummyConnection: + """Used to detect a failed ConnectionCls import.""" + + +if not ssl: + HTTPSConnection = DummyConnection # type: ignore[misc, assignment] # noqa: F811 + + +VerifiedHTTPSConnection = HTTPSConnection + + +def _url_from_connection( + conn: HTTPConnection | HTTPSConnection, path: str | None = None +) -> str: + """Returns the URL from a given connection. This is mainly used for testing and logging.""" + + scheme = "https" if isinstance(conn, HTTPSConnection) else "http" + + return Url(scheme=scheme, host=conn.host, port=conn.port, path=path).url diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/connectionpool.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/connectionpool.py new file mode 100644 index 0000000000000000000000000000000000000000..3a0685b4cdd0562e508b9dd032765b5c759ea61e --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/connectionpool.py @@ -0,0 +1,1178 @@ +from __future__ import annotations + +import errno +import logging +import queue +import sys +import typing +import warnings +import weakref +from socket import timeout as SocketTimeout +from types import TracebackType + +from ._base_connection import _TYPE_BODY +from ._collections import HTTPHeaderDict +from ._request_methods import RequestMethods +from .connection import ( + BaseSSLError, + BrokenPipeError, + DummyConnection, + HTTPConnection, + HTTPException, + HTTPSConnection, + ProxyConfig, + _wrap_proxy_error, +) +from .connection import port_by_scheme as port_by_scheme +from .exceptions import ( + ClosedPoolError, + EmptyPoolError, + FullPoolError, + HostChangedError, + InsecureRequestWarning, + LocationValueError, + MaxRetryError, + NewConnectionError, + ProtocolError, + ProxyError, + ReadTimeoutError, + SSLError, + TimeoutError, +) +from .response import BaseHTTPResponse +from .util.connection import is_connection_dropped +from .util.proxy import connection_requires_http_tunnel +from .util.request import _TYPE_BODY_POSITION, set_file_position +from .util.retry import Retry +from .util.ssl_match_hostname import CertificateError +from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout +from .util.url import Url, _encode_target +from .util.url import _normalize_host as normalize_host +from .util.url import parse_url +from .util.util import to_str + +if typing.TYPE_CHECKING: + import ssl + + from typing_extensions import Self + + from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection + +log = logging.getLogger(__name__) + +_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None] + + +# Pool objects +class ConnectionPool: + """ + Base class for all connection pools, such as + :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. + + .. note:: + ConnectionPool.urlopen() does not normalize or percent-encode target URIs + which is useful if your target server doesn't support percent-encoded + target URIs. 
+ """ + + scheme: str | None = None + QueueCls = queue.LifoQueue + + def __init__(self, host: str, port: int | None = None) -> None: + if not host: + raise LocationValueError("No host specified.") + + self.host = _normalize_host(host, scheme=self.scheme) + self.port = port + + # This property uses 'normalize_host()' (not '_normalize_host()') + # to avoid removing square braces around IPv6 addresses. + # This value is sent to `HTTPConnection.set_tunnel()` if called + # because square braces are required for HTTP CONNECT tunneling. + self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() + + def __str__(self) -> str: + return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> typing.Literal[False]: + self.close() + # Return False to re-raise any potential exceptions + return False + + def close(self) -> None: + """ + Close all pooled connections and disable the pool. + """ + + +# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 +_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} + + +class HTTPConnectionPool(ConnectionPool, RequestMethods): + """ + Thread-safe connection pool for one host. + + :param host: + Host used for this HTTP Connection (e.g. "localhost"), passed into + :class:`http.client.HTTPConnection`. + + :param port: + Port used for this HTTP Connection (None is equivalent to 80), passed + into :class:`http.client.HTTPConnection`. + + :param timeout: + Socket timeout in seconds for each individual connection. This can + be a float or integer, which sets the timeout for the HTTP request, + or an instance of :class:`urllib3.util.Timeout` which gives you more + fine-grained control over request timeouts. After the constructor has + been parsed, this is always a `urllib3.util.Timeout` object. + + :param maxsize: + Number of connections to save that can be reused. More than 1 is useful + in multithreaded situations. If ``block`` is set to False, more + connections will be created but they will not be saved once they've + been used. + + :param block: + If set to True, no more than ``maxsize`` connections will be used at + a time. When no free connections are available, the call will block + until a connection has been released. This is a useful side effect for + particular multithreaded situations where one does not want to use more + than maxsize connections per host to prevent flooding. + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + + :param retries: + Retry configuration to use by default with requests in this pool. + + :param _proxy: + Parsed proxy URL, should not be used directly, instead, see + :class:`urllib3.ProxyManager` + + :param _proxy_headers: + A dictionary with proxy headers, should not be used directly, + instead, see :class:`urllib3.ProxyManager` + + :param \\**conn_kw: + Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, + :class:`urllib3.connection.HTTPSConnection` instances. 
+    """
+
+    scheme = "http"
+    ConnectionCls: type[BaseHTTPConnection] | type[BaseHTTPSConnection] = HTTPConnection
+
+    def __init__(
+        self,
+        host: str,
+        port: int | None = None,
+        timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
+        maxsize: int = 1,
+        block: bool = False,
+        headers: typing.Mapping[str, str] | None = None,
+        retries: Retry | bool | int | None = None,
+        _proxy: Url | None = None,
+        _proxy_headers: typing.Mapping[str, str] | None = None,
+        _proxy_config: ProxyConfig | None = None,
+        **conn_kw: typing.Any,
+    ):
+        ConnectionPool.__init__(self, host, port)
+        RequestMethods.__init__(self, headers)
+
+        if not isinstance(timeout, Timeout):
+            timeout = Timeout.from_float(timeout)
+
+        if retries is None:
+            retries = Retry.DEFAULT
+
+        self.timeout = timeout
+        self.retries = retries
+
+        self.pool: queue.LifoQueue[typing.Any] | None = self.QueueCls(maxsize)
+        self.block = block
+
+        self.proxy = _proxy
+        self.proxy_headers = _proxy_headers or {}
+        self.proxy_config = _proxy_config
+
+        # Fill the queue up so that doing get() on it will block properly
+        for _ in range(maxsize):
+            self.pool.put(None)
+
+        # These are mostly for testing and debugging purposes.
+        self.num_connections = 0
+        self.num_requests = 0
+        self.conn_kw = conn_kw
+
+        if self.proxy:
+            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+            # We cannot know if the user has added default socket options, so we cannot replace the
+            # list.
+            self.conn_kw.setdefault("socket_options", [])
+
+            self.conn_kw["proxy"] = self.proxy
+            self.conn_kw["proxy_config"] = self.proxy_config
+
+        # Do not pass 'self' as the callback to 'finalize': that would keep
+        # 'self' alive forever (a reference leak). Passing only a reference to
+        # the pool lets the garbage collector free 'self' once nobody else
+        # holds a reference to it.
+        pool = self.pool
+
+        # Close all the HTTPConnections in the pool before the
+        # HTTPConnectionPool object is garbage collected.
+        weakref.finalize(self, _close_pool_connections, pool)
+
+    def _new_conn(self) -> BaseHTTPConnection:
+        """
+        Return a fresh :class:`HTTPConnection`.
+        """
+        self.num_connections += 1
+        log.debug(
+            "Starting new HTTP connection (%d): %s:%s",
+            self.num_connections,
+            self.host,
+            self.port or "80",
+        )
+
+        conn = self.ConnectionCls(
+            host=self.host,
+            port=self.port,
+            timeout=self.timeout.connect_timeout,
+            **self.conn_kw,
+        )
+        return conn
+
+    def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection:
+        """
+        Get a connection. Will return a pooled connection if one is available.
+
+        If no connections are available and :prop:`.block` is ``False``, then a
+        fresh connection is returned.
+
+        :param timeout:
+            Seconds to wait before giving up and raising
+            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+            :prop:`.block` is ``True``.
+ """ + conn = None + + if self.pool is None: + raise ClosedPoolError(self, "Pool is closed.") + + try: + conn = self.pool.get(block=self.block, timeout=timeout) + + except AttributeError: # self.pool is None + raise ClosedPoolError(self, "Pool is closed.") from None # Defensive: + + except queue.Empty: + if self.block: + raise EmptyPoolError( + self, + "Pool is empty and a new connection can't be opened due to blocking mode.", + ) from None + pass # Oh well, we'll create a new connection then + + # If this is a persistent connection, check if it got disconnected + if conn and is_connection_dropped(conn): + log.debug("Resetting dropped connection: %s", self.host) + conn.close() + + return conn or self._new_conn() + + def _put_conn(self, conn: BaseHTTPConnection | None) -> None: + """ + Put a connection back into the pool. + + :param conn: + Connection object for the current host and port as returned by + :meth:`._new_conn` or :meth:`._get_conn`. + + If the pool is already full, the connection is closed and discarded + because we exceeded maxsize. If connections are discarded frequently, + then maxsize should be increased. + + If the pool is closed, then the connection will be closed and discarded. + """ + if self.pool is not None: + try: + self.pool.put(conn, block=False) + return # Everything is dandy, done. + except AttributeError: + # self.pool is None. + pass + except queue.Full: + # Connection never got put back into the pool, close it. + if conn: + conn.close() + + if self.block: + # This should never happen if you got the conn from self._get_conn + raise FullPoolError( + self, + "Pool reached maximum size and no more connections are allowed.", + ) from None + + log.warning( + "Connection pool is full, discarding connection: %s. Connection pool size: %s", + self.host, + self.pool.qsize(), + ) + + # Connection never got put back into the pool, close it. + if conn: + conn.close() + + def _validate_conn(self, conn: BaseHTTPConnection) -> None: + """ + Called right before a request is made, after the socket is created. + """ + + def _prepare_proxy(self, conn: BaseHTTPConnection) -> None: + # Nothing to do for HTTP connections. + pass + + def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout: + """Helper that always returns a :class:`urllib3.util.Timeout`""" + if timeout is _DEFAULT_TIMEOUT: + return self.timeout.clone() + + if isinstance(timeout, Timeout): + return timeout.clone() + else: + # User passed us an int/float. This is for backwards compatibility, + # can be removed later + return Timeout.from_float(timeout) + + def _raise_timeout( + self, + err: BaseSSLError | OSError | SocketTimeout, + url: str, + timeout_value: _TYPE_TIMEOUT | None, + ) -> None: + """Is the error actually a timeout? Will raise a ReadTimeout or pass""" + + if isinstance(err, SocketTimeout): + raise ReadTimeoutError( + self, url, f"Read timed out. (read timeout={timeout_value})" + ) from err + + # See the above comment about EAGAIN in Python 3. + if hasattr(err, "errno") and err.errno in _blocking_errnos: + raise ReadTimeoutError( + self, url, f"Read timed out. 
(read timeout={timeout_value})" + ) from err + + def _make_request( + self, + conn: BaseHTTPConnection, + method: str, + url: str, + body: _TYPE_BODY | None = None, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | None = None, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + chunked: bool = False, + response_conn: BaseHTTPConnection | None = None, + preload_content: bool = True, + decode_content: bool = True, + enforce_content_length: bool = True, + ) -> BaseHTTPResponse: + """ + Perform a request on a given urllib connection object taken from our + pool. + + :param conn: + a connection from one of our connection pools + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param body: + Data to send in the request body, either :class:`str`, :class:`bytes`, + an iterable of :class:`str`/:class:`bytes`, or a file-like object. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + + :param retries: + Configure the number of retries to allow before raising a + :class:`~urllib3.exceptions.MaxRetryError` exception. + + Pass ``None`` to retry until you receive a response. Pass a + :class:`~urllib3.util.retry.Retry` object for fine-grained control + over different types of retries. + Pass an integer number to retry connection errors that many times, + but no other types of errors. Pass zero to never retry. + + If ``False``, then retries are disabled and any exception is raised + immediately. Also, instead of raising a MaxRetryError on redirects, + the redirect response will be returned. + + :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. + + :param timeout: + If specified, overrides the default timeout for this one + request. It may be a float (in seconds) or an instance of + :class:`urllib3.util.Timeout`. + + :param chunked: + If True, urllib3 will send the body using chunked transfer + encoding. Otherwise, urllib3 will send the body using the standard + content-length form. Defaults to False. + + :param response_conn: + Set this to ``None`` if you will handle releasing the connection or + set the connection to have the response release it. + + :param preload_content: + If True, the response's body will be preloaded during construction. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param enforce_content_length: + Enforce content length checking. Body returned by server must match + value of Content-Length header, if present. Otherwise, raise error. + """ + self.num_requests += 1 + + timeout_obj = self._get_timeout(timeout) + timeout_obj.start_connect() + conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout) + + try: + # Trigger any extra validation we need to do. + try: + self._validate_conn(conn) + except (SocketTimeout, BaseSSLError) as e: + self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) + raise + + # _validate_conn() starts the connection to an HTTPS proxy + # so we need to wrap errors with 'ProxyError' here too. 
+        except (
+            OSError,
+            NewConnectionError,
+            TimeoutError,
+            BaseSSLError,
+            CertificateError,
+            SSLError,
+        ) as e:
+            new_e: Exception = e
+            if isinstance(e, (BaseSSLError, CertificateError)):
+                new_e = SSLError(e)
+            # If the connection didn't successfully connect to its proxy, wrap
+            # the error in a ProxyError so the caller knows the proxy is at fault.
+            if isinstance(
+                new_e, (OSError, NewConnectionError, TimeoutError, SSLError)
+            ) and (conn and conn.proxy and not conn.has_connected_to_proxy):
+                new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
+            raise new_e
+
+        # conn.request() calls http.client.*.request, not the method in
+        # urllib3.request. It also calls makefile (recv) on the socket.
+        try:
+            conn.request(
+                method,
+                url,
+                body=body,
+                headers=headers,
+                chunked=chunked,
+                preload_content=preload_content,
+                decode_content=decode_content,
+                enforce_content_length=enforce_content_length,
+            )
+
+        # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
+        # legitimately able to close the connection after sending a valid response.
+        # With this behaviour, the received response is still readable.
+        except BrokenPipeError:
+            pass
+        except OSError as e:
+            # MacOS/Linux
+            # EPROTOTYPE and ECONNRESET are needed on macOS
+            # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
+            # Condition changed later to emit ECONNRESET instead of only EPROTOTYPE.
+            if e.errno != errno.EPROTOTYPE and e.errno != errno.ECONNRESET:
+                raise
+
+        # Reset the timeout for the recv() on the socket
+        read_timeout = timeout_obj.read_timeout
+
+        if not conn.is_closed:
+            # In Python 3 socket.py will catch EAGAIN and return None when you
+            # try to read into the file pointer created by http.client, which
+            # instead raises a BadStatusLine exception. Instead of catching
+            # the exception and assuming all BadStatusLine exceptions are read
+            # timeouts, check for a zero timeout before making the request.
+            if read_timeout == 0:
+                raise ReadTimeoutError(
+                    self, url, f"Read timed out. (read timeout={read_timeout})"
+                )
+            conn.timeout = read_timeout
+
+        # Receive the response from the server
+        try:
+            response = conn.getresponse()
+        except (BaseSSLError, OSError) as e:
+            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+            raise
+
+        # Set properties that are used by the pooling layer.
+        response.retries = retries
+        response._connection = response_conn  # type: ignore[attr-defined]
+        response._pool = self  # type: ignore[attr-defined]
+
+        log.debug(
+            '%s://%s:%s "%s %s %s" %s %s',
+            self.scheme,
+            self.host,
+            self.port,
+            method,
+            url,
+            response.version_string,
+            response.status,
+            response.length_remaining,
+        )
+
+        return response
+
+    def close(self) -> None:
+        """
+        Close all pooled connections and disable the pool.
+        """
+        if self.pool is None:
+            return
+        # Disable access to the pool
+        old_pool, self.pool = self.pool, None
+
+        # Close all the HTTPConnections in the pool.
+        _close_pool_connections(old_pool)
+
+    def is_same_host(self, url: str) -> bool:
+        """
+        Check if the given ``url`` is a member of the same host as this
+        connection pool.
+        """
+        if url.startswith("/"):
+            return True
+
+        # TODO: Add optional support for socket.gethostbyname checking.
+ scheme, _, host, port, *_ = parse_url(url) + scheme = scheme or "http" + if host is not None: + host = _normalize_host(host, scheme=scheme) + + # Use explicit default port for comparison when none is given + if self.port and not port: + port = port_by_scheme.get(scheme) + elif not self.port and port == port_by_scheme.get(scheme): + port = None + + return (scheme, host, port) == (self.scheme, self.host, self.port) + + def urlopen( # type: ignore[override] + self, + method: str, + url: str, + body: _TYPE_BODY | None = None, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | bool | int | None = None, + redirect: bool = True, + assert_same_host: bool = True, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + pool_timeout: int | None = None, + release_conn: bool | None = None, + chunked: bool = False, + body_pos: _TYPE_BODY_POSITION | None = None, + preload_content: bool = True, + decode_content: bool = True, + **response_kw: typing.Any, + ) -> BaseHTTPResponse: + """ + Get a connection from the pool and perform an HTTP request. This is the + lowest level call for making a request, so you'll need to specify all + the raw details. + + .. note:: + + More commonly, it's appropriate to use a convenience method + such as :meth:`request`. + + .. note:: + + `release_conn` will only behave as expected if + `preload_content=False` because we want to make + `preload_content=False` the default behaviour someday soon without + breaking backwards compatibility. + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param body: + Data to send in the request body, either :class:`str`, :class:`bytes`, + an iterable of :class:`str`/:class:`bytes`, or a file-like object. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + + :param retries: + Configure the number of retries to allow before raising a + :class:`~urllib3.exceptions.MaxRetryError` exception. + + If ``None`` (default) will retry 3 times, see ``Retry.DEFAULT``. Pass a + :class:`~urllib3.util.retry.Retry` object for fine-grained control + over different types of retries. + Pass an integer number to retry connection errors that many times, + but no other types of errors. Pass zero to never retry. + + If ``False``, then retries are disabled and any exception is raised + immediately. Also, instead of raising a MaxRetryError on redirects, + the redirect response will be returned. + + :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. + + :param redirect: + If True, automatically handle redirects (status codes 301, 302, + 303, 307, 308). Each redirect counts as a retry. Disabling retries + will disable redirect, too. + + :param assert_same_host: + If ``True``, will make sure that the host of the pool requests is + consistent else will raise HostChangedError. When ``False``, you can + use the pool on an HTTP proxy and request foreign hosts. + + :param timeout: + If specified, overrides the default timeout for this one + request. It may be a float (in seconds) or an instance of + :class:`urllib3.util.Timeout`. + + :param pool_timeout: + If set and the pool is set to block=True, then this method will + block for ``pool_timeout`` seconds and raise EmptyPoolError if no + connection is available within the time period. 
+ + :param bool preload_content: + If True, the response's body will be preloaded into memory. + + :param bool decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param release_conn: + If False, then the urlopen call will not release the connection + back into the pool once a response is received (but will release if + you read the entire contents of the response such as when + `preload_content=True`). This is useful if you're not preloading + the response's content immediately. You will need to call + ``r.release_conn()`` on the response ``r`` to return the connection + back into the pool. If None, it takes the value of ``preload_content`` + which defaults to ``True``. + + :param bool chunked: + If True, urllib3 will send the body using chunked transfer + encoding. Otherwise, urllib3 will send the body using the standard + content-length form. Defaults to False. + + :param int body_pos: + Position to seek to in file-like body in the event of a retry or + redirect. Typically this won't need to be set because urllib3 will + auto-populate the value when needed. + """ + parsed_url = parse_url(url) + destination_scheme = parsed_url.scheme + + if headers is None: + headers = self.headers + + if not isinstance(retries, Retry): + retries = Retry.from_int(retries, redirect=redirect, default=self.retries) + + if release_conn is None: + release_conn = preload_content + + # Check host + if assert_same_host and not self.is_same_host(url): + raise HostChangedError(self, url, retries) + + # Ensure that the URL we're connecting to is properly encoded + if url.startswith("/"): + url = to_str(_encode_target(url)) + else: + url = to_str(parsed_url.url) + + conn = None + + # Track whether `conn` needs to be released before + # returning/raising/recursing. Update this variable if necessary, and + # leave `release_conn` constant throughout the function. That way, if + # the function recurses, the original value of `release_conn` will be + # passed down into the recursive call, and its value will be respected. + # + # See issue #651 [1] for details. + # + # [1] + release_this_conn = release_conn + + http_tunnel_required = connection_requires_http_tunnel( + self.proxy, self.proxy_config, destination_scheme + ) + + # Merge the proxy headers. Only done when not using HTTP CONNECT. We + # have to copy the headers dict so we can safely change it without those + # changes being reflected in anyone else's copy. + if not http_tunnel_required: + headers = headers.copy() # type: ignore[attr-defined] + headers.update(self.proxy_headers) # type: ignore[union-attr] + + # Must keep the exception bound to a separate variable or else Python 3 + # complains about UnboundLocalError. + err = None + + # Keep track of whether we cleanly exited the except block. This + # ensures we do proper cleanup in finally. + clean_exit = False + + # Rewind body position, if needed. Record current position + # for future rewinds in the event of a redirect/retry. + body_pos = set_file_position(body, body_pos) + + try: + # Request a connection from the queue. + timeout_obj = self._get_timeout(timeout) + conn = self._get_conn(timeout=pool_timeout) + + conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment] + + # Is this a closed/new connection that requires CONNECT tunnelling? 
+ if self.proxy is not None and http_tunnel_required and conn.is_closed: + try: + self._prepare_proxy(conn) + except (BaseSSLError, OSError, SocketTimeout) as e: + self._raise_timeout( + err=e, url=self.proxy.url, timeout_value=conn.timeout + ) + raise + + # If we're going to release the connection in ``finally:``, then + # the response doesn't need to know about the connection. Otherwise + # it will also try to release it and we'll have a double-release + # mess. + response_conn = conn if not release_conn else None + + # Make the request on the HTTPConnection object + response = self._make_request( + conn, + method, + url, + timeout=timeout_obj, + body=body, + headers=headers, + chunked=chunked, + retries=retries, + response_conn=response_conn, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + # Everything went great! + clean_exit = True + + except EmptyPoolError: + # Didn't get a connection from the pool, no need to clean up + clean_exit = True + release_this_conn = False + raise + + except ( + TimeoutError, + HTTPException, + OSError, + ProtocolError, + BaseSSLError, + SSLError, + CertificateError, + ProxyError, + ) as e: + # Discard the connection for these exceptions. It will be + # replaced during the next _get_conn() call. + clean_exit = False + new_e: Exception = e + if isinstance(e, (BaseSSLError, CertificateError)): + new_e = SSLError(e) + if isinstance( + new_e, + ( + OSError, + NewConnectionError, + TimeoutError, + SSLError, + HTTPException, + ), + ) and (conn and conn.proxy and not conn.has_connected_to_proxy): + new_e = _wrap_proxy_error(new_e, conn.proxy.scheme) + elif isinstance(new_e, (OSError, HTTPException)): + new_e = ProtocolError("Connection aborted.", new_e) + + retries = retries.increment( + method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2] + ) + retries.sleep() + + # Keep track of the error for the retry warning. + err = e + + finally: + if not clean_exit: + # We hit some kind of exception, handled or otherwise. We need + # to throw the connection away unless explicitly told not to. + # Close the connection, set the variable to None, and make sure + # we put the None back in the pool to avoid leaking it. + if conn: + conn.close() + conn = None + release_this_conn = True + + if release_this_conn: + # Put the connection back to be reused. If the connection is + # expired then it will be None, which will get replaced with a + # fresh connection during _get_conn. + self._put_conn(conn) + + if not conn: + # Try again + log.warning( + "Retrying (%r) after connection broken by '%r': %s", retries, err, url + ) + return self.urlopen( + method, + url, + body, + headers, + retries, + redirect, + assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + # Handle redirect? + redirect_location = redirect and response.get_redirect_location() + if redirect_location: + if response.status == 303: + # Change the method according to RFC 9110, Section 15.4.4. + method = "GET" + # And lose the body not to transfer anything sensitive. 
+ body = None + headers = HTTPHeaderDict(headers)._prepare_for_method_change() + + try: + retries = retries.increment(method, url, response=response, _pool=self) + except MaxRetryError: + if retries.raise_on_redirect: + response.drain_conn() + raise + return response + + response.drain_conn() + retries.sleep_for_retry(response) + log.debug("Redirecting %s -> %s", url, redirect_location) + return self.urlopen( + method, + redirect_location, + body, + headers, + retries=retries, + redirect=redirect, + assert_same_host=assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + # Check if we should retry the HTTP response. + has_retry_after = bool(response.headers.get("Retry-After")) + if retries.is_retry(method, response.status, has_retry_after): + try: + retries = retries.increment(method, url, response=response, _pool=self) + except MaxRetryError: + if retries.raise_on_status: + response.drain_conn() + raise + return response + + response.drain_conn() + retries.sleep(response) + log.debug("Retry: %s", url) + return self.urlopen( + method, + url, + body, + headers, + retries=retries, + redirect=redirect, + assert_same_host=assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + return response + + +class HTTPSConnectionPool(HTTPConnectionPool): + """ + Same as :class:`.HTTPConnectionPool`, but HTTPS. + + :class:`.HTTPSConnection` uses one of ``assert_fingerprint``, + ``assert_hostname`` and ``host`` in this order to verify connections. + If ``assert_hostname`` is False, no verification is done. + + The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, + ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl` + is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade + the connection socket into an SSL socket. 
+ """ + + scheme = "https" + ConnectionCls: type[BaseHTTPSConnection] = HTTPSConnection + + def __init__( + self, + host: str, + port: int | None = None, + timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, + maxsize: int = 1, + block: bool = False, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | bool | int | None = None, + _proxy: Url | None = None, + _proxy_headers: typing.Mapping[str, str] | None = None, + key_file: str | None = None, + cert_file: str | None = None, + cert_reqs: int | str | None = None, + key_password: str | None = None, + ca_certs: str | None = None, + ssl_version: int | str | None = None, + ssl_minimum_version: ssl.TLSVersion | None = None, + ssl_maximum_version: ssl.TLSVersion | None = None, + assert_hostname: str | typing.Literal[False] | None = None, + assert_fingerprint: str | None = None, + ca_cert_dir: str | None = None, + **conn_kw: typing.Any, + ) -> None: + super().__init__( + host, + port, + timeout, + maxsize, + block, + headers, + retries, + _proxy, + _proxy_headers, + **conn_kw, + ) + + self.key_file = key_file + self.cert_file = cert_file + self.cert_reqs = cert_reqs + self.key_password = key_password + self.ca_certs = ca_certs + self.ca_cert_dir = ca_cert_dir + self.ssl_version = ssl_version + self.ssl_minimum_version = ssl_minimum_version + self.ssl_maximum_version = ssl_maximum_version + self.assert_hostname = assert_hostname + self.assert_fingerprint = assert_fingerprint + + def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override] + """Establishes a tunnel connection through HTTP CONNECT.""" + if self.proxy and self.proxy.scheme == "https": + tunnel_scheme = "https" + else: + tunnel_scheme = "http" + + conn.set_tunnel( + scheme=tunnel_scheme, + host=self._tunnel_host, + port=self.port, + headers=self.proxy_headers, + ) + conn.connect() + + def _new_conn(self) -> BaseHTTPSConnection: + """ + Return a fresh :class:`urllib3.connection.HTTPConnection`. + """ + self.num_connections += 1 + log.debug( + "Starting new HTTPS connection (%d): %s:%s", + self.num_connections, + self.host, + self.port or "443", + ) + + if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap] + raise ImportError( + "Can't connect to HTTPS URL because the SSL module is not available." + ) + + actual_host: str = self.host + actual_port = self.port + if self.proxy is not None and self.proxy.host is not None: + actual_host = self.proxy.host + actual_port = self.proxy.port + + return self.ConnectionCls( + host=actual_host, + port=actual_port, + timeout=self.timeout.connect_timeout, + cert_file=self.cert_file, + key_file=self.key_file, + key_password=self.key_password, + cert_reqs=self.cert_reqs, + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + assert_hostname=self.assert_hostname, + assert_fingerprint=self.assert_fingerprint, + ssl_version=self.ssl_version, + ssl_minimum_version=self.ssl_minimum_version, + ssl_maximum_version=self.ssl_maximum_version, + **self.conn_kw, + ) + + def _validate_conn(self, conn: BaseHTTPConnection) -> None: + """ + Called right before a request is made, after the socket is created. + """ + super()._validate_conn(conn) + + # Force connect early to allow us to validate the connection. + if conn.is_closed: + conn.connect() + + # TODO revise this, see https://github.com/urllib3/urllib3/issues/2791 + if not conn.is_verified and not conn.proxy_is_verified: + warnings.warn( + ( + f"Unverified HTTPS request is being made to host '{conn.host}'. 
" + "Adding certificate verification is strongly advised. See: " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#tls-warnings" + ), + InsecureRequestWarning, + ) + + +def connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool: + """ + Given a url, return an :class:`.ConnectionPool` instance of its host. + + This is a shortcut for not having to parse out the scheme, host, and port + of the url before creating an :class:`.ConnectionPool` instance. + + :param url: + Absolute URL string that must include the scheme. Port is optional. + + :param \\**kw: + Passes additional parameters to the constructor of the appropriate + :class:`.ConnectionPool`. Useful for specifying things like + timeout, maxsize, headers, etc. + + Example:: + + >>> conn = connection_from_url('http://google.com/') + >>> r = conn.request('GET', '/') + """ + scheme, _, host, port, *_ = parse_url(url) + scheme = scheme or "http" + port = port or port_by_scheme.get(scheme, 80) + if scheme == "https": + return HTTPSConnectionPool(host, port=port, **kw) # type: ignore[arg-type] + else: + return HTTPConnectionPool(host, port=port, **kw) # type: ignore[arg-type] + + +@typing.overload +def _normalize_host(host: None, scheme: str | None) -> None: ... + + +@typing.overload +def _normalize_host(host: str, scheme: str | None) -> str: ... + + +def _normalize_host(host: str | None, scheme: str | None) -> str | None: + """ + Normalize hosts for comparisons and use with sockets. + """ + + host = normalize_host(host, scheme) + + # httplib doesn't like it when we include brackets in IPv6 addresses + # Specifically, if we include brackets but also pass the port then + # httplib crazily doubles up the square brackets on the Host header. + # Instead, we need to make sure we never pass ``None`` as the port. + # However, for backward compatibility reasons we can't actually + # *assert* that. See http://bugs.python.org/issue28539 + if host and host.startswith("[") and host.endswith("]"): + host = host[1:-1] + return host + + +def _url_from_pool( + pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None +) -> str: + """Returns the URL from a given connection pool. This is mainly used for testing and logging.""" + return Url(scheme=pool.scheme, host=pool.host, port=pool.port, path=path).url + + +def _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None: + """Drains a queue of connections and closes each one.""" + try: + while True: + conn = pool.get(block=False) + if conn: + conn.close() + except queue.Empty: + pass # Done. 
diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/__init__.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/emscripten/__init__.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/emscripten/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8a3c5bebdc151eef715663628a697118bb2932ed --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/emscripten/__init__.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import urllib3.connection + +from ...connectionpool import HTTPConnectionPool, HTTPSConnectionPool +from .connection import EmscriptenHTTPConnection, EmscriptenHTTPSConnection + + +def inject_into_urllib3() -> None: + # override connection classes to use emscripten specific classes + # n.b. mypy complains about the overriding of classes below + # if it isn't ignored + HTTPConnectionPool.ConnectionCls = EmscriptenHTTPConnection + HTTPSConnectionPool.ConnectionCls = EmscriptenHTTPSConnection + urllib3.connection.HTTPConnection = EmscriptenHTTPConnection # type: ignore[misc,assignment] + urllib3.connection.HTTPSConnection = EmscriptenHTTPSConnection # type: ignore[misc,assignment] diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/emscripten/emscripten_fetch_worker.js b/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/emscripten/emscripten_fetch_worker.js new file mode 100644 index 0000000000000000000000000000000000000000..243b86222f90a9be4b6b4ce0bf997eefd29289af --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/emscripten/emscripten_fetch_worker.js @@ -0,0 +1,110 @@ +let Status = { + SUCCESS_HEADER: -1, + SUCCESS_EOF: -2, + ERROR_TIMEOUT: -3, + ERROR_EXCEPTION: -4, +}; + +let connections = {}; +let nextConnectionID = 1; +const encoder = new TextEncoder(); + +self.addEventListener("message", async function (event) { + if (event.data.close) { + let connectionID = event.data.close; + delete connections[connectionID]; + return; + } else if (event.data.getMore) { + let connectionID = event.data.getMore; + let { curOffset, value, reader, intBuffer, byteBuffer } = + connections[connectionID]; + // if we still have some in buffer, then just send it back straight away + if (!value || curOffset >= value.length) { + // read another buffer if required + try { + let readResponse = await reader.read(); + + if (readResponse.done) { + // read everything - clear connection and return + delete connections[connectionID]; + Atomics.store(intBuffer, 0, Status.SUCCESS_EOF); + Atomics.notify(intBuffer, 0); + // finished reading successfully + // return from event handler + return; + } + curOffset = 0; + connections[connectionID].value = readResponse.value; + value = readResponse.value; + } catch (error) { + console.log("Request exception:", error); + let errorBytes = encoder.encode(error.message); + let written = errorBytes.length; + byteBuffer.set(errorBytes); + intBuffer[1] = written; + Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION); + Atomics.notify(intBuffer, 0); + } + } + + // send as much buffer as we can + let curLen = value.length - curOffset; + if (curLen > byteBuffer.length) { + curLen = byteBuffer.length; + } + byteBuffer.set(value.subarray(curOffset, curOffset + curLen), 0); + + Atomics.store(intBuffer, 0, curLen); // store current length in 
bytes + Atomics.notify(intBuffer, 0); + curOffset += curLen; + connections[connectionID].curOffset = curOffset; + + return; + } else { + // start fetch + let connectionID = nextConnectionID; + nextConnectionID += 1; + const intBuffer = new Int32Array(event.data.buffer); + const byteBuffer = new Uint8Array(event.data.buffer, 8); + try { + const response = await fetch(event.data.url, event.data.fetchParams); + // return the headers first via textencoder + var headers = []; + for (const pair of response.headers.entries()) { + headers.push([pair[0], pair[1]]); + } + let headerObj = { + headers: headers, + status: response.status, + connectionID, + }; + const headerText = JSON.stringify(headerObj); + let headerBytes = encoder.encode(headerText); + let written = headerBytes.length; + byteBuffer.set(headerBytes); + intBuffer[1] = written; + // make a connection + connections[connectionID] = { + reader: response.body.getReader(), + intBuffer: intBuffer, + byteBuffer: byteBuffer, + value: undefined, + curOffset: 0, + }; + // set header ready + Atomics.store(intBuffer, 0, Status.SUCCESS_HEADER); + Atomics.notify(intBuffer, 0); + // all fetching after this goes through a new postmessage call with getMore + // this allows for parallel requests + } catch (error) { + console.log("Request exception:", error); + let errorBytes = encoder.encode(error.message); + let written = errorBytes.length; + byteBuffer.set(errorBytes); + intBuffer[1] = written; + Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION); + Atomics.notify(intBuffer, 0); + } + } +}); +self.postMessage({ inited: true }); diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/pyopenssl.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/pyopenssl.py new file mode 100644 index 0000000000000000000000000000000000000000..ed65430661605995b285d889d57a5927c6a777ab --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/pyopenssl.py @@ -0,0 +1,554 @@ +""" +Module for using pyOpenSSL as a TLS backend. This module was relevant before +the standard library ``ssl`` module supported SNI, but now that we've dropped +support for Python 2.7 all relevant Python versions support SNI so +**this module is no longer recommended**. + +This needs the following packages installed: + +* `pyOpenSSL`_ (tested with 16.0.0) +* `cryptography`_ (minimum 1.3.4, from pyopenssl) +* `idna`_ (minimum 2.0) + +However, pyOpenSSL depends on cryptography, so while we use all three directly here we +end up having relatively few packages required. + +You can install them with the following command: + +.. code-block:: bash + + $ python -m pip install pyopenssl cryptography idna + +To activate certificate checking, call +:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code +before you begin making HTTP requests. This can be done in a ``sitecustomize`` +module, or at any other time before your application begins using ``urllib3``, +like this: + +.. code-block:: python + + try: + import urllib3.contrib.pyopenssl + urllib3.contrib.pyopenssl.inject_into_urllib3() + except ImportError: + pass + +.. _pyopenssl: https://www.pyopenssl.org +.. _cryptography: https://cryptography.io +.. 
_idna: https://github.com/kjd/idna +""" + +from __future__ import annotations + +import OpenSSL.SSL # type: ignore[import-untyped] +from cryptography import x509 + +try: + from cryptography.x509 import UnsupportedExtension # type: ignore[attr-defined] +except ImportError: + # UnsupportedExtension is gone in cryptography >= 2.1.0 + class UnsupportedExtension(Exception): # type: ignore[no-redef] + pass + + +import logging +import ssl +import typing +from io import BytesIO +from socket import socket as socket_cls +from socket import timeout + +from .. import util + +if typing.TYPE_CHECKING: + from OpenSSL.crypto import X509 # type: ignore[import-untyped] + + +__all__ = ["inject_into_urllib3", "extract_from_urllib3"] + +# Map from urllib3 to PyOpenSSL compatible parameter-values. +_openssl_versions: dict[int, int] = { + util.ssl_.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD, # type: ignore[attr-defined] + util.ssl_.PROTOCOL_TLS_CLIENT: OpenSSL.SSL.SSLv23_METHOD, # type: ignore[attr-defined] + ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, +} + +if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"): + _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD + +if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"): + _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD + + +_stdlib_to_openssl_verify = { + ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, + ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, + ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER + + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, +} +_openssl_to_stdlib_verify = {v: k for k, v in _stdlib_to_openssl_verify.items()} + +# The SSLvX values are the most likely to be missing in the future +# but we check them all just to be sure. +_OP_NO_SSLv2_OR_SSLv3: int = getattr(OpenSSL.SSL, "OP_NO_SSLv2", 0) | getattr( + OpenSSL.SSL, "OP_NO_SSLv3", 0 +) +_OP_NO_TLSv1: int = getattr(OpenSSL.SSL, "OP_NO_TLSv1", 0) +_OP_NO_TLSv1_1: int = getattr(OpenSSL.SSL, "OP_NO_TLSv1_1", 0) +_OP_NO_TLSv1_2: int = getattr(OpenSSL.SSL, "OP_NO_TLSv1_2", 0) +_OP_NO_TLSv1_3: int = getattr(OpenSSL.SSL, "OP_NO_TLSv1_3", 0) + +_openssl_to_ssl_minimum_version: dict[int, int] = { + ssl.TLSVersion.MINIMUM_SUPPORTED: _OP_NO_SSLv2_OR_SSLv3, + ssl.TLSVersion.TLSv1: _OP_NO_SSLv2_OR_SSLv3, + ssl.TLSVersion.TLSv1_1: _OP_NO_SSLv2_OR_SSLv3 | _OP_NO_TLSv1, + ssl.TLSVersion.TLSv1_2: _OP_NO_SSLv2_OR_SSLv3 | _OP_NO_TLSv1 | _OP_NO_TLSv1_1, + ssl.TLSVersion.TLSv1_3: ( + _OP_NO_SSLv2_OR_SSLv3 | _OP_NO_TLSv1 | _OP_NO_TLSv1_1 | _OP_NO_TLSv1_2 + ), + ssl.TLSVersion.MAXIMUM_SUPPORTED: ( + _OP_NO_SSLv2_OR_SSLv3 | _OP_NO_TLSv1 | _OP_NO_TLSv1_1 | _OP_NO_TLSv1_2 + ), +} +_openssl_to_ssl_maximum_version: dict[int, int] = { + ssl.TLSVersion.MINIMUM_SUPPORTED: ( + _OP_NO_SSLv2_OR_SSLv3 + | _OP_NO_TLSv1 + | _OP_NO_TLSv1_1 + | _OP_NO_TLSv1_2 + | _OP_NO_TLSv1_3 + ), + ssl.TLSVersion.TLSv1: ( + _OP_NO_SSLv2_OR_SSLv3 | _OP_NO_TLSv1_1 | _OP_NO_TLSv1_2 | _OP_NO_TLSv1_3 + ), + ssl.TLSVersion.TLSv1_1: _OP_NO_SSLv2_OR_SSLv3 | _OP_NO_TLSv1_2 | _OP_NO_TLSv1_3, + ssl.TLSVersion.TLSv1_2: _OP_NO_SSLv2_OR_SSLv3 | _OP_NO_TLSv1_3, + ssl.TLSVersion.TLSv1_3: _OP_NO_SSLv2_OR_SSLv3, + ssl.TLSVersion.MAXIMUM_SUPPORTED: _OP_NO_SSLv2_OR_SSLv3, +} + +# OpenSSL will only write 16K at a time +SSL_WRITE_BLOCKSIZE = 16384 + +orig_util_SSLContext = util.ssl_.SSLContext + + +log = logging.getLogger(__name__) + + +def inject_into_urllib3() -> None: + "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support." 
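+ # The body first validates the pyOpenSSL/cryptography dependencies, then + # swaps the SSLContext class used by urllib3.util / urllib3.util.ssl_ for + # the PyOpenSSLContext wrapper defined later in this module; the + # IS_PYOPENSSL flags let other code detect which TLS backend is active.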
+ + _validate_dependencies_met() + + util.SSLContext = PyOpenSSLContext # type: ignore[assignment] + util.ssl_.SSLContext = PyOpenSSLContext # type: ignore[assignment] + util.IS_PYOPENSSL = True + util.ssl_.IS_PYOPENSSL = True + + +def extract_from_urllib3() -> None: + "Undo monkey-patching by :func:`inject_into_urllib3`." + + util.SSLContext = orig_util_SSLContext + util.ssl_.SSLContext = orig_util_SSLContext + util.IS_PYOPENSSL = False + util.ssl_.IS_PYOPENSSL = False + + +def _validate_dependencies_met() -> None: + """ + Verifies that PyOpenSSL's package-level dependencies have been met. + Throws `ImportError` if they are not met. + """ + # Method added in `cryptography==1.1`; not available in older versions + from cryptography.x509.extensions import Extensions + + if getattr(Extensions, "get_extension_for_class", None) is None: + raise ImportError( + "'cryptography' module missing required functionality. " + "Try upgrading to v1.3.4 or newer." + ) + + # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 + # attribute is only present on those versions. + from OpenSSL.crypto import X509 + + x509 = X509() + if getattr(x509, "_x509", None) is None: + raise ImportError( + "'pyOpenSSL' module missing required functionality. " + "Try upgrading to v0.14 or newer." + ) + + +def _dnsname_to_stdlib(name: str) -> str | None: + """ + Converts a dNSName SubjectAlternativeName field to the form used by the + standard library on the given Python version. + + Cryptography produces a dNSName as a unicode string that was idna-decoded + from ASCII bytes. We need to idna-encode that string to get it back, and + then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib + uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). + + If the name cannot be idna-encoded then we return None signalling that + the name given should be skipped. + """ + + def idna_encode(name: str) -> bytes | None: + """ + Borrowed wholesale from the Python Cryptography Project. It turns out + that we can't just safely call `idna.encode`: it can explode for + wildcard names. This avoids that problem. + """ + import idna + + try: + for prefix in ["*.", "."]: + if name.startswith(prefix): + name = name[len(prefix) :] + return prefix.encode("ascii") + idna.encode(name) + return idna.encode(name) + except idna.core.IDNAError: + return None + + # Don't send IPv6 addresses through the IDNA encoder. + if ":" in name: + return name + + encoded_name = idna_encode(name) + if encoded_name is None: + return None + return encoded_name.decode("utf-8") + + +def get_subj_alt_name(peer_cert: X509) -> list[tuple[str, str]]: + """ + Given an PyOpenSSL certificate, provides all the subject alternative names. + """ + cert = peer_cert.to_cryptography() + + # We want to find the SAN extension. Ask Cryptography to locate it (it's + # faster than looping in Python) + try: + ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value + except x509.ExtensionNotFound: + # No such extension, return the empty list. + return [] + except ( + x509.DuplicateExtension, + UnsupportedExtension, + x509.UnsupportedGeneralNameType, + UnicodeError, + ) as e: + # A problem has been found with the quality of the certificate. Assume + # no SAN field is present. + log.warning( + "A problem was encountered with the certificate that prevented " + "urllib3 from finding the SubjectAlternativeName field. This can " + "affect certificate validation. 
The error was %s", + e, + ) + return [] + + # We want to return dNSName and iPAddress fields. We need to cast the IPs + # back to strings because the match_hostname function wants them as + # strings. + # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8 + # decoded. This is pretty frustrating, but that's what the standard library + # does with certificates, and so we need to attempt to do the same. + # We also want to skip over names which cannot be idna encoded. + names = [ + ("DNS", name) + for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName)) + if name is not None + ] + names.extend( + ("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress) + ) + + return names + + +class WrappedSocket: + """API-compatibility wrapper for Python OpenSSL's Connection-class.""" + + def __init__( + self, + connection: OpenSSL.SSL.Connection, + socket: socket_cls, + suppress_ragged_eofs: bool = True, + ) -> None: + self.connection = connection + self.socket = socket + self.suppress_ragged_eofs = suppress_ragged_eofs + self._io_refs = 0 + self._closed = False + + def fileno(self) -> int: + return self.socket.fileno() + + # Copy-pasted from Python 3.5 source code + def _decref_socketios(self) -> None: + if self._io_refs > 0: + self._io_refs -= 1 + if self._closed: + self.close() + + def recv(self, *args: typing.Any, **kwargs: typing.Any) -> bytes: + try: + data = self.connection.recv(*args, **kwargs) + except OpenSSL.SSL.SysCallError as e: + if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): + return b"" + else: + raise OSError(e.args[0], str(e)) from e + except OpenSSL.SSL.ZeroReturnError: + if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: + return b"" + else: + raise + except OpenSSL.SSL.WantReadError as e: + if not util.wait_for_read(self.socket, self.socket.gettimeout()): + raise timeout("The read operation timed out") from e + else: + return self.recv(*args, **kwargs) + + # TLS 1.3 post-handshake authentication + except OpenSSL.SSL.Error as e: + raise ssl.SSLError(f"read error: {e!r}") from e + else: + return data # type: ignore[no-any-return] + + def recv_into(self, *args: typing.Any, **kwargs: typing.Any) -> int: + try: + return self.connection.recv_into(*args, **kwargs) # type: ignore[no-any-return] + except OpenSSL.SSL.SysCallError as e: + if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): + return 0 + else: + raise OSError(e.args[0], str(e)) from e + except OpenSSL.SSL.ZeroReturnError: + if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: + return 0 + else: + raise + except OpenSSL.SSL.WantReadError as e: + if not util.wait_for_read(self.socket, self.socket.gettimeout()): + raise timeout("The read operation timed out") from e + else: + return self.recv_into(*args, **kwargs) + + # TLS 1.3 post-handshake authentication + except OpenSSL.SSL.Error as e: + raise ssl.SSLError(f"read error: {e!r}") from e + + def settimeout(self, timeout: float) -> None: + return self.socket.settimeout(timeout) + + def _send_until_done(self, data: bytes) -> int: + while True: + try: + return self.connection.send(data) # type: ignore[no-any-return] + except OpenSSL.SSL.WantWriteError as e: + if not util.wait_for_write(self.socket, self.socket.gettimeout()): + raise timeout() from e + continue + except OpenSSL.SSL.SysCallError as e: + raise OSError(e.args[0], str(e)) from e + + def sendall(self, data: bytes) -> None: + total_sent = 0 + while total_sent < len(data): + sent = 
self._send_until_done( + data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE] + ) + total_sent += sent + + def shutdown(self, how: int) -> None: + try: + self.connection.shutdown() + except OpenSSL.SSL.Error as e: + raise ssl.SSLError(f"shutdown error: {e!r}") from e + + def close(self) -> None: + self._closed = True + if self._io_refs <= 0: + self._real_close() + + def _real_close(self) -> None: + try: + return self.connection.close() # type: ignore[no-any-return] + except OpenSSL.SSL.Error: + return + + def getpeercert( + self, binary_form: bool = False + ) -> dict[str, list[typing.Any]] | None: + x509 = self.connection.get_peer_certificate() + + if not x509: + return x509 # type: ignore[no-any-return] + + if binary_form: + return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509) # type: ignore[no-any-return] + + return { + "subject": ((("commonName", x509.get_subject().CN),),), # type: ignore[dict-item] + "subjectAltName": get_subj_alt_name(x509), + } + + def version(self) -> str: + return self.connection.get_protocol_version_name() # type: ignore[no-any-return] + + def selected_alpn_protocol(self) -> str | None: + alpn_proto = self.connection.get_alpn_proto_negotiated() + return alpn_proto.decode() if alpn_proto else None + + +WrappedSocket.makefile = socket_cls.makefile # type: ignore[attr-defined] + + +class PyOpenSSLContext: + """ + I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible + for translating the interface of the standard library ``SSLContext`` object + to calls into PyOpenSSL. + """ + + def __init__(self, protocol: int) -> None: + self.protocol = _openssl_versions[protocol] + self._ctx = OpenSSL.SSL.Context(self.protocol) + self._options = 0 + self.check_hostname = False + self._minimum_version: int = ssl.TLSVersion.MINIMUM_SUPPORTED + self._maximum_version: int = ssl.TLSVersion.MAXIMUM_SUPPORTED + + @property + def options(self) -> int: + return self._options + + @options.setter + def options(self, value: int) -> None: + self._options = value + self._set_ctx_options() + + @property + def verify_mode(self) -> int: + return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()] + + @verify_mode.setter + def verify_mode(self, value: ssl.VerifyMode) -> None: + self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback) + + def set_default_verify_paths(self) -> None: + self._ctx.set_default_verify_paths() + + def set_ciphers(self, ciphers: bytes | str) -> None: + if isinstance(ciphers, str): + ciphers = ciphers.encode("utf-8") + self._ctx.set_cipher_list(ciphers) + + def load_verify_locations( + self, + cafile: str | None = None, + capath: str | None = None, + cadata: bytes | None = None, + ) -> None: + if cafile is not None: + cafile = cafile.encode("utf-8") # type: ignore[assignment] + if capath is not None: + capath = capath.encode("utf-8") # type: ignore[assignment] + try: + self._ctx.load_verify_locations(cafile, capath) + if cadata is not None: + self._ctx.load_verify_locations(BytesIO(cadata)) + except OpenSSL.SSL.Error as e: + raise ssl.SSLError(f"unable to load trusted certificates: {e!r}") from e + + def load_cert_chain( + self, + certfile: str, + keyfile: str | None = None, + password: str | None = None, + ) -> None: + try: + self._ctx.use_certificate_chain_file(certfile) + if password is not None: + if not isinstance(password, bytes): + password = password.encode("utf-8") # type: ignore[assignment] + self._ctx.set_passwd_cb(lambda *_: password) + self._ctx.use_privatekey_file(keyfile or certfile) + except 
OpenSSL.SSL.Error as e: + raise ssl.SSLError(f"Unable to load certificate chain: {e!r}") from e + + def set_alpn_protocols(self, protocols: list[bytes | str]) -> None: + protocols = [util.util.to_bytes(p, "ascii") for p in protocols] + return self._ctx.set_alpn_protos(protocols) # type: ignore[no-any-return] + + def wrap_socket( + self, + sock: socket_cls, + server_side: bool = False, + do_handshake_on_connect: bool = True, + suppress_ragged_eofs: bool = True, + server_hostname: bytes | str | None = None, + ) -> WrappedSocket: + cnx = OpenSSL.SSL.Connection(self._ctx, sock) + + # If server_hostname is an IP, don't use it for SNI, per RFC6066 Section 3 + if server_hostname and not util.ssl_.is_ipaddress(server_hostname): + if isinstance(server_hostname, str): + server_hostname = server_hostname.encode("utf-8") + cnx.set_tlsext_host_name(server_hostname) + + cnx.set_connect_state() + + while True: + try: + cnx.do_handshake() + except OpenSSL.SSL.WantReadError as e: + if not util.wait_for_read(sock, sock.gettimeout()): + raise timeout("select timed out") from e + continue + except OpenSSL.SSL.Error as e: + raise ssl.SSLError(f"bad handshake: {e!r}") from e + break + + return WrappedSocket(cnx, sock) + + def _set_ctx_options(self) -> None: + self._ctx.set_options( + self._options + | _openssl_to_ssl_minimum_version[self._minimum_version] + | _openssl_to_ssl_maximum_version[self._maximum_version] + ) + + @property + def minimum_version(self) -> int: + return self._minimum_version + + @minimum_version.setter + def minimum_version(self, minimum_version: int) -> None: + self._minimum_version = minimum_version + self._set_ctx_options() + + @property + def maximum_version(self) -> int: + return self._maximum_version + + @maximum_version.setter + def maximum_version(self, maximum_version: int) -> None: + self._maximum_version = maximum_version + self._set_ctx_options() + + +def _verify_callback( + cnx: OpenSSL.SSL.Connection, + x509: X509, + err_no: int, + err_depth: int, + return_code: int, +) -> bool: + return err_no == 0 diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/socks.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/socks.py new file mode 100644 index 0000000000000000000000000000000000000000..c62b5e0332e7004d3a20da1fd555033f6f88780b --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/contrib/socks.py @@ -0,0 +1,228 @@ +""" +This module contains provisional support for SOCKS proxies from within +urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and +SOCKS5. To enable its functionality, either install PySocks or install this +module with the ``socks`` extra. + +The SOCKS implementation supports the full range of urllib3 features. It also +supports the following SOCKS features: + +- SOCKS4A (``proxy_url='socks4a://...``) +- SOCKS4 (``proxy_url='socks4://...``) +- SOCKS5 with remote DNS (``proxy_url='socks5h://...``) +- SOCKS5 with local DNS (``proxy_url='socks5://...``) +- Usernames and passwords for the SOCKS proxy + +.. note:: + It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in + your ``proxy_url`` to ensure that DNS resolution is done from the remote + server instead of client-side when connecting to a domain name. + +SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5 +supports IPv4, IPv6, and domain names. + +When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url`` +will be sent as the ``userid`` section of the SOCKS request: + +.. 
code-block:: python + + proxy_url="socks4a://<userid>@proxy-host" + +When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion +of the ``proxy_url`` will be sent as the username/password to authenticate +with the proxy: + +.. code-block:: python + + proxy_url="socks5h://<username>:<password>@proxy-host" + +""" + +from __future__ import annotations + +try: + import socks # type: ignore[import-not-found] +except ImportError: + import warnings + + from ..exceptions import DependencyWarning + + warnings.warn( + ( + "SOCKS support in urllib3 requires the installation of optional " + "dependencies: specifically, PySocks. For more information, see " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html#socks-proxies" + ), + DependencyWarning, + ) + raise + +import typing +from socket import timeout as SocketTimeout + +from ..connection import HTTPConnection, HTTPSConnection +from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool +from ..exceptions import ConnectTimeoutError, NewConnectionError +from ..poolmanager import PoolManager +from ..util.url import parse_url + +try: + import ssl +except ImportError: + ssl = None # type: ignore[assignment] + + +class _TYPE_SOCKS_OPTIONS(typing.TypedDict): + socks_version: int + proxy_host: str | None + proxy_port: str | None + username: str | None + password: str | None + rdns: bool + + +class SOCKSConnection(HTTPConnection): + """ + A plain-text HTTP connection that connects via a SOCKS proxy. + """ + + def __init__( + self, + _socks_options: _TYPE_SOCKS_OPTIONS, + *args: typing.Any, + **kwargs: typing.Any, + ) -> None: + self._socks_options = _socks_options + super().__init__(*args, **kwargs) + + def _new_conn(self) -> socks.socksocket: + """ + Establish a new connection via the SOCKS proxy. + """ + extra_kw: dict[str, typing.Any] = {} + if self.source_address: + extra_kw["source_address"] = self.source_address + + if self.socket_options: + extra_kw["socket_options"] = self.socket_options + + try: + conn = socks.create_connection( + (self.host, self.port), + proxy_type=self._socks_options["socks_version"], + proxy_addr=self._socks_options["proxy_host"], + proxy_port=self._socks_options["proxy_port"], + proxy_username=self._socks_options["username"], + proxy_password=self._socks_options["password"], + proxy_rdns=self._socks_options["rdns"], + timeout=self.timeout, + **extra_kw, + ) + + except SocketTimeout as e: + raise ConnectTimeoutError( + self, + f"Connection to {self.host} timed out. (connect timeout={self.timeout})", + ) from e + + except socks.ProxyError as e: + # This is fragile as hell, but it seems to be the only way to raise + # useful errors here. + if e.socket_err: + error = e.socket_err + if isinstance(error, SocketTimeout): + raise ConnectTimeoutError( + self, + f"Connection to {self.host} timed out. (connect timeout={self.timeout})", + ) from e + else: + # Adding `from e` messes with coverage somehow, so it's omitted. + # See #2386. + raise NewConnectionError( + self, f"Failed to establish a new connection: {error}" + ) + else: + raise NewConnectionError( + self, f"Failed to establish a new connection: {e}" + ) from e + + except OSError as e: # Defensive: PySocks should catch all these. + raise NewConnectionError( + self, f"Failed to establish a new connection: {e}" + ) from e + + return conn + + +# We don't need to duplicate the Verified/Unverified distinction from +# urllib3/connection.py here because the HTTPSConnection will already have been +# correctly set to either the Verified or Unverified form by that module. 
This +# means the SOCKSHTTPSConnection will automatically be the correct type. +class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): + pass + + +class SOCKSHTTPConnectionPool(HTTPConnectionPool): + ConnectionCls = SOCKSConnection + + +class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): + ConnectionCls = SOCKSHTTPSConnection + + +class SOCKSProxyManager(PoolManager): + """ + A version of the urllib3 ProxyManager that routes connections via the + defined SOCKS proxy. + """ + + pool_classes_by_scheme = { + "http": SOCKSHTTPConnectionPool, + "https": SOCKSHTTPSConnectionPool, + } + + def __init__( + self, + proxy_url: str, + username: str | None = None, + password: str | None = None, + num_pools: int = 10, + headers: typing.Mapping[str, str] | None = None, + **connection_pool_kw: typing.Any, + ): + parsed = parse_url(proxy_url) + + if username is None and password is None and parsed.auth is not None: + split = parsed.auth.split(":") + if len(split) == 2: + username, password = split + if parsed.scheme == "socks5": + socks_version = socks.PROXY_TYPE_SOCKS5 + rdns = False + elif parsed.scheme == "socks5h": + socks_version = socks.PROXY_TYPE_SOCKS5 + rdns = True + elif parsed.scheme == "socks4": + socks_version = socks.PROXY_TYPE_SOCKS4 + rdns = False + elif parsed.scheme == "socks4a": + socks_version = socks.PROXY_TYPE_SOCKS4 + rdns = True + else: + raise ValueError(f"Unable to determine SOCKS version from {proxy_url}") + + self.proxy_url = proxy_url + + socks_options = { + "socks_version": socks_version, + "proxy_host": parsed.host, + "proxy_port": parsed.port, + "username": username, + "password": password, + "rdns": rdns, + } + connection_pool_kw["_socks_options"] = socks_options + + super().__init__(num_pools, headers, **connection_pool_kw) + + self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/exceptions.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..0394578287c19042344f338c896c1178ba81fbd6 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/exceptions.py @@ -0,0 +1,327 @@ +from __future__ import annotations + +import socket +import typing +import warnings +from email.errors import MessageDefect +from http.client import IncompleteRead as httplib_IncompleteRead + +if typing.TYPE_CHECKING: + from .connection import HTTPConnection + from .connectionpool import ConnectionPool + from .response import HTTPResponse + from .util.retry import Retry + +# Base Exceptions + + +class HTTPError(Exception): + """Base exception used by this module.""" + + +class HTTPWarning(Warning): + """Base warning used by this module.""" + + +_TYPE_REDUCE_RESULT = tuple[typing.Callable[..., object], tuple[object, ...]] + + +class PoolError(HTTPError): + """Base exception for errors caused within a pool.""" + + def __init__(self, pool: ConnectionPool, message: str) -> None: + self.pool = pool + super().__init__(f"{pool}: {message}") + + def __reduce__(self) -> _TYPE_REDUCE_RESULT: + # For pickling purposes. + return self.__class__, (None, None) + + +class RequestError(PoolError): + """Base exception for PoolErrors that have associated URLs.""" + + def __init__(self, pool: ConnectionPool, url: str, message: str) -> None: + self.url = url + super().__init__(pool, message) + + def __reduce__(self) -> _TYPE_REDUCE_RESULT: + # For pickling purposes. 
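+ # The pool holds sockets and locks and is not picklable, so it is + # replaced with None; only the URL survives a pickle round-trip.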
+ return self.__class__, (None, self.url, None) + + +class SSLError(HTTPError): + """Raised when SSL certificate fails in an HTTPS connection.""" + + +class ProxyError(HTTPError): + """Raised when the connection to a proxy fails.""" + + # The original error is also available as __cause__. + original_error: Exception + + def __init__(self, message: str, error: Exception) -> None: + super().__init__(message, error) + self.original_error = error + + +class DecodeError(HTTPError): + """Raised when automatic decoding based on Content-Type fails.""" + + +class ProtocolError(HTTPError): + """Raised when something unexpected happens mid-request/response.""" + + +#: Renamed to ProtocolError but aliased for backwards compatibility. +ConnectionError = ProtocolError + + +# Leaf Exceptions + + +class MaxRetryError(RequestError): + """Raised when the maximum number of retries is exceeded. + + :param pool: The connection pool + :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` + :param str url: The requested Url + :param reason: The underlying error + :type reason: :class:`Exception` + + """ + + def __init__( + self, pool: ConnectionPool, url: str, reason: Exception | None = None + ) -> None: + self.reason = reason + + message = f"Max retries exceeded with url: {url} (Caused by {reason!r})" + + super().__init__(pool, url, message) + + +class HostChangedError(RequestError): + """Raised when an existing pool gets a request for a foreign host.""" + + def __init__( + self, pool: ConnectionPool, url: str, retries: Retry | int = 3 + ) -> None: + message = f"Tried to open a foreign host with url: {url}" + super().__init__(pool, url, message) + self.retries = retries + + +class TimeoutStateError(HTTPError): + """Raised when passing an invalid state to a timeout""" + + +class TimeoutError(HTTPError): + """Raised when a socket timeout error occurs. + + Catching this error will catch both :exc:`ReadTimeoutErrors + <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`. + """ + + +class ReadTimeoutError(TimeoutError, RequestError): + """Raised when a socket timeout occurs while receiving data from a server""" + + +# This timeout error does not have a URL attached and needs to inherit from the +# base HTTPError +class ConnectTimeoutError(TimeoutError): + """Raised when a socket timeout occurs while connecting to a server""" + + +class NewConnectionError(ConnectTimeoutError, HTTPError): + """Raised when we fail to establish a new connection. Usually ECONNREFUSED.""" + + def __init__(self, conn: HTTPConnection, message: str) -> None: + self.conn = conn + super().__init__(f"{conn}: {message}") + + def __reduce__(self) -> _TYPE_REDUCE_RESULT: + # For pickling purposes. + return self.__class__, (None, None) + + @property + def pool(self) -> HTTPConnection: + warnings.warn( + "The 'pool' property is deprecated and will be removed " + "in urllib3 v2.1.0. Use 'conn' instead.", + DeprecationWarning, + stacklevel=2, + ) + + return self.conn + + +class NameResolutionError(NewConnectionError): + """Raised when host name resolution fails.""" + + def __init__(self, host: str, conn: HTTPConnection, reason: socket.gaierror): + message = f"Failed to resolve '{host}' ({reason})" + super().__init__(conn, message) + + def __reduce__(self) -> _TYPE_REDUCE_RESULT: + # For pickling purposes. 
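+ # Host, connection, and the original gaierror are all dropped here; an + # unpickled copy is rebuilt from placeholder arguments.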
+ return self.__class__, (None, None, None) + + +class EmptyPoolError(PoolError): + """Raised when a pool runs out of connections and no more are allowed.""" + + +class FullPoolError(PoolError): + """Raised when we try to add a connection to a full pool in blocking mode.""" + + +class ClosedPoolError(PoolError): + """Raised when a request enters a pool after the pool has been closed.""" + + +class LocationValueError(ValueError, HTTPError): + """Raised when there is something wrong with a given URL input.""" + + +class LocationParseError(LocationValueError): + """Raised when get_host or similar fails to parse the URL input.""" + + def __init__(self, location: str) -> None: + message = f"Failed to parse: {location}" + super().__init__(message) + + self.location = location + + +class URLSchemeUnknown(LocationValueError): + """Raised when a URL input has an unsupported scheme.""" + + def __init__(self, scheme: str): + message = f"Not supported URL scheme {scheme}" + super().__init__(message) + + self.scheme = scheme + + +class ResponseError(HTTPError): + """Used as a container for an error reason supplied in a MaxRetryError.""" + + GENERIC_ERROR = "too many error responses" + SPECIFIC_ERROR = "too many {status_code} error responses" + + +class SecurityWarning(HTTPWarning): + """Warned when performing security reducing actions""" + + +class InsecureRequestWarning(SecurityWarning): + """Warned when making an unverified HTTPS request.""" + + +class NotOpenSSLWarning(SecurityWarning): + """Warned when using unsupported SSL library""" + + +class SystemTimeWarning(SecurityWarning): + """Warned when system time is suspected to be wrong""" + + +class InsecurePlatformWarning(SecurityWarning): + """Warned when certain TLS/SSL configuration is not available on a platform.""" + + +class DependencyWarning(HTTPWarning): + """ + Warned when an attempt is made to import a module with missing optional + dependencies. + """ + + +class ResponseNotChunked(ProtocolError, ValueError): + """Response needs to be chunked in order to read it as chunks.""" + + +class BodyNotHttplibCompatible(HTTPError): + """ + Body should be :class:`http.client.HTTPResponse` like + (have an fp attribute which returns raw chunks) for read_chunked(). + """ + + +class IncompleteRead(HTTPError, httplib_IncompleteRead): + """ + Response length doesn't match expected Content-Length + + Subclass of :class:`http.client.IncompleteRead` to allow int value + for ``partial`` to avoid creating large objects on streamed reads. 
+ """ + + partial: int # type: ignore[assignment] + expected: int + + def __init__(self, partial: int, expected: int) -> None: + self.partial = partial + self.expected = expected + + def __repr__(self) -> str: + return "IncompleteRead(%i bytes read, %i more expected)" % ( + self.partial, + self.expected, + ) + + +class InvalidChunkLength(HTTPError, httplib_IncompleteRead): + """Invalid chunk length in a chunked response.""" + + def __init__(self, response: HTTPResponse, length: bytes) -> None: + self.partial: int = response.tell() # type: ignore[assignment] + self.expected: int | None = response.length_remaining + self.response = response + self.length = length + + def __repr__(self) -> str: + return "InvalidChunkLength(got length %r, %i bytes read)" % ( + self.length, + self.partial, + ) + + +class InvalidHeader(HTTPError): + """The header provided was somehow invalid.""" + + +class ProxySchemeUnknown(AssertionError, URLSchemeUnknown): + """ProxyManager does not support the supplied scheme""" + + # TODO(t-8ch): Stop inheriting from AssertionError in v2.0. + + def __init__(self, scheme: str | None) -> None: + # 'localhost' is here because our URL parser parses + # localhost:8080 -> scheme=localhost, remove if we fix this. + if scheme == "localhost": + scheme = None + if scheme is None: + message = "Proxy URL had no scheme, should start with http:// or https://" + else: + message = f"Proxy URL had unsupported scheme {scheme}, should use http:// or https://" + super().__init__(message) + + +class ProxySchemeUnsupported(ValueError): + """Fetching HTTPS resources through HTTPS proxies is unsupported""" + + +class HeaderParsingError(HTTPError): + """Raised by assert_header_parsing, but we convert it to a log.warning statement.""" + + def __init__( + self, defects: list[MessageDefect], unparsed_data: bytes | str | None + ) -> None: + message = f"{defects or 'Unknown'}, unparsed data: {unparsed_data!r}" + super().__init__(message) + + +class UnrewindableBodyError(HTTPError): + """urllib3 encountered an error when trying to rewind a body""" diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/fields.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/fields.py new file mode 100644 index 0000000000000000000000000000000000000000..97c4730cff0df570e1ab47f77e6aa879ec3c36e7 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/fields.py @@ -0,0 +1,341 @@ +from __future__ import annotations + +import email.utils +import mimetypes +import typing + +_TYPE_FIELD_VALUE = typing.Union[str, bytes] +_TYPE_FIELD_VALUE_TUPLE = typing.Union[ + _TYPE_FIELD_VALUE, + tuple[str, _TYPE_FIELD_VALUE], + tuple[str, _TYPE_FIELD_VALUE, str], +] + + +def guess_content_type( + filename: str | None, default: str = "application/octet-stream" +) -> str: + """ + Guess the "Content-Type" of a file. + + :param filename: + The filename to guess the "Content-Type" of using :mod:`mimetypes`. + :param default: + If no "Content-Type" can be guessed, default to `default`. + """ + if filename: + return mimetypes.guess_type(filename)[0] or default + return default + + +def format_header_param_rfc2231(name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + Helper function to format and quote a single header parameter using the + strategy defined in RFC 2231. + + Particularly useful for header parameters which might contain + non-ASCII values, like file names. This follows + `RFC 2388 Section 4.4 `_. + + :param name: + The name of the parameter, a string expected to be ASCII only. 
+ :param value: + The value of the parameter, provided as ``bytes`` or `str``. + :returns: + An RFC-2231-formatted unicode string. + + .. deprecated:: 2.0.0 + Will be removed in urllib3 v2.1.0. This is not valid for + ``multipart/form-data`` header parameters. + """ + import warnings + + warnings.warn( + "'format_header_param_rfc2231' is deprecated and will be " + "removed in urllib3 v2.1.0. This is not valid for " + "multipart/form-data header parameters.", + DeprecationWarning, + stacklevel=2, + ) + + if isinstance(value, bytes): + value = value.decode("utf-8") + + if not any(ch in value for ch in '"\\\r\n'): + result = f'{name}="{value}"' + try: + result.encode("ascii") + except (UnicodeEncodeError, UnicodeDecodeError): + pass + else: + return result + + value = email.utils.encode_rfc2231(value, "utf-8") + value = f"{name}*={value}" + + return value + + +def format_multipart_header_param(name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + Format and quote a single multipart header parameter. + + This follows the `WHATWG HTML Standard`_ as of 2021/06/10, matching + the behavior of current browser and curl versions. Values are + assumed to be UTF-8. The ``\\n``, ``\\r``, and ``"`` characters are + percent encoded. + + .. _WHATWG HTML Standard: + https://html.spec.whatwg.org/multipage/ + form-control-infrastructure.html#multipart-form-data + + :param name: + The name of the parameter, an ASCII-only ``str``. + :param value: + The value of the parameter, a ``str`` or UTF-8 encoded + ``bytes``. + :returns: + A string ``name="value"`` with the escaped value. + + .. versionchanged:: 2.0.0 + Matches the WHATWG HTML Standard as of 2021/06/10. Control + characters are no longer percent encoded. + + .. versionchanged:: 2.0.0 + Renamed from ``format_header_param_html5`` and + ``format_header_param``. The old names will be removed in + urllib3 v2.1.0. + """ + if isinstance(value, bytes): + value = value.decode("utf-8") + + # percent encode \n \r " + value = value.translate({10: "%0A", 13: "%0D", 34: "%22"}) + return f'{name}="{value}"' + + +def format_header_param_html5(name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + .. deprecated:: 2.0.0 + Renamed to :func:`format_multipart_header_param`. Will be + removed in urllib3 v2.1.0. + """ + import warnings + + warnings.warn( + "'format_header_param_html5' has been renamed to " + "'format_multipart_header_param'. The old name will be " + "removed in urllib3 v2.1.0.", + DeprecationWarning, + stacklevel=2, + ) + return format_multipart_header_param(name, value) + + +def format_header_param(name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + .. deprecated:: 2.0.0 + Renamed to :func:`format_multipart_header_param`. Will be + removed in urllib3 v2.1.0. + """ + import warnings + + warnings.warn( + "'format_header_param' has been renamed to " + "'format_multipart_header_param'. The old name will be " + "removed in urllib3 v2.1.0.", + DeprecationWarning, + stacklevel=2, + ) + return format_multipart_header_param(name, value) + + +class RequestField: + """ + A data container for request body parameters. + + :param name: + The name of this request field. Must be unicode. + :param data: + The data/value body. + :param filename: + An optional filename of the request field. Must be unicode. + :param headers: + An optional dict-like object of headers to initially use for the field. + + .. versionchanged:: 2.0.0 + The ``header_formatter`` parameter is deprecated and will + be removed in urllib3 v2.1.0. 
+ """ + + def __init__( + self, + name: str, + data: _TYPE_FIELD_VALUE, + filename: str | None = None, + headers: typing.Mapping[str, str] | None = None, + header_formatter: typing.Callable[[str, _TYPE_FIELD_VALUE], str] | None = None, + ): + self._name = name + self._filename = filename + self.data = data + self.headers: dict[str, str | None] = {} + if headers: + self.headers = dict(headers) + + if header_formatter is not None: + import warnings + + warnings.warn( + "The 'header_formatter' parameter is deprecated and " + "will be removed in urllib3 v2.1.0.", + DeprecationWarning, + stacklevel=2, + ) + self.header_formatter = header_formatter + else: + self.header_formatter = format_multipart_header_param + + @classmethod + def from_tuples( + cls, + fieldname: str, + value: _TYPE_FIELD_VALUE_TUPLE, + header_formatter: typing.Callable[[str, _TYPE_FIELD_VALUE], str] | None = None, + ) -> RequestField: + """ + A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. + + Supports constructing :class:`~urllib3.fields.RequestField` from + parameter of key/value strings AND key/filetuple. A filetuple is a + (filename, data, MIME type) tuple where the MIME type is optional. + For example:: + + 'foo': 'bar', + 'fakefile': ('foofile.txt', 'contents of foofile'), + 'realfile': ('barfile.txt', open('realfile').read()), + 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), + 'nonamefile': 'contents of nonamefile field', + + Field names and filenames must be unicode. + """ + filename: str | None + content_type: str | None + data: _TYPE_FIELD_VALUE + + if isinstance(value, tuple): + if len(value) == 3: + filename, data, content_type = value + else: + filename, data = value + content_type = guess_content_type(filename) + else: + filename = None + content_type = None + data = value + + request_param = cls( + fieldname, data, filename=filename, header_formatter=header_formatter + ) + request_param.make_multipart(content_type=content_type) + + return request_param + + def _render_part(self, name: str, value: _TYPE_FIELD_VALUE) -> str: + """ + Override this method to change how each multipart header + parameter is formatted. By default, this calls + :func:`format_multipart_header_param`. + + :param name: + The name of the parameter, an ASCII-only ``str``. + :param value: + The value of the parameter, a ``str`` or UTF-8 encoded + ``bytes``. + + :meta public: + """ + return self.header_formatter(name, value) + + def _render_parts( + self, + header_parts: ( + dict[str, _TYPE_FIELD_VALUE | None] + | typing.Sequence[tuple[str, _TYPE_FIELD_VALUE | None]] + ), + ) -> str: + """ + Helper function to format and quote a single header. + + Useful for single headers that are composed of multiple items. E.g., + 'Content-Disposition' fields. + + :param header_parts: + A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format + as `k1="v1"; k2="v2"; ...`. + """ + iterable: typing.Iterable[tuple[str, _TYPE_FIELD_VALUE | None]] + + parts = [] + if isinstance(header_parts, dict): + iterable = header_parts.items() + else: + iterable = header_parts + + for name, value in iterable: + if value is not None: + parts.append(self._render_part(name, value)) + + return "; ".join(parts) + + def render_headers(self) -> str: + """ + Renders the headers for this request field. 
+ """ + lines = [] + + sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"] + for sort_key in sort_keys: + if self.headers.get(sort_key, False): + lines.append(f"{sort_key}: {self.headers[sort_key]}") + + for header_name, header_value in self.headers.items(): + if header_name not in sort_keys: + if header_value: + lines.append(f"{header_name}: {header_value}") + + lines.append("\r\n") + return "\r\n".join(lines) + + def make_multipart( + self, + content_disposition: str | None = None, + content_type: str | None = None, + content_location: str | None = None, + ) -> None: + """ + Makes this request field into a multipart request field. + + This method overrides "Content-Disposition", "Content-Type" and + "Content-Location" headers to the request parameter. + + :param content_disposition: + The 'Content-Disposition' of the request body. Defaults to 'form-data' + :param content_type: + The 'Content-Type' of the request body. + :param content_location: + The 'Content-Location' of the request body. + + """ + content_disposition = (content_disposition or "form-data") + "; ".join( + [ + "", + self._render_parts( + (("name", self._name), ("filename", self._filename)) + ), + ] + ) + + self.headers["Content-Disposition"] = content_disposition + self.headers["Content-Type"] = content_type + self.headers["Content-Location"] = content_location diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/filepost.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/filepost.py new file mode 100644 index 0000000000000000000000000000000000000000..14f70b05b4778f91137e4a9e7059d7514aa44d28 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/filepost.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +import binascii +import codecs +import os +import typing +from io import BytesIO + +from .fields import _TYPE_FIELD_VALUE_TUPLE, RequestField + +writer = codecs.lookup("utf-8")[3] + +_TYPE_FIELDS_SEQUENCE = typing.Sequence[ + typing.Union[tuple[str, _TYPE_FIELD_VALUE_TUPLE], RequestField] +] +_TYPE_FIELDS = typing.Union[ + _TYPE_FIELDS_SEQUENCE, + typing.Mapping[str, _TYPE_FIELD_VALUE_TUPLE], +] + + +def choose_boundary() -> str: + """ + Our embarrassingly-simple replacement for mimetools.choose_boundary. + """ + return binascii.hexlify(os.urandom(16)).decode() + + +def iter_field_objects(fields: _TYPE_FIELDS) -> typing.Iterable[RequestField]: + """ + Iterate over fields. + + Supports list of (k, v) tuples and dicts, and lists of + :class:`~urllib3.fields.RequestField`. + + """ + iterable: typing.Iterable[RequestField | tuple[str, _TYPE_FIELD_VALUE_TUPLE]] + + if isinstance(fields, typing.Mapping): + iterable = fields.items() + else: + iterable = fields + + for field in iterable: + if isinstance(field, RequestField): + yield field + else: + yield RequestField.from_tuples(*field) + + +def encode_multipart_formdata( + fields: _TYPE_FIELDS, boundary: str | None = None +) -> tuple[bytes, str]: + """ + Encode a dictionary of ``fields`` using the multipart/form-data MIME format. + + :param fields: + Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). + Values are processed by :func:`urllib3.fields.RequestField.from_tuples`. + + :param boundary: + If not specified, then a random boundary will be generated using + :func:`urllib3.filepost.choose_boundary`. 
+ """ + body = BytesIO() + if boundary is None: + boundary = choose_boundary() + + for field in iter_field_objects(fields): + body.write(f"--{boundary}\r\n".encode("latin-1")) + + writer(body).write(field.render_headers()) + data = field.data + + if isinstance(data, int): + data = str(data) # Backwards compatibility + + if isinstance(data, str): + writer(body).write(data) + else: + body.write(data) + + body.write(b"\r\n") + + body.write(f"--{boundary}--\r\n".encode("latin-1")) + + content_type = f"multipart/form-data; boundary={boundary}" + + return body.getvalue(), content_type diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/poolmanager.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/poolmanager.py new file mode 100644 index 0000000000000000000000000000000000000000..085d1dbafdb3d8141523b2b0e93fdd26845e3aa0 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/poolmanager.py @@ -0,0 +1,637 @@ +from __future__ import annotations + +import functools +import logging +import typing +import warnings +from types import TracebackType +from urllib.parse import urljoin + +from ._collections import HTTPHeaderDict, RecentlyUsedContainer +from ._request_methods import RequestMethods +from .connection import ProxyConfig +from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme +from .exceptions import ( + LocationValueError, + MaxRetryError, + ProxySchemeUnknown, + URLSchemeUnknown, +) +from .response import BaseHTTPResponse +from .util.connection import _TYPE_SOCKET_OPTIONS +from .util.proxy import connection_requires_http_tunnel +from .util.retry import Retry +from .util.timeout import Timeout +from .util.url import Url, parse_url + +if typing.TYPE_CHECKING: + import ssl + + from typing_extensions import Self + +__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] + + +log = logging.getLogger(__name__) + +SSL_KEYWORDS = ( + "key_file", + "cert_file", + "cert_reqs", + "ca_certs", + "ca_cert_data", + "ssl_version", + "ssl_minimum_version", + "ssl_maximum_version", + "ca_cert_dir", + "ssl_context", + "key_password", + "server_hostname", +) +# Default value for `blocksize` - a new parameter introduced to +# http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 +_DEFAULT_BLOCKSIZE = 16384 + + +class PoolKey(typing.NamedTuple): + """ + All known keyword arguments that could be provided to the pool manager, its + pools, or the underlying connections. + + All custom key schemes should include the fields in this key at a minimum. 
+ """ + + key_scheme: str + key_host: str + key_port: int | None + key_timeout: Timeout | float | int | None + key_retries: Retry | bool | int | None + key_block: bool | None + key_source_address: tuple[str, int] | None + key_key_file: str | None + key_key_password: str | None + key_cert_file: str | None + key_cert_reqs: str | None + key_ca_certs: str | None + key_ca_cert_data: str | bytes | None + key_ssl_version: int | str | None + key_ssl_minimum_version: ssl.TLSVersion | None + key_ssl_maximum_version: ssl.TLSVersion | None + key_ca_cert_dir: str | None + key_ssl_context: ssl.SSLContext | None + key_maxsize: int | None + key_headers: frozenset[tuple[str, str]] | None + key__proxy: Url | None + key__proxy_headers: frozenset[tuple[str, str]] | None + key__proxy_config: ProxyConfig | None + key_socket_options: _TYPE_SOCKET_OPTIONS | None + key__socks_options: frozenset[tuple[str, str]] | None + key_assert_hostname: bool | str | None + key_assert_fingerprint: str | None + key_server_hostname: str | None + key_blocksize: int | None + + +def _default_key_normalizer( + key_class: type[PoolKey], request_context: dict[str, typing.Any] +) -> PoolKey: + """ + Create a pool key out of a request context dictionary. + + According to RFC 3986, both the scheme and host are case-insensitive. + Therefore, this function normalizes both before constructing the pool + key for an HTTPS request. If you wish to change this behaviour, provide + alternate callables to ``key_fn_by_scheme``. + + :param key_class: + The class to use when constructing the key. This should be a namedtuple + with the ``scheme`` and ``host`` keys at a minimum. + :type key_class: namedtuple + :param request_context: + A dictionary-like object that contain the context for a request. + :type request_context: dict + + :return: A namedtuple that can be used as a connection pool key. + :rtype: PoolKey + """ + # Since we mutate the dictionary, make a copy first + context = request_context.copy() + context["scheme"] = context["scheme"].lower() + context["host"] = context["host"].lower() + + # These are both dictionaries and need to be transformed into frozensets + for key in ("headers", "_proxy_headers", "_socks_options"): + if key in context and context[key] is not None: + context[key] = frozenset(context[key].items()) + + # The socket_options key may be a list and needs to be transformed into a + # tuple. + socket_opts = context.get("socket_options") + if socket_opts is not None: + context["socket_options"] = tuple(socket_opts) + + # Map the kwargs to the names in the namedtuple - this is necessary since + # namedtuples can't have fields starting with '_'. + for key in list(context.keys()): + context["key_" + key] = context.pop(key) + + # Default to ``None`` for keys missing from the context + for field in key_class._fields: + if field not in context: + context[field] = None + + # Default key_blocksize to _DEFAULT_BLOCKSIZE if missing from the context + if context.get("key_blocksize") is None: + context["key_blocksize"] = _DEFAULT_BLOCKSIZE + + return key_class(**context) + + +#: A dictionary that maps a scheme to a callable that creates a pool key. +#: This can be used to alter the way pool keys are constructed, if desired. +#: Each PoolManager makes a copy of this dictionary so they can be configured +#: globally here, or individually on the instance. 
+key_fn_by_scheme = { + "http": functools.partial(_default_key_normalizer, PoolKey), + "https": functools.partial(_default_key_normalizer, PoolKey), +} + +pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} + + +class PoolManager(RequestMethods): + """ + Allows for arbitrary requests while transparently keeping track of + necessary connection pools for you. + + :param num_pools: + Number of connection pools to cache before discarding the least + recently used pool. + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + + :param \\**connection_pool_kw: + Additional parameters are used to create fresh + :class:`urllib3.connectionpool.ConnectionPool` instances. + + Example: + + .. code-block:: python + + import urllib3 + + http = urllib3.PoolManager(num_pools=2) + + resp1 = http.request("GET", "https://google.com/") + resp2 = http.request("GET", "https://google.com/mail") + resp3 = http.request("GET", "https://yahoo.com/") + + print(len(http.pools)) + # 2 + + """ + + proxy: Url | None = None + proxy_config: ProxyConfig | None = None + + def __init__( + self, + num_pools: int = 10, + headers: typing.Mapping[str, str] | None = None, + **connection_pool_kw: typing.Any, + ) -> None: + super().__init__(headers) + self.connection_pool_kw = connection_pool_kw + + self.pools: RecentlyUsedContainer[PoolKey, HTTPConnectionPool] + self.pools = RecentlyUsedContainer(num_pools) + + # Locally set the pool classes and keys so other PoolManagers can + # override them. + self.pool_classes_by_scheme = pool_classes_by_scheme + self.key_fn_by_scheme = key_fn_by_scheme.copy() + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> typing.Literal[False]: + self.clear() + # Return False to re-raise any potential exceptions + return False + + def _new_pool( + self, + scheme: str, + host: str, + port: int, + request_context: dict[str, typing.Any] | None = None, + ) -> HTTPConnectionPool: + """ + Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and + any additional pool keyword arguments. + + If ``request_context`` is provided, it is provided as keyword arguments + to the pool class used. This method is used to actually create the + connection pools handed out by :meth:`connection_from_url` and + companion methods. It is intended to be overridden for customization. + """ + pool_cls: type[HTTPConnectionPool] = self.pool_classes_by_scheme[scheme] + if request_context is None: + request_context = self.connection_pool_kw.copy() + + # Default blocksize to _DEFAULT_BLOCKSIZE if missing or explicitly + # set to 'None' in the request_context. + if request_context.get("blocksize") is None: + request_context["blocksize"] = _DEFAULT_BLOCKSIZE + + # Although the context has everything necessary to create the pool, + # this function has historically only used the scheme, host, and port + # in the positional args. When an API change is acceptable these can + # be removed. + for key in ("scheme", "host", "port"): + request_context.pop(key, None) + + if scheme == "http": + for kw in SSL_KEYWORDS: + request_context.pop(kw, None) + + return pool_cls(host, port, **request_context) + + def clear(self) -> None: + """ + Empty our store of pools and direct them all to close. + + This will not affect in-flight connections, but they will not be + re-used after completion. 
+ """ + self.pools.clear() + + def connection_from_host( + self, + host: str | None, + port: int | None = None, + scheme: str | None = "http", + pool_kwargs: dict[str, typing.Any] | None = None, + ) -> HTTPConnectionPool: + """ + Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. + + If ``port`` isn't given, it will be derived from the ``scheme`` using + ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is + provided, it is merged with the instance's ``connection_pool_kw`` + variable and used to create the new connection pool, if one is + needed. + """ + + if not host: + raise LocationValueError("No host specified.") + + request_context = self._merge_pool_kwargs(pool_kwargs) + request_context["scheme"] = scheme or "http" + if not port: + port = port_by_scheme.get(request_context["scheme"].lower(), 80) + request_context["port"] = port + request_context["host"] = host + + return self.connection_from_context(request_context) + + def connection_from_context( + self, request_context: dict[str, typing.Any] + ) -> HTTPConnectionPool: + """ + Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. + + ``request_context`` must at least contain the ``scheme`` key and its + value must be a key in ``key_fn_by_scheme`` instance variable. + """ + if "strict" in request_context: + warnings.warn( + "The 'strict' parameter is no longer needed on Python 3+. " + "This will raise an error in urllib3 v2.1.0.", + DeprecationWarning, + ) + request_context.pop("strict") + + scheme = request_context["scheme"].lower() + pool_key_constructor = self.key_fn_by_scheme.get(scheme) + if not pool_key_constructor: + raise URLSchemeUnknown(scheme) + pool_key = pool_key_constructor(request_context) + + return self.connection_from_pool_key(pool_key, request_context=request_context) + + def connection_from_pool_key( + self, pool_key: PoolKey, request_context: dict[str, typing.Any] + ) -> HTTPConnectionPool: + """ + Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. + + ``pool_key`` should be a namedtuple that only contains immutable + objects. At a minimum it must have the ``scheme``, ``host``, and + ``port`` fields. + """ + with self.pools.lock: + # If the scheme, host, or port doesn't match existing open + # connections, open a new ConnectionPool. + pool = self.pools.get(pool_key) + if pool: + return pool + + # Make a fresh ConnectionPool of the desired type + scheme = request_context["scheme"] + host = request_context["host"] + port = request_context["port"] + pool = self._new_pool(scheme, host, port, request_context=request_context) + self.pools[pool_key] = pool + + return pool + + def connection_from_url( + self, url: str, pool_kwargs: dict[str, typing.Any] | None = None + ) -> HTTPConnectionPool: + """ + Similar to :func:`urllib3.connectionpool.connection_from_url`. + + If ``pool_kwargs`` is not provided and a new pool needs to be + constructed, ``self.connection_pool_kw`` is used to initialize + the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` + is provided, it is used instead. Note that if a new pool does not + need to be created for the request, the provided ``pool_kwargs`` are + not used. 
+ """ + u = parse_url(url) + return self.connection_from_host( + u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs + ) + + def _merge_pool_kwargs( + self, override: dict[str, typing.Any] | None + ) -> dict[str, typing.Any]: + """ + Merge a dictionary of override values for self.connection_pool_kw. + + This does not modify self.connection_pool_kw and returns a new dict. + Any keys in the override dictionary with a value of ``None`` are + removed from the merged dictionary. + """ + base_pool_kwargs = self.connection_pool_kw.copy() + if override: + for key, value in override.items(): + if value is None: + try: + del base_pool_kwargs[key] + except KeyError: + pass + else: + base_pool_kwargs[key] = value + return base_pool_kwargs + + def _proxy_requires_url_absolute_form(self, parsed_url: Url) -> bool: + """ + Indicates if the proxy requires the complete destination URL in the + request. Normally this is only needed when not using an HTTP CONNECT + tunnel. + """ + if self.proxy is None: + return False + + return not connection_requires_http_tunnel( + self.proxy, self.proxy_config, parsed_url.scheme + ) + + def urlopen( # type: ignore[override] + self, method: str, url: str, redirect: bool = True, **kw: typing.Any + ) -> BaseHTTPResponse: + """ + Same as :meth:`urllib3.HTTPConnectionPool.urlopen` + with custom cross-host redirect logic and only sends the request-uri + portion of the ``url``. + + The given ``url`` parameter must be absolute, such that an appropriate + :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. + """ + u = parse_url(url) + + if u.scheme is None: + warnings.warn( + "URLs without a scheme (ie 'https://') are deprecated and will raise an error " + "in a future version of urllib3. To avoid this DeprecationWarning ensure all URLs " + "start with 'https://' or 'http://'. Read more in this issue: " + "https://github.com/urllib3/urllib3/issues/2920", + category=DeprecationWarning, + stacklevel=2, + ) + + conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) + + kw["assert_same_host"] = False + kw["redirect"] = False + + if "headers" not in kw: + kw["headers"] = self.headers + + if self._proxy_requires_url_absolute_form(u): + response = conn.urlopen(method, url, **kw) + else: + response = conn.urlopen(method, u.request_uri, **kw) + + redirect_location = redirect and response.get_redirect_location() + if not redirect_location: + return response + + # Support relative URLs for redirecting. + redirect_location = urljoin(url, redirect_location) + + if response.status == 303: + # Change the method according to RFC 9110, Section 15.4.4. + method = "GET" + # And lose the body not to transfer anything sensitive. + kw["body"] = None + kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change() + + retries = kw.get("retries") + if not isinstance(retries, Retry): + retries = Retry.from_int(retries, redirect=redirect) + + # Strip headers marked as unsafe to forward to the redirected location. + # Check remove_headers_on_redirect to avoid a potential network call within + # conn.is_same_host() which may use socket.gethostbyname() in the future. 
+ if retries.remove_headers_on_redirect and not conn.is_same_host( + redirect_location + ): + new_headers = kw["headers"].copy() + for header in kw["headers"]: + if header.lower() in retries.remove_headers_on_redirect: + new_headers.pop(header, None) + kw["headers"] = new_headers + + try: + retries = retries.increment(method, url, response=response, _pool=conn) + except MaxRetryError: + if retries.raise_on_redirect: + response.drain_conn() + raise + return response + + kw["retries"] = retries + kw["redirect"] = redirect + + log.info("Redirecting %s -> %s", url, redirect_location) + + response.drain_conn() + return self.urlopen(method, redirect_location, **kw) + + +class ProxyManager(PoolManager): + """ + Behaves just like :class:`PoolManager`, but sends all requests through + the defined proxy, using the CONNECT method for HTTPS URLs. + + :param proxy_url: + The URL of the proxy to be used. + + :param proxy_headers: + A dictionary containing headers that will be sent to the proxy. In case + of HTTP they are being sent with each request, while in the + HTTPS/CONNECT case they are sent only once. Could be used for proxy + authentication. + + :param proxy_ssl_context: + The proxy SSL context is used to establish the TLS connection to the + proxy when using HTTPS proxies. + + :param use_forwarding_for_https: + (Defaults to False) If set to True will forward requests to the HTTPS + proxy to be made on behalf of the client instead of creating a TLS + tunnel via the CONNECT method. **Enabling this flag means that request + and response headers and content will be visible from the HTTPS proxy** + whereas tunneling keeps request and response headers and content + private. IP address, target hostname, SNI, and port are always visible + to an HTTPS proxy even when this flag is disabled. + + :param proxy_assert_hostname: + The hostname of the certificate to verify against. + + :param proxy_assert_fingerprint: + The fingerprint of the certificate to verify against. + + Example: + + .. 
code-block:: python + + import urllib3 + + proxy = urllib3.ProxyManager("https://localhost:3128/") + + resp1 = proxy.request("GET", "https://google.com/") + resp2 = proxy.request("GET", "https://httpbin.org/") + + print(len(proxy.pools)) + # 1 + + resp3 = proxy.request("GET", "https://httpbin.org/") + resp4 = proxy.request("GET", "https://twitter.com/") + + print(len(proxy.pools)) + # 3 + + """ + + def __init__( + self, + proxy_url: str, + num_pools: int = 10, + headers: typing.Mapping[str, str] | None = None, + proxy_headers: typing.Mapping[str, str] | None = None, + proxy_ssl_context: ssl.SSLContext | None = None, + use_forwarding_for_https: bool = False, + proxy_assert_hostname: None | str | typing.Literal[False] = None, + proxy_assert_fingerprint: str | None = None, + **connection_pool_kw: typing.Any, + ) -> None: + if isinstance(proxy_url, HTTPConnectionPool): + str_proxy_url = f"{proxy_url.scheme}://{proxy_url.host}:{proxy_url.port}" + else: + str_proxy_url = proxy_url + proxy = parse_url(str_proxy_url) + + if proxy.scheme not in ("http", "https"): + raise ProxySchemeUnknown(proxy.scheme) + + if not proxy.port: + port = port_by_scheme.get(proxy.scheme, 80) + proxy = proxy._replace(port=port) + + self.proxy = proxy + self.proxy_headers = proxy_headers or {} + self.proxy_ssl_context = proxy_ssl_context + self.proxy_config = ProxyConfig( + proxy_ssl_context, + use_forwarding_for_https, + proxy_assert_hostname, + proxy_assert_fingerprint, + ) + + connection_pool_kw["_proxy"] = self.proxy + connection_pool_kw["_proxy_headers"] = self.proxy_headers + connection_pool_kw["_proxy_config"] = self.proxy_config + + super().__init__(num_pools, headers, **connection_pool_kw) + + def connection_from_host( + self, + host: str | None, + port: int | None = None, + scheme: str | None = "http", + pool_kwargs: dict[str, typing.Any] | None = None, + ) -> HTTPConnectionPool: + if scheme == "https": + return super().connection_from_host( + host, port, scheme, pool_kwargs=pool_kwargs + ) + + return super().connection_from_host( + self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs # type: ignore[union-attr] + ) + + def _set_proxy_headers( + self, url: str, headers: typing.Mapping[str, str] | None = None + ) -> typing.Mapping[str, str]: + """ + Sets headers needed by proxies: specifically, the Accept and Host + headers. Only sets headers not provided by the user. + """ + headers_ = {"Accept": "*/*"} + + netloc = parse_url(url).netloc + if netloc: + headers_["Host"] = netloc + + if headers: + headers_.update(headers) + return headers_ + + def urlopen( # type: ignore[override] + self, method: str, url: str, redirect: bool = True, **kw: typing.Any + ) -> BaseHTTPResponse: + "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." + u = parse_url(url) + if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme): + # For connections using HTTP CONNECT, httplib sets the necessary + # headers on the CONNECT to the proxy. If we're not using CONNECT, + # we'll definitely need to set 'Host' at the very least. 
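A minimal sketch of proxy usage with the parameters documented above; the proxy URL and credentials are illustrative.

.. code-block:: python

    import urllib3

    auth_headers = urllib3.make_headers(proxy_basic_auth="user:passwd")
    proxy = urllib3.ProxyManager(
        "http://localhost:3128/", proxy_headers=auth_headers
    )

    # http:// targets are forwarded (absolute-form request line, headers
    # visible to the proxy); https:// targets get a CONNECT tunnel unless
    # use_forwarding_for_https=True was requested.
    resp = proxy.request("GET", "http://example.com/")
    print(resp.status)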
+ headers = kw.get("headers", self.headers) + kw["headers"] = self._set_proxy_headers(url, headers) + + return super().urlopen(method, url, redirect=redirect, **kw) + + +def proxy_from_url(url: str, **kw: typing.Any) -> ProxyManager: + return ProxyManager(proxy_url=url, **kw) diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/py.typed b/infer_4_33_0/lib/python3.10/site-packages/urllib3/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..5f3ea3d919363f08ab03edbc85b6099bc4df5647 --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/py.typed @@ -0,0 +1,2 @@ +# Instruct type checkers to look for inline type annotations in this package. +# See PEP 561. diff --git a/infer_4_33_0/lib/python3.10/site-packages/urllib3/response.py b/infer_4_33_0/lib/python3.10/site-packages/urllib3/response.py new file mode 100644 index 0000000000000000000000000000000000000000..66c6a687ec06e2da1028b94a0a32b2fcd3580e2b --- /dev/null +++ b/infer_4_33_0/lib/python3.10/site-packages/urllib3/response.py @@ -0,0 +1,1278 @@ +from __future__ import annotations + +import collections +import io +import json as _json +import logging +import re +import socket +import sys +import typing +import warnings +import zlib +from contextlib import contextmanager +from http.client import HTTPMessage as _HttplibHTTPMessage +from http.client import HTTPResponse as _HttplibHTTPResponse +from socket import timeout as SocketTimeout + +if typing.TYPE_CHECKING: + from ._base_connection import BaseHTTPConnection + +try: + try: + import brotlicffi as brotli # type: ignore[import-not-found] + except ImportError: + import brotli # type: ignore[import-not-found] +except ImportError: + brotli = None + +try: + import zstandard as zstd +except (AttributeError, ImportError, ValueError): # Defensive: + HAS_ZSTD = False +else: + # The package 'zstandard' added the 'eof' property starting + # in v0.18.0 which we require to ensure a complete and + # valid zstd stream was fed into the ZstdDecoder. + # See: https://github.com/urllib3/urllib3/pull/2624 + _zstd_version = tuple( + map(int, re.search(r"^([0-9]+)\.([0-9]+)", zstd.__version__).groups()) # type: ignore[union-attr] + ) + if _zstd_version < (0, 18): # Defensive: + HAS_ZSTD = False + else: + HAS_ZSTD = True + +from . 
import util +from ._base_connection import _TYPE_BODY +from ._collections import HTTPHeaderDict +from .connection import BaseSSLError, HTTPConnection, HTTPException +from .exceptions import ( + BodyNotHttplibCompatible, + DecodeError, + HTTPError, + IncompleteRead, + InvalidChunkLength, + InvalidHeader, + ProtocolError, + ReadTimeoutError, + ResponseNotChunked, + SSLError, +) +from .util.response import is_fp_closed, is_response_to_head +from .util.retry import Retry + +if typing.TYPE_CHECKING: + from .connectionpool import HTTPConnectionPool + +log = logging.getLogger(__name__) + + +class ContentDecoder: + def decompress(self, data: bytes) -> bytes: + raise NotImplementedError() + + def flush(self) -> bytes: + raise NotImplementedError() + + +class DeflateDecoder(ContentDecoder): + def __init__(self) -> None: + self._first_try = True + self._data = b"" + self._obj = zlib.decompressobj() + + def decompress(self, data: bytes) -> bytes: + if not data: + return data + + if not self._first_try: + return self._obj.decompress(data) + + self._data += data + try: + decompressed = self._obj.decompress(data) + if decompressed: + self._first_try = False + self._data = None # type: ignore[assignment] + return decompressed + except zlib.error: + self._first_try = False + self._obj = zlib.decompressobj(-zlib.MAX_WBITS) + try: + return self.decompress(self._data) + finally: + self._data = None # type: ignore[assignment] + + def flush(self) -> bytes: + return self._obj.flush() + + +class GzipDecoderState: + FIRST_MEMBER = 0 + OTHER_MEMBERS = 1 + SWALLOW_DATA = 2 + + +class GzipDecoder(ContentDecoder): + def __init__(self) -> None: + self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) + self._state = GzipDecoderState.FIRST_MEMBER + + def decompress(self, data: bytes) -> bytes: + ret = bytearray() + if self._state == GzipDecoderState.SWALLOW_DATA or not data: + return bytes(ret) + while True: + try: + ret += self._obj.decompress(data) + except zlib.error: + previous_state = self._state + # Ignore data after the first error + self._state = GzipDecoderState.SWALLOW_DATA + if previous_state == GzipDecoderState.OTHER_MEMBERS: + # Allow trailing garbage acceptable in other gzip clients + return bytes(ret) + raise + data = self._obj.unused_data + if not data: + return bytes(ret) + self._state = GzipDecoderState.OTHER_MEMBERS + self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) + + def flush(self) -> bytes: + return self._obj.flush() + + +if brotli is not None: + + class BrotliDecoder(ContentDecoder): + # Supports both 'brotlipy' and 'Brotli' packages + # since they share an import name. 
The top branches + # are for 'brotlipy' and bottom branches for 'Brotli' + def __init__(self) -> None: + self._obj = brotli.Decompressor() + if hasattr(self._obj, "decompress"): + setattr(self, "decompress", self._obj.decompress) + else: + setattr(self, "decompress", self._obj.process) + + def flush(self) -> bytes: + if hasattr(self._obj, "flush"): + return self._obj.flush() # type: ignore[no-any-return] + return b"" + + +if HAS_ZSTD: + + class ZstdDecoder(ContentDecoder): + def __init__(self) -> None: + self._obj = zstd.ZstdDecompressor().decompressobj() + + def decompress(self, data: bytes) -> bytes: + if not data: + return b"" + data_parts = [self._obj.decompress(data)] + while self._obj.eof and self._obj.unused_data: + unused_data = self._obj.unused_data + self._obj = zstd.ZstdDecompressor().decompressobj() + data_parts.append(self._obj.decompress(unused_data)) + return b"".join(data_parts) + + def flush(self) -> bytes: + ret = self._obj.flush() # note: this is a no-op + if not self._obj.eof: + raise DecodeError("Zstandard data is incomplete") + return ret + + +class MultiDecoder(ContentDecoder): + """ + From RFC7231: + If one or more encodings have been applied to a representation, the + sender that applied the encodings MUST generate a Content-Encoding + header field that lists the content codings in the order in which + they were applied. + """ + + def __init__(self, modes: str) -> None: + self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")] + + def flush(self) -> bytes: + return self._decoders[0].flush() + + def decompress(self, data: bytes) -> bytes: + for d in reversed(self._decoders): + data = d.decompress(data) + return data + + +def _get_decoder(mode: str) -> ContentDecoder: + if "," in mode: + return MultiDecoder(mode) + + # According to RFC 9110 section 8.4.1.3, recipients should + # consider x-gzip equivalent to gzip + if mode in ("gzip", "x-gzip"): + return GzipDecoder() + + if brotli is not None and mode == "br": + return BrotliDecoder() + + if HAS_ZSTD and mode == "zstd": + return ZstdDecoder() + + return DeflateDecoder() + + +class BytesQueueBuffer: + """Memory-efficient bytes buffer + + To return decoded data in read() and still follow the BufferedIOBase API, we need a + buffer to always return the correct amount of bytes. + + This buffer should be filled using calls to put() + + Our maximum memory usage is determined by the sum of the size of: + + * self.buffer, which contains the full data + * the largest chunk that we will copy in get() + + The worst case scenario is a single chunk, in which case we'll make a full copy of + the data inside get(). 
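A sketch exercising the decoder classes defined above; it relies only on stdlib ``zlib``, so it runs without brotli or zstandard installed. ``_get_decoder`` is a private helper, used here purely for illustration.

.. code-block:: python

    import zlib

    from urllib3.response import GzipDecoder, _get_decoder

    payload = b"hello world" * 10
    gz_obj = zlib.compressobj(9, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
    gz = gz_obj.compress(payload) + gz_obj.flush()

    assert GzipDecoder().decompress(gz) == payload

    # DeflateDecoder accepts both raw and zlib-wrapped deflate bodies;
    # this exercises its first-try/fallback logic on the wrapped form.
    assert _get_decoder("deflate").decompress(zlib.compress(payload)) == payload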
+ """ + + def __init__(self) -> None: + self.buffer: typing.Deque[bytes] = collections.deque() + self._size: int = 0 + + def __len__(self) -> int: + return self._size + + def put(self, data: bytes) -> None: + self.buffer.append(data) + self._size += len(data) + + def get(self, n: int) -> bytes: + if n == 0: + return b"" + elif not self.buffer: + raise RuntimeError("buffer is empty") + elif n < 0: + raise ValueError("n should be > 0") + + fetched = 0 + ret = io.BytesIO() + while fetched < n: + remaining = n - fetched + chunk = self.buffer.popleft() + chunk_length = len(chunk) + if remaining < chunk_length: + left_chunk, right_chunk = chunk[:remaining], chunk[remaining:] + ret.write(left_chunk) + self.buffer.appendleft(right_chunk) + self._size -= remaining + break + else: + ret.write(chunk) + self._size -= chunk_length + fetched += chunk_length + + if not self.buffer: + break + + return ret.getvalue() + + def get_all(self) -> bytes: + buffer = self.buffer + if not buffer: + assert self._size == 0 + return b"" + if len(buffer) == 1: + result = buffer.pop() + else: + ret = io.BytesIO() + ret.writelines(buffer.popleft() for _ in range(len(buffer))) + result = ret.getvalue() + self._size = 0 + return result + + +class BaseHTTPResponse(io.IOBase): + CONTENT_DECODERS = ["gzip", "x-gzip", "deflate"] + if brotli is not None: + CONTENT_DECODERS += ["br"] + if HAS_ZSTD: + CONTENT_DECODERS += ["zstd"] + REDIRECT_STATUSES = [301, 302, 303, 307, 308] + + DECODER_ERROR_CLASSES: tuple[type[Exception], ...] = (IOError, zlib.error) + if brotli is not None: + DECODER_ERROR_CLASSES += (brotli.error,) + + if HAS_ZSTD: + DECODER_ERROR_CLASSES += (zstd.ZstdError,) + + def __init__( + self, + *, + headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None, + status: int, + version: int, + version_string: str, + reason: str | None, + decode_content: bool, + request_url: str | None, + retries: Retry | None = None, + ) -> None: + if isinstance(headers, HTTPHeaderDict): + self.headers = headers + else: + self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type] + self.status = status + self.version = version + self.version_string = version_string + self.reason = reason + self.decode_content = decode_content + self._has_decoded_content = False + self._request_url: str | None = request_url + self.retries = retries + + self.chunked = False + tr_enc = self.headers.get("transfer-encoding", "").lower() + # Don't incur the penalty of creating a list and then discarding it + encodings = (enc.strip() for enc in tr_enc.split(",")) + if "chunked" in encodings: + self.chunked = True + + self._decoder: ContentDecoder | None = None + self.length_remaining: int | None + + def get_redirect_location(self) -> str | None | typing.Literal[False]: + """ + Should we redirect and where to? + + :returns: Truthy redirect location string if we got a redirect status + code and valid location. ``None`` if redirect status and no + location. ``False`` if not a redirect status code. + """ + if self.status in self.REDIRECT_STATUSES: + return self.headers.get("location") + return False + + @property + def data(self) -> bytes: + raise NotImplementedError() + + def json(self) -> typing.Any: + """ + Deserializes the body of the HTTP response as a Python object. + + The body of the HTTP response must be encoded using UTF-8, as per + `RFC 8529 Section 8.1 `_. + + To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to + your custom decoder instead. 
+ + If the body of the HTTP response is not decodable to UTF-8, a + `UnicodeDecodeError` will be raised. If the body of the HTTP response is not a + valid JSON document, a `json.JSONDecodeError` will be raised. + + Read more :ref:`here `. + + :returns: The body of the HTTP response as a Python object. + """ + data = self.data.decode("utf-8") + return _json.loads(data) + + @property + def url(self) -> str | None: + raise NotImplementedError() + + @url.setter + def url(self, url: str | None) -> None: + raise NotImplementedError() + + @property + def connection(self) -> BaseHTTPConnection | None: + raise NotImplementedError() + + @property + def retries(self) -> Retry | None: + return self._retries + + @retries.setter + def retries(self, retries: Retry | None) -> None: + # Override the request_url if retries has a redirect location. + if retries is not None and retries.history: + self.url = retries.history[-1].redirect_location + self._retries = retries + + def stream( + self, amt: int | None = 2**16, decode_content: bool | None = None + ) -> typing.Iterator[bytes]: + raise NotImplementedError() + + def read( + self, + amt: int | None = None, + decode_content: bool | None = None, + cache_content: bool = False, + ) -> bytes: + raise NotImplementedError() + + def read1( + self, + amt: int | None = None, + decode_content: bool | None = None, + ) -> bytes: + raise NotImplementedError() + + def read_chunked( + self, + amt: int | None = None, + decode_content: bool | None = None, + ) -> typing.Iterator[bytes]: + raise NotImplementedError() + + def release_conn(self) -> None: + raise NotImplementedError() + + def drain_conn(self) -> None: + raise NotImplementedError() + + def shutdown(self) -> None: + raise NotImplementedError() + + def close(self) -> None: + raise NotImplementedError() + + def _init_decoder(self) -> None: + """ + Set-up the _decoder attribute if necessary. + """ + # Note: content-encoding value should be case-insensitive, per RFC 7230 + # Section 3.2 + content_encoding = self.headers.get("content-encoding", "").lower() + if self._decoder is None: + if content_encoding in self.CONTENT_DECODERS: + self._decoder = _get_decoder(content_encoding) + elif "," in content_encoding: + encodings = [ + e.strip() + for e in content_encoding.split(",") + if e.strip() in self.CONTENT_DECODERS + ] + if encodings: + self._decoder = _get_decoder(content_encoding) + + def _decode( + self, data: bytes, decode_content: bool | None, flush_decoder: bool + ) -> bytes: + """ + Decode the data passed in and potentially flush the decoder. + """ + if not decode_content: + if self._has_decoded_content: + raise RuntimeError( + "Calling read(decode_content=False) is not supported after " + "read(decode_content=True) was called." + ) + return data + + try: + if self._decoder: + data = self._decoder.decompress(data) + self._has_decoded_content = True + except self.DECODER_ERROR_CLASSES as e: + content_encoding = self.headers.get("content-encoding", "").lower() + raise DecodeError( + "Received response with content-encoding: %s, but " + "failed to decode it." % content_encoding, + e, + ) from e + if flush_decoder: + data += self._flush_decoder() + + return data + + def _flush_decoder(self) -> bytes: + """ + Flushes the decoder. Should only be called if the decoder is actually + being used. 
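A hedged sketch of the decode pipeline above, driving ``_init_decoder`` and ``_decode`` through the public ``read()`` of the concrete ``HTTPResponse`` defined later in this module; wrapping a ``BytesIO`` in ``HTTPResponse`` is an illustrative pattern, not how responses are normally constructed.

.. code-block:: python

    import io
    import zlib

    from urllib3.response import HTTPResponse

    gz = zlib.compressobj(9, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
    body = gz.compress(b"payload") + gz.flush()

    resp = HTTPResponse(
        body=io.BytesIO(body),
        headers={"content-encoding": "gzip"},
        status=200,
        preload_content=False,
    )
    assert resp.read() == b"payload"   # decoded transparently
    assert resp._has_decoded_content   # decode_content=False now forbidden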
+ """ + if self._decoder: + return self._decoder.decompress(b"") + self._decoder.flush() + return b"" + + # Compatibility methods for `io` module + def readinto(self, b: bytearray) -> int: + temp = self.read(len(b)) + if len(temp) == 0: + return 0 + else: + b[: len(temp)] = temp + return len(temp) + + # Compatibility methods for http.client.HTTPResponse + def getheaders(self) -> HTTPHeaderDict: + warnings.warn( + "HTTPResponse.getheaders() is deprecated and will be removed " + "in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.", + category=DeprecationWarning, + stacklevel=2, + ) + return self.headers + + def getheader(self, name: str, default: str | None = None) -> str | None: + warnings.warn( + "HTTPResponse.getheader() is deprecated and will be removed " + "in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).", + category=DeprecationWarning, + stacklevel=2, + ) + return self.headers.get(name, default) + + # Compatibility method for http.cookiejar + def info(self) -> HTTPHeaderDict: + return self.headers + + def geturl(self) -> str | None: + return self.url + + +class HTTPResponse(BaseHTTPResponse): + """ + HTTP Response container. + + Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is + loaded and decoded on-demand when the ``data`` property is accessed. This + class is also compatible with the Python standard library's :mod:`io` + module, and can hence be treated as a readable object in the context of that + framework. + + Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`: + + :param preload_content: + If True, the response's body will be preloaded during construction. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param original_response: + When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse` + object, it's convenient to include the original for debug purposes. It's + otherwise unused. + + :param retries: + The retries contains the last :class:`~urllib3.util.retry.Retry` that + was used during the request. + + :param enforce_content_length: + Enforce content length checking. Body returned by server must match + value of Content-Length header, if present. Otherwise, raise error. 
+ """ + + def __init__( + self, + body: _TYPE_BODY = "", + headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None, + status: int = 0, + version: int = 0, + version_string: str = "HTTP/?", + reason: str | None = None, + preload_content: bool = True, + decode_content: bool = True, + original_response: _HttplibHTTPResponse | None = None, + pool: HTTPConnectionPool | None = None, + connection: HTTPConnection | None = None, + msg: _HttplibHTTPMessage | None = None, + retries: Retry | None = None, + enforce_content_length: bool = True, + request_method: str | None = None, + request_url: str | None = None, + auto_close: bool = True, + sock_shutdown: typing.Callable[[int], None] | None = None, + ) -> None: + super().__init__( + headers=headers, + status=status, + version=version, + version_string=version_string, + reason=reason, + decode_content=decode_content, + request_url=request_url, + retries=retries, + ) + + self.enforce_content_length = enforce_content_length + self.auto_close = auto_close + + self._body = None + self._fp: _HttplibHTTPResponse | None = None + self._original_response = original_response + self._fp_bytes_read = 0 + self.msg = msg + + if body and isinstance(body, (str, bytes)): + self._body = body + + self._pool = pool + self._connection = connection + + if hasattr(body, "read"): + self._fp = body # type: ignore[assignment] + self._sock_shutdown = sock_shutdown + + # Are we using the chunked-style of transfer encoding? + self.chunk_left: int | None = None + + # Determine length of response + self.length_remaining = self._init_length(request_method) + + # Used to return the correct amount of bytes for partial read()s + self._decoded_buffer = BytesQueueBuffer() + + # If requested, preload the body. + if preload_content and not self._body: + self._body = self.read(decode_content=decode_content) + + def release_conn(self) -> None: + if not self._pool or not self._connection: + return None + + self._pool._put_conn(self._connection) + self._connection = None + + def drain_conn(self) -> None: + """ + Read and discard any remaining HTTP response data in the response connection. + + Unread data in the HTTPResponse connection blocks the connection from being released back to the pool. + """ + try: + self.read() + except (HTTPError, OSError, BaseSSLError, HTTPException): + pass + + @property + def data(self) -> bytes: + # For backwards-compat with earlier urllib3 0.4 and earlier. + if self._body: + return self._body # type: ignore[return-value] + + if self._fp: + return self.read(cache_content=True) + + return None # type: ignore[return-value] + + @property + def connection(self) -> HTTPConnection | None: + return self._connection + + def isclosed(self) -> bool: + return is_fp_closed(self._fp) + + def tell(self) -> int: + """ + Obtain the number of bytes pulled over the wire so far. May differ from + the amount of content returned by :meth:``urllib3.response.HTTPResponse.read`` + if bytes are encoded on the wire (e.g, compressed). + """ + return self._fp_bytes_read + + def _init_length(self, request_method: str | None) -> int | None: + """ + Set initial length value for Response content if available. + """ + length: int | None + content_length: str | None = self.headers.get("content-length") + + if content_length is not None: + if self.chunked: + # This Response will fail with an IncompleteRead if it can't be + # received as chunked. This method falls back to attempt reading + # the response before raising an exception. 
+ log.warning( + "Received response with both Content-Length and " + "Transfer-Encoding set. This is expressly forbidden " + "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " + "attempting to process response as Transfer-Encoding: " + "chunked." + ) + return None + + try: + # RFC 7230 section 3.3.2 specifies multiple content lengths can + # be sent in a single Content-Length header + # (e.g. Content-Length: 42, 42). This line ensures the values + # are all valid ints and that as long as the `set` length is 1, + # all values are the same. Otherwise, the header is invalid. + lengths = {int(val) for val in content_length.split(",")} + if len(lengths) > 1: + raise InvalidHeader( + "Content-Length contained multiple " + "unmatching values (%s)" % content_length + ) + length = lengths.pop() + except ValueError: + length = None + else: + if length < 0: + length = None + + else: # if content_length is None + length = None + + # Convert status to int for comparison + # In some cases, httplib returns a status of "_UNKNOWN" + try: + status = int(self.status) + except ValueError: + status = 0 + + # Check for responses that shouldn't include a body + if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD": + length = 0 + + return length + + @contextmanager + def _error_catcher(self) -> typing.Generator[None]: + """ + Catch low-level python exceptions, instead re-raising urllib3 + variants, so that low-level exceptions are not leaked in the + high-level api. + + On exit, release the connection back to the pool. + """ + clean_exit = False + + try: + try: + yield + + except SocketTimeout as e: + # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but + # there is yet no clean way to get at it from this context. + raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type] + + except BaseSSLError as e: + # FIXME: Is there a better way to differentiate between SSLErrors? + if "read operation timed out" not in str(e): + # SSL errors related to framing/MAC get wrapped and reraised here + raise SSLError(e) from e + + raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type] + + except IncompleteRead as e: + if ( + e.expected is not None + and e.partial is not None + and e.expected == -e.partial + ): + arg = "Response may not contain content." + else: + arg = f"Connection broken: {e!r}" + raise ProtocolError(arg, e) from e + + except (HTTPException, OSError) as e: + raise ProtocolError(f"Connection broken: {e!r}", e) from e + + # If no exception is thrown, we should avoid cleaning up + # unnecessarily. + clean_exit = True + finally: + # If we didn't terminate cleanly, we need to throw away our + # connection. + if not clean_exit: + # The response may not be closed but we're not going to use it + # anymore so close it now to ensure that the connection is + # released back to the pool. + if self._original_response: + self._original_response.close() + + # Closing the response may not actually be sufficient to close + # everything, so if we have a hold of the connection close that + # too. + if self._connection: + self._connection.close() + + # If we hold the original response but it's closed now, we should + # return the connection back to the pool. 
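A hedged sketch of the exception translation performed by ``_error_catcher`` above: a socket-level read timeout surfaces as urllib3's ``ReadTimeoutError``, not a raw ``socket.timeout``. The endpoint and timeout values are illustrative and require network access.

.. code-block:: python

    import urllib3
    from urllib3.exceptions import ReadTimeoutError

    http = urllib3.PoolManager()
    try:
        http.request(
            "GET",
            "https://httpbin.org/delay/10",
            timeout=urllib3.Timeout(connect=5.0, read=0.5),
            retries=False,  # propagate the error instead of retrying
        )
    except ReadTimeoutError as exc:
        print("read timed out:", exc)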
+ if self._original_response and self._original_response.isclosed(): + self.release_conn() + + def _fp_read( + self, + amt: int | None = None, + *, + read1: bool = False, + ) -> bytes: + """ + Read a response with the thought that reading the number of bytes + larger than can fit in a 32-bit int at a time via SSL in some + known cases leads to an overflow error that has to be prevented + if `amt` or `self.length_remaining` indicate that a problem may + happen. + + The known cases: + * CPython < 3.9.7 because of a bug + https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900. + * urllib3 injected with pyOpenSSL-backed SSL-support. + * CPython < 3.10 only when `amt` does not fit 32-bit int. + """ + assert self._fp + c_int_max = 2**31 - 1 + if ( + (amt and amt > c_int_max) + or ( + amt is None + and self.length_remaining + and self.length_remaining > c_int_max + ) + ) and (util.IS_PYOPENSSL or sys.version_info < (3, 10)): + if read1: + return self._fp.read1(c_int_max) + buffer = io.BytesIO() + # Besides `max_chunk_amt` being a maximum chunk size, it + # affects memory overhead of reading a response by this + # method in CPython. + # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum + # chunk size that does not lead to an overflow error, but + # 256 MiB is a compromise. + max_chunk_amt = 2**28 + while amt is None or amt != 0: + if amt is not None: + chunk_amt = min(amt, max_chunk_amt) + amt -= chunk_amt + else: + chunk_amt = max_chunk_amt + data = self._fp.read(chunk_amt) + if not data: + break + buffer.write(data) + del data # to reduce peak memory usage by `max_chunk_amt`. + return buffer.getvalue() + elif read1: + return self._fp.read1(amt) if amt is not None else self._fp.read1() + else: + # StringIO doesn't like amt=None + return self._fp.read(amt) if amt is not None else self._fp.read() + + def _raw_read( + self, + amt: int | None = None, + *, + read1: bool = False, + ) -> bytes: + """ + Reads `amt` of bytes from the socket. + """ + if self._fp is None: + return None # type: ignore[return-value] + + fp_closed = getattr(self._fp, "closed", False) + + with self._error_catcher(): + data = self._fp_read(amt, read1=read1) if not fp_closed else b"" + if amt is not None and amt != 0 and not data: + # Platform-specific: Buggy versions of Python. + # Close the connection when no data is returned + # + # This is redundant to what httplib/http.client _should_ + # already do. However, versions of python released before + # December 15, 2012 (http://bugs.python.org/issue16298) do + # not properly close the connection in all cases. There is + # no harm in redundantly calling close. + self._fp.close() + if ( + self.enforce_content_length + and self.length_remaining is not None + and self.length_remaining != 0 + ): + # This is an edge case that httplib failed to cover due + # to concerns of backward compatibility. We're + # addressing it here to make sure IncompleteRead is + # raised during streaming, so all calls with incorrect + # Content-Length are caught. + raise IncompleteRead(self._fp_bytes_read, self.length_remaining) + elif read1 and ( + (amt != 0 and not data) or self.length_remaining == len(data) + ): + # All data has been read, but `self._fp.read1` in + # CPython 3.12 and older doesn't always close + # `http.client.HTTPResponse`, so we close it here. 
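A standalone re-implementation (not the method itself) of the capped-chunk loop in ``_fp_read`` above: reads are limited to 256 MiB per call so an SSL layer never sees a length that overflows a 32-bit int.

.. code-block:: python

    import io

    MAX_CHUNK_AMT = 2**28  # 256 MiB, mirroring max_chunk_amt above

    def read_capped(fp: io.BufferedIOBase, amt: int | None) -> bytes:
        buffer = io.BytesIO()
        while amt is None or amt > 0:
            chunk_amt = MAX_CHUNK_AMT if amt is None else min(amt, MAX_CHUNK_AMT)
            data = fp.read(chunk_amt)
            if not data:  # EOF
                break
            buffer.write(data)
            if amt is not None:
                amt -= len(data)
        return buffer.getvalue()

    assert read_capped(io.BytesIO(b"x" * 10), None) == b"x" * 10
    assert read_capped(io.BytesIO(b"x" * 10), 4) == b"xxxx"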
+ # See https://github.com/python/cpython/issues/113199 + self._fp.close() + + if data: + self._fp_bytes_read += len(data) + if self.length_remaining is not None: + self.length_remaining -= len(data) + return data + + def read( + self, + amt: int | None = None, + decode_content: bool | None = None, + cache_content: bool = False, + ) -> bytes: + """ + Similar to :meth:`http.client.HTTPResponse.read`, but with two additional + parameters: ``decode_content`` and ``cache_content``. + + :param amt: + How much of the content to read. If specified, caching is skipped + because it doesn't make sense to cache partial content as the full + response. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param cache_content: + If True, will save the returned data such that the same result is + returned despite of the state of the underlying file object. This + is useful if you want the ``.data`` property to continue working + after having ``.read()`` the file object. (Overridden if ``amt`` is + set.) + """ + self._init_decoder() + if decode_content is None: + decode_content = self.decode_content + + if amt and amt < 0: + # Negative numbers and `None` should be treated the same. + amt = None + elif amt is not None: + cache_content = False + + if len(self._decoded_buffer) >= amt: + return self._decoded_buffer.get(amt) + + data = self._raw_read(amt) + + flush_decoder = amt is None or (amt != 0 and not data) + + if not data and len(self._decoded_buffer) == 0: + return data + + if amt is None: + data = self._decode(data, decode_content, flush_decoder) + if cache_content: + self._body = data + else: + # do not waste memory on buffer when not decoding + if not decode_content: + if self._has_decoded_content: + raise RuntimeError( + "Calling read(decode_content=False) is not supported after " + "read(decode_content=True) was called." + ) + return data + + decoded_data = self._decode(data, decode_content, flush_decoder) + self._decoded_buffer.put(decoded_data) + + while len(self._decoded_buffer) < amt and data: + # TODO make sure to initially read enough data to get past the headers + # For example, the GZ file header takes 10 bytes, we don't want to read + # it one byte at a time + data = self._raw_read(amt) + decoded_data = self._decode(data, decode_content, flush_decoder) + self._decoded_buffer.put(decoded_data) + data = self._decoded_buffer.get(amt) + + return data + + def read1( + self, + amt: int | None = None, + decode_content: bool | None = None, + ) -> bytes: + """ + Similar to ``http.client.HTTPResponse.read1`` and documented + in :meth:`io.BufferedReader.read1`, but with an additional parameter: + ``decode_content``. + + :param amt: + How much of the content to read. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + """ + if decode_content is None: + decode_content = self.decode_content + if amt and amt < 0: + # Negative numbers and `None` should be treated the same. + amt = None + # try and respond without going to the network + if self._has_decoded_content: + if not decode_content: + raise RuntimeError( + "Calling read1(decode_content=False) is not supported after " + "read1(decode_content=True) was called." 
+ ) + if len(self._decoded_buffer) > 0: + if amt is None: + return self._decoded_buffer.get_all() + return self._decoded_buffer.get(amt) + if amt == 0: + return b"" + + # FIXME, this method's type doesn't say returning None is possible + data = self._raw_read(amt, read1=True) + if not decode_content or data is None: + return data + + self._init_decoder() + while True: + flush_decoder = not data + decoded_data = self._decode(data, decode_content, flush_decoder) + self._decoded_buffer.put(decoded_data) + if decoded_data or flush_decoder: + break + data = self._raw_read(8192, read1=True) + + if amt is None: + return self._decoded_buffer.get_all() + return self._decoded_buffer.get(amt) + + def stream( + self, amt: int | None = 2**16, decode_content: bool | None = None + ) -> typing.Generator[bytes]: + """ + A generator wrapper for the read() method. A call will block until + ``amt`` bytes have been read from the connection or until the + connection is closed. + + :param amt: + How much of the content to read. The generator will return up to + much data per iteration, but may return less. This is particularly + likely when using compressed data. However, the empty string will + never be returned. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + """ + if self.chunked and self.supports_chunked_reads(): + yield from self.read_chunked(amt, decode_content=decode_content) + else: + while not is_fp_closed(self._fp) or len(self._decoded_buffer) > 0: + data = self.read(amt=amt, decode_content=decode_content) + + if data: + yield data + + # Overrides from io.IOBase + def readable(self) -> bool: + return True + + def shutdown(self) -> None: + if not self._sock_shutdown: + raise ValueError("Cannot shutdown socket as self._sock_shutdown is not set") + self._sock_shutdown(socket.SHUT_RD) + + def close(self) -> None: + self._sock_shutdown = None + + if not self.closed and self._fp: + self._fp.close() + + if self._connection: + self._connection.close() + + if not self.auto_close: + io.IOBase.close(self) + + @property + def closed(self) -> bool: + if not self.auto_close: + return io.IOBase.closed.__get__(self) # type: ignore[no-any-return] + elif self._fp is None: + return True + elif hasattr(self._fp, "isclosed"): + return self._fp.isclosed() + elif hasattr(self._fp, "closed"): + return self._fp.closed + else: + return True + + def fileno(self) -> int: + if self._fp is None: + raise OSError("HTTPResponse has no file to get a fileno from") + elif hasattr(self._fp, "fileno"): + return self._fp.fileno() + else: + raise OSError( + "The file-like object this HTTPResponse is wrapped " + "around has no file descriptor" + ) + + def flush(self) -> None: + if ( + self._fp is not None + and hasattr(self._fp, "flush") + and not getattr(self._fp, "closed", False) + ): + return self._fp.flush() + + def supports_chunked_reads(self) -> bool: + """ + Checks if the underlying file-like object looks like a + :class:`http.client.HTTPResponse` object. We do this by testing for + the fp attribute. If it is present we assume it returns raw chunks as + processed by read_chunked(). + """ + return hasattr(self._fp, "fp") + + def _update_chunk_length(self) -> None: + # First, we'll figure out length of a chunk and then + # we'll try to read it from socket. 
+ if self.chunk_left is not None: + return None + line = self._fp.fp.readline() # type: ignore[union-attr] + line = line.split(b";", 1)[0] + try: + self.chunk_left = int(line, 16) + except ValueError: + self.close() + if line: + # Invalid chunked protocol response, abort. + raise InvalidChunkLength(self, line) from None + else: + # Truncated at start of next chunk + raise ProtocolError("Response ended prematurely") from None + + def _handle_chunk(self, amt: int | None) -> bytes: + returned_chunk = None + if amt is None: + chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr] + returned_chunk = chunk + self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk. + self.chunk_left = None + elif self.chunk_left is not None and amt < self.chunk_left: + value = self._fp._safe_read(amt) # type: ignore[union-attr] + self.chunk_left = self.chunk_left - amt + returned_chunk = value + elif amt == self.chunk_left: + value = self._fp._safe_read(amt) # type: ignore[union-attr] + self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk. + self.chunk_left = None + returned_chunk = value + else: # amt > self.chunk_left + returned_chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr] + self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk. + self.chunk_left = None + return returned_chunk # type: ignore[no-any-return] + + def read_chunked( + self, amt: int | None = None, decode_content: bool | None = None + ) -> typing.Generator[bytes]: + """ + Similar to :meth:`HTTPResponse.read`, but with an additional + parameter: ``decode_content``. + + :param amt: + How much of the content to read. If specified, caching is skipped + because it doesn't make sense to cache partial content as the full + response. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + """ + self._init_decoder() + # FIXME: Rewrite this method and make it a class with a better structured logic. + if not self.chunked: + raise ResponseNotChunked( + "Response is not chunked. " + "Header 'transfer-encoding: chunked' is missing." + ) + if not self.supports_chunked_reads(): + raise BodyNotHttplibCompatible( + "Body should be http.client.HTTPResponse like. " + "It should have have an fp attribute which returns raw chunks." + ) + + with self._error_catcher(): + # Don't bother reading the body of a HEAD request. + if self._original_response and is_response_to_head(self._original_response): + self._original_response.close() + return None + + # If a response is already read and closed + # then return immediately. + if self._fp.fp is None: # type: ignore[union-attr] + return None + + if amt and amt < 0: + # Negative numbers and `None` should be treated the same, + # but httplib handles only `None` correctly. + amt = None + + while True: + self._update_chunk_length() + if self.chunk_left == 0: + break + chunk = self._handle_chunk(amt) + decoded = self._decode( + chunk, decode_content=decode_content, flush_decoder=False + ) + if decoded: + yield decoded + + if decode_content: + # On CPython and PyPy, we should never need to flush the + # decoder. However, on Jython we *might* need to, so + # lets defensively do it anyway. + decoded = self._flush_decoder() + if decoded: # Platform-specific: Jython. + yield decoded + + # Chunk content ends with \r\n: discard it. 
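A standalone illustrative parser for the chunk framing that ``_update_chunk_length`` and ``_handle_chunk`` above consume: a hex chunk-size line with optional ``;``-separated extensions, CRLF after each chunk's data, and a zero-size chunk to terminate.

.. code-block:: python

    import io

    def parse_chunked(fp: io.BufferedIOBase) -> bytes:
        out = bytearray()
        while True:
            # chunk-size [; extensions] CRLF
            size = int(fp.readline().split(b";", 1)[0], 16)
            if size == 0:
                break  # terminating zero-size chunk
            out += fp.read(size)
            fp.read(2)  # discard the CRLF trailing the chunk data
        return bytes(out)

    raw = b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
    assert parse_chunked(io.BytesIO(raw)) == b"Wikipedia"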
+ while self._fp is not None: + line = self._fp.fp.readline() + if not line: + # Some sites may not end with '\r\n'. + break + if line == b"\r\n": + break + + # We read everything; close the "file". + if self._original_response: + self._original_response.close() + + @property + def url(self) -> str | None: + """ + Returns the URL that was the source of this response. + If the request that generated this response redirected, this method + will return the final redirect location. + """ + return self._request_url + + @url.setter + def url(self, url: str) -> None: + self._request_url = url + + def __iter__(self) -> typing.Iterator[bytes]: + buffer: list[bytes] = [] + for chunk in self.stream(decode_content=True): + if b"\n" in chunk: + chunks = chunk.split(b"\n") + yield b"".join(buffer) + chunks[0] + b"\n" + for x in chunks[1:-1]: + yield x + b"\n" + if chunks[-1]: + buffer = [chunks[-1]] + else: + buffer = [] + else: + buffer.append(chunk) + if buffer: + yield b"".join(buffer)
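A sketch of the line-splitting ``__iter__`` above, which re-chunks the decoded stream on ``b"\n"`` boundaries; wrapping a ``BytesIO`` is illustrative.

.. code-block:: python

    import io

    from urllib3.response import HTTPResponse

    resp = HTTPResponse(
        body=io.BytesIO(b"alpha\nbeta\ngamma"),
        status=200,
        preload_content=False,
    )
    print(list(resp))  # [b'alpha\n', b'beta\n', b'gamma']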