ZTWHHH committed
Commit 38cc323 · verified · 1 parent: 09b6c38

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.

Files changed (50)
  1. emu3/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc +0 -0
  2. emu3/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc +0 -0
  3. emu3/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc +0 -0
  4. emu3/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc +0 -0
  5. emu3/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc +0 -0
  6. emu3/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc +0 -0
  7. emu3/lib/python3.10/site-packages/pandas/_config/__init__.py +57 -0
  8. emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc +0 -0
  9. emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc +0 -0
  10. emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc +0 -0
  11. emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc +0 -0
  12. emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc +0 -0
  13. emu3/lib/python3.10/site-packages/pandas/_config/config.py +948 -0
  14. emu3/lib/python3.10/site-packages/pandas/_config/dates.py +25 -0
  15. emu3/lib/python3.10/site-packages/pandas/_config/display.py +62 -0
  16. emu3/lib/python3.10/site-packages/pandas/_config/localization.py +172 -0
  17. emu3/lib/python3.10/site-packages/pandas/_testing/__init__.py +639 -0
  18. emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc +0 -0
  19. emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc +0 -0
  20. emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc +0 -0
  21. emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc +0 -0
  22. emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc +0 -0
  23. emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc +0 -0
  24. emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc +0 -0
  25. emu3/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py +93 -0
  26. emu3/lib/python3.10/site-packages/pandas/_testing/_io.py +170 -0
  27. emu3/lib/python3.10/site-packages/pandas/_testing/_warnings.py +232 -0
  28. emu3/lib/python3.10/site-packages/pandas/_testing/asserters.py +1435 -0
  29. emu3/lib/python3.10/site-packages/pandas/_testing/compat.py +29 -0
  30. emu3/lib/python3.10/site-packages/pandas/_testing/contexts.py +257 -0
  31. emu3/lib/python3.10/site-packages/pandas/api/__init__.py +16 -0
  32. emu3/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
  33. emu3/lib/python3.10/site-packages/pandas/api/types/__init__.py +23 -0
  34. emu3/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc +0 -0
  35. emu3/lib/python3.10/site-packages/pandas/api/typing/__init__.py +55 -0
  36. emu3/lib/python3.10/site-packages/pandas/compat/__init__.py +199 -0
  37. emu3/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc +0 -0
  38. emu3/lib/python3.10/site-packages/pandas/compat/_constants.py +30 -0
  39. emu3/lib/python3.10/site-packages/pandas/compat/_optional.py +168 -0
  40. emu3/lib/python3.10/site-packages/pandas/compat/compressors.py +77 -0
  41. emu3/lib/python3.10/site-packages/pandas/compat/pickle_compat.py +262 -0
  42. emu3/lib/python3.10/site-packages/pandas/compat/pyarrow.py +29 -0
  43. emu3/lib/python3.10/site-packages/pandas/tests/computation/__init__.py +0 -0
  44. emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc +0 -0
  45. emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc +0 -0
  46. emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_eval.cpython-310.pyc +0 -0
  47. emu3/lib/python3.10/site-packages/pandas/tests/computation/test_compat.py +32 -0
  48. emu3/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py +2001 -0
  49. emu3/lib/python3.10/site-packages/pandas/tests/series/__pycache__/__init__.cpython-310.pyc +0 -0
  50. emu3/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_api.cpython-310.pyc +0 -0
emu3/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.95 kB).

emu3/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc ADDED
Binary file (11.5 kB).

emu3/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc ADDED
Binary file (14.5 kB).

emu3/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc ADDED
Binary file (248 Bytes).

emu3/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (46.1 kB).

emu3/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc ADDED
Binary file (404 Bytes).
 
emu3/lib/python3.10/site-packages/pandas/_config/__init__.py ADDED
@@ -0,0 +1,57 @@
+ """
+ pandas._config is considered explicitly upstream of everything else in pandas,
+ should have no intra-pandas dependencies.
+
+ importing `dates` and `display` ensures that keys needed by _libs
+ are initialized.
+ """
+ __all__ = [
+     "config",
+     "detect_console_encoding",
+     "get_option",
+     "set_option",
+     "reset_option",
+     "describe_option",
+     "option_context",
+     "options",
+     "using_copy_on_write",
+     "warn_copy_on_write",
+ ]
+ from pandas._config import config
+ from pandas._config import dates  # pyright: ignore[reportUnusedImport]  # noqa: F401
+ from pandas._config.config import (
+     _global_config,
+     describe_option,
+     get_option,
+     option_context,
+     options,
+     reset_option,
+     set_option,
+ )
+ from pandas._config.display import detect_console_encoding
+
+
+ def using_copy_on_write() -> bool:
+     _mode_options = _global_config["mode"]
+     return (
+         _mode_options["copy_on_write"] is True
+         and _mode_options["data_manager"] == "block"
+     )
+
+
+ def warn_copy_on_write() -> bool:
+     _mode_options = _global_config["mode"]
+     return (
+         _mode_options["copy_on_write"] == "warn"
+         and _mode_options["data_manager"] == "block"
+     )
+
+
+ def using_nullable_dtypes() -> bool:
+     _mode_options = _global_config["mode"]
+     return _mode_options["nullable_dtypes"]
+
+
+ def using_pyarrow_string_dtype() -> bool:
+     _mode_options = _global_config["future"]
+     return _mode_options["infer_string"]
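
The mode helpers above read the live "mode.*" and "future.*" entries of _global_config, so their return values track whatever has been set through the public options API. A minimal sketch of that wiring, assuming a pandas 2.x build where the "mode.copy_on_write" option is registered at import time (as the config_init in this tree does):

    import pandas as pd
    from pandas._config import using_copy_on_write

    # Flipping the public option changes what the helper reports,
    # because both read the same _global_config["mode"] dict.
    pd.set_option("mode.copy_on_write", True)
    print(using_copy_on_write())  # True (while data_manager == "block")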
emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.49 kB).

emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc ADDED
Binary file (26.4 kB).

emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc ADDED
Binary file (730 Bytes).

emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc ADDED
Binary file (1.38 kB).

emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc ADDED
Binary file (4.81 kB).
 
emu3/lib/python3.10/site-packages/pandas/_config/config.py ADDED
@@ -0,0 +1,948 @@
+ """
+ The config module holds package-wide configurables and provides
+ a uniform API for working with them.
+
+ Overview
+ ========
+
+ This module supports the following requirements:
+ - options are referenced using keys in dot.notation, e.g. "x.y.option - z".
+ - keys are case-insensitive.
+ - functions should accept partial/regex keys, when unambiguous.
+ - options can be registered by modules at import time.
+ - options can be registered at init-time (via core.config_init)
+ - options have a default value, and (optionally) a description and
+   validation function associated with them.
+ - options can be deprecated, in which case referencing them
+   should produce a warning.
+ - deprecated options can optionally be rerouted to a replacement
+   so that accessing a deprecated option reroutes to a differently
+   named option.
+ - options can be reset to their default value.
+ - all option can be reset to their default value at once.
+ - all options in a certain sub - namespace can be reset at once.
+ - the user can set / get / reset or ask for the description of an option.
+ - a developer can register and mark an option as deprecated.
+ - you can register a callback to be invoked when the option value
+   is set or reset. Changing the stored value is considered misuse, but
+   is not verboten.
+
+ Implementation
+ ==============
+
+ - Data is stored using nested dictionaries, and should be accessed
+   through the provided API.
+
+ - "Registered options" and "Deprecated options" have metadata associated
+   with them, which are stored in auxiliary dictionaries keyed on the
+   fully-qualified key, e.g. "x.y.z.option".
+
+ - the config_init module is imported by the package's __init__.py file.
+   placing any register_option() calls there will ensure those options
+   are available as soon as pandas is loaded. If you use register_option
+   in a module, it will only be available after that module is imported,
+   which you should be aware of.
+
+ - `config_prefix` is a context_manager (for use with the `with` keyword)
+   which can save developers some typing, see the docstring.
+
+ """
+
+ from __future__ import annotations
+
+ from contextlib import (
+     ContextDecorator,
+     contextmanager,
+ )
+ import re
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+     Generic,
+     NamedTuple,
+     cast,
+ )
+ import warnings
+
+ from pandas._typing import (
+     F,
+     T,
+ )
+ from pandas.util._exceptions import find_stack_level
+
+ if TYPE_CHECKING:
+     from collections.abc import (
+         Generator,
+         Iterable,
+     )
+
+
+ class DeprecatedOption(NamedTuple):
+     key: str
+     msg: str | None
+     rkey: str | None
+     removal_ver: str | None
+
+
+ class RegisteredOption(NamedTuple):
+     key: str
+     defval: object
+     doc: str
+     validator: Callable[[object], Any] | None
+     cb: Callable[[str], Any] | None
+
+
+ # holds deprecated option metadata
+ _deprecated_options: dict[str, DeprecatedOption] = {}
+
+ # holds registered option metadata
+ _registered_options: dict[str, RegisteredOption] = {}
+
+ # holds the current values for registered options
+ _global_config: dict[str, Any] = {}
+
+ # keys which have a special meaning
+ _reserved_keys: list[str] = ["all"]
+
+
+ class OptionError(AttributeError, KeyError):
+     """
+     Exception raised for pandas.options.
+
+     Backwards compatible with KeyError checks.
+
+     Examples
+     --------
+     >>> pd.options.context
+     Traceback (most recent call last):
+     OptionError: No such option
+     """
+
+
+ #
+ # User API
+
+
+ def _get_single_key(pat: str, silent: bool) -> str:
+     keys = _select_options(pat)
+     if len(keys) == 0:
+         if not silent:
+             _warn_if_deprecated(pat)
+         raise OptionError(f"No such keys(s): {repr(pat)}")
+     if len(keys) > 1:
+         raise OptionError("Pattern matched multiple keys")
+     key = keys[0]
+
+     if not silent:
+         _warn_if_deprecated(key)
+
+     key = _translate_key(key)
+
+     return key
+
+
+ def _get_option(pat: str, silent: bool = False) -> Any:
+     key = _get_single_key(pat, silent)
+
+     # walk the nested dict
+     root, k = _get_root(key)
+     return root[k]
+
+
+ def _set_option(*args, **kwargs) -> None:
+     # must at least 1 arg deal with constraints later
+     nargs = len(args)
+     if not nargs or nargs % 2 != 0:
+         raise ValueError("Must provide an even number of non-keyword arguments")
+
+     # default to false
+     silent = kwargs.pop("silent", False)
+
+     if kwargs:
+         kwarg = next(iter(kwargs.keys()))
+         raise TypeError(f'_set_option() got an unexpected keyword argument "{kwarg}"')
+
+     for k, v in zip(args[::2], args[1::2]):
+         key = _get_single_key(k, silent)
+
+         o = _get_registered_option(key)
+         if o and o.validator:
+             o.validator(v)
+
+         # walk the nested dict
+         root, k_root = _get_root(key)
+         root[k_root] = v
+
+         if o.cb:
+             if silent:
+                 with warnings.catch_warnings(record=True):
+                     o.cb(key)
+             else:
+                 o.cb(key)
+
+
+ def _describe_option(pat: str = "", _print_desc: bool = True) -> str | None:
+     keys = _select_options(pat)
+     if len(keys) == 0:
+         raise OptionError("No such keys(s)")
+
+     s = "\n".join([_build_option_description(k) for k in keys])
+
+     if _print_desc:
+         print(s)
+         return None
+     return s
+
+
+ def _reset_option(pat: str, silent: bool = False) -> None:
+     keys = _select_options(pat)
+
+     if len(keys) == 0:
+         raise OptionError("No such keys(s)")
+
+     if len(keys) > 1 and len(pat) < 4 and pat != "all":
+         raise ValueError(
+             "You must specify at least 4 characters when "
+             "resetting multiple keys, use the special keyword "
+             '"all" to reset all the options to their default value'
+         )
+
+     for k in keys:
+         _set_option(k, _registered_options[k].defval, silent=silent)
+
+
+ def get_default_val(pat: str):
+     key = _get_single_key(pat, silent=True)
+     return _get_registered_option(key).defval
+
+
+ class DictWrapper:
+     """provide attribute-style access to a nested dict"""
+
+     d: dict[str, Any]
+
+     def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
+         object.__setattr__(self, "d", d)
+         object.__setattr__(self, "prefix", prefix)
+
+     def __setattr__(self, key: str, val: Any) -> None:
+         prefix = object.__getattribute__(self, "prefix")
+         if prefix:
+             prefix += "."
+         prefix += key
+         # you can't set new keys
+         # can you can't overwrite subtrees
+         if key in self.d and not isinstance(self.d[key], dict):
+             _set_option(prefix, val)
+         else:
+             raise OptionError("You can only set the value of existing options")
+
+     def __getattr__(self, key: str):
+         prefix = object.__getattribute__(self, "prefix")
+         if prefix:
+             prefix += "."
+         prefix += key
+         try:
+             v = object.__getattribute__(self, "d")[key]
+         except KeyError as err:
+             raise OptionError("No such option") from err
+         if isinstance(v, dict):
+             return DictWrapper(v, prefix)
+         else:
+             return _get_option(prefix)
+
+     def __dir__(self) -> list[str]:
+         return list(self.d.keys())
+
+
+ # For user convenience, we'd like to have the available options described
+ # in the docstring. For dev convenience we'd like to generate the docstrings
+ # dynamically instead of maintaining them by hand. To this, we use the
+ # class below which wraps functions inside a callable, and converts
+ # __doc__ into a property function. The doctsrings below are templates
+ # using the py2.6+ advanced formatting syntax to plug in a concise list
+ # of options, and option descriptions.
+
+
+ class CallableDynamicDoc(Generic[T]):
+     def __init__(self, func: Callable[..., T], doc_tmpl: str) -> None:
+         self.__doc_tmpl__ = doc_tmpl
+         self.__func__ = func
+
+     def __call__(self, *args, **kwds) -> T:
+         return self.__func__(*args, **kwds)
+
+     # error: Signature of "__doc__" incompatible with supertype "object"
+     @property
+     def __doc__(self) -> str:  # type: ignore[override]
+         opts_desc = _describe_option("all", _print_desc=False)
+         opts_list = pp_options_list(list(_registered_options.keys()))
+         return self.__doc_tmpl__.format(opts_desc=opts_desc, opts_list=opts_list)
+
+
+ _get_option_tmpl = """
+ get_option(pat)
+
+ Retrieves the value of the specified option.
+
+ Available options:
+
+ {opts_list}
+
+ Parameters
+ ----------
+ pat : str
+     Regexp which should match a single option.
+     Note: partial matches are supported for convenience, but unless you use the
+     full option name (e.g. x.y.z.option_name), your code may break in future
+     versions if new options with similar names are introduced.
+
+ Returns
+ -------
+ result : the value of the option
+
+ Raises
+ ------
+ OptionError : if no such option exists
+
+ Notes
+ -----
+ Please reference the :ref:`User Guide <options>` for more information.
+
+ The available options with its descriptions:
+
+ {opts_desc}
+
+ Examples
+ --------
+ >>> pd.get_option('display.max_columns')  # doctest: +SKIP
+ 4
+ """
+
+ _set_option_tmpl = """
+ set_option(pat, value)
+
+ Sets the value of the specified option.
+
+ Available options:
+
+ {opts_list}
+
+ Parameters
+ ----------
+ pat : str
+     Regexp which should match a single option.
+     Note: partial matches are supported for convenience, but unless you use the
+     full option name (e.g. x.y.z.option_name), your code may break in future
+     versions if new options with similar names are introduced.
+ value : object
+     New value of option.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ OptionError if no such option exists
+
+ Notes
+ -----
+ Please reference the :ref:`User Guide <options>` for more information.
+
+ The available options with its descriptions:
+
+ {opts_desc}
+
+ Examples
+ --------
+ >>> pd.set_option('display.max_columns', 4)
+ >>> df = pd.DataFrame([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
+ >>> df
+    0  1  ...  3   4
+ 0  1  2  ...  4   5
+ 1  6  7  ...  9  10
+ [2 rows x 5 columns]
+ >>> pd.reset_option('display.max_columns')
+ """
+
+ _describe_option_tmpl = """
+ describe_option(pat, _print_desc=False)
+
+ Prints the description for one or more registered options.
+
+ Call with no arguments to get a listing for all registered options.
+
+ Available options:
+
+ {opts_list}
+
+ Parameters
+ ----------
+ pat : str
+     Regexp pattern. All matching keys will have their description displayed.
+ _print_desc : bool, default True
+     If True (default) the description(s) will be printed to stdout.
+     Otherwise, the description(s) will be returned as a unicode string
+     (for testing).
+
+ Returns
+ -------
+ None by default, the description(s) as a unicode string if _print_desc
+ is False
+
+ Notes
+ -----
+ Please reference the :ref:`User Guide <options>` for more information.
+
+ The available options with its descriptions:
+
+ {opts_desc}
+
+ Examples
+ --------
+ >>> pd.describe_option('display.max_columns')  # doctest: +SKIP
+ display.max_columns : int
+     If max_cols is exceeded, switch to truncate view...
+ """
+
+ _reset_option_tmpl = """
+ reset_option(pat)
+
+ Reset one or more options to their default value.
+
+ Pass "all" as argument to reset all options.
+
+ Available options:
+
+ {opts_list}
+
+ Parameters
+ ----------
+ pat : str/regex
+     If specified only options matching `prefix*` will be reset.
+     Note: partial matches are supported for convenience, but unless you
+     use the full option name (e.g. x.y.z.option_name), your code may break
+     in future versions if new options with similar names are introduced.
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ Please reference the :ref:`User Guide <options>` for more information.
+
+ The available options with its descriptions:
+
+ {opts_desc}
+
+ Examples
+ --------
+ >>> pd.reset_option('display.max_columns')  # doctest: +SKIP
+ """
+
+ # bind the functions with their docstrings into a Callable
+ # and use that as the functions exposed in pd.api
+ get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
+ set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
+ reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
+ describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
+ options = DictWrapper(_global_config)
+
+ #
+ # Functions for use by pandas developers, in addition to User - api
+
+
+ class option_context(ContextDecorator):
+     """
+     Context manager to temporarily set options in the `with` statement context.
+
+     You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
+
+     Examples
+     --------
+     >>> from pandas import option_context
+     >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
+     ...     pass
+     """
+
+     def __init__(self, *args) -> None:
+         if len(args) % 2 != 0 or len(args) < 2:
+             raise ValueError(
+                 "Need to invoke as option_context(pat, val, [(pat, val), ...])."
+             )
+
+         self.ops = list(zip(args[::2], args[1::2]))
+
+     def __enter__(self) -> None:
+         self.undo = [(pat, _get_option(pat)) for pat, val in self.ops]
+
+         for pat, val in self.ops:
+             _set_option(pat, val, silent=True)
+
+     def __exit__(self, *args) -> None:
+         if self.undo:
+             for pat, val in self.undo:
+                 _set_option(pat, val, silent=True)
+
+
+ def register_option(
+     key: str,
+     defval: object,
+     doc: str = "",
+     validator: Callable[[object], Any] | None = None,
+     cb: Callable[[str], Any] | None = None,
+ ) -> None:
+     """
+     Register an option in the package-wide pandas config object
+
+     Parameters
+     ----------
+     key : str
+         Fully-qualified key, e.g. "x.y.option - z".
+     defval : object
+         Default value of the option.
+     doc : str
+         Description of the option.
+     validator : Callable, optional
+         Function of a single argument, should raise `ValueError` if
+         called with a value which is not a legal value for the option.
+     cb
+         a function of a single argument "key", which is called
+         immediately after an option value is set/reset. key is
+         the full name of the option.
+
+     Raises
+     ------
+     ValueError if `validator` is specified and `defval` is not a valid value.
+
+     """
+     import keyword
+     import tokenize
+
+     key = key.lower()
+
+     if key in _registered_options:
+         raise OptionError(f"Option '{key}' has already been registered")
+     if key in _reserved_keys:
+         raise OptionError(f"Option '{key}' is a reserved key")
+
+     # the default value should be legal
+     if validator:
+         validator(defval)
+
+     # walk the nested dict, creating dicts as needed along the path
+     path = key.split(".")
+
+     for k in path:
+         if not re.match("^" + tokenize.Name + "$", k):
+             raise ValueError(f"{k} is not a valid identifier")
+         if keyword.iskeyword(k):
+             raise ValueError(f"{k} is a python keyword")
+
+     cursor = _global_config
+     msg = "Path prefix to option '{option}' is already an option"
+
+     for i, p in enumerate(path[:-1]):
+         if not isinstance(cursor, dict):
+             raise OptionError(msg.format(option=".".join(path[:i])))
+         if p not in cursor:
+             cursor[p] = {}
+         cursor = cursor[p]
+
+     if not isinstance(cursor, dict):
+         raise OptionError(msg.format(option=".".join(path[:-1])))
+
+     cursor[path[-1]] = defval  # initialize
+
+     # save the option metadata
+     _registered_options[key] = RegisteredOption(
+         key=key, defval=defval, doc=doc, validator=validator, cb=cb
+     )
+
+
+ def deprecate_option(
+     key: str,
+     msg: str | None = None,
+     rkey: str | None = None,
+     removal_ver: str | None = None,
+ ) -> None:
+     """
+     Mark option `key` as deprecated, if code attempts to access this option,
+     a warning will be produced, using `msg` if given, or a default message
+     if not.
+     if `rkey` is given, any access to the key will be re-routed to `rkey`.
+
+     Neither the existence of `key` nor that if `rkey` is checked. If they
+     do not exist, any subsequence access will fail as usual, after the
+     deprecation warning is given.
+
+     Parameters
+     ----------
+     key : str
+         Name of the option to be deprecated.
+         must be a fully-qualified option name (e.g "x.y.z.rkey").
+     msg : str, optional
+         Warning message to output when the key is referenced.
+         if no message is given a default message will be emitted.
+     rkey : str, optional
+         Name of an option to reroute access to.
+         If specified, any referenced `key` will be
+         re-routed to `rkey` including set/get/reset.
+         rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
+         used by the default message if no `msg` is specified.
+     removal_ver : str, optional
+         Specifies the version in which this option will
+         be removed. used by the default message if no `msg` is specified.
+
+     Raises
+     ------
+     OptionError
+         If the specified key has already been deprecated.
+     """
+     key = key.lower()
+
+     if key in _deprecated_options:
+         raise OptionError(f"Option '{key}' has already been defined as deprecated.")
+
+     _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
+
+
+ #
+ # functions internal to the module
+
+
+ def _select_options(pat: str) -> list[str]:
+     """
+     returns a list of keys matching `pat`
+
+     if pat=="all", returns all registered options
+     """
+     # short-circuit for exact key
+     if pat in _registered_options:
+         return [pat]
+
+     # else look through all of them
+     keys = sorted(_registered_options.keys())
+     if pat == "all":  # reserved key
+         return keys
+
+     return [k for k in keys if re.search(pat, k, re.I)]
+
+
+ def _get_root(key: str) -> tuple[dict[str, Any], str]:
+     path = key.split(".")
+     cursor = _global_config
+     for p in path[:-1]:
+         cursor = cursor[p]
+     return cursor, path[-1]
+
+
+ def _is_deprecated(key: str) -> bool:
+     """Returns True if the given option has been deprecated"""
+     key = key.lower()
+     return key in _deprecated_options
+
+
+ def _get_deprecated_option(key: str):
+     """
+     Retrieves the metadata for a deprecated option, if `key` is deprecated.
+
+     Returns
+     -------
+     DeprecatedOption (namedtuple) if key is deprecated, None otherwise
+     """
+     try:
+         d = _deprecated_options[key]
+     except KeyError:
+         return None
+     else:
+         return d
+
+
+ def _get_registered_option(key: str):
+     """
+     Retrieves the option metadata if `key` is a registered option.
+
+     Returns
+     -------
+     RegisteredOption (namedtuple) if key is deprecated, None otherwise
+     """
+     return _registered_options.get(key)
+
+
+ def _translate_key(key: str) -> str:
+     """
+     if key id deprecated and a replacement key defined, will return the
+     replacement key, otherwise returns `key` as - is
+     """
+     d = _get_deprecated_option(key)
+     if d:
+         return d.rkey or key
+     else:
+         return key
+
+
+ def _warn_if_deprecated(key: str) -> bool:
+     """
+     Checks if `key` is a deprecated option and if so, prints a warning.
+
+     Returns
+     -------
+     bool - True if `key` is deprecated, False otherwise.
+     """
+     d = _get_deprecated_option(key)
+     if d:
+         if d.msg:
+             warnings.warn(
+                 d.msg,
+                 FutureWarning,
+                 stacklevel=find_stack_level(),
+             )
+         else:
+             msg = f"'{key}' is deprecated"
+             if d.removal_ver:
+                 msg += f" and will be removed in {d.removal_ver}"
+             if d.rkey:
+                 msg += f", please use '{d.rkey}' instead."
+             else:
+                 msg += ", please refrain from using it."
+
+             warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
+         return True
+     return False
+
+
+ def _build_option_description(k: str) -> str:
+     """Builds a formatted description of a registered option and prints it"""
+     o = _get_registered_option(k)
+     d = _get_deprecated_option(k)
+
+     s = f"{k} "
+
+     if o.doc:
+         s += "\n".join(o.doc.strip().split("\n"))
+     else:
+         s += "No description available."
+
+     if o:
+         s += f"\n    [default: {o.defval}] [currently: {_get_option(k, True)}]"
+
+     if d:
+         rkey = d.rkey or ""
+         s += "\n    (Deprecated"
+         s += f", use `{rkey}` instead."
+         s += ")"
+
+     return s
+
+
+ def pp_options_list(keys: Iterable[str], width: int = 80, _print: bool = False):
+     """Builds a concise listing of available options, grouped by prefix"""
+     from itertools import groupby
+     from textwrap import wrap
+
+     def pp(name: str, ks: Iterable[str]) -> list[str]:
+         pfx = "- " + name + ".[" if name else ""
+         ls = wrap(
+             ", ".join(ks),
+             width,
+             initial_indent=pfx,
+             subsequent_indent="  ",
+             break_long_words=False,
+         )
+         if ls and ls[-1] and name:
+             ls[-1] = ls[-1] + "]"
+         return ls
+
+     ls: list[str] = []
+     singles = [x for x in sorted(keys) if x.find(".") < 0]
+     if singles:
+         ls += pp("", singles)
+     keys = [x for x in keys if x.find(".") >= 0]
+
+     for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]):
+         ks = [x[len(k) + 1 :] for x in list(g)]
+         ls += pp(k, ks)
+     s = "\n".join(ls)
+     if _print:
+         print(s)
+     else:
+         return s
+
+
+ #
+ # helpers
+
+
+ @contextmanager
+ def config_prefix(prefix: str) -> Generator[None, None, None]:
+     """
+     contextmanager for multiple invocations of API with a common prefix
+
+     supported API functions: (register / get / set )__option
+
+     Warning: This is not thread - safe, and won't work properly if you import
+     the API functions into your module using the "from x import y" construct.
+
+     Example
+     -------
+     import pandas._config.config as cf
+     with cf.config_prefix("display.font"):
+         cf.register_option("color", "red")
+         cf.register_option("size", " 5 pt")
+         cf.set_option(size, " 6 pt")
+         cf.get_option(size)
+         ...
+
+         etc'
+
+     will register options "display.font.color", "display.font.size", set the
+     value of "display.font.size"... and so on.
+     """
+     # Note: reset_option relies on set_option, and on key directly
+     # it does not fit in to this monkey-patching scheme
+
+     global register_option, get_option, set_option
+
+     def wrap(func: F) -> F:
+         def inner(key: str, *args, **kwds):
+             pkey = f"{prefix}.{key}"
+             return func(pkey, *args, **kwds)
+
+         return cast(F, inner)
+
+     _register_option = register_option
+     _get_option = get_option
+     _set_option = set_option
+     set_option = wrap(set_option)
+     get_option = wrap(get_option)
+     register_option = wrap(register_option)
+     try:
+         yield
+     finally:
+         set_option = _set_option
+         get_option = _get_option
+         register_option = _register_option
+
+
+ # These factories and methods are handy for use as the validator
+ # arg in register_option
+
+
+ def is_type_factory(_type: type[Any]) -> Callable[[Any], None]:
+     """
+
+     Parameters
+     ----------
+     `_type` - a type to be compared against (e.g. type(x) == `_type`)
+
+     Returns
+     -------
+     validator - a function of a single argument x , which raises
+                 ValueError if type(x) is not equal to `_type`
+
+     """
+
+     def inner(x) -> None:
+         if type(x) != _type:
+             raise ValueError(f"Value must have type '{_type}'")
+
+     return inner
+
+
+ def is_instance_factory(_type) -> Callable[[Any], None]:
+     """
+
+     Parameters
+     ----------
+     `_type` - the type to be checked against
+
+     Returns
+     -------
+     validator - a function of a single argument x , which raises
+                 ValueError if x is not an instance of `_type`
+
+     """
+     if isinstance(_type, (tuple, list)):
+         _type = tuple(_type)
+         type_repr = "|".join(map(str, _type))
+     else:
+         type_repr = f"'{_type}'"
+
+     def inner(x) -> None:
+         if not isinstance(x, _type):
+             raise ValueError(f"Value must be an instance of {type_repr}")
+
+     return inner
+
+
+ def is_one_of_factory(legal_values) -> Callable[[Any], None]:
+     callables = [c for c in legal_values if callable(c)]
+     legal_values = [c for c in legal_values if not callable(c)]
+
+     def inner(x) -> None:
+         if x not in legal_values:
+             if not any(c(x) for c in callables):
+                 uvals = [str(lval) for lval in legal_values]
+                 pp_values = "|".join(uvals)
+                 msg = f"Value must be one of {pp_values}"
+                 if len(callables):
+                     msg += " or a callable"
+                 raise ValueError(msg)
+
+     return inner
+
+
+ def is_nonnegative_int(value: object) -> None:
+     """
+     Verify that value is None or a positive int.
+
+     Parameters
+     ----------
+     value : None or int
+         The `value` to be checked.
+
+     Raises
+     ------
+     ValueError
+         When the value is not None or is a negative integer
+     """
+     if value is None:
+         return
+
+     elif isinstance(value, int):
+         if value >= 0:
+             return
+
+     msg = "Value must be a nonnegative integer or None"
+     raise ValueError(msg)
+
+
+ # common type validators, for convenience
+ # usage: register_option(... , validator = is_int)
+ is_int = is_type_factory(int)
+ is_bool = is_type_factory(bool)
+ is_float = is_type_factory(float)
+ is_str = is_type_factory(str)
+ is_text = is_instance_factory((str, bytes))
+
+
+ def is_callable(obj) -> bool:
+     """
+
+     Parameters
+     ----------
+     `obj` - the object to be checked
+
+     Returns
+     -------
+     validator - returns True if object is callable
+         raises ValueError otherwise.
+
+     """
+     if not callable(obj):
+         raise ValueError("Value must be a callable")
+     return True
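
Taken together, register_option, the validator factories, and option_context support the usage pattern sketched below. The "demo.*" keys are hypothetical and exist only for illustration; registering a key twice raises OptionError, so the sketch is one-shot per process:

    import pandas._config.config as cf

    # Register a namespaced option with a type validator (hypothetical key).
    cf.register_option("demo.max_widgets", 10, "max widget count", validator=cf.is_int)

    print(cf.get_option("demo.max_widgets"))   # 10
    cf.set_option("demo.max_widgets", 25)      # is_int rejects non-int values

    # option_context restores the previous value on exit.
    with cf.option_context("demo.max_widgets", 99):
        print(cf.get_option("demo.max_widgets"))  # 99
    print(cf.get_option("demo.max_widgets"))      # 25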
emu3/lib/python3.10/site-packages/pandas/_config/dates.py ADDED
@@ -0,0 +1,25 @@
+ """
+ config for datetime formatting
+ """
+ from __future__ import annotations
+
+ from pandas._config import config as cf
+
+ pc_date_dayfirst_doc = """
+ : boolean
+     When True, prints and parses dates with the day first, eg 20/01/2005
+ """
+
+ pc_date_yearfirst_doc = """
+ : boolean
+     When True, prints and parses dates with the year first, eg 2005/01/20
+ """
+
+ with cf.config_prefix("display"):
+     # Needed upstream of `_libs` because these are used in tslibs.parsing
+     cf.register_option(
+         "date_dayfirst", False, pc_date_dayfirst_doc, validator=cf.is_bool
+     )
+     cf.register_option(
+         "date_yearfirst", False, pc_date_yearfirst_doc, validator=cf.is_bool
+     )
emu3/lib/python3.10/site-packages/pandas/_config/display.py ADDED
@@ -0,0 +1,62 @@
+ """
+ Unopinionated display configuration.
+ """
+
+ from __future__ import annotations
+
+ import locale
+ import sys
+
+ from pandas._config import config as cf
+
+ # -----------------------------------------------------------------------------
+ # Global formatting options
+ _initial_defencoding: str | None = None
+
+
+ def detect_console_encoding() -> str:
+     """
+     Try to find the most capable encoding supported by the console.
+     slightly modified from the way IPython handles the same issue.
+     """
+     global _initial_defencoding
+
+     encoding = None
+     try:
+         encoding = sys.stdout.encoding or sys.stdin.encoding
+     except (AttributeError, OSError):
+         pass
+
+     # try again for something better
+     if not encoding or "ascii" in encoding.lower():
+         try:
+             encoding = locale.getpreferredencoding()
+         except locale.Error:
+             # can be raised by locale.setlocale(), which is
+             # called by getpreferredencoding
+             # (on some systems, see stdlib locale docs)
+             pass
+
+     # when all else fails. this will usually be "ascii"
+     if not encoding or "ascii" in encoding.lower():
+         encoding = sys.getdefaultencoding()
+
+     # GH#3360, save the reported defencoding at import time
+     # MPL backends may change it. Make available for debugging.
+     if not _initial_defencoding:
+         _initial_defencoding = sys.getdefaultencoding()
+
+     return encoding
+
+
+ pc_encoding_doc = """
+ : str/unicode
+     Defaults to the detected encoding of the console.
+     Specifies the encoding to be used for strings returned by to_string,
+     these are generally strings meant to be displayed on the console.
+ """
+
+ with cf.config_prefix("display"):
+     cf.register_option(
+         "encoding", detect_console_encoding(), pc_encoding_doc, validator=cf.is_text
+     )
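
A quick way to see what the detection logic above resolves to on a given host (the printed value varies by terminal; "UTF-8" is merely a typical result):

    from pandas._config.display import detect_console_encoding

    print(detect_console_encoding())  # e.g. "UTF-8" on most modern terminals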
emu3/lib/python3.10/site-packages/pandas/_config/localization.py ADDED
@@ -0,0 +1,172 @@
+ """
+ Helpers for configuring locale settings.
+
+ Name `localization` is chosen to avoid overlap with builtin `locale` module.
+ """
+ from __future__ import annotations
+
+ from contextlib import contextmanager
+ import locale
+ import platform
+ import re
+ import subprocess
+ from typing import TYPE_CHECKING
+
+ from pandas._config.config import options
+
+ if TYPE_CHECKING:
+     from collections.abc import Generator
+
+
+ @contextmanager
+ def set_locale(
+     new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
+ ) -> Generator[str | tuple[str, str], None, None]:
+     """
+     Context manager for temporarily setting a locale.
+
+     Parameters
+     ----------
+     new_locale : str or tuple
+         A string of the form <language_country>.<encoding>. For example to set
+         the current locale to US English with a UTF8 encoding, you would pass
+         "en_US.UTF-8".
+     lc_var : int, default `locale.LC_ALL`
+         The category of the locale being set.
+
+     Notes
+     -----
+     This is useful when you want to run a particular block of code under a
+     particular locale, without globally setting the locale. This probably isn't
+     thread-safe.
+     """
+     # getlocale is not always compliant with setlocale, use setlocale. GH#46595
+     current_locale = locale.setlocale(lc_var)
+
+     try:
+         locale.setlocale(lc_var, new_locale)
+         normalized_code, normalized_encoding = locale.getlocale()
+         if normalized_code is not None and normalized_encoding is not None:
+             yield f"{normalized_code}.{normalized_encoding}"
+         else:
+             yield new_locale
+     finally:
+         locale.setlocale(lc_var, current_locale)
+
+
+ def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
+     """
+     Check to see if we can set a locale, and subsequently get the locale,
+     without raising an Exception.
+
+     Parameters
+     ----------
+     lc : str
+         The locale to attempt to set.
+     lc_var : int, default `locale.LC_ALL`
+         The category of the locale being set.
+
+     Returns
+     -------
+     bool
+         Whether the passed locale can be set
+     """
+     try:
+         with set_locale(lc, lc_var=lc_var):
+             pass
+     except (ValueError, locale.Error):
+         # horrible name for a Exception subclass
+         return False
+     else:
+         return True
+
+
+ def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]:
+     """
+     Return a list of normalized locales that do not throw an ``Exception``
+     when set.
+
+     Parameters
+     ----------
+     locales : str
+         A string where each locale is separated by a newline.
+     normalize : bool
+         Whether to call ``locale.normalize`` on each locale.
+
+     Returns
+     -------
+     valid_locales : list
+         A list of valid locales.
+     """
+     return [
+         loc
+         for loc in (
+             locale.normalize(loc.strip()) if normalize else loc.strip()
+             for loc in locales
+         )
+         if can_set_locale(loc)
+     ]
+
+
+ def get_locales(
+     prefix: str | None = None,
+     normalize: bool = True,
+ ) -> list[str]:
+     """
+     Get all the locales that are available on the system.
+
+     Parameters
+     ----------
+     prefix : str
+         If not ``None`` then return only those locales with the prefix
+         provided. For example to get all English language locales (those that
+         start with ``"en"``), pass ``prefix="en"``.
+     normalize : bool
+         Call ``locale.normalize`` on the resulting list of available locales.
+         If ``True``, only locales that can be set without throwing an
+         ``Exception`` are returned.
+
+     Returns
+     -------
+     locales : list of strings
+         A list of locale strings that can be set with ``locale.setlocale()``.
+         For example::
+
+             locale.setlocale(locale.LC_ALL, locale_string)
+
+         On error will return an empty list (no locale available, e.g. Windows)
+
+     """
+     if platform.system() in ("Linux", "Darwin"):
+         raw_locales = subprocess.check_output(["locale", "-a"])
+     else:
+         # Other platforms e.g. windows platforms don't define "locale -a"
+         #  Note: is_platform_windows causes circular import here
+         return []
+
+     try:
+         # raw_locales is "\n" separated list of locales
+         # it may contain non-decodable parts, so split
+         # extract what we can and then rejoin.
+         split_raw_locales = raw_locales.split(b"\n")
+         out_locales = []
+         for x in split_raw_locales:
+             try:
+                 out_locales.append(str(x, encoding=options.display.encoding))
+             except UnicodeError:
+                 # 'locale -a' is used to populated 'raw_locales' and on
+                 # Redhat 7 Linux (and maybe others) prints locale names
+                 # using windows-1252 encoding. Bug only triggered by
+                 # a few special characters and when there is an
+                 # extensive list of installed locales.
+                 out_locales.append(str(x, encoding="windows-1252"))
+
+     except TypeError:
+         pass
+
+     if prefix is None:
+         return _valid_locales(out_locales, normalize)
+
+     pattern = re.compile(f"{prefix}.*")
+     found = pattern.findall("\n".join(out_locales))
+     return _valid_locales(found, normalize)
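
For reference, set_locale and can_set_locale above are typically paired as sketched below; the locale name is an assumption and must be installed on the host for the block to run:

    from pandas._config.localization import can_set_locale, set_locale

    # Guard first: availability depends on the host's installed locales.
    if can_set_locale("de_DE.UTF-8"):
        with set_locale("de_DE.UTF-8"):
            # code here runs under the German locale; the previous
            # locale is restored when the with-block exits
            pass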
emu3/lib/python3.10/site-packages/pandas/_testing/__init__.py ADDED
@@ -0,0 +1,639 @@
+ from __future__ import annotations
+
+ from decimal import Decimal
+ import operator
+ import os
+ from sys import byteorder
+ from typing import (
+     TYPE_CHECKING,
+     Callable,
+     ContextManager,
+     cast,
+ )
+ import warnings
+
+ import numpy as np
+
+ from pandas._config.localization import (
+     can_set_locale,
+     get_locales,
+     set_locale,
+ )
+
+ from pandas.compat import pa_version_under10p1
+
+ from pandas.core.dtypes.common import is_string_dtype
+
+ import pandas as pd
+ from pandas import (
+     ArrowDtype,
+     DataFrame,
+     Index,
+     MultiIndex,
+     RangeIndex,
+     Series,
+ )
+ from pandas._testing._io import (
+     round_trip_localpath,
+     round_trip_pathlib,
+     round_trip_pickle,
+     write_to_compressed,
+ )
+ from pandas._testing._warnings import (
+     assert_produces_warning,
+     maybe_produces_warning,
+ )
+ from pandas._testing.asserters import (
+     assert_almost_equal,
+     assert_attr_equal,
+     assert_categorical_equal,
+     assert_class_equal,
+     assert_contains_all,
+     assert_copy,
+     assert_datetime_array_equal,
+     assert_dict_equal,
+     assert_equal,
+     assert_extension_array_equal,
+     assert_frame_equal,
+     assert_index_equal,
+     assert_indexing_slices_equivalent,
+     assert_interval_array_equal,
+     assert_is_sorted,
+     assert_is_valid_plot_return_object,
+     assert_metadata_equivalent,
+     assert_numpy_array_equal,
+     assert_period_array_equal,
+     assert_series_equal,
+     assert_sp_array_equal,
+     assert_timedelta_array_equal,
+     raise_assert_detail,
+ )
+ from pandas._testing.compat import (
+     get_dtype,
+     get_obj,
+ )
+ from pandas._testing.contexts import (
+     assert_cow_warning,
+     decompress_file,
+     ensure_clean,
+     raises_chained_assignment_error,
+     set_timezone,
+     use_numexpr,
+     with_csv_dialect,
+ )
+ from pandas.core.arrays import (
+     BaseMaskedArray,
+     ExtensionArray,
+     NumpyExtensionArray,
+ )
+ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+ from pandas.core.construction import extract_array
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         Dtype,
+         NpDtype,
+     )
+
+     from pandas.core.arrays import ArrowExtensionArray
+
+ UNSIGNED_INT_NUMPY_DTYPES: list[NpDtype] = ["uint8", "uint16", "uint32", "uint64"]
+ UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
+ SIGNED_INT_NUMPY_DTYPES: list[NpDtype] = [int, "int8", "int16", "int32", "int64"]
+ SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
+ ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES
+ ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
+ ALL_INT_DTYPES: list[Dtype] = [*ALL_INT_NUMPY_DTYPES, *ALL_INT_EA_DTYPES]
+
+ FLOAT_NUMPY_DTYPES: list[NpDtype] = [float, "float32", "float64"]
+ FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
+ ALL_FLOAT_DTYPES: list[Dtype] = [*FLOAT_NUMPY_DTYPES, *FLOAT_EA_DTYPES]
+
+ COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
+ STRING_DTYPES: list[Dtype] = [str, "str", "U"]
+ COMPLEX_FLOAT_DTYPES: list[Dtype] = [*COMPLEX_DTYPES, *FLOAT_NUMPY_DTYPES]
+
+ DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
+ TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
+
+ BOOL_DTYPES: list[Dtype] = [bool, "bool"]
+ BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
+ OBJECT_DTYPES: list[Dtype] = [object, "object"]
+
+ ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
+ ALL_REAL_EXTENSION_DTYPES = FLOAT_EA_DTYPES + ALL_INT_EA_DTYPES
+ ALL_REAL_DTYPES: list[Dtype] = [*ALL_REAL_NUMPY_DTYPES, *ALL_REAL_EXTENSION_DTYPES]
+ ALL_NUMERIC_DTYPES: list[Dtype] = [*ALL_REAL_DTYPES, *COMPLEX_DTYPES]
+
+ ALL_NUMPY_DTYPES = (
+     ALL_REAL_NUMPY_DTYPES
+     + COMPLEX_DTYPES
+     + STRING_DTYPES
+     + DATETIME64_DTYPES
+     + TIMEDELTA64_DTYPES
+     + BOOL_DTYPES
+     + OBJECT_DTYPES
+     + BYTES_DTYPES
+ )
+
+ NARROW_NP_DTYPES = [
+     np.float16,
+     np.float32,
+     np.int8,
+     np.int16,
+     np.int32,
+     np.uint8,
+     np.uint16,
+     np.uint32,
+ ]
+
+ PYTHON_DATA_TYPES = [
+     str,
+     int,
+     float,
+     complex,
+     list,
+     tuple,
+     range,
+     dict,
+     set,
+     frozenset,
+     bool,
+     bytes,
+     bytearray,
+     memoryview,
+ ]
+
+ ENDIAN = {"little": "<", "big": ">"}[byteorder]
+
+ NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
+ NP_NAT_OBJECTS = [
+     cls("NaT", unit)
+     for cls in [np.datetime64, np.timedelta64]
+     for unit in [
+         "Y",
+         "M",
+         "W",
+         "D",
+         "h",
+         "m",
+         "s",
+         "ms",
+         "us",
+         "ns",
+         "ps",
+         "fs",
+         "as",
+     ]
+ ]
+
+ if not pa_version_under10p1:
+     import pyarrow as pa
+
+     UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
+     SIGNED_INT_PYARROW_DTYPES = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
+     ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES
+     ALL_INT_PYARROW_DTYPES_STR_REPR = [
+         str(ArrowDtype(typ)) for typ in ALL_INT_PYARROW_DTYPES
+     ]
+
+     # pa.float16 doesn't seem supported
+     # https://github.com/apache/arrow/blob/master/python/pyarrow/src/arrow/python/helpers.cc#L86
+     FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()]
+     FLOAT_PYARROW_DTYPES_STR_REPR = [
+         str(ArrowDtype(typ)) for typ in FLOAT_PYARROW_DTYPES
+     ]
+     DECIMAL_PYARROW_DTYPES = [pa.decimal128(7, 3)]
+     STRING_PYARROW_DTYPES = [pa.string()]
+     BINARY_PYARROW_DTYPES = [pa.binary()]
+
+     TIME_PYARROW_DTYPES = [
+         pa.time32("s"),
+         pa.time32("ms"),
+         pa.time64("us"),
+         pa.time64("ns"),
+     ]
+     DATE_PYARROW_DTYPES = [pa.date32(), pa.date64()]
+     DATETIME_PYARROW_DTYPES = [
+         pa.timestamp(unit=unit, tz=tz)
+         for unit in ["s", "ms", "us", "ns"]
+         for tz in [None, "UTC", "US/Pacific", "US/Eastern"]
+     ]
+     TIMEDELTA_PYARROW_DTYPES = [pa.duration(unit) for unit in ["s", "ms", "us", "ns"]]
+
+     BOOL_PYARROW_DTYPES = [pa.bool_()]
+
+     # TODO: Add container like pyarrow types:
+     #  https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions
+     ALL_PYARROW_DTYPES = (
+         ALL_INT_PYARROW_DTYPES
+         + FLOAT_PYARROW_DTYPES
+         + DECIMAL_PYARROW_DTYPES
+         + STRING_PYARROW_DTYPES
+         + BINARY_PYARROW_DTYPES
+         + TIME_PYARROW_DTYPES
+         + DATE_PYARROW_DTYPES
+         + DATETIME_PYARROW_DTYPES
+         + TIMEDELTA_PYARROW_DTYPES
+         + BOOL_PYARROW_DTYPES
+     )
+     ALL_REAL_PYARROW_DTYPES_STR_REPR = (
+         ALL_INT_PYARROW_DTYPES_STR_REPR + FLOAT_PYARROW_DTYPES_STR_REPR
+     )
+ else:
+     FLOAT_PYARROW_DTYPES_STR_REPR = []
+     ALL_INT_PYARROW_DTYPES_STR_REPR = []
+     ALL_PYARROW_DTYPES = []
+     ALL_REAL_PYARROW_DTYPES_STR_REPR = []
+
+ ALL_REAL_NULLABLE_DTYPES = (
+     FLOAT_NUMPY_DTYPES + ALL_REAL_EXTENSION_DTYPES + ALL_REAL_PYARROW_DTYPES_STR_REPR
+ )
+
+ arithmetic_dunder_methods = [
+     "__add__",
+     "__radd__",
+     "__sub__",
+     "__rsub__",
+     "__mul__",
+     "__rmul__",
+     "__floordiv__",
+     "__rfloordiv__",
+     "__truediv__",
+     "__rtruediv__",
+     "__pow__",
+     "__rpow__",
+     "__mod__",
+     "__rmod__",
+ ]
+
+ comparison_dunder_methods = ["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"]
+
+
+ # -----------------------------------------------------------------------------
+ # Comparators
+
+
+ def box_expected(expected, box_cls, transpose: bool = True):
+     """
+     Helper function to wrap the expected output of a test in a given box_class.
+
+     Parameters
+     ----------
+     expected : np.ndarray, Index, Series
+     box_cls : {Index, Series, DataFrame}
+
+     Returns
+     -------
+     subclass of box_cls
+     """
+     if box_cls is pd.array:
+         if isinstance(expected, RangeIndex):
+             # pd.array would return an IntegerArray
+             expected = NumpyExtensionArray(np.asarray(expected._values))
+         else:
+             expected = pd.array(expected, copy=False)
+     elif box_cls is Index:
+         with warnings.catch_warnings():
+             warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+             expected = Index(expected)
+     elif box_cls is Series:
+         with warnings.catch_warnings():
+             warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+             expected = Series(expected)
+     elif box_cls is DataFrame:
+         with warnings.catch_warnings():
+             warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+             expected = Series(expected).to_frame()
+         if transpose:
+             # for vector operations, we need a DataFrame to be a single-row,
+             # not a single-column, in order to operate against non-DataFrame
+             # vectors of the same length. But convert to two rows to avoid
+             # single-row special cases in datetime arithmetic
+             expected = expected.T
+             expected = pd.concat([expected] * 2, ignore_index=True)
+     elif box_cls is np.ndarray or box_cls is np.array:
+         expected = np.array(expected)
+     elif box_cls is to_array:
+         expected = to_array(expected)
+     else:
+         raise NotImplementedError(box_cls)
+     return expected
+
+
+ def to_array(obj):
+     """
+     Similar to pd.array, but does not cast numpy dtypes to nullable dtypes.
+     """
+     # temporary implementation until we get pd.array in place
+     dtype = getattr(obj, "dtype", None)
+
+     if dtype is None:
+         return np.asarray(obj)
+
+     return extract_array(obj, extract_numpy=True)
+
+
+ class SubclassedSeries(Series):
+     _metadata = ["testattr", "name"]
+
+     @property
+     def _constructor(self):
+         # For testing, those properties return a generic callable, and not
+         # the actual class. In this case that is equivalent, but it is to
+         # ensure we don't rely on the property returning a class
+         # See https://github.com/pandas-dev/pandas/pull/46018 and
+         # https://github.com/pandas-dev/pandas/issues/32638 and linked issues
+         return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
+
+     @property
+     def _constructor_expanddim(self):
+         return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
+
+
+ class SubclassedDataFrame(DataFrame):
+     _metadata = ["testattr"]
+
+     @property
+     def _constructor(self):
+         return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
+
+     @property
+     def _constructor_sliced(self):
+         return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
+
+
+ def convert_rows_list_to_csv_str(rows_list: list[str]) -> str:
+     """
+     Convert list of CSV rows to single CSV-formatted string for current OS.
+
+     This method is used for creating expected value of to_csv() method.
+
+     Parameters
+     ----------
+     rows_list : List[str]
+         Each element represents the row of csv.
+
+     Returns
+     -------
+     str
+         Expected output of to_csv() in current OS.
+     """
+     sep = os.linesep
+     return sep.join(rows_list) + sep
+
+
+ def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
+     """
+     Helper function to mark pytest.raises that have an external error message.
+
+     Parameters
+     ----------
+     expected_exception : Exception
+         Expected error to raise.
+
+     Returns
+     -------
+     Callable
+         Regular `pytest.raises` function with `match` equal to `None`.
+     """
+     import pytest
+
+     return pytest.raises(expected_exception, match=None)
+
+
+ cython_table = pd.core.common._cython_table.items()
+
+
+ def get_cython_table_params(ndframe, func_names_and_expected):
+     """
+     Combine frame, functions from com._cython_table
+     keys and expected result.
+
+     Parameters
+     ----------
+     ndframe : DataFrame or Series
+     func_names_and_expected : Sequence of two items
+         The first item is a name of a NDFrame method ('sum', 'prod') etc.
+         The second item is the expected return value.
+
+     Returns
+     -------
+     list
+         List of three items (DataFrame, function, expected result)
+     """
+     results = []
+     for func_name, expected in func_names_and_expected:
+         results.append((ndframe, func_name, expected))
+         results += [
+             (ndframe, func, expected)
+             for func, name in cython_table
+             if name == func_name
+         ]
+     return results
+
+
+ def get_op_from_name(op_name: str) -> Callable:
+     """
438
+ The operator function for a given op name.
439
+
440
+ Parameters
441
+ ----------
442
+ op_name : str
443
+ The op name, in form of "add" or "__add__".
444
+
445
+ Returns
446
+ -------
447
+ function
448
+ A function performing the operation.
449
+ """
450
+ short_opname = op_name.strip("_")
451
+ try:
452
+ op = getattr(operator, short_opname)
453
+ except AttributeError:
454
+ # Assume it is the reverse operator
455
+ rop = getattr(operator, short_opname[1:])
456
+ op = lambda x, y: rop(y, x)
457
+
458
+ return op
459
+
460
+
461
+ # -----------------------------------------------------------------------------
462
+ # Indexing test helpers
463
+
464
+
465
+ def getitem(x):
466
+ return x
467
+
468
+
469
+ def setitem(x):
470
+ return x
471
+
472
+
473
+ def loc(x):
474
+ return x.loc
475
+
476
+
477
+ def iloc(x):
478
+ return x.iloc
479
+
480
+
481
+ def at(x):
482
+ return x.at
483
+
484
+
485
+ def iat(x):
486
+ return x.iat
487
+
488
+
489
+ # -----------------------------------------------------------------------------
490
+
491
+ _UNITS = ["s", "ms", "us", "ns"]
492
+
493
+
494
+ def get_finest_unit(left: str, right: str):
495
+ """
496
+     Return the finer (higher-resolution) of two datetime64 units.
+ """
498
+ if _UNITS.index(left) >= _UNITS.index(right):
499
+ return left
500
+ return right
501
+
502
+
503
+ def shares_memory(left, right) -> bool:
504
+ """
505
+ Pandas-compat for np.shares_memory.
506
+ """
507
+ if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
508
+ return np.shares_memory(left, right)
509
+ elif isinstance(left, np.ndarray):
510
+ # Call with reversed args to get to unpacking logic below.
511
+ return shares_memory(right, left)
512
+
513
+ if isinstance(left, RangeIndex):
514
+ return False
515
+ if isinstance(left, MultiIndex):
516
+ return shares_memory(left._codes, right)
517
+ if isinstance(left, (Index, Series)):
518
+ return shares_memory(left._values, right)
519
+
520
+ if isinstance(left, NDArrayBackedExtensionArray):
521
+ return shares_memory(left._ndarray, right)
522
+ if isinstance(left, pd.core.arrays.SparseArray):
523
+ return shares_memory(left.sp_values, right)
524
+ if isinstance(left, pd.core.arrays.IntervalArray):
525
+ return shares_memory(left._left, right) or shares_memory(left._right, right)
526
+
527
+ if (
528
+ isinstance(left, ExtensionArray)
529
+ and is_string_dtype(left.dtype)
530
+ and left.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined]
531
+ ):
532
+ # https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669
533
+ left = cast("ArrowExtensionArray", left)
534
+ if (
535
+ isinstance(right, ExtensionArray)
536
+ and is_string_dtype(right.dtype)
537
+ and right.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined]
538
+ ):
539
+ right = cast("ArrowExtensionArray", right)
540
+ left_pa_data = left._pa_array
541
+ right_pa_data = right._pa_array
542
+ left_buf1 = left_pa_data.chunk(0).buffers()[1]
543
+ right_buf1 = right_pa_data.chunk(0).buffers()[1]
544
+ return left_buf1 == right_buf1
545
+
546
+ if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray):
547
+ # By convention, we'll say these share memory if they share *either*
548
+ # the _data or the _mask
549
+ return np.shares_memory(left._data, right._data) or np.shares_memory(
550
+ left._mask, right._mask
551
+ )
552
+
553
+ if isinstance(left, DataFrame) and len(left._mgr.arrays) == 1:
554
+ arr = left._mgr.arrays[0]
555
+ return shares_memory(arr, right)
556
+
557
+ raise NotImplementedError(type(left), type(right))
558
+
559
+
560
+ __all__ = [
561
+ "ALL_INT_EA_DTYPES",
562
+ "ALL_INT_NUMPY_DTYPES",
563
+ "ALL_NUMPY_DTYPES",
564
+ "ALL_REAL_NUMPY_DTYPES",
565
+ "assert_almost_equal",
566
+ "assert_attr_equal",
567
+ "assert_categorical_equal",
568
+ "assert_class_equal",
569
+ "assert_contains_all",
570
+ "assert_copy",
571
+ "assert_datetime_array_equal",
572
+ "assert_dict_equal",
573
+ "assert_equal",
574
+ "assert_extension_array_equal",
575
+ "assert_frame_equal",
576
+ "assert_index_equal",
577
+ "assert_indexing_slices_equivalent",
578
+ "assert_interval_array_equal",
579
+ "assert_is_sorted",
580
+ "assert_is_valid_plot_return_object",
581
+ "assert_metadata_equivalent",
582
+ "assert_numpy_array_equal",
583
+ "assert_period_array_equal",
584
+ "assert_produces_warning",
585
+ "assert_series_equal",
586
+ "assert_sp_array_equal",
587
+ "assert_timedelta_array_equal",
588
+ "assert_cow_warning",
589
+ "at",
590
+ "BOOL_DTYPES",
591
+ "box_expected",
592
+ "BYTES_DTYPES",
593
+ "can_set_locale",
594
+ "COMPLEX_DTYPES",
595
+ "convert_rows_list_to_csv_str",
596
+ "DATETIME64_DTYPES",
597
+ "decompress_file",
598
+ "ENDIAN",
599
+ "ensure_clean",
600
+ "external_error_raised",
601
+ "FLOAT_EA_DTYPES",
602
+ "FLOAT_NUMPY_DTYPES",
603
+ "get_cython_table_params",
604
+ "get_dtype",
605
+ "getitem",
606
+ "get_locales",
607
+ "get_finest_unit",
608
+ "get_obj",
609
+ "get_op_from_name",
610
+ "iat",
611
+ "iloc",
612
+ "loc",
613
+ "maybe_produces_warning",
614
+ "NARROW_NP_DTYPES",
615
+ "NP_NAT_OBJECTS",
616
+ "NULL_OBJECTS",
617
+ "OBJECT_DTYPES",
618
+ "raise_assert_detail",
619
+ "raises_chained_assignment_error",
620
+ "round_trip_localpath",
621
+ "round_trip_pathlib",
622
+ "round_trip_pickle",
623
+ "setitem",
624
+ "set_locale",
625
+ "set_timezone",
626
+ "shares_memory",
627
+ "SIGNED_INT_EA_DTYPES",
628
+ "SIGNED_INT_NUMPY_DTYPES",
629
+ "STRING_DTYPES",
630
+ "SubclassedDataFrame",
631
+ "SubclassedSeries",
632
+ "TIMEDELTA64_DTYPES",
633
+ "to_array",
634
+ "UNSIGNED_INT_EA_DTYPES",
635
+ "UNSIGNED_INT_NUMPY_DTYPES",
636
+ "use_numexpr",
637
+ "with_csv_dialect",
638
+ "write_to_compressed",
639
+ ]
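
A minimal sketch of how these comparators combine in an arithmetic test; the values and the reversed-op name below are illustrative, not taken from this commit:

import numpy as np
import pandas as pd
import pandas._testing as tm

ser = pd.Series([1, 2, 3])
op = tm.get_op_from_name("__radd__")   # resolves to the reversed operator.add
result = op(ser, 1)                    # equivalent to 1 + ser
expected = tm.box_expected(np.array([2, 3, 4]), pd.Series)
tm.assert_series_equal(result, expected)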
emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (14.3 kB)
emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc ADDED
Binary file (1.75 kB)
emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc ADDED
Binary file (4.37 kB)
emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc ADDED
Binary file (6.49 kB)
emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc ADDED
Binary file (32.9 kB)
emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc ADDED
Binary file (935 Bytes)
emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc ADDED
Binary file (6.23 kB)
emu3/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py ADDED
@@ -0,0 +1,93 @@
+ """
+ Hypothesis data generator helpers.
+ """
+ from datetime import datetime
+
+ from hypothesis import strategies as st
+ from hypothesis.extra.dateutil import timezones as dateutil_timezones
+ from hypothesis.extra.pytz import timezones as pytz_timezones
+
+ from pandas.compat import is_platform_windows
+
+ import pandas as pd
+
+ from pandas.tseries.offsets import (
+     BMonthBegin,
+     BMonthEnd,
+     BQuarterBegin,
+     BQuarterEnd,
+     BYearBegin,
+     BYearEnd,
+     MonthBegin,
+     MonthEnd,
+     QuarterBegin,
+     QuarterEnd,
+     YearBegin,
+     YearEnd,
+ )
+
+ OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3)
+
+ OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3)
+
+ OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3)
+
+ OPTIONAL_DICTS = st.lists(
+     st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
+     max_size=10,
+     min_size=3,
+ )
+
+ OPTIONAL_LISTS = st.lists(
+     st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)),
+     max_size=10,
+     min_size=3,
+ )
+
+ OPTIONAL_ONE_OF_ALL = st.one_of(
+     OPTIONAL_DICTS, OPTIONAL_FLOATS, OPTIONAL_INTS, OPTIONAL_LISTS, OPTIONAL_TEXT
+ )
+
+ if is_platform_windows():
+     DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1))
+ else:
+     DATETIME_NO_TZ = st.datetimes()
+
+ DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
+     min_value=pd.Timestamp(
+         1900, 1, 1
+     ).to_pydatetime(),  # pyright: ignore[reportGeneralTypeIssues]
+     max_value=pd.Timestamp(
+         1900, 1, 1
+     ).to_pydatetime(),  # pyright: ignore[reportGeneralTypeIssues]
+     timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
+ )
+
+ DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
+     min_value=pd.Timestamp.min.to_pydatetime(warn=False),
+     max_value=pd.Timestamp.max.to_pydatetime(warn=False),
+ )
+
+ INT_NEG_999_TO_POS_999 = st.integers(-999, 999)
+
+ # The strategy for each type is registered in conftest.py, as they don't carry
+ # enough runtime information (e.g. type hints) to infer how to build them.
+ YQM_OFFSET = st.one_of(
+     *map(
+         st.from_type,
+         [
+             MonthBegin,
+             MonthEnd,
+             BMonthBegin,
+             BMonthEnd,
+             QuarterBegin,
+             QuarterEnd,
+             BQuarterBegin,
+             BQuarterEnd,
+             YearBegin,
+             YearEnd,
+             BYearBegin,
+             BYearEnd,
+         ],
+     )
+ )
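
These strategies are consumed with hypothesis' @given decorator; a hedged sketch, with an illustrative property that is not part of this commit:

from hypothesis import given

import pandas as pd
from pandas._testing._hypothesis import OPTIONAL_INTS

@given(data=OPTIONAL_INTS)
def test_optional_ints_sketch(data):
    # lists of ints mixed with None round-trip through the Series constructor
    ser = pd.Series(data, dtype="object")
    assert len(ser) == len(data)
    assert ser.isna().sum() == sum(x is None for x in data)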
emu3/lib/python3.10/site-packages/pandas/_testing/_io.py ADDED
@@ -0,0 +1,170 @@
+ from __future__ import annotations
+
+ import gzip
+ import io
+ import pathlib
+ import tarfile
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+ )
+ import uuid
+ import zipfile
+
+ from pandas.compat import (
+     get_bz2_file,
+     get_lzma_file,
+ )
+ from pandas.compat._optional import import_optional_dependency
+
+ import pandas as pd
+ from pandas._testing.contexts import ensure_clean
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         FilePath,
+         ReadPickleBuffer,
+     )
+
+     from pandas import (
+         DataFrame,
+         Series,
+     )
+
+ # ------------------------------------------------------------------
+ # File-IO
+
+
+ def round_trip_pickle(
+     obj: Any, path: FilePath | ReadPickleBuffer | None = None
+ ) -> DataFrame | Series:
+     """
+     Pickle an object and then read it again.
+
+     Parameters
+     ----------
+     obj : any object
+         The object to pickle and then re-read.
+     path : str, path object or file-like object, default None
+         The path where the pickled object is written and then read.
+
+     Returns
+     -------
+     pandas object
+         The original object that was pickled and then re-read.
+     """
+     _path = path
+     if _path is None:
+         _path = f"__{uuid.uuid4()}__.pickle"
+     with ensure_clean(_path) as temp_path:
+         pd.to_pickle(obj, temp_path)
+         return pd.read_pickle(temp_path)
+
+
+ def round_trip_pathlib(writer, reader, path: str | None = None):
+     """
+     Write an object to file specified by a pathlib.Path and read it back
+
+     Parameters
+     ----------
+     writer : callable bound to pandas object
+         IO writing function (e.g. DataFrame.to_csv )
+     reader : callable
+         IO reading function (e.g. pd.read_csv )
+     path : str, default None
+         The path where the object is written and then read.
+
+     Returns
+     -------
+     pandas object
+         The original object that was serialized and then re-read.
+     """
+     Path = pathlib.Path
+     if path is None:
+         path = "___pathlib___"
+     with ensure_clean(path) as path:
+         writer(Path(path))  # type: ignore[arg-type]
+         obj = reader(Path(path))  # type: ignore[arg-type]
+     return obj
+
+
+ def round_trip_localpath(writer, reader, path: str | None = None):
+     """
+     Write an object to file specified by a py.path LocalPath and read it back.
+
+     Parameters
+     ----------
+     writer : callable bound to pandas object
+         IO writing function (e.g. DataFrame.to_csv )
+     reader : callable
+         IO reading function (e.g. pd.read_csv )
+     path : str, default None
+         The path where the object is written and then read.
+
+     Returns
+     -------
+     pandas object
+         The original object that was serialized and then re-read.
+     """
+     import pytest
+
+     LocalPath = pytest.importorskip("py.path").local
+     if path is None:
+         path = "___localpath___"
+     with ensure_clean(path) as path:
+         writer(LocalPath(path))
+         obj = reader(LocalPath(path))
+     return obj
+
+
+ def write_to_compressed(compression, path, data, dest: str = "test") -> None:
+     """
+     Write data to a compressed file.
+
+     Parameters
+     ----------
+     compression : {'gzip', 'bz2', 'zip', 'tar', 'xz', 'zstd'}
+         The compression type to use.
+     path : str
+         The file path to write the data.
+     data : str
+         The data to write.
+     dest : str, default "test"
+         The destination file (for zip and tar only).
+
+     Raises
+     ------
+     ValueError : An invalid compression value was passed in.
+     """
+     args: tuple[Any, ...] = (data,)
+     mode = "wb"
+     method = "write"
+     compress_method: Callable
+
+     if compression == "zip":
+         compress_method = zipfile.ZipFile
+         mode = "w"
+         args = (dest, data)
+         method = "writestr"
+     elif compression == "tar":
+         compress_method = tarfile.TarFile
+         mode = "w"
+         file = tarfile.TarInfo(name=dest)
+         bytes = io.BytesIO(data)
+         file.size = len(data)
+         args = (file, bytes)
+         method = "addfile"
+     elif compression == "gzip":
+         compress_method = gzip.GzipFile
+     elif compression == "bz2":
+         compress_method = get_bz2_file()
+     elif compression == "zstd":
+         compress_method = import_optional_dependency("zstandard").open
+     elif compression == "xz":
+         compress_method = get_lzma_file()
+     else:
+         raise ValueError(f"Unrecognized compression type: {compression}")
+
+     with compress_method(path, mode=mode) as f:
+         getattr(f, method)(*args)
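
A hedged usage sketch for these IO helpers; the frame contents and the file name "frame.csv.gz" are hypothetical:

import pandas as pd
import pandas._testing as tm

df = pd.DataFrame({"a": [1, 2, 3]})
result = tm.round_trip_pickle(df)   # pickles to a self-cleaning temp file, then re-reads
tm.assert_frame_equal(result, df)

# write_to_compressed pairs naturally with read_csv's compression handling:
tm.write_to_compressed("gzip", "frame.csv.gz", b"a,b\n1,2\n")
roundtripped = pd.read_csv("frame.csv.gz", compression="gzip")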
emu3/lib/python3.10/site-packages/pandas/_testing/_warnings.py ADDED
@@ -0,0 +1,232 @@
+ from __future__ import annotations
+
+ from contextlib import (
+     contextmanager,
+     nullcontext,
+ )
+ import inspect
+ import re
+ import sys
+ from typing import (
+     TYPE_CHECKING,
+     Literal,
+     cast,
+ )
+ import warnings
+
+ from pandas.compat import PY311
+
+ if TYPE_CHECKING:
+     from collections.abc import (
+         Generator,
+         Sequence,
+     )
+
+
+ @contextmanager
+ def assert_produces_warning(
+     expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None = Warning,
+     filter_level: Literal[
+         "error", "ignore", "always", "default", "module", "once"
+     ] = "always",
+     check_stacklevel: bool = True,
+     raise_on_extra_warnings: bool = True,
+     match: str | None = None,
+ ) -> Generator[list[warnings.WarningMessage], None, None]:
+     """
+     Context manager for running code expected to either raise a specific warning,
+     multiple specific warnings, or not raise any warnings. Verifies that the code
+     raises the expected warning(s), and that it does not raise any other unexpected
+     warnings. It is basically a wrapper around ``warnings.catch_warnings``.
+
+     Parameters
+     ----------
+     expected_warning : {Warning, False, tuple[Warning, ...], None}, default Warning
+         The type of Exception raised. ``Warning`` is the base
+         class for all warnings. To raise multiple types of exceptions,
+         pass them as a tuple. To check that no warning is returned,
+         specify ``False`` or ``None``.
+     filter_level : str or None, default "always"
+         Specifies whether warnings are ignored, displayed, or turned
+         into errors.
+         Valid values are:
+
+         * "error" - turns matching warnings into exceptions
+         * "ignore" - discard the warning
+         * "always" - always emit a warning
+         * "default" - print the warning the first time it is generated
+           from each location
+         * "module" - print the warning the first time it is generated
+           from each module
+         * "once" - print the warning the first time it is generated
+
+     check_stacklevel : bool, default True
+         If True, displays the line that called the function containing
+         the warning to show where the function is called. Otherwise, the
+         line that implements the function is displayed.
+     raise_on_extra_warnings : bool, default True
+         Whether extra warnings not of the type `expected_warning` should
+         cause the test to fail.
+     match : str, optional
+         Match warning message.
+
+     Examples
+     --------
+     >>> import warnings
+     >>> with assert_produces_warning():
+     ...     warnings.warn(UserWarning())
+     ...
+     >>> with assert_produces_warning(False):
+     ...     warnings.warn(RuntimeWarning())
+     ...
+     Traceback (most recent call last):
+         ...
+     AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
+     >>> with assert_produces_warning(UserWarning):
+     ...     warnings.warn(RuntimeWarning())
+     Traceback (most recent call last):
+         ...
+     AssertionError: Did not see expected warning of class 'UserWarning'.
+
+     .. warning:: This is *not* thread-safe.
+ """
93
+ __tracebackhide__ = True
94
+
95
+ with warnings.catch_warnings(record=True) as w:
96
+ warnings.simplefilter(filter_level)
97
+ try:
98
+ yield w
99
+ finally:
100
+ if expected_warning:
101
+ expected_warning = cast(type[Warning], expected_warning)
102
+ _assert_caught_expected_warning(
103
+ caught_warnings=w,
104
+ expected_warning=expected_warning,
105
+ match=match,
106
+ check_stacklevel=check_stacklevel,
107
+ )
108
+ if raise_on_extra_warnings:
109
+ _assert_caught_no_extra_warnings(
110
+ caught_warnings=w,
111
+ expected_warning=expected_warning,
112
+ )
113
+
114
+
115
+ def maybe_produces_warning(warning: type[Warning], condition: bool, **kwargs):
116
+ """
117
+ Return a context manager that possibly checks a warning based on the condition
118
+ """
119
+ if condition:
120
+ return assert_produces_warning(warning, **kwargs)
121
+ else:
122
+ return nullcontext()
123
+
124
+
125
+ def _assert_caught_expected_warning(
126
+ *,
127
+ caught_warnings: Sequence[warnings.WarningMessage],
128
+ expected_warning: type[Warning],
129
+ match: str | None,
130
+ check_stacklevel: bool,
131
+ ) -> None:
132
+ """Assert that there was the expected warning among the caught warnings."""
133
+ saw_warning = False
134
+ matched_message = False
135
+ unmatched_messages = []
136
+
137
+ for actual_warning in caught_warnings:
138
+ if issubclass(actual_warning.category, expected_warning):
139
+ saw_warning = True
140
+
141
+ if check_stacklevel:
142
+ _assert_raised_with_correct_stacklevel(actual_warning)
143
+
144
+ if match is not None:
145
+ if re.search(match, str(actual_warning.message)):
146
+ matched_message = True
147
+ else:
148
+ unmatched_messages.append(actual_warning.message)
149
+
150
+ if not saw_warning:
151
+ raise AssertionError(
152
+ f"Did not see expected warning of class "
153
+ f"{repr(expected_warning.__name__)}"
154
+ )
155
+
156
+ if match and not matched_message:
157
+ raise AssertionError(
158
+ f"Did not see warning {repr(expected_warning.__name__)} "
159
+ f"matching '{match}'. The emitted warning messages are "
160
+ f"{unmatched_messages}"
161
+ )
162
+
163
+
164
+ def _assert_caught_no_extra_warnings(
165
+ *,
166
+ caught_warnings: Sequence[warnings.WarningMessage],
167
+ expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
168
+ ) -> None:
169
+ """Assert that no extra warnings apart from the expected ones are caught."""
170
+ extra_warnings = []
171
+
172
+ for actual_warning in caught_warnings:
173
+ if _is_unexpected_warning(actual_warning, expected_warning):
174
+ # GH#38630 pytest.filterwarnings does not suppress these.
175
+ if actual_warning.category == ResourceWarning:
176
+ # GH 44732: Don't make the CI flaky by filtering SSL-related
177
+ # ResourceWarning from dependencies
178
+ if "unclosed <ssl.SSLSocket" in str(actual_warning.message):
179
+ continue
180
+ # GH 44844: Matplotlib leaves font files open during the entire process
181
+ # upon import. Don't make CI flaky if ResourceWarning raised
182
+ # due to these open files.
183
+ if any("matplotlib" in mod for mod in sys.modules):
184
+ continue
185
+ if PY311 and actual_warning.category == EncodingWarning:
186
+ # EncodingWarnings are checked in the CI
187
+ # pyproject.toml errors on EncodingWarnings in pandas
188
+ # Ignore EncodingWarnings from other libraries
189
+ continue
190
+ extra_warnings.append(
191
+ (
192
+ actual_warning.category.__name__,
193
+ actual_warning.message,
194
+ actual_warning.filename,
195
+ actual_warning.lineno,
196
+ )
197
+ )
198
+
199
+ if extra_warnings:
200
+ raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
201
+
202
+
203
+ def _is_unexpected_warning(
204
+ actual_warning: warnings.WarningMessage,
205
+ expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
206
+ ) -> bool:
207
+ """Check if the actual warning issued is unexpected."""
208
+ if actual_warning and not expected_warning:
209
+ return True
210
+ expected_warning = cast(type[Warning], expected_warning)
211
+ return bool(not issubclass(actual_warning.category, expected_warning))
212
+
213
+
214
+ def _assert_raised_with_correct_stacklevel(
215
+ actual_warning: warnings.WarningMessage,
216
+ ) -> None:
217
+ # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
218
+ frame = inspect.currentframe()
219
+ for _ in range(4):
220
+ frame = frame.f_back # type: ignore[union-attr]
221
+ try:
222
+ caller_filename = inspect.getfile(frame) # type: ignore[arg-type]
223
+ finally:
224
+ # See note in
225
+ # https://docs.python.org/3/library/inspect.html#inspect.Traceback
226
+ del frame
227
+ msg = (
228
+ "Warning not set with correct stacklevel. "
229
+ f"File where warning is raised: {actual_warning.filename} != "
230
+ f"{caller_filename}. Warning message: {actual_warning.message}"
231
+ )
232
+ assert actual_warning.filename == caller_filename, msg
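
Typical usage follows the doctest above; in this sketch the match regex, the warning text, and the gating condition are all illustrative:

import warnings
import pandas._testing as tm

# Require exactly this category, with a message matching the regex:
with tm.assert_produces_warning(FutureWarning, match="deprecated"):
    warnings.warn("this path is deprecated", FutureWarning)

# Gate the expectation on a runtime condition (a no-op context otherwise):
with tm.maybe_produces_warning(RuntimeWarning, condition=False):
    pass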
emu3/lib/python3.10/site-packages/pandas/_testing/asserters.py ADDED
@@ -0,0 +1,1435 @@
+ from __future__ import annotations
+
+ import operator
+ from typing import (
+     TYPE_CHECKING,
+     Literal,
+     NoReturn,
+     cast,
+ )
+
+ import numpy as np
+
+ from pandas._libs import lib
+ from pandas._libs.missing import is_matching_na
+ from pandas._libs.sparse import SparseIndex
+ import pandas._libs.testing as _testing
+ from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions
+
+ from pandas.core.dtypes.common import (
+     is_bool,
+     is_float_dtype,
+     is_integer_dtype,
+     is_number,
+     is_numeric_dtype,
+     needs_i8_conversion,
+ )
+ from pandas.core.dtypes.dtypes import (
+     CategoricalDtype,
+     DatetimeTZDtype,
+     ExtensionDtype,
+     NumpyEADtype,
+ )
+ from pandas.core.dtypes.missing import array_equivalent
+
+ import pandas as pd
+ from pandas import (
+     Categorical,
+     DataFrame,
+     DatetimeIndex,
+     Index,
+     IntervalDtype,
+     IntervalIndex,
+     MultiIndex,
+     PeriodIndex,
+     RangeIndex,
+     Series,
+     TimedeltaIndex,
+ )
+ from pandas.core.arrays import (
+     DatetimeArray,
+     ExtensionArray,
+     IntervalArray,
+     PeriodArray,
+     TimedeltaArray,
+ )
+ from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
+ from pandas.core.arrays.string_ import StringDtype
+ from pandas.core.indexes.api import safe_sort_index
+
+ from pandas.io.formats.printing import pprint_thing
+
+ if TYPE_CHECKING:
+     from pandas._typing import DtypeObj
+
+
+ def assert_almost_equal(
+     left,
+     right,
+     check_dtype: bool | Literal["equiv"] = "equiv",
+     rtol: float = 1.0e-5,
+     atol: float = 1.0e-8,
+     **kwargs,
+ ) -> None:
+     """
+     Check that the left and right objects are approximately equal.
+
+     By approximately equal, we refer to objects that are numbers or that
+     contain numbers which may be equivalent to specific levels of precision.
+
+     Parameters
+     ----------
+     left : object
+     right : object
+     check_dtype : bool or {'equiv'}, default 'equiv'
+         Check dtype if both a and b are the same type. If 'equiv' is passed in,
+         then `RangeIndex` and `Index` with int64 dtype are also considered
+         equivalent when doing type checking.
+     rtol : float, default 1e-5
+         Relative tolerance.
+     atol : float, default 1e-8
+         Absolute tolerance.
+     """
+     if isinstance(left, Index):
+         assert_index_equal(
+             left,
+             right,
+             check_exact=False,
+             exact=check_dtype,
+             rtol=rtol,
+             atol=atol,
+             **kwargs,
+         )
+
+     elif isinstance(left, Series):
+         assert_series_equal(
+             left,
+             right,
+             check_exact=False,
+             check_dtype=check_dtype,
+             rtol=rtol,
+             atol=atol,
+             **kwargs,
+         )
+
+     elif isinstance(left, DataFrame):
+         assert_frame_equal(
+             left,
+             right,
+             check_exact=False,
+             check_dtype=check_dtype,
+             rtol=rtol,
+             atol=atol,
+             **kwargs,
+         )
+
+     else:
+         # Other sequences.
+         if check_dtype:
+             if is_number(left) and is_number(right):
+                 # Do not compare numeric classes, like np.float64 and float.
+                 pass
+             elif is_bool(left) and is_bool(right):
+                 # Do not compare bool classes, like np.bool_ and bool.
+                 pass
+             else:
+                 if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
+                     obj = "numpy array"
+                 else:
+                     obj = "Input"
+                 assert_class_equal(left, right, obj=obj)
+
+         # if we have "equiv", this becomes True
+         _testing.assert_almost_equal(
+             left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs
+         )
+
+
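# A short sketch of the dispatch above (values illustrative): pandas containers
# are routed to their specialized asserters with check_exact=False, while
# scalars and plain sequences fall through to the cython comparator.
#
# import pandas as pd
# import pandas._testing as tm
#
# tm.assert_almost_equal(0.123456, 0.123457, rtol=1e-4)                     # scalar path
# tm.assert_almost_equal(pd.Index([1.0, 2.0]), pd.Index([1.0, 2.0000001]))  # Index path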
+ def _check_isinstance(left, right, cls) -> None:
+     """
+     Helper method for our assert_* methods that ensures that
+     the two objects being compared have the right type before
+     proceeding with the comparison.
+
+     Parameters
+     ----------
+     left : The first object being compared.
+     right : The second object being compared.
+     cls : The class type to check against.
+
+     Raises
+     ------
+     AssertionError : Either `left` or `right` is not an instance of `cls`.
+     """
+     cls_name = cls.__name__
+
+     if not isinstance(left, cls):
+         raise AssertionError(
+             f"{cls_name} Expected type {cls}, found {type(left)} instead"
+         )
+     if not isinstance(right, cls):
+         raise AssertionError(
+             f"{cls_name} Expected type {cls}, found {type(right)} instead"
+         )
+
+
+ def assert_dict_equal(left, right, compare_keys: bool = True) -> None:
+     _check_isinstance(left, right, dict)
+     _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
+
+
+ def assert_index_equal(
+     left: Index,
+     right: Index,
+     exact: bool | str = "equiv",
+     check_names: bool = True,
+     check_exact: bool = True,
+     check_categorical: bool = True,
+     check_order: bool = True,
+     rtol: float = 1.0e-5,
+     atol: float = 1.0e-8,
+     obj: str = "Index",
+ ) -> None:
+     """
+     Check that left and right Index are equal.
+
+     Parameters
+     ----------
+     left : Index
+     right : Index
+     exact : bool or {'equiv'}, default 'equiv'
+         Whether to check the Index class, dtype and inferred_type
+         are identical. If 'equiv', then RangeIndex can be substituted for
+         Index with an int64 dtype as well.
+     check_names : bool, default True
+         Whether to check the names attribute.
+     check_exact : bool, default True
+         Whether to compare numbers exactly.
+     check_categorical : bool, default True
+         Whether to compare internal Categorical exactly.
+     check_order : bool, default True
+         Whether to compare the order of index entries as well as their values.
+         If True, both indexes must contain the same elements, in the same order.
+         If False, both indexes must contain the same elements, but in any order.
+     rtol : float, default 1e-5
+         Relative tolerance. Only used when check_exact is False.
+     atol : float, default 1e-8
+         Absolute tolerance. Only used when check_exact is False.
+     obj : str, default 'Index'
+         Specify object name being compared, internally used to show appropriate
+         assertion message.
+
+     Examples
+     --------
+     >>> from pandas import testing as tm
+     >>> a = pd.Index([1, 2, 3])
+     >>> b = pd.Index([1, 2, 3])
+     >>> tm.assert_index_equal(a, b)
+     """
+     __tracebackhide__ = True
+
+     def _check_types(left, right, obj: str = "Index") -> None:
+         if not exact:
+             return
+
+         assert_class_equal(left, right, exact=exact, obj=obj)
+         assert_attr_equal("inferred_type", left, right, obj=obj)
+
+         # Skip exact dtype checking when `check_categorical` is False
+         if isinstance(left.dtype, CategoricalDtype) and isinstance(
+             right.dtype, CategoricalDtype
+         ):
+             if check_categorical:
+                 assert_attr_equal("dtype", left, right, obj=obj)
+                 assert_index_equal(left.categories, right.categories, exact=exact)
+             return
+
+         assert_attr_equal("dtype", left, right, obj=obj)
+
+     # instance validation
+     _check_isinstance(left, right, Index)
+
+     # class / dtype comparison
+     _check_types(left, right, obj=obj)
+
+     # level comparison
+     if left.nlevels != right.nlevels:
+         msg1 = f"{obj} levels are different"
+         msg2 = f"{left.nlevels}, {left}"
+         msg3 = f"{right.nlevels}, {right}"
+         raise_assert_detail(obj, msg1, msg2, msg3)
+
+     # length comparison
+     if len(left) != len(right):
+         msg1 = f"{obj} length are different"
+         msg2 = f"{len(left)}, {left}"
+         msg3 = f"{len(right)}, {right}"
+         raise_assert_detail(obj, msg1, msg2, msg3)
+
+     # If order doesn't matter then sort the index entries
+     if not check_order:
+         left = safe_sort_index(left)
+         right = safe_sort_index(right)
+
+     # MultiIndex special comparison for more friendly error messages
+     if isinstance(left, MultiIndex):
+         right = cast(MultiIndex, right)
+
+         for level in range(left.nlevels):
+             lobj = f"MultiIndex level [{level}]"
+             try:
+                 # try comparison on levels/codes to avoid densifying MultiIndex
+                 assert_index_equal(
+                     left.levels[level],
+                     right.levels[level],
+                     exact=exact,
+                     check_names=check_names,
+                     check_exact=check_exact,
+                     check_categorical=check_categorical,
+                     rtol=rtol,
+                     atol=atol,
+                     obj=lobj,
+                 )
+                 assert_numpy_array_equal(left.codes[level], right.codes[level])
+             except AssertionError:
+                 llevel = left.get_level_values(level)
+                 rlevel = right.get_level_values(level)
+
+                 assert_index_equal(
+                     llevel,
+                     rlevel,
+                     exact=exact,
+                     check_names=check_names,
+                     check_exact=check_exact,
+                     check_categorical=check_categorical,
+                     rtol=rtol,
+                     atol=atol,
+                     obj=lobj,
+                 )
+             # get_level_values may change dtype
+             _check_types(left.levels[level], right.levels[level], obj=obj)
+
+     # skip exact index checking when `check_categorical` is False
+     elif check_exact and check_categorical:
+         if not left.equals(right):
+             mismatch = left._values != right._values
+
+             if not isinstance(mismatch, np.ndarray):
+                 mismatch = cast("ExtensionArray", mismatch).fillna(True)
+
+             diff = np.sum(mismatch.astype(int)) * 100.0 / len(left)
+             msg = f"{obj} values are different ({np.round(diff, 5)} %)"
+             raise_assert_detail(obj, msg, left, right)
+     else:
+         # if we have "equiv", this becomes True
+         exact_bool = bool(exact)
+         _testing.assert_almost_equal(
+             left.values,
+             right.values,
+             rtol=rtol,
+             atol=atol,
+             check_dtype=exact_bool,
+             obj=obj,
+             lobj=left,
+             robj=right,
+         )
+
+     # metadata comparison
+     if check_names:
+         assert_attr_equal("names", left, right, obj=obj)
+     if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex):
+         assert_attr_equal("dtype", left, right, obj=obj)
+     if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex):
+         assert_interval_array_equal(left._values, right._values)
+
+     if check_categorical:
+         if isinstance(left.dtype, CategoricalDtype) or isinstance(
+             right.dtype, CategoricalDtype
+         ):
+             assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
+
+
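# A sketch of the two relaxations assert_index_equal offers (inputs illustrative):
#
# import pandas as pd
# import pandas._testing as tm
#
# # Same elements in a different order pass once check_order=False sorts both sides:
# tm.assert_index_equal(pd.Index([3, 1, 2]), pd.Index([1, 2, 3]), check_order=False)
#
# # exact="equiv" lets a RangeIndex stand in for an int64 Index:
# tm.assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2]), exact="equiv")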
+ def assert_class_equal(
+     left, right, exact: bool | str = True, obj: str = "Input"
+ ) -> None:
+     """
+     Checks classes are equal.
+     """
+     __tracebackhide__ = True
+
+     def repr_class(x):
+         if isinstance(x, Index):
+             # return Index as it is to include values in the error message
+             return x
+
+         return type(x).__name__
+
+     def is_class_equiv(idx: Index) -> bool:
+         """Classes that are a RangeIndex (sub-)instance or exactly an `Index`.
+
+         This only checks class equivalence. There is a separate check that the
+         dtype is int64.
+         """
+         return type(idx) is Index or isinstance(idx, RangeIndex)
+
+     if type(left) == type(right):
+         return
+
+     if exact == "equiv":
+         if is_class_equiv(left) and is_class_equiv(right):
+             return
+
+     msg = f"{obj} classes are different"
+     raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
+
+
+ def assert_attr_equal(attr: str, left, right, obj: str = "Attributes") -> None:
+     """
+     Check attributes are equal. Both objects must have attribute.
+
+     Parameters
+     ----------
+     attr : str
+         Attribute name being compared.
+     left : object
+     right : object
+     obj : str, default 'Attributes'
+         Specify object name being compared, internally used to show appropriate
+         assertion message
+     """
+     __tracebackhide__ = True
+
+     left_attr = getattr(left, attr)
+     right_attr = getattr(right, attr)
+
+     if left_attr is right_attr or is_matching_na(left_attr, right_attr):
+         # e.g. both np.nan, both NaT, both pd.NA, ...
+         return None
+
+     try:
+         result = left_attr == right_attr
+     except TypeError:
+         # datetimetz on rhs may raise TypeError
+         result = False
+     if (left_attr is pd.NA) ^ (right_attr is pd.NA):
+         result = False
+     elif not isinstance(result, bool):
+         result = result.all()
+
+     if not result:
+         msg = f'Attribute "{attr}" are different'
+         raise_assert_detail(obj, msg, left_attr, right_attr)
+     return None
+
+
+ def assert_is_valid_plot_return_object(objs) -> None:
+     from matplotlib.artist import Artist
+     from matplotlib.axes import Axes
+
+     if isinstance(objs, (Series, np.ndarray)):
+         if isinstance(objs, Series):
+             objs = objs._values
+         for el in objs.ravel():
+             msg = (
+                 "one of 'objs' is not a matplotlib Axes instance, "
+                 f"type encountered {repr(type(el).__name__)}"
+             )
+             assert isinstance(el, (Axes, dict)), msg
+     else:
+         msg = (
+             "objs is neither an ndarray of Artist instances nor a single "
+ "ArtistArtist instance, tuple, or dict, 'objs' is a "
442
+ f"{repr(type(objs).__name__)}"
443
+ )
444
+ assert isinstance(objs, (Artist, tuple, dict)), msg
445
+
446
+
447
+ def assert_is_sorted(seq) -> None:
448
+ """Assert that the sequence is sorted."""
449
+ if isinstance(seq, (Index, Series)):
450
+ seq = seq.values
451
+ # sorting does not change precisions
452
+ if isinstance(seq, np.ndarray):
453
+ assert_numpy_array_equal(seq, np.sort(np.array(seq)))
454
+ else:
455
+ assert_extension_array_equal(seq, seq[seq.argsort()])
456
+
457
+
458
+ def assert_categorical_equal(
459
+ left,
460
+ right,
461
+ check_dtype: bool = True,
462
+ check_category_order: bool = True,
463
+ obj: str = "Categorical",
464
+ ) -> None:
465
+ """
466
+ Test that Categoricals are equivalent.
467
+
468
+ Parameters
469
+ ----------
470
+ left : Categorical
471
+ right : Categorical
472
+ check_dtype : bool, default True
473
+ Check that integer dtype of the codes are the same.
474
+ check_category_order : bool, default True
475
+ Whether the order of the categories should be compared, which
476
+ implies identical integer codes. If False, only the resulting
477
+ values are compared. The ordered attribute is
478
+ checked regardless.
479
+ obj : str, default 'Categorical'
480
+ Specify object name being compared, internally used to show appropriate
481
+ assertion message.
482
+ """
483
+ _check_isinstance(left, right, Categorical)
484
+
485
+ exact: bool | str
486
+ if isinstance(left.categories, RangeIndex) or isinstance(
487
+ right.categories, RangeIndex
488
+ ):
489
+ exact = "equiv"
490
+ else:
491
+ # We still want to require exact matches for Index
492
+ exact = True
493
+
494
+ if check_category_order:
495
+ assert_index_equal(
496
+ left.categories, right.categories, obj=f"{obj}.categories", exact=exact
497
+ )
498
+ assert_numpy_array_equal(
499
+ left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
500
+ )
501
+ else:
502
+ try:
503
+ lc = left.categories.sort_values()
504
+ rc = right.categories.sort_values()
505
+ except TypeError:
506
+ # e.g. '<' not supported between instances of 'int' and 'str'
507
+ lc, rc = left.categories, right.categories
508
+ assert_index_equal(lc, rc, obj=f"{obj}.categories", exact=exact)
509
+ assert_index_equal(
510
+ left.categories.take(left.codes),
511
+ right.categories.take(right.codes),
512
+ obj=f"{obj}.values",
513
+ exact=exact,
514
+ )
515
+
516
+ assert_attr_equal("ordered", left, right, obj=obj)
517
+
518
+
519
+ def assert_interval_array_equal(
520
+ left, right, exact: bool | Literal["equiv"] = "equiv", obj: str = "IntervalArray"
521
+ ) -> None:
522
+ """
523
+ Test that two IntervalArrays are equivalent.
524
+
525
+ Parameters
526
+ ----------
527
+ left, right : IntervalArray
528
+ The IntervalArrays to compare.
529
+ exact : bool or {'equiv'}, default 'equiv'
530
+ Whether to check the Index class, dtype and inferred_type
531
+ are identical. If 'equiv', then RangeIndex can be substituted for
532
+ Index with an int64 dtype as well.
533
+ obj : str, default 'IntervalArray'
534
+ Specify object name being compared, internally used to show appropriate
535
+ assertion message
536
+ """
537
+ _check_isinstance(left, right, IntervalArray)
538
+
539
+ kwargs = {}
540
+ if left._left.dtype.kind in "mM":
541
+ # We have a DatetimeArray or TimedeltaArray
542
+ kwargs["check_freq"] = False
543
+
544
+ assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
545
+     assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)
+
+     assert_attr_equal("closed", left, right, obj=obj)
+
+
+ def assert_period_array_equal(left, right, obj: str = "PeriodArray") -> None:
+     _check_isinstance(left, right, PeriodArray)
+
+     assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray")
+     assert_attr_equal("dtype", left, right, obj=obj)
+
+
+ def assert_datetime_array_equal(
+     left, right, obj: str = "DatetimeArray", check_freq: bool = True
+ ) -> None:
+     __tracebackhide__ = True
+     _check_isinstance(left, right, DatetimeArray)
+
+     assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray")
+     if check_freq:
+         assert_attr_equal("freq", left, right, obj=obj)
+     assert_attr_equal("tz", left, right, obj=obj)
+
+
+ def assert_timedelta_array_equal(
+     left, right, obj: str = "TimedeltaArray", check_freq: bool = True
+ ) -> None:
+     __tracebackhide__ = True
+     _check_isinstance(left, right, TimedeltaArray)
+     assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray")
+     if check_freq:
+         assert_attr_equal("freq", left, right, obj=obj)
+
+
+ def raise_assert_detail(
+     obj, message, left, right, diff=None, first_diff=None, index_values=None
+ ) -> NoReturn:
+     __tracebackhide__ = True
+
+     msg = f"""{obj} are different
+
+ {message}"""
+
+     if isinstance(index_values, Index):
+         index_values = np.asarray(index_values)
+
+     if isinstance(index_values, np.ndarray):
+         msg += f"\n[index]: {pprint_thing(index_values)}"
+
+     if isinstance(left, np.ndarray):
+         left = pprint_thing(left)
+     elif isinstance(left, (CategoricalDtype, NumpyEADtype, StringDtype)):
+         left = repr(left)
+
+     if isinstance(right, np.ndarray):
+         right = pprint_thing(right)
+     elif isinstance(right, (CategoricalDtype, NumpyEADtype, StringDtype)):
+         right = repr(right)
+
+     msg += f"""
+ [left]:  {left}
+ [right]: {right}"""
+
+     if diff is not None:
+         msg += f"\n[diff]: {diff}"
+
+     if first_diff is not None:
+         msg += f"\n{first_diff}"
+
+     raise AssertionError(msg)
+
+
+ def assert_numpy_array_equal(
+     left,
+     right,
+     strict_nan: bool = False,
+     check_dtype: bool | Literal["equiv"] = True,
+     err_msg=None,
+     check_same=None,
+     obj: str = "numpy array",
+     index_values=None,
+ ) -> None:
+     """
+     Check that the left and right np.ndarray are equivalent.
+
+     Parameters
+     ----------
+     left, right : numpy.ndarray or iterable
+         The two arrays to be compared.
+     strict_nan : bool, default False
+         If True, consider NaN and None to be different.
+     check_dtype : bool, default True
+         Check dtype if both a and b are np.ndarray.
+     err_msg : str, default None
+         If provided, used as assertion message.
+     check_same : None|'copy'|'same', default None
+         Ensure left and right refer/do not refer to the same memory area.
+     obj : str, default 'numpy array'
+         Specify object name being compared, internally used to show appropriate
+         assertion message.
+     index_values : Index | numpy.ndarray, default None
+         optional index (shared by both left and right), used in output.
+     """
+     __tracebackhide__ = True
+
+     # instance validation
+     # Show a detailed error message when classes are different
+     assert_class_equal(left, right, obj=obj)
+     # both classes must be an np.ndarray
+     _check_isinstance(left, right, np.ndarray)
+
+     def _get_base(obj):
+         return obj.base if getattr(obj, "base", None) is not None else obj
+
+     left_base = _get_base(left)
+     right_base = _get_base(right)
+
+     if check_same == "same":
+         if left_base is not right_base:
+             raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
+     elif check_same == "copy":
+         if left_base is right_base:
+             raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
+
+     def _raise(left, right, err_msg) -> NoReturn:
+         if err_msg is None:
+             if left.shape != right.shape:
+                 raise_assert_detail(
+                     obj, f"{obj} shapes are different", left.shape, right.shape
+                 )
+
+             diff = 0
+             for left_arr, right_arr in zip(left, right):
+                 # count up differences
+                 if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
+                     diff += 1
+
+             diff = diff * 100.0 / left.size
+             msg = f"{obj} values are different ({np.round(diff, 5)} %)"
+             raise_assert_detail(obj, msg, left, right, index_values=index_values)
+
+         raise AssertionError(err_msg)
+
+     # compare shape and values
+     if not array_equivalent(left, right, strict_nan=strict_nan):
+         _raise(left, right, err_msg)
+
+     if check_dtype:
+         if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
+             assert_attr_equal("dtype", left, right, obj=obj)
+
+
+ def assert_extension_array_equal(
+     left,
+     right,
+     check_dtype: bool | Literal["equiv"] = True,
+     index_values=None,
+     check_exact: bool | lib.NoDefault = lib.no_default,
+     rtol: float | lib.NoDefault = lib.no_default,
+     atol: float | lib.NoDefault = lib.no_default,
+     obj: str = "ExtensionArray",
+ ) -> None:
+     """
+     Check that left and right ExtensionArrays are equal.
+
+     Parameters
+     ----------
+     left, right : ExtensionArray
+         The two arrays to compare.
+     check_dtype : bool, default True
+         Whether to check if the ExtensionArray dtypes are identical.
+     index_values : Index | numpy.ndarray, default None
+         Optional index (shared by both left and right), used in output.
+     check_exact : bool, default False
+         Whether to compare numbers exactly.
+
+         .. versionchanged:: 2.2.0
+
+             Defaults to True for integer dtypes if none of
+             ``check_exact``, ``rtol`` and ``atol`` are specified.
+     rtol : float, default 1e-5
+         Relative tolerance. Only used when check_exact is False.
+     atol : float, default 1e-8
+         Absolute tolerance. Only used when check_exact is False.
+     obj : str, default 'ExtensionArray'
+         Specify object name being compared, internally used to show appropriate
+         assertion message.
+
+         .. versionadded:: 2.0.0
+
+     Notes
+     -----
+     Missing values are checked separately from valid values.
+     A mask of missing values is computed for each and checked to match.
+     The remaining all-valid values are cast to object dtype and checked.
+
+     Examples
+     --------
+     >>> from pandas import testing as tm
+     >>> a = pd.Series([1, 2, 3, 4])
+     >>> b, c = a.array, a.array
+     >>> tm.assert_extension_array_equal(b, c)
+     """
+     if (
+         check_exact is lib.no_default
+         and rtol is lib.no_default
+         and atol is lib.no_default
+     ):
+         check_exact = (
+             is_numeric_dtype(left.dtype)
+             and not is_float_dtype(left.dtype)
+             or is_numeric_dtype(right.dtype)
+             and not is_float_dtype(right.dtype)
+         )
+     elif check_exact is lib.no_default:
+         check_exact = False
+
+     rtol = rtol if rtol is not lib.no_default else 1.0e-5
+     atol = atol if atol is not lib.no_default else 1.0e-8
+
+     assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
+     assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
+     if check_dtype:
+         assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
+
+     if (
+         isinstance(left, DatetimeLikeArrayMixin)
+         and isinstance(right, DatetimeLikeArrayMixin)
+         and type(right) == type(left)
+     ):
+         # GH 52449
+         if not check_dtype and left.dtype.kind in "mM":
+             if not isinstance(left.dtype, np.dtype):
+                 l_unit = cast(DatetimeTZDtype, left.dtype).unit
+             else:
+                 l_unit = np.datetime_data(left.dtype)[0]
+             if not isinstance(right.dtype, np.dtype):
+                 r_unit = cast(DatetimeTZDtype, right.dtype).unit
+             else:
+                 r_unit = np.datetime_data(right.dtype)[0]
+             if (
+                 l_unit != r_unit
+                 and compare_mismatched_resolutions(
+                     left._ndarray, right._ndarray, operator.eq
+                 ).all()
+             ):
+                 return
+         # Avoid slow object-dtype comparisons
+         # np.asarray for case where we have a np.MaskedArray
+         assert_numpy_array_equal(
+             np.asarray(left.asi8),
+             np.asarray(right.asi8),
+             index_values=index_values,
+             obj=obj,
+         )
+         return
+
+     left_na = np.asarray(left.isna())
+     right_na = np.asarray(right.isna())
+     assert_numpy_array_equal(
+         left_na, right_na, obj=f"{obj} NA mask", index_values=index_values
+     )
+
+     left_valid = left[~left_na].to_numpy(dtype=object)
+     right_valid = right[~right_na].to_numpy(dtype=object)
+     if check_exact:
+         assert_numpy_array_equal(
+             left_valid, right_valid, obj=obj, index_values=index_values
+         )
+     else:
+         _testing.assert_almost_equal(
+             left_valid,
+             right_valid,
+             check_dtype=bool(check_dtype),
+             rtol=rtol,
+             atol=atol,
+             obj=obj,
+             index_values=index_values,
+         )
+
+
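# A sketch of the NA-mask-then-values comparison described in the Notes above
# (arrays illustrative):
#
# import pandas as pd
# import pandas._testing as tm
#
# left = pd.array([1, 2, None], dtype="Int64")
# right = pd.array([1, 2, None], dtype="Int64")
# tm.assert_extension_array_equal(left, right)  # masks compared first, then valid values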
+ # This could be refactored to use the NDFrame.equals method
+ def assert_series_equal(
+     left,
+     right,
+     check_dtype: bool | Literal["equiv"] = True,
+     check_index_type: bool | Literal["equiv"] = "equiv",
+     check_series_type: bool = True,
+     check_names: bool = True,
+     check_exact: bool | lib.NoDefault = lib.no_default,
+     check_datetimelike_compat: bool = False,
+     check_categorical: bool = True,
+     check_category_order: bool = True,
+     check_freq: bool = True,
+     check_flags: bool = True,
+     rtol: float | lib.NoDefault = lib.no_default,
+     atol: float | lib.NoDefault = lib.no_default,
+     obj: str = "Series",
+     *,
+     check_index: bool = True,
+     check_like: bool = False,
+ ) -> None:
+     """
+     Check that left and right Series are equal.
+
+     Parameters
+     ----------
+     left : Series
+     right : Series
+     check_dtype : bool, default True
+         Whether to check the Series dtype is identical.
+     check_index_type : bool or {'equiv'}, default 'equiv'
+         Whether to check the Index class, dtype and inferred_type
+         are identical.
+     check_series_type : bool, default True
+         Whether to check the Series class is identical.
+     check_names : bool, default True
+         Whether to check the Series and Index names attribute.
+     check_exact : bool, default False
+ check_exact : bool, default False
864
+ Whether to compare numbers exactly.
865
+
866
+ .. versionchanged:: 2.2.0
867
+
868
+ Defaults to True for integer dtypes if none of
869
+ ``check_exact``, ``rtol`` and ``atol`` are specified.
870
+ check_datetimelike_compat : bool, default False
871
+ Compare datetime-like values that are comparable ignoring dtype.
872
+ check_categorical : bool, default True
873
+ Whether to compare internal Categorical exactly.
874
+ check_category_order : bool, default True
875
+ Whether to compare category order of internal Categoricals.
876
+ check_freq : bool, default True
877
+ Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
878
+ check_flags : bool, default True
879
+ Whether to check the `flags` attribute.
880
+ rtol : float, default 1e-5
881
+ Relative tolerance. Only used when check_exact is False.
882
+ atol : float, default 1e-8
883
+ Absolute tolerance. Only used when check_exact is False.
884
+ obj : str, default 'Series'
885
+ Specify object name being compared, internally used to show appropriate
886
+ assertion message.
887
+ check_index : bool, default True
888
+ Whether to check index equivalence. If False, then compare only values.
889
+
890
+ .. versionadded:: 1.3.0
891
+ check_like : bool, default False
892
+ If True, ignore the order of the index. Must be False if check_index is False.
893
+ Note: identical labels must be paired with identical data.
894
+
895
+ .. versionadded:: 1.5.0
896
+
897
+ Examples
898
+ --------
899
+ >>> from pandas import testing as tm
900
+ >>> a = pd.Series([1, 2, 3, 4])
901
+ >>> b = pd.Series([1, 2, 3, 4])
902
+ >>> tm.assert_series_equal(a, b)
903
+ """
904
+ __tracebackhide__ = True
905
+ check_exact_index = False if check_exact is lib.no_default else check_exact
906
+ if (
907
+ check_exact is lib.no_default
908
+ and rtol is lib.no_default
909
+ and atol is lib.no_default
910
+ ):
911
+ check_exact = (
912
+ is_numeric_dtype(left.dtype)
913
+ and not is_float_dtype(left.dtype)
914
+ or is_numeric_dtype(right.dtype)
915
+ and not is_float_dtype(right.dtype)
916
+ )
917
+ elif check_exact is lib.no_default:
918
+ check_exact = False
919
+
920
+ rtol = rtol if rtol is not lib.no_default else 1.0e-5
921
+ atol = atol if atol is not lib.no_default else 1.0e-8
922
+
923
+ if not check_index and check_like:
924
+ raise ValueError("check_like must be False if check_index is False")
925
+
926
+ # instance validation
927
+ _check_isinstance(left, right, Series)
928
+
929
+ if check_series_type:
930
+ assert_class_equal(left, right, obj=obj)
931
+
932
+ # length comparison
933
+ if len(left) != len(right):
934
+ msg1 = f"{len(left)}, {left.index}"
935
+ msg2 = f"{len(right)}, {right.index}"
936
+ raise_assert_detail(obj, "Series length are different", msg1, msg2)
937
+
938
+ if check_flags:
939
+ assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
940
+
941
+ if check_index:
942
+ # GH #38183
943
+ assert_index_equal(
944
+ left.index,
945
+ right.index,
946
+ exact=check_index_type,
947
+ check_names=check_names,
948
+ check_exact=check_exact_index,
949
+ check_categorical=check_categorical,
950
+ check_order=not check_like,
951
+ rtol=rtol,
952
+ atol=atol,
953
+ obj=f"{obj}.index",
954
+ )
955
+
956
+ if check_like:
957
+ left = left.reindex_like(right)
958
+
959
+ if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)):
960
+ lidx = left.index
961
+ ridx = right.index
962
+ assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
963
+
964
+ if check_dtype:
965
+ # We want to skip exact dtype checking when `check_categorical`
966
+ # is False. We'll still raise if only one is a `Categorical`,
967
+ # regardless of `check_categorical`
968
+ if (
969
+ isinstance(left.dtype, CategoricalDtype)
970
+ and isinstance(right.dtype, CategoricalDtype)
971
+ and not check_categorical
972
+ ):
973
+ pass
974
+ else:
975
+ assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
976
+ if check_exact:
977
+ left_values = left._values
978
+ right_values = right._values
979
+ # Only check exact if dtype is numeric
980
+ if isinstance(left_values, ExtensionArray) and isinstance(
981
+ right_values, ExtensionArray
982
+ ):
983
+ assert_extension_array_equal(
984
+ left_values,
985
+ right_values,
986
+ check_dtype=check_dtype,
987
+ index_values=left.index,
988
+ obj=str(obj),
989
+ )
990
+ else:
991
+ # convert both to NumPy; if the dtypes differed, check_dtype would have raised earlier
992
+ lv, rv = left_values, right_values
993
+ if isinstance(left_values, ExtensionArray):
994
+ lv = left_values.to_numpy()
995
+ if isinstance(right_values, ExtensionArray):
996
+ rv = right_values.to_numpy()
997
+ assert_numpy_array_equal(
998
+ lv,
999
+ rv,
1000
+ check_dtype=check_dtype,
1001
+ obj=str(obj),
1002
+ index_values=left.index,
1003
+ )
1004
+ elif check_datetimelike_compat and (
1005
+ needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
1006
+ ):
1007
+ # we want to check only if we have compat dtypes
1008
+ # e.g. integer and M|m are NOT compat, but we can simply check
1009
+ # the values in that case
1010
+
1011
+ # datetimelike may have different objects (e.g. datetime.datetime
1012
+ # vs Timestamp) but will compare equal
1013
+ if not Index(left._values).equals(Index(right._values)):
1014
+ msg = (
1015
+ f"[datetimelike_compat=True] {left._values} "
1016
+ f"is not equal to {right._values}."
1017
+ )
1018
+ raise AssertionError(msg)
1019
+ elif isinstance(left.dtype, IntervalDtype) and isinstance(
1020
+ right.dtype, IntervalDtype
1021
+ ):
1022
+ assert_interval_array_equal(left.array, right.array)
1023
+ elif isinstance(left.dtype, CategoricalDtype) or isinstance(
1024
+ right.dtype, CategoricalDtype
1025
+ ):
1026
+ _testing.assert_almost_equal(
1027
+ left._values,
1028
+ right._values,
1029
+ rtol=rtol,
1030
+ atol=atol,
1031
+ check_dtype=bool(check_dtype),
1032
+ obj=str(obj),
1033
+ index_values=left.index,
1034
+ )
1035
+ elif isinstance(left.dtype, ExtensionDtype) and isinstance(
1036
+ right.dtype, ExtensionDtype
1037
+ ):
1038
+ assert_extension_array_equal(
1039
+ left._values,
1040
+ right._values,
1041
+ rtol=rtol,
1042
+ atol=atol,
1043
+ check_dtype=check_dtype,
1044
+ index_values=left.index,
1045
+ obj=str(obj),
1046
+ )
1047
+ elif is_extension_array_dtype_and_needs_i8_conversion(
1048
+ left.dtype, right.dtype
1049
+ ) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
1050
+ assert_extension_array_equal(
1051
+ left._values,
1052
+ right._values,
1053
+ check_dtype=check_dtype,
1054
+ index_values=left.index,
1055
+ obj=str(obj),
1056
+ )
1057
+ elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
1058
+ # DatetimeArray or TimedeltaArray
1059
+ assert_extension_array_equal(
1060
+ left._values,
1061
+ right._values,
1062
+ check_dtype=check_dtype,
1063
+ index_values=left.index,
1064
+ obj=str(obj),
1065
+ )
1066
+ else:
1067
+ _testing.assert_almost_equal(
1068
+ left._values,
1069
+ right._values,
1070
+ rtol=rtol,
1071
+ atol=atol,
1072
+ check_dtype=bool(check_dtype),
1073
+ obj=str(obj),
1074
+ index_values=left.index,
1075
+ )
1076
+
1077
+ # metadata comparison
1078
+ if check_names:
1079
+ assert_attr_equal("name", left, right, obj=obj)
1080
+
1081
+ if check_categorical:
1082
+ if isinstance(left.dtype, CategoricalDtype) or isinstance(
1083
+ right.dtype, CategoricalDtype
1084
+ ):
1085
+ assert_categorical_equal(
1086
+ left._values,
1087
+ right._values,
1088
+ obj=f"{obj} category",
1089
+ check_category_order=check_category_order,
1090
+ )
1091
+
1092
+
1093
+ # This could be refactored to use the NDFrame.equals method
1094
+ def assert_frame_equal(
1095
+ left,
1096
+ right,
1097
+ check_dtype: bool | Literal["equiv"] = True,
1098
+ check_index_type: bool | Literal["equiv"] = "equiv",
1099
+ check_column_type: bool | Literal["equiv"] = "equiv",
1100
+ check_frame_type: bool = True,
1101
+ check_names: bool = True,
1102
+ by_blocks: bool = False,
1103
+ check_exact: bool | lib.NoDefault = lib.no_default,
1104
+ check_datetimelike_compat: bool = False,
1105
+ check_categorical: bool = True,
1106
+ check_like: bool = False,
1107
+ check_freq: bool = True,
1108
+ check_flags: bool = True,
1109
+ rtol: float | lib.NoDefault = lib.no_default,
1110
+ atol: float | lib.NoDefault = lib.no_default,
1111
+ obj: str = "DataFrame",
1112
+ ) -> None:
1113
+ """
1114
+ Check that left and right DataFrame are equal.
1115
+
1116
+ This function is intended to compare two DataFrames and output any
1117
+ differences. It is mostly intended for use in unit tests.
1118
+ Additional parameters allow varying the strictness of the
1119
+ equality checks performed.
1120
+
1121
+ Parameters
1122
+ ----------
1123
+ left : DataFrame
1124
+ First DataFrame to compare.
1125
+ right : DataFrame
1126
+ Second DataFrame to compare.
1127
+ check_dtype : bool, default True
1128
+ Whether to check the DataFrame dtype is identical.
1129
+ check_index_type : bool or {'equiv'}, default 'equiv'
1130
+ Whether to check the Index class, dtype and inferred_type
1131
+ are identical.
1132
+ check_column_type : bool or {'equiv'}, default 'equiv'
1133
+ Whether to check the columns class, dtype and inferred_type
1134
+ are identical. Is passed as the ``exact`` argument of
1135
+ :func:`assert_index_equal`.
1136
+ check_frame_type : bool, default True
1137
+ Whether to check the DataFrame class is identical.
1138
+ check_names : bool, default True
1139
+ Whether to check that the `names` attribute for both the `index`
1140
+ and `column` attributes of the DataFrame is identical.
1141
+ by_blocks : bool, default False
1142
+ Specify how to compare internal data. If False, compare by columns.
1143
+ If True, compare by blocks.
1144
+ check_exact : bool, default False
1145
+ Whether to compare numbers exactly.
1146
+
1147
+ .. versionchanged:: 2.2.0
1148
+
1149
+ Defaults to True for integer dtypes if none of
1150
+ ``check_exact``, ``rtol`` and ``atol`` are specified.
1151
+ check_datetimelike_compat : bool, default False
1152
+ Compare datetime-like values that are comparable ignoring dtype.
1153
+ check_categorical : bool, default True
1154
+ Whether to compare internal Categorical exactly.
1155
+ check_like : bool, default False
1156
+ If True, ignore the order of index & columns.
1157
+ Note: index labels must match their respective rows
1158
+ (likewise for columns); identical labels must be paired with identical data.
1159
+ check_freq : bool, default True
1160
+ Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
1161
+ check_flags : bool, default True
1162
+ Whether to check the `flags` attribute.
1163
+ rtol : float, default 1e-5
1164
+ Relative tolerance. Only used when check_exact is False.
1165
+ atol : float, default 1e-8
1166
+ Absolute tolerance. Only used when check_exact is False.
1167
+ obj : str, default 'DataFrame'
1168
+ Specify object name being compared, internally used to show appropriate
1169
+ assertion message.
1170
+
1171
+ See Also
1172
+ --------
1173
+ assert_series_equal : Equivalent method for asserting Series equality.
1174
+ DataFrame.equals : Check DataFrame equality.
1175
+
1176
+ Examples
1177
+ --------
1178
+ This example shows comparing two DataFrames that are equal
1179
+ but with columns of differing dtypes.
1180
+
1181
+ >>> from pandas.testing import assert_frame_equal
1182
+ >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
1183
+ >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
1184
+
1185
+ df1 equals itself.
1186
+
1187
+ >>> assert_frame_equal(df1, df1)
1188
+
1189
+ df1 differs from df2 as column 'b' is of a different type.
1190
+
1191
+ >>> assert_frame_equal(df1, df2)
1192
+ Traceback (most recent call last):
1193
+ ...
1194
+ AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
1195
+
1196
+ Attribute "dtype" are different
1197
+ [left]: int64
1198
+ [right]: float64
1199
+
1200
+ Ignore differing dtypes in columns with check_dtype.
1201
+
1202
+ >>> assert_frame_equal(df1, df2, check_dtype=False)
1203
+ """
1204
+ __tracebackhide__ = True
1205
+ _rtol = rtol if rtol is not lib.no_default else 1.0e-5
1206
+ _atol = atol if atol is not lib.no_default else 1.0e-8
1207
+ _check_exact = check_exact if check_exact is not lib.no_default else False
1208
+
1209
+ # instance validation
1210
+ _check_isinstance(left, right, DataFrame)
1211
+
1212
+ if check_frame_type:
1213
+ assert isinstance(left, type(right))
1214
+ # assert_class_equal(left, right, obj=obj)
1215
+
1216
+ # shape comparison
1217
+ if left.shape != right.shape:
1218
+ raise_assert_detail(
1219
+ obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
1220
+ )
1221
+
1222
+ if check_flags:
1223
+ assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
1224
+
1225
+ # index comparison
1226
+ assert_index_equal(
1227
+ left.index,
1228
+ right.index,
1229
+ exact=check_index_type,
1230
+ check_names=check_names,
1231
+ check_exact=_check_exact,
1232
+ check_categorical=check_categorical,
1233
+ check_order=not check_like,
1234
+ rtol=_rtol,
1235
+ atol=_atol,
1236
+ obj=f"{obj}.index",
1237
+ )
1238
+
1239
+ # column comparison
1240
+ assert_index_equal(
1241
+ left.columns,
1242
+ right.columns,
1243
+ exact=check_column_type,
1244
+ check_names=check_names,
1245
+ check_exact=_check_exact,
1246
+ check_categorical=check_categorical,
1247
+ check_order=not check_like,
1248
+ rtol=_rtol,
1249
+ atol=_atol,
1250
+ obj=f"{obj}.columns",
1251
+ )
1252
+
1253
+ if check_like:
1254
+ left = left.reindex_like(right)
1255
+
1256
+ # compare by blocks
1257
+ if by_blocks:
1258
+ rblocks = right._to_dict_of_blocks()
1259
+ lblocks = left._to_dict_of_blocks()
1260
+ for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
1261
+ assert dtype in lblocks
1262
+ assert dtype in rblocks
1263
+ assert_frame_equal(
1264
+ lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
1265
+ )
1266
+
1267
+ # compare by columns
1268
+ else:
1269
+ for i, col in enumerate(left.columns):
1270
+ # We have already checked that columns match, so we can do
1271
+ # fast location-based lookups
1272
+ lcol = left._ixs(i, axis=1)
1273
+ rcol = right._ixs(i, axis=1)
1274
+
1275
+ # GH #38183
1276
+ # use check_index=False, because we do not want to run
1277
+ # assert_index_equal for each column,
1278
+ # as we already checked it for the whole dataframe before.
1279
+ assert_series_equal(
1280
+ lcol,
1281
+ rcol,
1282
+ check_dtype=check_dtype,
1283
+ check_index_type=check_index_type,
1284
+ check_exact=check_exact,
1285
+ check_names=check_names,
1286
+ check_datetimelike_compat=check_datetimelike_compat,
1287
+ check_categorical=check_categorical,
1288
+ check_freq=check_freq,
1289
+ obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
1290
+ rtol=rtol,
1291
+ atol=atol,
1292
+ check_index=False,
1293
+ check_flags=False,
1294
+ )
1295
+
1296
+
1297
+ def assert_equal(left, right, **kwargs) -> None:
1298
+ """
1299
+ Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
1300
+
1301
+ Parameters
1302
+ ----------
1303
+ left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
1304
+ The two items to be compared.
1305
+ **kwargs
1306
+ All keyword arguments are passed through to the underlying assert method.
1307
+ """
1308
+ __tracebackhide__ = True
1309
+
1310
+ if isinstance(left, Index):
1311
+ assert_index_equal(left, right, **kwargs)
1312
+ if isinstance(left, (DatetimeIndex, TimedeltaIndex)):
1313
+ assert left.freq == right.freq, (left.freq, right.freq)
1314
+ elif isinstance(left, Series):
1315
+ assert_series_equal(left, right, **kwargs)
1316
+ elif isinstance(left, DataFrame):
1317
+ assert_frame_equal(left, right, **kwargs)
1318
+ elif isinstance(left, IntervalArray):
1319
+ assert_interval_array_equal(left, right, **kwargs)
1320
+ elif isinstance(left, PeriodArray):
1321
+ assert_period_array_equal(left, right, **kwargs)
1322
+ elif isinstance(left, DatetimeArray):
1323
+ assert_datetime_array_equal(left, right, **kwargs)
1324
+ elif isinstance(left, TimedeltaArray):
1325
+ assert_timedelta_array_equal(left, right, **kwargs)
1326
+ elif isinstance(left, ExtensionArray):
1327
+ assert_extension_array_equal(left, right, **kwargs)
1328
+ elif isinstance(left, np.ndarray):
1329
+ assert_numpy_array_equal(left, right, **kwargs)
1330
+ elif isinstance(left, str):
1331
+ assert kwargs == {}
1332
+ assert left == right
1333
+ else:
1334
+ assert kwargs == {}
1335
+ assert_almost_equal(left, right)
1336
+
1337
+
1338
+ def assert_sp_array_equal(left, right) -> None:
1339
+ """
1340
+ Check that the left and right SparseArray are equal.
1341
+
1342
+ Parameters
1343
+ ----------
1344
+ left : SparseArray
1345
+ right : SparseArray
1346
+ """
1347
+ _check_isinstance(left, right, pd.arrays.SparseArray)
1348
+
1349
+ assert_numpy_array_equal(left.sp_values, right.sp_values)
1350
+
1351
+ # SparseIndex comparison
1352
+ assert isinstance(left.sp_index, SparseIndex)
1353
+ assert isinstance(right.sp_index, SparseIndex)
1354
+
1355
+ left_index = left.sp_index
1356
+ right_index = right.sp_index
1357
+
1358
+ if not left_index.equals(right_index):
1359
+ raise_assert_detail(
1360
+ "SparseArray.index", "index are not equal", left_index, right_index
1361
+ )
1362
+ else:
1363
+ # indices are equal; nothing further to check
1364
+ pass
1365
+
1366
+ assert_attr_equal("fill_value", left, right)
1367
+ assert_attr_equal("dtype", left, right)
1368
+ assert_numpy_array_equal(left.to_dense(), right.to_dense())
1369
+
1370
+
1371
+ def assert_contains_all(iterable, dic) -> None:
1372
+ for k in iterable:
1373
+ assert k in dic, f"Did not contain item: {repr(k)}"
1374
+
1375
+
1376
+ def assert_copy(iter1, iter2, **eql_kwargs) -> None:
1377
+ """
1378
+ iter1, iter2: iterables that produce elements
1379
+ comparable with assert_almost_equal
1380
+
1381
+ Checks that the elements are equal, but not
1382
+ the same object. (Does not check that items
1383
+ in sequences are also not the same object)
1384
+ """
1385
+ for elem1, elem2 in zip(iter1, iter2):
1386
+ assert_almost_equal(elem1, elem2, **eql_kwargs)
1387
+ msg = (
1388
+ f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
1389
+ "different objects, but they were the same object."
1390
+ )
1391
+ assert elem1 is not elem2, msg
1392
+
1393
+
1394
+ def is_extension_array_dtype_and_needs_i8_conversion(
1395
+ left_dtype: DtypeObj, right_dtype: DtypeObj
1396
+ ) -> bool:
1397
+ """
1398
+ Checks that we have the combination of an ExtensionArray dtype and
1399
+ a dtype that should be converted to int64.
1400
+
1401
+ Returns
1402
+ -------
1403
+ bool
1404
+
1405
+ Related to issue #37609
1406
+ """
1407
+ return isinstance(left_dtype, ExtensionDtype) and needs_i8_conversion(right_dtype)
1408
+
1409
+
1410
+ def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice) -> None:
1411
+ """
1412
+ Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable,
1413
+ ser[l_slc].
1414
+ """
1415
+ expected = ser.iloc[i_slc]
1416
+
1417
+ assert_series_equal(ser.loc[l_slc], expected)
1418
+
1419
+ if not is_integer_dtype(ser.index):
1420
+ # Plain getitem slices positionally on integer indices (unlike .loc), so only compare for non-integer indices.
1421
+ assert_series_equal(ser[l_slc], expected)
1422
+
1423
+
1424
+ def assert_metadata_equivalent(
1425
+ left: DataFrame | Series, right: DataFrame | Series | None = None
1426
+ ) -> None:
1427
+ """
1428
+ Check that ._metadata attributes are equivalent.
1429
+ """
1430
+ for attr in left._metadata:
1431
+ val = getattr(left, attr, None)
1432
+ if right is None:
1433
+ assert val is None
1434
+ else:
1435
+ assert val == getattr(right, attr, None)
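The tolerance machinery shared by the asserters above is easiest to see from the public entry point. A minimal sketch, assuming pandas >= 2.2 so the integer-dtype exact-comparison default described in the docstrings applies:

```python
import pandas as pd
from pandas import testing as tm

# Integer dtypes are compared exactly by default (the 2.2.0 change).
tm.assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3]))

# Float dtypes fall back to rtol=1e-5 / atol=1e-8, so a sub-atol
# discrepancy still passes ...
tm.assert_series_equal(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0 + 1e-9]))

# ... but the same data fails once exact comparison is requested.
try:
    tm.assert_series_equal(
        pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0 + 1e-9]), check_exact=True
    )
except AssertionError as exc:
    print("raised:", type(exc).__name__)
```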
emu3/lib/python3.10/site-packages/pandas/_testing/compat.py ADDED
@@ -0,0 +1,29 @@
1
+ """
2
+ Helpers for sharing tests between DataFrame/Series
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import TYPE_CHECKING
7
+
8
+ from pandas import DataFrame
9
+
10
+ if TYPE_CHECKING:
11
+ from pandas._typing import DtypeObj
12
+
13
+
14
+ def get_dtype(obj) -> DtypeObj:
15
+ if isinstance(obj, DataFrame):
16
+ # Note: we are assuming only one column
17
+ return obj.dtypes.iat[0]
18
+ else:
19
+ return obj.dtype
20
+
21
+
22
+ def get_obj(df: DataFrame, klass):
23
+ """
24
+ For sharing tests using frame_or_series, either return the DataFrame
25
+ unchanged or return its first column as a Series.
26
+ """
27
+ if klass is DataFrame:
28
+ return df
29
+ return df._ixs(0, axis=1)
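A hypothetical illustration of how these two helpers support a test shared between DataFrame and Series: `check_int64` is an invented name, `klass` stands in for the pandas test suite's `frame_or_series` fixture, and the internal import path may change between versions:

```python
import pandas as pd
from pandas._testing.compat import get_dtype, get_obj


def check_int64(klass) -> None:
    df = pd.DataFrame({"a": [1, 2, 3]})
    obj = get_obj(df, klass)  # the DataFrame itself, or its first column
    assert str(get_dtype(obj)) == "int64"


check_int64(pd.DataFrame)
check_int64(pd.Series)
```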
emu3/lib/python3.10/site-packages/pandas/_testing/contexts.py ADDED
@@ -0,0 +1,257 @@
1
+ from __future__ import annotations
2
+
3
+ from contextlib import contextmanager
4
+ import os
5
+ from pathlib import Path
6
+ import tempfile
7
+ from typing import (
8
+ IO,
9
+ TYPE_CHECKING,
10
+ Any,
11
+ )
12
+ import uuid
13
+
14
+ from pandas._config import using_copy_on_write
15
+
16
+ from pandas.compat import PYPY
17
+ from pandas.errors import ChainedAssignmentError
18
+
19
+ from pandas import set_option
20
+
21
+ from pandas.io.common import get_handle
22
+
23
+ if TYPE_CHECKING:
24
+ from collections.abc import Generator
25
+
26
+ from pandas._typing import (
27
+ BaseBuffer,
28
+ CompressionOptions,
29
+ FilePath,
30
+ )
31
+
32
+
33
+ @contextmanager
34
+ def decompress_file(
35
+ path: FilePath | BaseBuffer, compression: CompressionOptions
36
+ ) -> Generator[IO[bytes], None, None]:
37
+ """
38
+ Open a compressed file and return a file object.
39
+
40
+ Parameters
41
+ ----------
42
+ path : str
43
+ The path where the file is read from.
44
+
45
+ compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None}
46
+ Name of the decompression to use
47
+
48
+ Returns
49
+ -------
50
+ file object
51
+ """
52
+ with get_handle(path, "rb", compression=compression, is_text=False) as handle:
53
+ yield handle.handle
54
+
55
+
56
+ @contextmanager
57
+ def set_timezone(tz: str) -> Generator[None, None, None]:
58
+ """
59
+ Context manager for temporarily setting a timezone.
60
+
61
+ Parameters
62
+ ----------
63
+ tz : str
64
+ A string representing a valid timezone.
65
+
66
+ Examples
67
+ --------
68
+ >>> from datetime import datetime
69
+ >>> from dateutil.tz import tzlocal
70
+ >>> tzlocal().tzname(datetime(2021, 1, 1)) # doctest: +SKIP
71
+ 'IST'
72
+
73
+ >>> with set_timezone('US/Eastern'):
74
+ ... tzlocal().tzname(datetime(2021, 1, 1))
75
+ ...
76
+ 'EST'
77
+ """
78
+ import time
79
+
80
+ def setTZ(tz) -> None:
81
+ if tz is None:
82
+ try:
83
+ del os.environ["TZ"]
84
+ except KeyError:
85
+ pass
86
+ else:
87
+ os.environ["TZ"] = tz
88
+ time.tzset()
89
+
90
+ orig_tz = os.environ.get("TZ")
91
+ setTZ(tz)
92
+ try:
93
+ yield
94
+ finally:
95
+ setTZ(orig_tz)
96
+
97
+
98
+ @contextmanager
99
+ def ensure_clean(
100
+ filename=None, return_filelike: bool = False, **kwargs: Any
101
+ ) -> Generator[Any, None, None]:
102
+ """
103
+ Gets a temporary path and ensures it is removed when the context exits.
104
+
105
+ This implementation does not use tempfile.mkstemp to avoid having a file handle.
106
+ If the code using the returned path wants to delete the file itself, Windows
107
+ requires that no program has a file handle to it.
108
+
109
+ Parameters
110
+ ----------
111
+ filename : str (optional)
112
+ suffix of the created file.
113
+ return_filelike : bool (default False)
114
+ if True, returns a file-like which is *always* cleaned. Necessary for
115
+ savefig and other functions which want to append extensions.
116
+ **kwargs
117
+ Additional keywords are passed to open().
118
+
119
+ """
120
+ folder = Path(tempfile.gettempdir())
121
+
122
+ if filename is None:
123
+ filename = ""
124
+ filename = str(uuid.uuid4()) + filename
125
+ path = folder / filename
126
+
127
+ path.touch()
128
+
129
+ handle_or_str: str | IO = str(path)
130
+ encoding = kwargs.pop("encoding", None)
131
+ if return_filelike:
132
+ kwargs.setdefault("mode", "w+b")
133
+ if encoding is None and "b" not in kwargs["mode"]:
134
+ encoding = "utf-8"
135
+ handle_or_str = open(path, encoding=encoding, **kwargs)
136
+
137
+ try:
138
+ yield handle_or_str
139
+ finally:
140
+ if not isinstance(handle_or_str, str):
141
+ handle_or_str.close()
142
+ if path.is_file():
143
+ path.unlink()
144
+
145
+
146
+ @contextmanager
147
+ def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]:
148
+ """
149
+ Context manager to temporarily register a CSV dialect for parsing CSV.
150
+
151
+ Parameters
152
+ ----------
153
+ name : str
154
+ The name of the dialect.
155
+ kwargs : mapping
156
+ The parameters for the dialect.
157
+
158
+ Raises
159
+ ------
160
+ ValueError : the name of the dialect conflicts with a builtin one.
161
+
162
+ See Also
163
+ --------
164
+ csv : Python's CSV library.
165
+ """
166
+ import csv
167
+
168
+ _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
169
+
170
+ if name in _BUILTIN_DIALECTS:
171
+ raise ValueError("Cannot override builtin dialect.")
172
+
173
+ csv.register_dialect(name, **kwargs)
174
+ try:
175
+ yield
176
+ finally:
177
+ csv.unregister_dialect(name)
178
+
179
+
180
+ @contextmanager
181
+ def use_numexpr(use, min_elements=None) -> Generator[None, None, None]:
182
+ from pandas.core.computation import expressions as expr
183
+
184
+ if min_elements is None:
185
+ min_elements = expr._MIN_ELEMENTS
186
+
187
+ olduse = expr.USE_NUMEXPR
188
+ oldmin = expr._MIN_ELEMENTS
189
+ set_option("compute.use_numexpr", use)
190
+ expr._MIN_ELEMENTS = min_elements
191
+ try:
192
+ yield
193
+ finally:
194
+ expr._MIN_ELEMENTS = oldmin
195
+ set_option("compute.use_numexpr", olduse)
196
+
197
+
198
+ def raises_chained_assignment_error(warn=True, extra_warnings=(), extra_match=()):
199
+ from pandas._testing import assert_produces_warning
200
+
201
+ if not warn:
202
+ from contextlib import nullcontext
203
+
204
+ return nullcontext()
205
+
206
+ if PYPY and not extra_warnings:
207
+ from contextlib import nullcontext
208
+
209
+ return nullcontext()
210
+ elif PYPY and extra_warnings:
211
+ return assert_produces_warning(
212
+ extra_warnings,
213
+ match="|".join(extra_match),
214
+ )
215
+ else:
216
+ if using_copy_on_write():
217
+ warning = ChainedAssignmentError
218
+ match = (
219
+ "A value is trying to be set on a copy of a DataFrame or Series "
220
+ "through chained assignment"
221
+ )
222
+ else:
223
+ warning = FutureWarning # type: ignore[assignment]
224
+ # TODO update match
225
+ match = "ChainedAssignmentError"
226
+ if extra_warnings:
227
+ warning = (warning, *extra_warnings) # type: ignore[assignment]
228
+ return assert_produces_warning(
229
+ warning,
230
+ match="|".join((match, *extra_match)),
231
+ )
232
+
233
+
234
+ def assert_cow_warning(warn=True, match=None, **kwargs):
235
+ """
236
+ Assert that a warning is raised in the CoW warning mode.
237
+
238
+ Parameters
239
+ ----------
240
+ warn : bool, default True
241
+ By default, check that a warning is raised. Can be turned off by passing False.
242
+ match : str
243
+ The warning message to match against, if different from the default.
244
+ kwargs
245
+ Passed through to assert_produces_warning
246
+ """
247
+ from pandas._testing import assert_produces_warning
248
+
249
+ if not warn:
250
+ from contextlib import nullcontext
251
+
252
+ return nullcontext()
253
+
254
+ if not match:
255
+ match = "Setting a value on a view"
256
+
257
+ return assert_produces_warning(FutureWarning, match=match, **kwargs)
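A short usage sketch for two of the context managers above; `pandas._testing.contexts` is an internal namespace, so the import path is an assumption that may shift between pandas versions:

```python
import csv

import pandas as pd
from pandas._testing.contexts import ensure_clean, with_csv_dialect

# ensure_clean hands out a unique temporary path and deletes it afterwards.
with ensure_clean("example.csv") as path:
    pd.DataFrame({"a": [1, 2]}).to_csv(path, index=False)
    print(pd.read_csv(path).shape)  # (2, 1)

# with_csv_dialect registers a CSV dialect only for the duration of the block.
with with_csv_dialect("pipes", delimiter="|"):
    assert "pipes" in csv.list_dialects()
assert "pipes" not in csv.list_dialects()
```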
emu3/lib/python3.10/site-packages/pandas/api/__init__.py ADDED
@@ -0,0 +1,16 @@
1
+ """ public toolkit API """
2
+ from pandas.api import (
3
+ extensions,
4
+ indexers,
5
+ interchange,
6
+ types,
7
+ typing,
8
+ )
9
+
10
+ __all__ = [
11
+ "interchange",
12
+ "extensions",
13
+ "indexers",
14
+ "types",
15
+ "typing",
16
+ ]
emu3/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (419 Bytes).
 
emu3/lib/python3.10/site-packages/pandas/api/types/__init__.py ADDED
@@ -0,0 +1,23 @@
1
+ """
2
+ Public toolkit API.
3
+ """
4
+
5
+ from pandas._libs.lib import infer_dtype
6
+
7
+ from pandas.core.dtypes.api import * # noqa: F403
8
+ from pandas.core.dtypes.concat import union_categoricals
9
+ from pandas.core.dtypes.dtypes import (
10
+ CategoricalDtype,
11
+ DatetimeTZDtype,
12
+ IntervalDtype,
13
+ PeriodDtype,
14
+ )
15
+
16
+ __all__ = [
17
+ "infer_dtype",
18
+ "union_categoricals",
19
+ "CategoricalDtype",
20
+ "DatetimeTZDtype",
21
+ "IntervalDtype",
22
+ "PeriodDtype",
23
+ ]
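These re-exports form the stable public surface for dtype introspection; a quick sketch against the documented pandas API:

```python
import pandas as pd
from pandas.api.types import CategoricalDtype, infer_dtype, union_categoricals

print(infer_dtype(["a", "b", "c"]))  # 'string'

combined = union_categoricals(
    [pd.Categorical(["a", "b"]), pd.Categorical(["b", "c"])]
)
print(list(combined.categories))  # ['a', 'b', 'c']

print(CategoricalDtype(categories=["low", "high"], ordered=True))
```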
emu3/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (552 Bytes).
 
emu3/lib/python3.10/site-packages/pandas/api/typing/__init__.py ADDED
@@ -0,0 +1,55 @@
1
+ """
2
+ Public API classes that store intermediate results useful for type-hinting.
3
+ """
4
+
5
+ from pandas._libs import NaTType
6
+ from pandas._libs.missing import NAType
7
+
8
+ from pandas.core.groupby import (
9
+ DataFrameGroupBy,
10
+ SeriesGroupBy,
11
+ )
12
+ from pandas.core.resample import (
13
+ DatetimeIndexResamplerGroupby,
14
+ PeriodIndexResamplerGroupby,
15
+ Resampler,
16
+ TimedeltaIndexResamplerGroupby,
17
+ TimeGrouper,
18
+ )
19
+ from pandas.core.window import (
20
+ Expanding,
21
+ ExpandingGroupby,
22
+ ExponentialMovingWindow,
23
+ ExponentialMovingWindowGroupby,
24
+ Rolling,
25
+ RollingGroupby,
26
+ Window,
27
+ )
28
+
29
+ # TODO: Can't import Styler without importing jinja2
30
+ # from pandas.io.formats.style import Styler
31
+ from pandas.io.json._json import JsonReader
32
+ from pandas.io.stata import StataReader
33
+
34
+ __all__ = [
35
+ "DataFrameGroupBy",
36
+ "DatetimeIndexResamplerGroupby",
37
+ "Expanding",
38
+ "ExpandingGroupby",
39
+ "ExponentialMovingWindow",
40
+ "ExponentialMovingWindowGroupby",
41
+ "JsonReader",
42
+ "NaTType",
43
+ "NAType",
44
+ "PeriodIndexResamplerGroupby",
45
+ "Resampler",
46
+ "Rolling",
47
+ "RollingGroupby",
48
+ "SeriesGroupBy",
49
+ "StataReader",
50
+ # See TODO above
51
+ # "Styler",
52
+ "TimedeltaIndexResamplerGroupby",
53
+ "TimeGrouper",
54
+ "Window",
55
+ ]
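The classes above exist to be named in annotations rather than constructed directly; a minimal sketch of that intended use:

```python
import pandas as pd
from pandas.api.typing import DataFrameGroupBy, Rolling


def total_by_key(gb: DataFrameGroupBy) -> pd.DataFrame:
    return gb.sum()


def smooth(r: Rolling) -> pd.DataFrame:
    return r.mean()


df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
print(total_by_key(df.groupby("key")))
print(smooth(df[["val"]].rolling(2)))
```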
emu3/lib/python3.10/site-packages/pandas/compat/__init__.py ADDED
@@ -0,0 +1,199 @@
1
+ """
2
+ compat
3
+ ======
4
+
5
+ Cross-compatible functions for different versions of Python.
6
+
7
+ Other items:
8
+ * platform checker
9
+ """
10
+ from __future__ import annotations
11
+
12
+ import os
13
+ import platform
14
+ import sys
15
+ from typing import TYPE_CHECKING
16
+
17
+ from pandas.compat._constants import (
18
+ IS64,
19
+ ISMUSL,
20
+ PY310,
21
+ PY311,
22
+ PY312,
23
+ PYPY,
24
+ )
25
+ import pandas.compat.compressors
26
+ from pandas.compat.numpy import is_numpy_dev
27
+ from pandas.compat.pyarrow import (
28
+ pa_version_under10p1,
29
+ pa_version_under11p0,
30
+ pa_version_under13p0,
31
+ pa_version_under14p0,
32
+ pa_version_under14p1,
33
+ pa_version_under16p0,
34
+ pa_version_under17p0,
35
+ )
36
+
37
+ if TYPE_CHECKING:
38
+ from pandas._typing import F
39
+
40
+
41
+ def set_function_name(f: F, name: str, cls: type) -> F:
42
+ """
43
+ Bind the name/qualname attributes of the function.
44
+ """
45
+ f.__name__ = name
46
+ f.__qualname__ = f"{cls.__name__}.{name}"
47
+ f.__module__ = cls.__module__
48
+ return f
49
+
50
+
51
+ def is_platform_little_endian() -> bool:
52
+ """
53
+ Checking if the running platform is little endian.
54
+
55
+ Returns
56
+ -------
57
+ bool
58
+ True if the running platform is little endian.
59
+ """
60
+ return sys.byteorder == "little"
61
+
62
+
63
+ def is_platform_windows() -> bool:
64
+ """
65
+ Checking if the running platform is windows.
66
+
67
+ Returns
68
+ -------
69
+ bool
70
+ True if the running platform is windows.
71
+ """
72
+ return sys.platform in ["win32", "cygwin"]
73
+
74
+
75
+ def is_platform_linux() -> bool:
76
+ """
77
+ Checking if the running platform is linux.
78
+
79
+ Returns
80
+ -------
81
+ bool
82
+ True if the running platform is linux.
83
+ """
84
+ return sys.platform == "linux"
85
+
86
+
87
+ def is_platform_mac() -> bool:
88
+ """
89
+ Checking if the running platform is mac.
90
+
91
+ Returns
92
+ -------
93
+ bool
94
+ True if the running platform is mac.
95
+ """
96
+ return sys.platform == "darwin"
97
+
98
+
99
+ def is_platform_arm() -> bool:
100
+ """
101
+ Checking if the running platform uses ARM architecture.
102
+
103
+ Returns
104
+ -------
105
+ bool
106
+ True if the running platform uses ARM architecture.
107
+ """
108
+ return platform.machine() in ("arm64", "aarch64") or platform.machine().startswith(
109
+ "armv"
110
+ )
111
+
112
+
113
+ def is_platform_power() -> bool:
114
+ """
115
+ Checking if the running platform uses Power architecture.
116
+
117
+ Returns
118
+ -------
119
+ bool
120
+ True if the running platform uses Power architecture.
121
+ """
122
+ return platform.machine() in ("ppc64", "ppc64le")
123
+
124
+
125
+ def is_ci_environment() -> bool:
126
+ """
127
+ Checking if running in a continuous integration environment by checking
128
+ the PANDAS_CI environment variable.
129
+
130
+ Returns
131
+ -------
132
+ bool
133
+ True if the running in a continuous integration environment.
134
+ """
135
+ return os.environ.get("PANDAS_CI", "0") == "1"
136
+
137
+
138
+ def get_lzma_file() -> type[pandas.compat.compressors.LZMAFile]:
139
+ """
140
+ Importing the `LZMAFile` class from the `lzma` module.
141
+
142
+ Returns
143
+ -------
144
+ class
145
+ The `LZMAFile` class from the `lzma` module.
146
+
147
+ Raises
148
+ ------
149
+ RuntimeError
150
+ If the `lzma` module was not imported correctly, or didn't exist.
151
+ """
152
+ if not pandas.compat.compressors.has_lzma:
153
+ raise RuntimeError(
154
+ "lzma module not available. "
155
+ "A Python re-install with the proper dependencies, "
156
+ "might be required to solve this issue."
157
+ )
158
+ return pandas.compat.compressors.LZMAFile
159
+
160
+
161
+ def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]:
162
+ """
163
+ Importing the `BZ2File` class from the `bz2` module.
164
+
165
+ Returns
166
+ -------
167
+ class
168
+ The `BZ2File` class from the `bz2` module.
169
+
170
+ Raises
171
+ ------
172
+ RuntimeError
173
+ If the `bz2` module was not imported correctly, or didn't exist.
174
+ """
175
+ if not pandas.compat.compressors.has_bz2:
176
+ raise RuntimeError(
177
+ "bz2 module not available. "
178
+ "A Python re-install with the proper dependencies, "
179
+ "might be required to solve this issue."
180
+ )
181
+ return pandas.compat.compressors.BZ2File
182
+
183
+
184
+ __all__ = [
185
+ "is_numpy_dev",
186
+ "pa_version_under10p1",
187
+ "pa_version_under11p0",
188
+ "pa_version_under13p0",
189
+ "pa_version_under14p0",
190
+ "pa_version_under14p1",
191
+ "pa_version_under16p0",
192
+ "pa_version_under17p0",
193
+ "IS64",
194
+ "ISMUSL",
195
+ "PY310",
196
+ "PY311",
197
+ "PY312",
198
+ "PYPY",
199
+ ]
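A short sketch exercising the platform helpers defined above (all are plain functions and constants importable from `pandas.compat`):

```python
from pandas.compat import (
    IS64,
    PY311,
    is_platform_little_endian,
    is_platform_windows,
)

print("64-bit build:   ", IS64)
print("Python >= 3.11: ", PY311)
print("little-endian:  ", is_platform_little_endian())

# A typical call site: pick platform-dependent behavior in a test.
line_sep = "\r\n" if is_platform_windows() else "\n"
print(repr(line_sep))
```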
emu3/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc ADDED
Binary file (1.72 kB).
 
emu3/lib/python3.10/site-packages/pandas/compat/_constants.py ADDED
@@ -0,0 +1,30 @@
1
+ """
2
+ _constants
3
+ ======
4
+
5
+ Constants relevant for the Python implementation.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import platform
11
+ import sys
12
+ import sysconfig
13
+
14
+ IS64 = sys.maxsize > 2**32
15
+
16
+ PY310 = sys.version_info >= (3, 10)
17
+ PY311 = sys.version_info >= (3, 11)
18
+ PY312 = sys.version_info >= (3, 12)
19
+ PYPY = platform.python_implementation() == "PyPy"
20
+ ISMUSL = "musl" in (sysconfig.get_config_var("HOST_GNU_TYPE") or "")
21
+ REF_COUNT = 2 if PY311 else 3
22
+
23
+ __all__ = [
24
+ "IS64",
25
+ "ISMUSL",
26
+ "PY310",
27
+ "PY311",
28
+ "PY312",
29
+ "PYPY",
30
+ ]
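Equivalent stand-alone checks, mirroring how the constants above are derived from the interpreter itself:

```python
import platform
import sys
import sysconfig

print(sys.maxsize > 2**32)                          # IS64
print(sys.version_info >= (3, 12))                  # PY312
print(platform.python_implementation() == "PyPy")   # PYPY
print("musl" in (sysconfig.get_config_var("HOST_GNU_TYPE") or ""))  # ISMUSL
```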
emu3/lib/python3.10/site-packages/pandas/compat/_optional.py ADDED
@@ -0,0 +1,168 @@
1
+ from __future__ import annotations
2
+
3
+ import importlib
4
+ import sys
5
+ from typing import TYPE_CHECKING
6
+ import warnings
7
+
8
+ from pandas.util._exceptions import find_stack_level
9
+
10
+ from pandas.util.version import Version
11
+
12
+ if TYPE_CHECKING:
13
+ import types
14
+
15
+ # Update install.rst & setup.cfg when updating versions!
16
+
17
+ VERSIONS = {
18
+ "adbc-driver-postgresql": "0.8.0",
19
+ "adbc-driver-sqlite": "0.8.0",
20
+ "bs4": "4.11.2",
21
+ "blosc": "1.21.3",
22
+ "bottleneck": "1.3.6",
23
+ "dataframe-api-compat": "0.1.7",
24
+ "fastparquet": "2022.12.0",
25
+ "fsspec": "2022.11.0",
26
+ "html5lib": "1.1",
27
+ "hypothesis": "6.46.1",
28
+ "gcsfs": "2022.11.0",
29
+ "jinja2": "3.1.2",
30
+ "lxml.etree": "4.9.2",
31
+ "matplotlib": "3.6.3",
32
+ "numba": "0.56.4",
33
+ "numexpr": "2.8.4",
34
+ "odfpy": "1.4.1",
35
+ "openpyxl": "3.1.0",
36
+ "pandas_gbq": "0.19.0",
37
+ "psycopg2": "2.9.6", # (dt dec pq3 ext lo64)
38
+ "pymysql": "1.0.2",
39
+ "pyarrow": "10.0.1",
40
+ "pyreadstat": "1.2.0",
41
+ "pytest": "7.3.2",
42
+ "python-calamine": "0.1.7",
43
+ "pyxlsb": "1.0.10",
44
+ "s3fs": "2022.11.0",
45
+ "scipy": "1.10.0",
46
+ "sqlalchemy": "2.0.0",
47
+ "tables": "3.8.0",
48
+ "tabulate": "0.9.0",
49
+ "xarray": "2022.12.0",
50
+ "xlrd": "2.0.1",
51
+ "xlsxwriter": "3.0.5",
52
+ "zstandard": "0.19.0",
53
+ "tzdata": "2022.7",
54
+ "qtpy": "2.3.0",
55
+ "pyqt5": "5.15.9",
56
+ }
57
+
58
+ # A mapping from import name to package name (on PyPI) for packages where
59
+ # these two names are different.
60
+
61
+ INSTALL_MAPPING = {
62
+ "bs4": "beautifulsoup4",
63
+ "bottleneck": "Bottleneck",
64
+ "jinja2": "Jinja2",
65
+ "lxml.etree": "lxml",
66
+ "odf": "odfpy",
67
+ "pandas_gbq": "pandas-gbq",
68
+ "python_calamine": "python-calamine",
69
+ "sqlalchemy": "SQLAlchemy",
70
+ "tables": "pytables",
71
+ }
72
+
73
+
74
+ def get_version(module: types.ModuleType) -> str:
75
+ version = getattr(module, "__version__", None)
76
+
77
+ if version is None:
78
+ raise ImportError(f"Can't determine version for {module.__name__}")
79
+ if module.__name__ == "psycopg2":
80
+ # psycopg2 appends " (dt dec pq3 ext lo64)" to its version
81
+ version = version.split()[0]
82
+ return version
83
+
84
+
85
+ def import_optional_dependency(
86
+ name: str,
87
+ extra: str = "",
88
+ errors: str = "raise",
89
+ min_version: str | None = None,
90
+ ):
91
+ """
92
+ Import an optional dependency.
93
+
94
+ By default, if a dependency is missing an ImportError with a nice
95
+ message will be raised. If a dependency is present, but too old,
96
+ we raise.
97
+
98
+ Parameters
99
+ ----------
100
+ name : str
101
+ The module name.
102
+ extra : str
103
+ Additional text to include in the ImportError message.
104
+ errors : str {'raise', 'warn', 'ignore'}
105
+ What to do when a dependency is not found or its version is too old.
106
+
107
+ * raise : Raise an ImportError
108
+ * warn : Only applicable when a module's version is too old.
109
+ Warns that the version is too old and returns None
110
+ * ignore: If the module is not installed, return None, otherwise,
111
+ return the module, even if the version is too old.
112
+ It's expected that users validate the version locally when
113
+ using ``errors="ignore"`` (see ``io/html.py``).
114
+ min_version : str, default None
115
+ Specify a minimum version that is different from the global pandas
116
+ minimum version required.
117
+ Returns
118
+ -------
119
+ maybe_module : Optional[ModuleType]
120
+ The imported module, when found and the version is correct.
121
+ None is returned when the package is not found and ``errors``
122
+ is not ``'raise'``, or when the package's version is too old and
123
+ ``errors`` is ``'warn'`` or ``'ignore'``.
124
+ """
125
+ assert errors in {"warn", "raise", "ignore"}
126
+
127
+ package_name = INSTALL_MAPPING.get(name)
128
+ install_name = package_name if package_name is not None else name
129
+
130
+ msg = (
131
+ f"Missing optional dependency '{install_name}'. {extra} "
132
+ f"Use pip or conda to install {install_name}."
133
+ )
134
+ try:
135
+ module = importlib.import_module(name)
136
+ except ImportError:
137
+ if errors == "raise":
138
+ raise ImportError(msg)
139
+ return None
140
+
141
+ # Handle submodules: if we have a submodule, grab the parent module from sys.modules
142
+ parent = name.split(".")[0]
143
+ if parent != name:
144
+ install_name = parent
145
+ module_to_get = sys.modules[install_name]
146
+ else:
147
+ module_to_get = module
148
+ minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
149
+ if minimum_version:
150
+ version = get_version(module_to_get)
151
+ if version and Version(version) < Version(minimum_version):
152
+ msg = (
153
+ f"Pandas requires version '{minimum_version}' or newer of '{parent}' "
154
+ f"(version '{version}' currently installed)."
155
+ )
156
+ if errors == "warn":
157
+ warnings.warn(
158
+ msg,
159
+ UserWarning,
160
+ stacklevel=find_stack_level(),
161
+ )
162
+ return None
163
+ elif errors == "raise":
164
+ raise ImportError(msg)
165
+ else:
166
+ return None
167
+
168
+ return module
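A usage sketch for `import_optional_dependency`; note that `pandas.compat._optional` is private, so calling it directly like this is illustrative only, and `surely_not_installed` is a deliberately bogus module name:

```python
from pandas.compat._optional import import_optional_dependency

# A present dependency comes back as the imported module.
np_mod = import_optional_dependency("numpy")
print(np_mod.__name__)  # numpy

# A missing one returns None with errors="ignore" instead of raising.
missing = import_optional_dependency("surely_not_installed", errors="ignore")
print(missing)  # None
```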
emu3/lib/python3.10/site-packages/pandas/compat/compressors.py ADDED
@@ -0,0 +1,77 @@
1
+ """
2
+ Patched ``BZ2File`` and ``LZMAFile`` to handle pickle protocol 5.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ from pickle import PickleBuffer
8
+
9
+ from pandas.compat._constants import PY310
10
+
11
+ try:
12
+ import bz2
13
+
14
+ has_bz2 = True
15
+ except ImportError:
16
+ has_bz2 = False
17
+
18
+ try:
19
+ import lzma
20
+
21
+ has_lzma = True
22
+ except ImportError:
23
+ has_lzma = False
24
+
25
+
26
+ def flatten_buffer(
27
+ b: bytes | bytearray | memoryview | PickleBuffer,
28
+ ) -> bytes | bytearray | memoryview:
29
+ """
30
+ Return some 1-D `uint8` typed buffer.
31
+
32
+ Coerces anything that does not match that description to one that does
33
+ without copying if possible (otherwise will copy).
34
+ """
35
+
36
+ if isinstance(b, (bytes, bytearray)):
37
+ return b
38
+
39
+ if not isinstance(b, PickleBuffer):
40
+ b = PickleBuffer(b)
41
+
42
+ try:
43
+ # coerce to 1-D `uint8` C-contiguous `memoryview` zero-copy
44
+ return b.raw()
45
+ except BufferError:
46
+ # perform in-memory copy if buffer is not contiguous
47
+ return memoryview(b).tobytes("A")
48
+
49
+
50
+ if has_bz2:
51
+
52
+ class BZ2File(bz2.BZ2File):
53
+ if not PY310:
54
+
55
+ def write(self, b) -> int:
56
+ # Workaround issue where `bz2.BZ2File` expects `len`
57
+ # to return the number of bytes in `b` by converting
58
+ # `b` into something that meets that constraint with
59
+ # minimal copying.
60
+ #
61
+ # Note: This is fixed in Python 3.10.
62
+ return super().write(flatten_buffer(b))
63
+
64
+
65
+ if has_lzma:
66
+
67
+ class LZMAFile(lzma.LZMAFile):
68
+ if not PY310:
69
+
70
+ def write(self, b) -> int:
71
+ # Workaround issue where `lzma.LZMAFile` expects `len`
72
+ # to return the number of bytes in `b` by converting
73
+ # `b` into something that meets that constraint with
74
+ # minimal copying.
75
+ #
76
+ # Note: This is fixed in Python 3.10.
77
+ return super().write(flatten_buffer(b))
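A behavior sketch for `flatten_buffer`: already-flat byte types pass through untouched, while a non-contiguous buffer takes the copying fallback path:

```python
from pickle import PickleBuffer

import numpy as np
from pandas.compat.compressors import flatten_buffer

print(flatten_buffer(b"abc"))  # bytes pass through unchanged

arr = np.arange(8, dtype="uint8")[::2]  # non-contiguous strided view
flat = flatten_buffer(PickleBuffer(arr))  # raw() fails -> tobytes("A") copy
print(bytes(flat))  # b'\x00\x02\x04\x06'
```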
emu3/lib/python3.10/site-packages/pandas/compat/pickle_compat.py ADDED
@@ -0,0 +1,262 @@
1
+ """
2
+ Support pre-0.12 series pickle compatibility.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import contextlib
7
+ import copy
8
+ import io
9
+ import pickle as pkl
10
+ from typing import TYPE_CHECKING
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs.arrays import NDArrayBacked
15
+ from pandas._libs.tslibs import BaseOffset
16
+
17
+ from pandas import Index
18
+ from pandas.core.arrays import (
19
+ DatetimeArray,
20
+ PeriodArray,
21
+ TimedeltaArray,
22
+ )
23
+ from pandas.core.internals import BlockManager
24
+
25
+ if TYPE_CHECKING:
26
+ from collections.abc import Generator
27
+
28
+
29
+ def load_reduce(self) -> None:
30
+ stack = self.stack
31
+ args = stack.pop()
32
+ func = stack[-1]
33
+
34
+ try:
35
+ stack[-1] = func(*args)
36
+ return
37
+ except TypeError as err:
38
+ # If we have a deprecated function,
39
+ # try to replace and try again.
40
+
41
+ msg = "_reconstruct: First argument must be a sub-type of ndarray"
42
+
43
+ if msg in str(err):
44
+ try:
45
+ cls = args[0]
46
+ stack[-1] = object.__new__(cls)
47
+ return
48
+ except TypeError:
49
+ pass
50
+ elif args and isinstance(args[0], type) and issubclass(args[0], BaseOffset):
51
+ # TypeError: object.__new__(Day) is not safe, use Day.__new__()
52
+ cls = args[0]
53
+ stack[-1] = cls.__new__(*args)
54
+ return
55
+ elif args and issubclass(args[0], PeriodArray):
56
+ cls = args[0]
57
+ stack[-1] = NDArrayBacked.__new__(*args)
58
+ return
59
+
60
+ raise
61
+
62
+
63
+ # If classes are moved, provide compat here.
64
+ _class_locations_map = {
65
+ ("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
66
+ # 15477
67
+ ("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
68
+ # Re-routing unpickle block logic to go through _unpickle_block instead
69
+ # for pandas <= 1.3.5
70
+ ("pandas.core.internals.blocks", "new_block"): (
71
+ "pandas._libs.internals",
72
+ "_unpickle_block",
73
+ ),
74
+ ("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
75
+ ("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
76
+ # 10890
77
+ ("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
78
+ ("pandas.sparse.series", "SparseTimeSeries"): (
79
+ "pandas.core.sparse.series",
80
+ "SparseSeries",
81
+ ),
82
+ # 12588, extensions moving
83
+ ("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
84
+ ("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
85
+ # 18543 moving period
86
+ ("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
87
+ ("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
88
+ # 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
89
+ ("pandas.tslib", "__nat_unpickle"): (
90
+ "pandas._libs.tslibs.nattype",
91
+ "__nat_unpickle",
92
+ ),
93
+ ("pandas._libs.tslib", "__nat_unpickle"): (
94
+ "pandas._libs.tslibs.nattype",
95
+ "__nat_unpickle",
96
+ ),
97
+ # 15998 top-level dirs moving
98
+ ("pandas.sparse.array", "SparseArray"): (
99
+ "pandas.core.arrays.sparse",
100
+ "SparseArray",
101
+ ),
102
+ ("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
103
+ ("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
104
+ ("pandas.indexes.numeric", "Int64Index"): (
105
+ "pandas.core.indexes.base",
106
+ "Index", # updated in 50775
107
+ ),
108
+ ("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
109
+ ("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
110
+ ("pandas.tseries.index", "_new_DatetimeIndex"): (
111
+ "pandas.core.indexes.datetimes",
112
+ "_new_DatetimeIndex",
113
+ ),
114
+ ("pandas.tseries.index", "DatetimeIndex"): (
115
+ "pandas.core.indexes.datetimes",
116
+ "DatetimeIndex",
117
+ ),
118
+ ("pandas.tseries.period", "PeriodIndex"): (
119
+ "pandas.core.indexes.period",
120
+ "PeriodIndex",
121
+ ),
122
+ # 19269, arrays moving
123
+ ("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
124
+ # 19939, add timedeltaindex, float64index compat from 15998 move
125
+ ("pandas.tseries.tdi", "TimedeltaIndex"): (
126
+ "pandas.core.indexes.timedeltas",
127
+ "TimedeltaIndex",
128
+ ),
129
+ ("pandas.indexes.numeric", "Float64Index"): (
130
+ "pandas.core.indexes.base",
131
+ "Index", # updated in 50775
132
+ ),
133
+ # 50775, remove Int64Index, UInt64Index & Float64Index from codebase
134
+ ("pandas.core.indexes.numeric", "Int64Index"): (
135
+ "pandas.core.indexes.base",
136
+ "Index",
137
+ ),
138
+ ("pandas.core.indexes.numeric", "UInt64Index"): (
139
+ "pandas.core.indexes.base",
140
+ "Index",
141
+ ),
142
+ ("pandas.core.indexes.numeric", "Float64Index"): (
143
+ "pandas.core.indexes.base",
144
+ "Index",
145
+ ),
146
+ ("pandas.core.arrays.sparse.dtype", "SparseDtype"): (
147
+ "pandas.core.dtypes.dtypes",
148
+ "SparseDtype",
149
+ ),
150
+ }
151
+
152
+
153
+ # Our Unpickler subclass overrides methods and some dispatcher
154
+ # functions for compat; it builds on a non-public class of the pickle module.
155
+
156
+
157
+ class Unpickler(pkl._Unpickler):
158
+ def find_class(self, module, name):
159
+ # override superclass
160
+ key = (module, name)
161
+ module, name = _class_locations_map.get(key, key)
162
+ return super().find_class(module, name)
163
+
164
+
165
+ Unpickler.dispatch = copy.copy(Unpickler.dispatch)
166
+ Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
167
+
168
+
169
+ def load_newobj(self) -> None:
170
+ args = self.stack.pop()
171
+ cls = self.stack[-1]
172
+
173
+ # compat
174
+ if issubclass(cls, Index):
175
+ obj = object.__new__(cls)
176
+ elif issubclass(cls, DatetimeArray) and not args:
177
+ arr = np.array([], dtype="M8[ns]")
178
+ obj = cls.__new__(cls, arr, arr.dtype)
179
+ elif issubclass(cls, TimedeltaArray) and not args:
180
+ arr = np.array([], dtype="m8[ns]")
181
+ obj = cls.__new__(cls, arr, arr.dtype)
182
+ elif cls is BlockManager and not args:
183
+ obj = cls.__new__(cls, (), [], False)
184
+ else:
185
+ obj = cls.__new__(cls, *args)
186
+
187
+ self.stack[-1] = obj
188
+
189
+
190
+ Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
191
+
192
+
193
+ def load_newobj_ex(self) -> None:
194
+ kwargs = self.stack.pop()
195
+ args = self.stack.pop()
196
+ cls = self.stack.pop()
197
+
198
+ # compat
199
+ if issubclass(cls, Index):
200
+ obj = object.__new__(cls)
201
+ else:
202
+ obj = cls.__new__(cls, *args, **kwargs)
203
+ self.append(obj)
204
+
205
+
206
+ try:
207
+ Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
208
+ except (AttributeError, KeyError):
209
+ pass
210
+
211
+
212
+ def load(fh, encoding: str | None = None, is_verbose: bool = False):
213
+ """
214
+ Load a pickle, with a provided encoding,
215
+
216
+ Parameters
217
+ ----------
218
+ fh : a filelike object
219
+ encoding : an optional encoding
220
+ is_verbose : show exception output
221
+ """
222
+ try:
223
+ fh.seek(0)
224
+ if encoding is not None:
225
+ up = Unpickler(fh, encoding=encoding)
226
+ else:
227
+ up = Unpickler(fh)
228
+ # "Unpickler" has no attribute "is_verbose" [attr-defined]
229
+ up.is_verbose = is_verbose # type: ignore[attr-defined]
230
+
231
+ return up.load()
232
+ except (ValueError, TypeError):
233
+ raise
234
+
235
+
236
+ def loads(
237
+ bytes_object: bytes,
238
+ *,
239
+ fix_imports: bool = True,
240
+ encoding: str = "ASCII",
241
+ errors: str = "strict",
242
+ ):
243
+ """
244
+ Analogous to pickle._loads.
245
+ """
246
+ fd = io.BytesIO(bytes_object)
247
+ return Unpickler(
248
+ fd, fix_imports=fix_imports, encoding=encoding, errors=errors
249
+ ).load()
250
+
251
+
252
+ @contextlib.contextmanager
253
+ def patch_pickle() -> Generator[None, None, None]:
254
+ """
255
+ Temporarily patch pickle to use our unpickler.
256
+ """
257
+ orig_loads = pkl.loads
258
+ try:
259
+ setattr(pkl, "loads", loads)
260
+ yield
261
+ finally:
262
+ setattr(pkl, "loads", orig_loads)
emu3/lib/python3.10/site-packages/pandas/compat/pyarrow.py ADDED
@@ -0,0 +1,29 @@
1
+ """ support pyarrow compatibility across versions """
2
+
3
+ from __future__ import annotations
4
+
5
+ from pandas.util.version import Version
6
+
7
+ try:
8
+ import pyarrow as pa
9
+
10
+ _palv = Version(Version(pa.__version__).base_version)
11
+ pa_version_under10p1 = _palv < Version("10.0.1")
12
+ pa_version_under11p0 = _palv < Version("11.0.0")
13
+ pa_version_under12p0 = _palv < Version("12.0.0")
14
+ pa_version_under13p0 = _palv < Version("13.0.0")
15
+ pa_version_under14p0 = _palv < Version("14.0.0")
16
+ pa_version_under14p1 = _palv < Version("14.0.1")
17
+ pa_version_under15p0 = _palv < Version("15.0.0")
18
+ pa_version_under16p0 = _palv < Version("16.0.0")
19
+ pa_version_under17p0 = _palv < Version("17.0.0")
20
+ except ImportError:
21
+ pa_version_under10p1 = True
22
+ pa_version_under11p0 = True
23
+ pa_version_under12p0 = True
24
+ pa_version_under13p0 = True
25
+ pa_version_under14p0 = True
26
+ pa_version_under14p1 = True
27
+ pa_version_under15p0 = True
28
+ pa_version_under16p0 = True
29
+ pa_version_under17p0 = True
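The flags above are meant for version gating at call sites; a typical, illustrative pattern:

```python
from pandas.compat.pyarrow import pa_version_under11p0

if pa_version_under11p0:
    # Either pyarrow is missing or it is older than 11.0.0.
    print("pyarrow < 11: falling back to the NumPy path")
else:
    import pyarrow as pa

    print(f"pyarrow {pa.__version__}: using the Arrow-native path")
```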
emu3/lib/python3.10/site-packages/pandas/tests/computation/__init__.py ADDED
File without changes
emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (174 Bytes).
 
emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc ADDED
Binary file (1.08 kB).
 
emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_eval.cpython-310.pyc ADDED
Binary file (58.7 kB).
 
emu3/lib/python3.10/site-packages/pandas/tests/computation/test_compat.py ADDED
@@ -0,0 +1,32 @@
1
+ import pytest
2
+
3
+ from pandas.compat._optional import VERSIONS
4
+
5
+ import pandas as pd
6
+ from pandas.core.computation import expr
7
+ from pandas.core.computation.engines import ENGINES
8
+ from pandas.util.version import Version
9
+
10
+
11
+ def test_compat():
12
+ # test we have compat with our version of numexpr
13
+
14
+ from pandas.core.computation.check import NUMEXPR_INSTALLED
15
+
16
+ ne = pytest.importorskip("numexpr")
17
+
18
+ ver = ne.__version__
19
+ if Version(ver) < Version(VERSIONS["numexpr"]):
20
+ assert not NUMEXPR_INSTALLED
21
+ else:
22
+ assert NUMEXPR_INSTALLED
23
+
24
+
25
+ @pytest.mark.parametrize("engine", ENGINES)
26
+ @pytest.mark.parametrize("parser", expr.PARSERS)
27
+ def test_invalid_numexpr_version(engine, parser):
28
+ if engine == "numexpr":
29
+ pytest.importorskip("numexpr")
30
+ a, b = 1, 2 # noqa: F841
31
+ res = pd.eval("a + b", engine=engine, parser=parser)
32
+ assert res == 3
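A stand-alone equivalent of what the parametrized test above exercises, looping over engines by hand (the `numexpr` engine raises ImportError when the package is absent):

```python
import pandas as pd

a, b = 1, 2
for engine in ("python", "numexpr"):
    try:
        print(engine, pd.eval("a + b", engine=engine, parser="pandas"))
    except ImportError:
        print(engine, "not available")
```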
emu3/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py ADDED
@@ -0,0 +1,2001 @@
+ from __future__ import annotations
+
+ from functools import reduce
+ from itertools import product
+ import operator
+
+ import numpy as np
+ import pytest
+
+ from pandas.compat import PY312
+ from pandas.errors import (
+     NumExprClobberingError,
+     PerformanceWarning,
+     UndefinedVariableError,
+ )
+ import pandas.util._test_decorators as td
+
+ from pandas.core.dtypes.common import (
+     is_bool,
+     is_float,
+     is_list_like,
+     is_scalar,
+ )
+
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+     Index,
+     Series,
+     date_range,
+     period_range,
+     timedelta_range,
+ )
+ import pandas._testing as tm
+ from pandas.core.computation import (
+     expr,
+     pytables,
+ )
+ from pandas.core.computation.engines import ENGINES
+ from pandas.core.computation.expr import (
+     BaseExprVisitor,
+     PandasExprVisitor,
+     PythonExprVisitor,
+ )
+ from pandas.core.computation.expressions import (
+     NUMEXPR_INSTALLED,
+     USE_NUMEXPR,
+ )
+ from pandas.core.computation.ops import (
+     ARITH_OPS_SYMS,
+     SPECIAL_CASE_ARITH_OPS_SYMS,
+     _binary_math_ops,
+     _binary_ops_dict,
+     _unary_math_ops,
+ )
+ from pandas.core.computation.scope import DEFAULT_GLOBALS
+
+
+ @pytest.fixture(
+     params=(
+         pytest.param(
+             engine,
+             marks=[
+                 pytest.mark.skipif(
+                     engine == "numexpr" and not USE_NUMEXPR,
+                     reason=f"numexpr enabled->{USE_NUMEXPR}, "
+                     f"installed->{NUMEXPR_INSTALLED}",
+                 ),
+                 td.skip_if_no("numexpr"),
+             ],
+         )
+         for engine in ENGINES
+     )
+ )
+ def engine(request):
+     return request.param
+
+
+ @pytest.fixture(params=expr.PARSERS)
+ def parser(request):
+     return request.param
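The two fixtures above hand every test in this module the full engine x parser matrix: `ENGINES` and `expr.PARSERS` are name-to-class mappings, and pytest parametrization takes their cross product (with numexpr-dependent cases skipped when it is unavailable). Functionally it is as if each test were written as:

import pytest

@pytest.mark.parametrize("engine", ["numexpr", "python"])
@pytest.mark.parametrize("parser", ["pandas", "python"])
def test_something(engine, parser):
    ...  # collected once per (engine, parser) combination, 4 in total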
+
+
+ def _eval_single_bin(lhs, cmp1, rhs, engine):
+     c = _binary_ops_dict[cmp1]
+     if ENGINES[engine].has_neg_frac:
+         try:
+             return c(lhs, rhs)
+         except ValueError as e:
+             if str(e).startswith(
+                 "negative number cannot be raised to a fractional power"
+             ):
+                 return np.nan
+             raise
+     return c(lhs, rhs)
+
+
+ # TODO: using range(5) here is a kludge
+ @pytest.fixture(
+     params=list(range(5)),
+     ids=["DataFrame", "Series", "SeriesNaN", "DataFrameNaN", "float"],
+ )
+ def lhs(request):
+     nan_df1 = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
+     nan_df1[nan_df1 > 0.5] = np.nan
+
+     opts = (
+         DataFrame(np.random.default_rng(2).standard_normal((10, 5))),
+         Series(np.random.default_rng(2).standard_normal(5)),
+         Series([1, 2, np.nan, np.nan, 5]),
+         nan_df1,
+         np.random.default_rng(2).standard_normal(),
+     )
+     return opts[request.param]
+
+
+ rhs = lhs
+ midhs = lhs
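`rhs = lhs` and `midhs = lhs` rely on a pytest detail: a fixture is registered under the module attribute that holds it, so rebinding the function object creates additional, independently parametrized fixtures. A test requesting `lhs`, `midhs`, and `rhs` therefore gets a 5 x 5 x 5 grid rather than one shared value. The same trick in isolation:

import pytest

@pytest.fixture(params=[1, 2])
def left(request):
    return request.param

right = left  # registers a second fixture named "right"

def test_pairs(left, right):
    # collected 4 times: (1, 1), (1, 2), (2, 1), (2, 2)
    assert {left, right} <= {1, 2}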
+
+
+ @pytest.fixture
+ def idx_func_dict():
+     return {
+         "i": lambda n: Index(np.arange(n), dtype=np.int64),
+         "f": lambda n: Index(np.arange(n), dtype=np.float64),
+         "s": lambda n: Index([f"{i}_{chr(i)}" for i in range(97, 97 + n)]),
+         "dt": lambda n: date_range("2020-01-01", periods=n),
+         "td": lambda n: timedelta_range("1 day", periods=n),
+         "p": lambda n: period_range("2020-01-01", periods=n, freq="D"),
+     }
131
+
132
+
133
+ class TestEval:
134
+ @pytest.mark.parametrize(
135
+ "cmp1",
136
+ ["!=", "==", "<=", ">=", "<", ">"],
137
+ ids=["ne", "eq", "le", "ge", "lt", "gt"],
138
+ )
139
+ @pytest.mark.parametrize("cmp2", [">", "<"], ids=["gt", "lt"])
140
+ @pytest.mark.parametrize("binop", expr.BOOL_OPS_SYMS)
141
+ def test_complex_cmp_ops(self, cmp1, cmp2, binop, lhs, rhs, engine, parser):
142
+ if parser == "python" and binop in ["and", "or"]:
143
+ msg = "'BoolOp' nodes are not implemented"
144
+ with pytest.raises(NotImplementedError, match=msg):
145
+ ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)"
146
+ pd.eval(ex, engine=engine, parser=parser)
147
+ return
148
+
149
+ lhs_new = _eval_single_bin(lhs, cmp1, rhs, engine)
150
+ rhs_new = _eval_single_bin(lhs, cmp2, rhs, engine)
151
+ expected = _eval_single_bin(lhs_new, binop, rhs_new, engine)
152
+
153
+ ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)"
154
+ result = pd.eval(ex, engine=engine, parser=parser)
155
+ tm.assert_equal(result, expected)
156
+
157
+ @pytest.mark.parametrize("cmp_op", expr.CMP_OPS_SYMS)
158
+ def test_simple_cmp_ops(self, cmp_op, lhs, rhs, engine, parser):
159
+ lhs = lhs < 0
160
+ rhs = rhs < 0
161
+
162
+ if parser == "python" and cmp_op in ["in", "not in"]:
163
+ msg = "'(In|NotIn)' nodes are not implemented"
164
+
165
+ with pytest.raises(NotImplementedError, match=msg):
166
+ ex = f"lhs {cmp_op} rhs"
167
+ pd.eval(ex, engine=engine, parser=parser)
168
+ return
169
+
170
+ ex = f"lhs {cmp_op} rhs"
171
+ msg = "|".join(
172
+ [
173
+ r"only list-like( or dict-like)? objects are allowed to be "
174
+ r"passed to (DataFrame\.)?isin\(\), you passed a "
175
+ r"(`|')bool(`|')",
176
+ "argument of type 'bool' is not iterable",
177
+ ]
178
+ )
179
+ if cmp_op in ("in", "not in") and not is_list_like(rhs):
180
+ with pytest.raises(TypeError, match=msg):
181
+ pd.eval(
182
+ ex,
183
+ engine=engine,
184
+ parser=parser,
185
+ local_dict={"lhs": lhs, "rhs": rhs},
186
+ )
187
+ else:
188
+ expected = _eval_single_bin(lhs, cmp_op, rhs, engine)
189
+ result = pd.eval(ex, engine=engine, parser=parser)
190
+ tm.assert_equal(result, expected)
191
+
192
+ @pytest.mark.parametrize("op", expr.CMP_OPS_SYMS)
193
+ def test_compound_invert_op(self, op, lhs, rhs, request, engine, parser):
194
+ if parser == "python" and op in ["in", "not in"]:
195
+ msg = "'(In|NotIn)' nodes are not implemented"
196
+ with pytest.raises(NotImplementedError, match=msg):
197
+ ex = f"~(lhs {op} rhs)"
198
+ pd.eval(ex, engine=engine, parser=parser)
199
+ return
200
+
201
+ if (
202
+ is_float(lhs)
203
+ and not is_float(rhs)
204
+ and op in ["in", "not in"]
205
+ and engine == "python"
206
+ and parser == "pandas"
207
+ ):
208
+ mark = pytest.mark.xfail(
209
+ reason="Looks like expected is negative, unclear whether "
210
+ "expected is incorrect or result is incorrect"
211
+ )
212
+ request.applymarker(mark)
213
+ skip_these = ["in", "not in"]
214
+ ex = f"~(lhs {op} rhs)"
215
+
216
+ msg = "|".join(
217
+ [
218
+ r"only list-like( or dict-like)? objects are allowed to be "
219
+ r"passed to (DataFrame\.)?isin\(\), you passed a "
220
+ r"(`|')float(`|')",
221
+ "argument of type 'float' is not iterable",
222
+ ]
223
+ )
224
+ if is_scalar(rhs) and op in skip_these:
225
+ with pytest.raises(TypeError, match=msg):
226
+ pd.eval(
227
+ ex,
228
+ engine=engine,
229
+ parser=parser,
230
+ local_dict={"lhs": lhs, "rhs": rhs},
231
+ )
232
+ else:
233
+ # compound
234
+ if is_scalar(lhs) and is_scalar(rhs):
235
+ lhs, rhs = (np.array([x]) for x in (lhs, rhs))
236
+ expected = _eval_single_bin(lhs, op, rhs, engine)
237
+ if is_scalar(expected):
238
+ expected = not expected
239
+ else:
240
+ expected = ~expected
241
+ result = pd.eval(ex, engine=engine, parser=parser)
242
+ tm.assert_almost_equal(expected, result)
243
+
244
+ @pytest.mark.parametrize("cmp1", ["<", ">"])
245
+ @pytest.mark.parametrize("cmp2", ["<", ">"])
246
+ def test_chained_cmp_op(self, cmp1, cmp2, lhs, midhs, rhs, engine, parser):
247
+ mid = midhs
248
+ if parser == "python":
249
+ ex1 = f"lhs {cmp1} mid {cmp2} rhs"
250
+ msg = "'BoolOp' nodes are not implemented"
251
+ with pytest.raises(NotImplementedError, match=msg):
252
+ pd.eval(ex1, engine=engine, parser=parser)
253
+ return
254
+
255
+ lhs_new = _eval_single_bin(lhs, cmp1, mid, engine)
256
+ rhs_new = _eval_single_bin(mid, cmp2, rhs, engine)
257
+
258
+ if lhs_new is not None and rhs_new is not None:
259
+ ex1 = f"lhs {cmp1} mid {cmp2} rhs"
260
+ ex2 = f"lhs {cmp1} mid and mid {cmp2} rhs"
261
+ ex3 = f"(lhs {cmp1} mid) & (mid {cmp2} rhs)"
262
+ expected = _eval_single_bin(lhs_new, "&", rhs_new, engine)
263
+
264
+ for ex in (ex1, ex2, ex3):
265
+ result = pd.eval(ex, engine=engine, parser=parser)
266
+
267
+ tm.assert_almost_equal(result, expected)
268
+
269
+ @pytest.mark.parametrize(
270
+ "arith1", sorted(set(ARITH_OPS_SYMS).difference(SPECIAL_CASE_ARITH_OPS_SYMS))
271
+ )
272
+ def test_binary_arith_ops(self, arith1, lhs, rhs, engine, parser):
273
+ ex = f"lhs {arith1} rhs"
274
+ result = pd.eval(ex, engine=engine, parser=parser)
275
+ expected = _eval_single_bin(lhs, arith1, rhs, engine)
276
+
277
+ tm.assert_almost_equal(result, expected)
278
+ ex = f"lhs {arith1} rhs {arith1} rhs"
279
+ result = pd.eval(ex, engine=engine, parser=parser)
280
+ nlhs = _eval_single_bin(lhs, arith1, rhs, engine)
281
+ try:
282
+ nlhs, ghs = nlhs.align(rhs)
283
+ except (ValueError, TypeError, AttributeError):
284
+ # ValueError: series frame or frame series align
285
+ # TypeError, AttributeError: series or frame with scalar align
286
+ return
287
+ else:
288
+ if engine == "numexpr":
289
+ import numexpr as ne
290
+
291
+ # direct numpy comparison
292
+ expected = ne.evaluate(f"nlhs {arith1} ghs")
293
+ # Update assert statement due to unreliable numerical
294
+ # precision component (GH37328)
295
+ # TODO: update testing code so that assert_almost_equal statement
296
+ # can be replaced again by the assert_numpy_array_equal statement
297
+ tm.assert_almost_equal(result.values, expected)
298
+ else:
299
+ expected = eval(f"nlhs {arith1} ghs")
300
+ tm.assert_almost_equal(result, expected)
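The numexpr branch above checks pandas against numexpr used directly: `ne.evaluate` compiles the expression string and resolves `nlhs`/`ghs` from the calling frame, working on the underlying ndarrays with no index alignment. That direct path in isolation (assumes numexpr is installed):

import numexpr as ne
import numpy as np

nlhs = np.array([1.0, 2.0, 3.0])
ghs = np.array([4.0, 5.0, 6.0])

# names in the string are looked up in the caller's scope
expected = ne.evaluate("nlhs + ghs")
np.testing.assert_allclose(expected, nlhs + ghs)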
301
+
302
+ # modulus, pow, and floor division require special casing
303
+
304
+ def test_modulus(self, lhs, rhs, engine, parser):
305
+ ex = r"lhs % rhs"
306
+ result = pd.eval(ex, engine=engine, parser=parser)
307
+ expected = lhs % rhs
308
+ tm.assert_almost_equal(result, expected)
309
+
310
+ if engine == "numexpr":
311
+ import numexpr as ne
312
+
313
+ expected = ne.evaluate(r"expected % rhs")
314
+ if isinstance(result, (DataFrame, Series)):
315
+ tm.assert_almost_equal(result.values, expected)
316
+ else:
317
+ tm.assert_almost_equal(result, expected.item())
318
+ else:
319
+ expected = _eval_single_bin(expected, "%", rhs, engine)
320
+ tm.assert_almost_equal(result, expected)
321
+
322
+ def test_floor_division(self, lhs, rhs, engine, parser):
323
+ ex = "lhs // rhs"
324
+
325
+ if engine == "python":
326
+ res = pd.eval(ex, engine=engine, parser=parser)
327
+ expected = lhs // rhs
328
+ tm.assert_equal(res, expected)
329
+ else:
330
+ msg = (
331
+ r"unsupported operand type\(s\) for //: 'VariableNode' and "
332
+ "'VariableNode'"
333
+ )
334
+ with pytest.raises(TypeError, match=msg):
335
+ pd.eval(
336
+ ex,
337
+ local_dict={"lhs": lhs, "rhs": rhs},
338
+ engine=engine,
339
+ parser=parser,
340
+ )
341
+
342
+ @td.skip_if_windows
343
+ def test_pow(self, lhs, rhs, engine, parser):
344
+ # odd failure on win32 platform, so skip
345
+ ex = "lhs ** rhs"
346
+ expected = _eval_single_bin(lhs, "**", rhs, engine)
347
+ result = pd.eval(ex, engine=engine, parser=parser)
348
+
349
+ if (
350
+ is_scalar(lhs)
351
+ and is_scalar(rhs)
352
+ and isinstance(expected, (complex, np.complexfloating))
353
+ and np.isnan(result)
354
+ ):
355
+ msg = "(DataFrame.columns|numpy array) are different"
356
+ with pytest.raises(AssertionError, match=msg):
357
+ tm.assert_numpy_array_equal(result, expected)
358
+ else:
359
+ tm.assert_almost_equal(result, expected)
360
+
361
+ ex = "(lhs ** rhs) ** rhs"
362
+ result = pd.eval(ex, engine=engine, parser=parser)
363
+
364
+ middle = _eval_single_bin(lhs, "**", rhs, engine)
365
+ expected = _eval_single_bin(middle, "**", rhs, engine)
366
+ tm.assert_almost_equal(result, expected)
367
+
368
+ def test_check_single_invert_op(self, lhs, engine, parser):
369
+ # simple
370
+ try:
371
+ elb = lhs.astype(bool)
372
+ except AttributeError:
373
+ elb = np.array([bool(lhs)])
374
+ expected = ~elb
375
+ result = pd.eval("~elb", engine=engine, parser=parser)
376
+ tm.assert_almost_equal(expected, result)
377
+
378
+ def test_frame_invert(self, engine, parser):
379
+ expr = "~lhs"
380
+
381
+ # ~ ##
382
+ # frame
383
+ # float always raises
384
+ lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)))
385
+ if engine == "numexpr":
386
+ msg = "couldn't find matching opcode for 'invert_dd'"
387
+ with pytest.raises(NotImplementedError, match=msg):
388
+ pd.eval(expr, engine=engine, parser=parser)
389
+ else:
390
+ msg = "ufunc 'invert' not supported for the input types"
391
+ with pytest.raises(TypeError, match=msg):
392
+ pd.eval(expr, engine=engine, parser=parser)
393
+
394
+ # int raises on numexpr
395
+ lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2)))
396
+ if engine == "numexpr":
397
+ msg = "couldn't find matching opcode for 'invert"
398
+ with pytest.raises(NotImplementedError, match=msg):
399
+ pd.eval(expr, engine=engine, parser=parser)
400
+ else:
401
+ expect = ~lhs
402
+ result = pd.eval(expr, engine=engine, parser=parser)
403
+ tm.assert_frame_equal(expect, result)
404
+
405
+ # bool always works
406
+ lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5)
407
+ expect = ~lhs
408
+ result = pd.eval(expr, engine=engine, parser=parser)
409
+ tm.assert_frame_equal(expect, result)
410
+
411
+ # object raises
412
+ lhs = DataFrame(
413
+ {"b": ["a", 1, 2.0], "c": np.random.default_rng(2).standard_normal(3) > 0.5}
414
+ )
415
+ if engine == "numexpr":
416
+ with pytest.raises(ValueError, match="unknown type object"):
417
+ pd.eval(expr, engine=engine, parser=parser)
418
+ else:
419
+ msg = "bad operand type for unary ~: 'str'"
420
+ with pytest.raises(TypeError, match=msg):
421
+ pd.eval(expr, engine=engine, parser=parser)
422
+
423
+ def test_series_invert(self, engine, parser):
424
+ # ~ ####
425
+ expr = "~lhs"
426
+
427
+ # series
428
+ # float raises
429
+ lhs = Series(np.random.default_rng(2).standard_normal(5))
430
+ if engine == "numexpr":
431
+ msg = "couldn't find matching opcode for 'invert_dd'"
432
+ with pytest.raises(NotImplementedError, match=msg):
433
+ result = pd.eval(expr, engine=engine, parser=parser)
434
+ else:
435
+ msg = "ufunc 'invert' not supported for the input types"
436
+ with pytest.raises(TypeError, match=msg):
437
+ pd.eval(expr, engine=engine, parser=parser)
438
+
439
+ # int raises on numexpr
440
+ lhs = Series(np.random.default_rng(2).integers(5, size=5))
441
+ if engine == "numexpr":
442
+ msg = "couldn't find matching opcode for 'invert"
443
+ with pytest.raises(NotImplementedError, match=msg):
444
+ pd.eval(expr, engine=engine, parser=parser)
445
+ else:
446
+ expect = ~lhs
447
+ result = pd.eval(expr, engine=engine, parser=parser)
448
+ tm.assert_series_equal(expect, result)
449
+
450
+ # bool
451
+ lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5)
452
+ expect = ~lhs
453
+ result = pd.eval(expr, engine=engine, parser=parser)
454
+ tm.assert_series_equal(expect, result)
455
+
456
+ # float
457
+ # int
458
+ # bool
459
+
460
+ # object
461
+ lhs = Series(["a", 1, 2.0])
462
+ if engine == "numexpr":
463
+ with pytest.raises(ValueError, match="unknown type object"):
464
+ pd.eval(expr, engine=engine, parser=parser)
465
+ else:
466
+ msg = "bad operand type for unary ~: 'str'"
467
+ with pytest.raises(TypeError, match=msg):
468
+ pd.eval(expr, engine=engine, parser=parser)
469
+
470
+ def test_frame_negate(self, engine, parser):
471
+ expr = "-lhs"
472
+
473
+ # float
474
+ lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)))
475
+ expect = -lhs
476
+ result = pd.eval(expr, engine=engine, parser=parser)
477
+ tm.assert_frame_equal(expect, result)
478
+
479
+ # int
480
+ lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2)))
481
+ expect = -lhs
482
+ result = pd.eval(expr, engine=engine, parser=parser)
483
+ tm.assert_frame_equal(expect, result)
484
+
485
+ # bool doesn't work with numexpr but works elsewhere
486
+ lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5)
487
+ if engine == "numexpr":
488
+ msg = "couldn't find matching opcode for 'neg_bb'"
489
+ with pytest.raises(NotImplementedError, match=msg):
490
+ pd.eval(expr, engine=engine, parser=parser)
491
+ else:
492
+ expect = -lhs
493
+ result = pd.eval(expr, engine=engine, parser=parser)
494
+ tm.assert_frame_equal(expect, result)
495
+
496
+ def test_series_negate(self, engine, parser):
497
+ expr = "-lhs"
498
+
499
+ # float
500
+ lhs = Series(np.random.default_rng(2).standard_normal(5))
501
+ expect = -lhs
502
+ result = pd.eval(expr, engine=engine, parser=parser)
503
+ tm.assert_series_equal(expect, result)
504
+
505
+ # int
506
+ lhs = Series(np.random.default_rng(2).integers(5, size=5))
507
+ expect = -lhs
508
+ result = pd.eval(expr, engine=engine, parser=parser)
509
+ tm.assert_series_equal(expect, result)
510
+
511
+ # bool doesn't work with numexpr but works elsewhere
512
+ lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5)
513
+ if engine == "numexpr":
514
+ msg = "couldn't find matching opcode for 'neg_bb'"
515
+ with pytest.raises(NotImplementedError, match=msg):
516
+ pd.eval(expr, engine=engine, parser=parser)
517
+ else:
518
+ expect = -lhs
519
+ result = pd.eval(expr, engine=engine, parser=parser)
520
+ tm.assert_series_equal(expect, result)
521
+
522
+ @pytest.mark.parametrize(
523
+ "lhs",
524
+ [
525
+ # Float
526
+ DataFrame(np.random.default_rng(2).standard_normal((5, 2))),
527
+ # Int
528
+ DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))),
529
+ # bool doesn't work with numexpr but works elsewhere
530
+ DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5),
531
+ ],
532
+ )
533
+ def test_frame_pos(self, lhs, engine, parser):
534
+ expr = "+lhs"
535
+ expect = lhs
536
+
537
+ result = pd.eval(expr, engine=engine, parser=parser)
538
+ tm.assert_frame_equal(expect, result)
539
+
540
+ @pytest.mark.parametrize(
541
+ "lhs",
542
+ [
543
+ # Float
544
+ Series(np.random.default_rng(2).standard_normal(5)),
545
+ # Int
546
+ Series(np.random.default_rng(2).integers(5, size=5)),
547
+ # bool doesn't work with numexpr but works elsewhere
548
+ Series(np.random.default_rng(2).standard_normal(5) > 0.5),
549
+ ],
550
+ )
551
+ def test_series_pos(self, lhs, engine, parser):
552
+ expr = "+lhs"
553
+ expect = lhs
554
+
555
+ result = pd.eval(expr, engine=engine, parser=parser)
556
+ tm.assert_series_equal(expect, result)
557
+
558
+ def test_scalar_unary(self, engine, parser):
559
+ msg = "bad operand type for unary ~: 'float'"
560
+ warn = None
561
+ if PY312 and not (engine == "numexpr" and parser == "pandas"):
562
+ warn = DeprecationWarning
563
+ with pytest.raises(TypeError, match=msg):
564
+ pd.eval("~1.0", engine=engine, parser=parser)
565
+
566
+ assert pd.eval("-1.0", parser=parser, engine=engine) == -1.0
567
+ assert pd.eval("+1.0", parser=parser, engine=engine) == +1.0
568
+ assert pd.eval("~1", parser=parser, engine=engine) == ~1
569
+ assert pd.eval("-1", parser=parser, engine=engine) == -1
570
+ assert pd.eval("+1", parser=parser, engine=engine) == +1
571
+ with tm.assert_produces_warning(
572
+ warn, match="Bitwise inversion", check_stacklevel=False
573
+ ):
574
+ assert pd.eval("~True", parser=parser, engine=engine) == ~True
575
+ with tm.assert_produces_warning(
576
+ warn, match="Bitwise inversion", check_stacklevel=False
577
+ ):
578
+ assert pd.eval("~False", parser=parser, engine=engine) == ~False
579
+ assert pd.eval("-True", parser=parser, engine=engine) == -True
580
+ assert pd.eval("-False", parser=parser, engine=engine) == -False
581
+ assert pd.eval("+True", parser=parser, engine=engine) == +True
582
+ assert pd.eval("+False", parser=parser, engine=engine) == +False
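The integer results asserted above follow from plain Python semantics: ints are two's complement, bools are ints, and `pd.eval` simply applies the unary operator. Python 3.12 deprecates `~` on bool, which is what the `assert_produces_warning` wrappers account for.

# two's complement: ~x == -x - 1
assert ~1 == -2 and ~37 == -38
# bools are ints under unary +/-
assert -True == -1 and +True == 1 and +False == 0
# ~True would likewise equal -2, but emits DeprecationWarning on 3.12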
583
+
584
+ def test_unary_in_array(self):
585
+ # GH 11235
586
+ # TODO: 2022-01-29: result return list with numexpr 2.7.3 in CI
587
+ # but cannot reproduce locally
588
+ result = np.array(
589
+ pd.eval("[-True, True, +True, -False, False, +False, -37, 37, ~37, +37]"),
590
+ dtype=np.object_,
591
+ )
592
+ expected = np.array(
593
+ [
594
+ -True,
595
+ True,
596
+ +True,
597
+ -False,
598
+ False,
599
+ +False,
600
+ -37,
601
+ 37,
602
+ ~37,
603
+ +37,
604
+ ],
605
+ dtype=np.object_,
606
+ )
607
+ tm.assert_numpy_array_equal(result, expected)
608
+
609
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
610
+ @pytest.mark.parametrize("expr", ["x < -0.1", "-5 > x"])
611
+ def test_float_comparison_bin_op(self, dtype, expr):
612
+ # GH 16363
613
+ df = DataFrame({"x": np.array([0], dtype=dtype)})
614
+ res = df.eval(expr)
615
+ assert res.values == np.array([False])
616
+
617
+ def test_unary_in_function(self):
618
+ # GH 46471
619
+ df = DataFrame({"x": [0, 1, np.nan]})
620
+
621
+ result = df.eval("x.fillna(-1)")
622
+ expected = df.x.fillna(-1)
623
+ # column name becomes None if using numexpr
624
+ # only check names when the engine is not numexpr
625
+ tm.assert_series_equal(result, expected, check_names=not USE_NUMEXPR)
626
+
627
+ result = df.eval("x.shift(1, fill_value=-1)")
628
+ expected = df.x.shift(1, fill_value=-1)
629
+ tm.assert_series_equal(result, expected, check_names=not USE_NUMEXPR)
630
+
631
+ @pytest.mark.parametrize(
632
+ "ex",
633
+ (
634
+ "1 or 2",
635
+ "1 and 2",
636
+ "a and b",
637
+ "a or b",
638
+ "1 or 2 and (3 + 2) > 3",
639
+ "2 * x > 2 or 1 and 2",
640
+ "2 * df > 3 and 1 or a",
641
+ ),
642
+ )
643
+ def test_disallow_scalar_bool_ops(self, ex, engine, parser):
644
+ x, a, b = np.random.default_rng(2).standard_normal(3), 1, 2 # noqa: F841
645
+ df = DataFrame(np.random.default_rng(2).standard_normal((3, 2))) # noqa: F841
646
+
647
+ msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not"
648
+ with pytest.raises(NotImplementedError, match=msg):
649
+ pd.eval(ex, engine=engine, parser=parser)
650
+
651
+ def test_identical(self, engine, parser):
652
+ # see gh-10546
653
+ x = 1
654
+ result = pd.eval("x", engine=engine, parser=parser)
655
+ assert result == 1
656
+ assert is_scalar(result)
657
+
658
+ x = 1.5
659
+ result = pd.eval("x", engine=engine, parser=parser)
660
+ assert result == 1.5
661
+ assert is_scalar(result)
662
+
663
+ x = False
664
+ result = pd.eval("x", engine=engine, parser=parser)
665
+ assert not result
666
+ assert is_bool(result)
667
+ assert is_scalar(result)
668
+
669
+ x = np.array([1])
670
+ result = pd.eval("x", engine=engine, parser=parser)
671
+ tm.assert_numpy_array_equal(result, np.array([1]))
672
+ assert result.shape == (1,)
673
+
674
+ x = np.array([1.5])
675
+ result = pd.eval("x", engine=engine, parser=parser)
676
+ tm.assert_numpy_array_equal(result, np.array([1.5]))
677
+ assert result.shape == (1,)
678
+
679
+ x = np.array([False]) # noqa: F841
680
+ result = pd.eval("x", engine=engine, parser=parser)
681
+ tm.assert_numpy_array_equal(result, np.array([False]))
682
+ assert result.shape == (1,)
683
+
684
+ def test_line_continuation(self, engine, parser):
685
+ # GH 11149
686
+ exp = """1 + 2 * \
687
+ 5 - 1 + 2 """
688
+ result = pd.eval(exp, engine=engine, parser=parser)
689
+ assert result == 12
690
+
691
+ def test_float_truncation(self, engine, parser):
692
+ # GH 14241
693
+ exp = "1000000000.006"
694
+ result = pd.eval(exp, engine=engine, parser=parser)
695
+ expected = np.float64(exp)
696
+ assert result == expected
697
+
698
+ df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
699
+ cutoff = 1000000000.0006
700
+ result = df.query(f"A < {cutoff:.4f}")
701
+ assert result.empty
702
+
703
+ cutoff = 1000000000.0010
704
+ result = df.query(f"A > {cutoff:.4f}")
705
+ expected = df.loc[[1, 2], :]
706
+ tm.assert_frame_equal(expected, result)
707
+
708
+ exact = 1000000000.0011
709
+ result = df.query(f"A == {exact:.4f}")
710
+ expected = df.loc[[1], :]
711
+ tm.assert_frame_equal(expected, result)
712
+
713
+ def test_disallow_python_keywords(self):
714
+ # GH 18221
715
+ df = DataFrame([[0, 0, 0]], columns=["foo", "bar", "class"])
716
+ msg = "Python keyword not valid identifier in numexpr query"
717
+ with pytest.raises(SyntaxError, match=msg):
718
+ df.query("class == 0")
719
+
720
+ df = DataFrame()
721
+ df.index.name = "lambda"
722
+ with pytest.raises(SyntaxError, match=msg):
723
+ df.query("lambda == 0")
724
+
725
+ def test_true_false_logic(self):
726
+ # GH 25823
727
+ # This behavior is deprecated in Python 3.12
728
+ with tm.maybe_produces_warning(
729
+ DeprecationWarning, PY312, check_stacklevel=False
730
+ ):
731
+ assert pd.eval("not True") == -2
732
+ assert pd.eval("not False") == -1
733
+ assert pd.eval("True and not True") == 0
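Why these particular integers: with the default pandas parser, `not`, `and`, and `or` are rewritten to the bitwise `~`, `&`, and `|` before evaluation, so boolean expressions come back as ints:

# pd.eval("not True")          ->  ~1     == -2
# pd.eval("not False")         ->  ~0     == -1
# pd.eval("True and not True") ->  1 & ~1 ==  0
assert ~1 == -2 and ~0 == -1 and (1 & ~1) == 0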
734
+
735
+ def test_and_logic_string_match(self):
736
+ # GH 25823
737
+ event = Series({"a": "hello"})
738
+ assert pd.eval(f"{event.str.match('hello').a}")
739
+ assert pd.eval(f"{event.str.match('hello').a and event.str.match('hello').a}")
740
+
741
+
742
+ # -------------------------------------
743
+ # gh-12388: Typecasting rules consistency with python
744
+
745
+
746
+ class TestTypeCasting:
747
+ @pytest.mark.parametrize("op", ["+", "-", "*", "**", "/"])
748
+ # maybe someday... numexpr has too many upcasting rules now
749
+ # chain(*(np.core.sctypes[x] for x in ['uint', 'int', 'float']))
750
+ @pytest.mark.parametrize("left_right", [("df", "3"), ("3", "df")])
751
+ def test_binop_typecasting(
752
+ self, engine, parser, op, complex_or_float_dtype, left_right, request
753
+ ):
754
+ # GH#21374
755
+ dtype = complex_or_float_dtype
756
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)), dtype=dtype)
757
+ left, right = left_right
758
+ s = f"{left} {op} {right}"
759
+ res = pd.eval(s, engine=engine, parser=parser)
760
+ if dtype == "complex64" and engine == "numexpr":
761
+ mark = pytest.mark.xfail(
762
+ reason="numexpr issue with complex that are upcast "
763
+ "to complex 128 "
764
+ "https://github.com/pydata/numexpr/issues/492"
765
+ )
766
+ request.applymarker(mark)
767
+ assert df.values.dtype == dtype
768
+ assert res.values.dtype == dtype
769
+ tm.assert_frame_equal(res, eval(s), check_exact=False)
770
+
771
+
772
+ # -------------------------------------
773
+ # Basic and complex alignment
774
+
775
+
+ def should_warn(*args):
+     not_mono = not any(map(operator.attrgetter("is_monotonic_increasing"), args))
+     only_one_dt = reduce(
+         operator.xor, (issubclass(x.dtype.type, np.datetime64) for x in args)
+     )
+     return not_mono and only_one_dt
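`should_warn` is True only when no argument index is monotonic increasing and the xor-reduce leaves an odd count (in practice: exactly one) of datetime64 indexes, i.e. a datetime index must be aligned against a non-datetime one without a cheap sorted path. A worked example, assuming the `should_warn` defined just above is in scope:

from pandas import DatetimeIndex, Index

dt = DatetimeIndex(["2020-01-02", "2020-01-01"])  # not monotonic
ix = Index([3, 1, 2])                             # also not monotonic
assert should_warn(dt, ix)      # exactly one datetime64 index -> True
assert not should_warn(ix, ix)  # no datetime64 index -> False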
782
+
783
+
784
+ class TestAlignment:
785
+ index_types = ["i", "s", "dt"]
786
+ lhs_index_types = index_types + ["s"] # 'p'
787
+
788
+ def test_align_nested_unary_op(self, engine, parser):
789
+ s = "df * ~2"
790
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
791
+ res = pd.eval(s, engine=engine, parser=parser)
792
+ tm.assert_frame_equal(res, df * ~2)
793
+
794
+ @pytest.mark.filterwarnings("always::RuntimeWarning")
795
+ @pytest.mark.parametrize("lr_idx_type", lhs_index_types)
796
+ @pytest.mark.parametrize("rr_idx_type", index_types)
797
+ @pytest.mark.parametrize("c_idx_type", index_types)
798
+ def test_basic_frame_alignment(
799
+ self, engine, parser, lr_idx_type, rr_idx_type, c_idx_type, idx_func_dict
800
+ ):
801
+ df = DataFrame(
802
+ np.random.default_rng(2).standard_normal((10, 10)),
803
+ index=idx_func_dict[lr_idx_type](10),
804
+ columns=idx_func_dict[c_idx_type](10),
805
+ )
806
+ df2 = DataFrame(
807
+ np.random.default_rng(2).standard_normal((20, 10)),
808
+ index=idx_func_dict[rr_idx_type](20),
809
+ columns=idx_func_dict[c_idx_type](10),
810
+ )
811
+ # only warns if not monotonic and not sortable
812
+ if should_warn(df.index, df2.index):
813
+ with tm.assert_produces_warning(RuntimeWarning):
814
+ res = pd.eval("df + df2", engine=engine, parser=parser)
815
+ else:
816
+ res = pd.eval("df + df2", engine=engine, parser=parser)
817
+ tm.assert_frame_equal(res, df + df2)
818
+
819
+ @pytest.mark.parametrize("r_idx_type", lhs_index_types)
820
+ @pytest.mark.parametrize("c_idx_type", lhs_index_types)
821
+ def test_frame_comparison(
822
+ self, engine, parser, r_idx_type, c_idx_type, idx_func_dict
823
+ ):
824
+ df = DataFrame(
825
+ np.random.default_rng(2).standard_normal((10, 10)),
826
+ index=idx_func_dict[r_idx_type](10),
827
+ columns=idx_func_dict[c_idx_type](10),
828
+ )
829
+ res = pd.eval("df < 2", engine=engine, parser=parser)
830
+ tm.assert_frame_equal(res, df < 2)
831
+
832
+ df3 = DataFrame(
833
+ np.random.default_rng(2).standard_normal(df.shape),
834
+ index=df.index,
835
+ columns=df.columns,
836
+ )
837
+ res = pd.eval("df < df3", engine=engine, parser=parser)
838
+ tm.assert_frame_equal(res, df < df3)
839
+
840
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
841
+ @pytest.mark.parametrize("r1", lhs_index_types)
842
+ @pytest.mark.parametrize("c1", index_types)
843
+ @pytest.mark.parametrize("r2", index_types)
844
+ @pytest.mark.parametrize("c2", index_types)
845
+ def test_medium_complex_frame_alignment(
846
+ self, engine, parser, r1, c1, r2, c2, idx_func_dict
847
+ ):
848
+ df = DataFrame(
849
+ np.random.default_rng(2).standard_normal((3, 2)),
850
+ index=idx_func_dict[r1](3),
851
+ columns=idx_func_dict[c1](2),
852
+ )
853
+ df2 = DataFrame(
854
+ np.random.default_rng(2).standard_normal((4, 2)),
855
+ index=idx_func_dict[r2](4),
856
+ columns=idx_func_dict[c2](2),
857
+ )
858
+ df3 = DataFrame(
859
+ np.random.default_rng(2).standard_normal((5, 2)),
860
+ index=idx_func_dict[r2](5),
861
+ columns=idx_func_dict[c2](2),
862
+ )
863
+ if should_warn(df.index, df2.index, df3.index):
864
+ with tm.assert_produces_warning(RuntimeWarning):
865
+ res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
866
+ else:
867
+ res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
868
+ tm.assert_frame_equal(res, df + df2 + df3)
869
+
870
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
871
+ @pytest.mark.parametrize("index_name", ["index", "columns"])
872
+ @pytest.mark.parametrize("c_idx_type", index_types)
873
+ @pytest.mark.parametrize("r_idx_type", lhs_index_types)
874
+ def test_basic_frame_series_alignment(
875
+ self, engine, parser, index_name, r_idx_type, c_idx_type, idx_func_dict
876
+ ):
877
+ df = DataFrame(
878
+ np.random.default_rng(2).standard_normal((10, 10)),
879
+ index=idx_func_dict[r_idx_type](10),
880
+ columns=idx_func_dict[c_idx_type](10),
881
+ )
882
+ index = getattr(df, index_name)
883
+ s = Series(np.random.default_rng(2).standard_normal(5), index[:5])
884
+
885
+ if should_warn(df.index, s.index):
886
+ with tm.assert_produces_warning(RuntimeWarning):
887
+ res = pd.eval("df + s", engine=engine, parser=parser)
888
+ else:
889
+ res = pd.eval("df + s", engine=engine, parser=parser)
890
+
891
+ if r_idx_type == "dt" or c_idx_type == "dt":
892
+ expected = df.add(s) if engine == "numexpr" else df + s
893
+ else:
894
+ expected = df + s
895
+ tm.assert_frame_equal(res, expected)
896
+
897
+ @pytest.mark.parametrize("index_name", ["index", "columns"])
898
+ @pytest.mark.parametrize(
899
+ "r_idx_type, c_idx_type",
900
+ list(product(["i", "s"], ["i", "s"])) + [("dt", "dt")],
901
+ )
902
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
903
+ def test_basic_series_frame_alignment(
904
+ self, request, engine, parser, index_name, r_idx_type, c_idx_type, idx_func_dict
905
+ ):
906
+ if (
907
+ engine == "numexpr"
908
+ and parser in ("pandas", "python")
909
+ and index_name == "index"
910
+ and r_idx_type == "i"
911
+ and c_idx_type == "s"
912
+ ):
913
+ reason = (
914
+ f"Flaky column ordering when engine={engine}, "
915
+ f"parser={parser}, index_name={index_name}, "
916
+ f"r_idx_type={r_idx_type}, c_idx_type={c_idx_type}"
917
+ )
918
+ request.applymarker(pytest.mark.xfail(reason=reason, strict=False))
919
+ df = DataFrame(
920
+ np.random.default_rng(2).standard_normal((10, 7)),
921
+ index=idx_func_dict[r_idx_type](10),
922
+ columns=idx_func_dict[c_idx_type](7),
923
+ )
924
+ index = getattr(df, index_name)
925
+ s = Series(np.random.default_rng(2).standard_normal(5), index[:5])
926
+ if should_warn(s.index, df.index):
927
+ with tm.assert_produces_warning(RuntimeWarning):
928
+ res = pd.eval("s + df", engine=engine, parser=parser)
929
+ else:
930
+ res = pd.eval("s + df", engine=engine, parser=parser)
931
+
932
+ if r_idx_type == "dt" or c_idx_type == "dt":
933
+ expected = df.add(s) if engine == "numexpr" else s + df
934
+ else:
935
+ expected = s + df
936
+ tm.assert_frame_equal(res, expected)
937
+
938
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
939
+ @pytest.mark.parametrize("c_idx_type", index_types)
940
+ @pytest.mark.parametrize("r_idx_type", lhs_index_types)
941
+ @pytest.mark.parametrize("index_name", ["index", "columns"])
942
+ @pytest.mark.parametrize("op", ["+", "*"])
943
+ def test_series_frame_commutativity(
944
+ self, engine, parser, index_name, op, r_idx_type, c_idx_type, idx_func_dict
945
+ ):
946
+ df = DataFrame(
947
+ np.random.default_rng(2).standard_normal((10, 10)),
948
+ index=idx_func_dict[r_idx_type](10),
949
+ columns=idx_func_dict[c_idx_type](10),
950
+ )
951
+ index = getattr(df, index_name)
952
+ s = Series(np.random.default_rng(2).standard_normal(5), index[:5])
953
+
954
+ lhs = f"s {op} df"
955
+ rhs = f"df {op} s"
956
+ if should_warn(df.index, s.index):
957
+ with tm.assert_produces_warning(RuntimeWarning):
958
+ a = pd.eval(lhs, engine=engine, parser=parser)
959
+ with tm.assert_produces_warning(RuntimeWarning):
960
+ b = pd.eval(rhs, engine=engine, parser=parser)
961
+ else:
962
+ a = pd.eval(lhs, engine=engine, parser=parser)
963
+ b = pd.eval(rhs, engine=engine, parser=parser)
964
+
965
+ if r_idx_type != "dt" and c_idx_type != "dt":
966
+ if engine == "numexpr":
967
+ tm.assert_frame_equal(a, b)
968
+
969
+ @pytest.mark.filterwarnings("always::RuntimeWarning")
970
+ @pytest.mark.parametrize("r1", lhs_index_types)
971
+ @pytest.mark.parametrize("c1", index_types)
972
+ @pytest.mark.parametrize("r2", index_types)
973
+ @pytest.mark.parametrize("c2", index_types)
974
+ def test_complex_series_frame_alignment(
975
+ self, engine, parser, r1, c1, r2, c2, idx_func_dict
976
+ ):
977
+ n = 3
978
+ m1 = 5
979
+ m2 = 2 * m1
980
+ df = DataFrame(
981
+ np.random.default_rng(2).standard_normal((m1, n)),
982
+ index=idx_func_dict[r1](m1),
983
+ columns=idx_func_dict[c1](n),
984
+ )
985
+ df2 = DataFrame(
986
+ np.random.default_rng(2).standard_normal((m2, n)),
987
+ index=idx_func_dict[r2](m2),
988
+ columns=idx_func_dict[c2](n),
989
+ )
990
+ index = df2.columns
991
+ ser = Series(np.random.default_rng(2).standard_normal(n), index[:n])
992
+
993
+ if r2 == "dt" or c2 == "dt":
994
+ if engine == "numexpr":
995
+ expected2 = df2.add(ser)
996
+ else:
997
+ expected2 = df2 + ser
998
+ else:
999
+ expected2 = df2 + ser
1000
+
1001
+ if r1 == "dt" or c1 == "dt":
1002
+ if engine == "numexpr":
1003
+ expected = expected2.add(df)
1004
+ else:
1005
+ expected = expected2 + df
1006
+ else:
1007
+ expected = expected2 + df
1008
+
1009
+ if should_warn(df2.index, ser.index, df.index):
1010
+ with tm.assert_produces_warning(RuntimeWarning):
1011
+ res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
1012
+ else:
1013
+ res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
1014
+ assert res.shape == expected.shape
1015
+ tm.assert_frame_equal(res, expected)
1016
+
1017
+ def test_performance_warning_for_poor_alignment(self, engine, parser):
1018
+ df = DataFrame(np.random.default_rng(2).standard_normal((1000, 10)))
1019
+ s = Series(np.random.default_rng(2).standard_normal(10000))
1020
+ if engine == "numexpr":
1021
+ seen = PerformanceWarning
1022
+ else:
1023
+ seen = False
1024
+
1025
+ with tm.assert_produces_warning(seen):
1026
+ pd.eval("df + s", engine=engine, parser=parser)
1027
+
1028
+ s = Series(np.random.default_rng(2).standard_normal(1000))
1029
+ with tm.assert_produces_warning(False):
1030
+ pd.eval("df + s", engine=engine, parser=parser)
1031
+
1032
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 10000)))
1033
+ s = Series(np.random.default_rng(2).standard_normal(10000))
1034
+ with tm.assert_produces_warning(False):
1035
+ pd.eval("df + s", engine=engine, parser=parser)
1036
+
1037
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 10)))
1038
+ s = Series(np.random.default_rng(2).standard_normal(10000))
1039
+
1040
+ is_python_engine = engine == "python"
1041
+
1042
+ if not is_python_engine:
1043
+ wrn = PerformanceWarning
1044
+ else:
1045
+ wrn = False
1046
+
1047
+ with tm.assert_produces_warning(wrn) as w:
1048
+ pd.eval("df + s", engine=engine, parser=parser)
1049
+
1050
+ if not is_python_engine:
1051
+ assert len(w) == 1
1052
+ msg = str(w[0].message)
1053
+ logged = np.log10(s.size - df.shape[1])
1054
+ expected = (
1055
+ f"Alignment difference on axis 1 is larger "
1056
+ f"than an order of magnitude on term 'df', "
1057
+ f"by more than {logged:.4g}; performance may suffer."
1058
+ )
1059
+ assert msg == expected
1060
+
1061
+
1062
+ # ------------------------------------
1063
+ # Slightly more complex ops
1064
+
1065
+
+ class TestOperations:
+     def eval(self, *args, **kwargs):
+         kwargs["level"] = kwargs.pop("level", 0) + 1
+         return pd.eval(*args, **kwargs)
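The `level` bump is what lets tests call this wrapper and still have names resolved from the test's own frame: `pd.eval(..., level=n)` walks `n` stack frames up when building its variable scope, so every wrapping layer must add one. The same pattern standalone:

import pandas as pd

def eval_here(expr_str, **kwargs):
    # +1 so pd.eval builds its scope from eval_here's caller
    kwargs["level"] = kwargs.pop("level", 0) + 1
    return pd.eval(expr_str, **kwargs)

x = 41
assert eval_here("x + 1") == 42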
1070
+
1071
+ def test_simple_arith_ops(self, engine, parser):
1072
+ exclude_arith = []
1073
+ if parser == "python":
1074
+ exclude_arith = ["in", "not in"]
1075
+
1076
+ arith_ops = [
1077
+ op
1078
+ for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS
1079
+ if op not in exclude_arith
1080
+ ]
1081
+
1082
+ ops = (op for op in arith_ops if op != "//")
1083
+
1084
+ for op in ops:
1085
+ ex = f"1 {op} 1"
1086
+ ex2 = f"x {op} 1"
1087
+ ex3 = f"1 {op} (x + 1)"
1088
+
1089
+ if op in ("in", "not in"):
1090
+ msg = "argument of type 'int' is not iterable"
1091
+ with pytest.raises(TypeError, match=msg):
1092
+ pd.eval(ex, engine=engine, parser=parser)
1093
+ else:
1094
+ expec = _eval_single_bin(1, op, 1, engine)
1095
+ x = self.eval(ex, engine=engine, parser=parser)
1096
+ assert x == expec
1097
+
1098
+ expec = _eval_single_bin(x, op, 1, engine)
1099
+ y = self.eval(ex2, local_dict={"x": x}, engine=engine, parser=parser)
1100
+ assert y == expec
1101
+
1102
+ expec = _eval_single_bin(1, op, x + 1, engine)
1103
+ y = self.eval(ex3, local_dict={"x": x}, engine=engine, parser=parser)
1104
+ assert y == expec
1105
+
1106
+ @pytest.mark.parametrize("rhs", [True, False])
1107
+ @pytest.mark.parametrize("lhs", [True, False])
1108
+ @pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
1109
+ def test_simple_bool_ops(self, rhs, lhs, op):
1110
+ ex = f"{lhs} {op} {rhs}"
1111
+
1112
+ if parser == "python" and op in ["and", "or"]:
1113
+ msg = "'BoolOp' nodes are not implemented"
1114
+ with pytest.raises(NotImplementedError, match=msg):
1115
+ self.eval(ex)
1116
+ return
1117
+
1118
+ res = self.eval(ex)
1119
+ exp = eval(ex)
1120
+ assert res == exp
1121
+
1122
+ @pytest.mark.parametrize("rhs", [True, False])
1123
+ @pytest.mark.parametrize("lhs", [True, False])
1124
+ @pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
1125
+ def test_bool_ops_with_constants(self, rhs, lhs, op):
1126
+ ex = f"{lhs} {op} {rhs}"
1127
+
1128
+ if parser == "python" and op in ["and", "or"]:
1129
+ msg = "'BoolOp' nodes are not implemented"
1130
+ with pytest.raises(NotImplementedError, match=msg):
1131
+ self.eval(ex)
1132
+ return
1133
+
1134
+ res = self.eval(ex)
1135
+ exp = eval(ex)
1136
+ assert res == exp
1137
+
1138
+ def test_4d_ndarray_fails(self):
1139
+ x = np.random.default_rng(2).standard_normal((3, 4, 5, 6))
1140
+ y = Series(np.random.default_rng(2).standard_normal(10))
1141
+ msg = "N-dimensional objects, where N > 2, are not supported with eval"
1142
+ with pytest.raises(NotImplementedError, match=msg):
1143
+ self.eval("x + y", local_dict={"x": x, "y": y})
1144
+
1145
+ def test_constant(self):
1146
+ x = self.eval("1")
1147
+ assert x == 1
1148
+
1149
+ def test_single_variable(self):
1150
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
1151
+ df2 = self.eval("df", local_dict={"df": df})
1152
+ tm.assert_frame_equal(df, df2)
1153
+
1154
+ def test_failing_subscript_with_name_error(self):
1155
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) # noqa: F841
1156
+ with pytest.raises(NameError, match="name 'x' is not defined"):
1157
+ self.eval("df[x > 2] > 2")
1158
+
1159
+ def test_lhs_expression_subscript(self):
1160
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
1161
+ result = self.eval("(df + 1)[df > 2]", local_dict={"df": df})
1162
+ expected = (df + 1)[df > 2]
1163
+ tm.assert_frame_equal(result, expected)
1164
+
1165
+ def test_attr_expression(self):
1166
+ df = DataFrame(
1167
+ np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc")
1168
+ )
1169
+ expr1 = "df.a < df.b"
1170
+ expec1 = df.a < df.b
1171
+ expr2 = "df.a + df.b + df.c"
1172
+ expec2 = df.a + df.b + df.c
1173
+ expr3 = "df.a + df.b + df.c[df.b < 0]"
1174
+ expec3 = df.a + df.b + df.c[df.b < 0]
1175
+ exprs = expr1, expr2, expr3
1176
+ expecs = expec1, expec2, expec3
1177
+ for e, expec in zip(exprs, expecs):
1178
+ tm.assert_series_equal(expec, self.eval(e, local_dict={"df": df}))
1179
+
1180
+ def test_assignment_fails(self):
1181
+ df = DataFrame(
1182
+ np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc")
1183
+ )
1184
+ df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
1185
+ expr1 = "df = df2"
1186
+ msg = "cannot assign without a target object"
1187
+ with pytest.raises(ValueError, match=msg):
1188
+ self.eval(expr1, local_dict={"df": df, "df2": df2})
1189
+
1190
+ def test_assignment_column_multiple_raise(self):
1191
+ df = DataFrame(
1192
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1193
+ )
1194
+ # multiple assignees
1195
+ with pytest.raises(SyntaxError, match="invalid syntax"):
1196
+ df.eval("d c = a + b")
1197
+
1198
+ def test_assignment_column_invalid_assign(self):
1199
+ df = DataFrame(
1200
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1201
+ )
1202
+ # invalid assignees
1203
+ msg = "left hand side of an assignment must be a single name"
1204
+ with pytest.raises(SyntaxError, match=msg):
1205
+ df.eval("d,c = a + b")
1206
+
1207
+ def test_assignment_column_invalid_assign_function_call(self):
1208
+ df = DataFrame(
1209
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1210
+ )
1211
+ msg = "cannot assign to function call"
1212
+ with pytest.raises(SyntaxError, match=msg):
1213
+ df.eval('Timestamp("20131001") = a + b')
1214
+
1215
+ def test_assignment_single_assign_existing(self):
1216
+ df = DataFrame(
1217
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1218
+ )
1219
+ # single assignment - existing variable
1220
+ expected = df.copy()
1221
+ expected["a"] = expected["a"] + expected["b"]
1222
+ df.eval("a = a + b", inplace=True)
1223
+ tm.assert_frame_equal(df, expected)
1224
+
1225
+ def test_assignment_single_assign_new(self):
1226
+ df = DataFrame(
1227
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1228
+ )
1229
+ # single assignment - new variable
1230
+ expected = df.copy()
1231
+ expected["c"] = expected["a"] + expected["b"]
1232
+ df.eval("c = a + b", inplace=True)
1233
+ tm.assert_frame_equal(df, expected)
1234
+
1235
+ def test_assignment_single_assign_local_overlap(self):
1236
+ df = DataFrame(
1237
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1238
+ )
1239
+ df = df.copy()
1240
+ a = 1 # noqa: F841
1241
+ df.eval("a = 1 + b", inplace=True)
1242
+
1243
+ expected = df.copy()
1244
+ expected["a"] = 1 + expected["b"]
1245
+ tm.assert_frame_equal(df, expected)
1246
+
1247
+ def test_assignment_single_assign_name(self):
1248
+ df = DataFrame(
1249
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1250
+ )
1251
+
1252
+ a = 1 # noqa: F841
1253
+ old_a = df.a.copy()
1254
+ df.eval("a = a + b", inplace=True)
1255
+ result = old_a + df.b
1256
+ tm.assert_series_equal(result, df.a, check_names=False)
1257
+ assert result.name is None
1258
+
1259
+ def test_assignment_multiple_raises(self):
1260
+ df = DataFrame(
1261
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1262
+ )
1263
+ # multiple assignment
1264
+ df.eval("c = a + b", inplace=True)
1265
+ msg = "can only assign a single expression"
1266
+ with pytest.raises(SyntaxError, match=msg):
1267
+ df.eval("c = a = b")
1268
+
1269
+ def test_assignment_explicit(self):
1270
+ df = DataFrame(
1271
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1272
+ )
1273
+ # explicit targets
1274
+ self.eval("c = df.a + df.b", local_dict={"df": df}, target=df, inplace=True)
1275
+ expected = df.copy()
1276
+ expected["c"] = expected["a"] + expected["b"]
1277
+ tm.assert_frame_equal(df, expected)
1278
+
1279
+ def test_column_in(self):
1280
+ # GH 11235
1281
+ df = DataFrame({"a": [11], "b": [-32]})
1282
+ result = df.eval("a in [11, -32]")
1283
+ expected = Series([True])
1284
+ # TODO: 2022-01-29: Name check failed with numexpr 2.7.3 in CI
1285
+ # but cannot reproduce locally
1286
+ tm.assert_series_equal(result, expected, check_names=False)
1287
+
1288
+ @pytest.mark.xfail(reason="Unknown: Omitted test_ in name prior.")
1289
+ def test_assignment_not_inplace(self):
1290
+ # see gh-9297
1291
+ df = DataFrame(
1292
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
1293
+ )
1294
+
1295
+ actual = df.eval("c = a + b", inplace=False)
1296
+ assert actual is not None
1297
+
1298
+ expected = df.copy()
1299
+ expected["c"] = expected["a"] + expected["b"]
1300
+ tm.assert_frame_equal(df, expected)
1301
+
1302
+ def test_multi_line_expression(self, warn_copy_on_write):
1303
+ # GH 11149
1304
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
1305
+ expected = df.copy()
1306
+
1307
+ expected["c"] = expected["a"] + expected["b"]
1308
+ expected["d"] = expected["c"] + expected["b"]
1309
+ answer = df.eval(
1310
+ """
1311
+ c = a + b
1312
+ d = c + b""",
1313
+ inplace=True,
1314
+ )
1315
+ tm.assert_frame_equal(expected, df)
1316
+ assert answer is None
1317
+
1318
+ expected["a"] = expected["a"] - 1
1319
+ expected["e"] = expected["a"] + 2
1320
+ answer = df.eval(
1321
+ """
1322
+ a = a - 1
1323
+ e = a + 2""",
1324
+ inplace=True,
1325
+ )
1326
+ tm.assert_frame_equal(expected, df)
1327
+ assert answer is None
1328
+
1329
+ # multi-line not valid if not all assignments
1330
+ msg = "Multi-line expressions are only valid if all expressions contain"
1331
+ with pytest.raises(ValueError, match=msg):
1332
+ df.eval(
1333
+ """
1334
+ a = b + 2
1335
+ b - 2""",
1336
+ inplace=False,
1337
+ )
1338
+
1339
+ def test_multi_line_expression_not_inplace(self):
1340
+ # GH 11149
1341
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
1342
+ expected = df.copy()
1343
+
1344
+ expected["c"] = expected["a"] + expected["b"]
1345
+ expected["d"] = expected["c"] + expected["b"]
1346
+ df = df.eval(
1347
+ """
1348
+ c = a + b
1349
+ d = c + b""",
1350
+ inplace=False,
1351
+ )
1352
+ tm.assert_frame_equal(expected, df)
1353
+
1354
+ expected["a"] = expected["a"] - 1
1355
+ expected["e"] = expected["a"] + 2
1356
+ df = df.eval(
1357
+ """
1358
+ a = a - 1
1359
+ e = a + 2""",
1360
+ inplace=False,
1361
+ )
1362
+ tm.assert_frame_equal(expected, df)
1363
+
1364
+ def test_multi_line_expression_local_variable(self):
1365
+ # GH 15342
1366
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
1367
+ expected = df.copy()
1368
+
1369
+ local_var = 7
1370
+ expected["c"] = expected["a"] * local_var
1371
+ expected["d"] = expected["c"] + local_var
1372
+ answer = df.eval(
1373
+ """
1374
+ c = a * @local_var
1375
+ d = c + @local_var
1376
+ """,
1377
+ inplace=True,
1378
+ )
1379
+ tm.assert_frame_equal(expected, df)
1380
+ assert answer is None
1381
+
1382
+ def test_multi_line_expression_callable_local_variable(self):
1383
+ # 26426
1384
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
1385
+
1386
+ def local_func(a, b):
1387
+ return b
1388
+
1389
+ expected = df.copy()
1390
+ expected["c"] = expected["a"] * local_func(1, 7)
1391
+ expected["d"] = expected["c"] + local_func(1, 7)
1392
+ answer = df.eval(
1393
+ """
1394
+ c = a * @local_func(1, 7)
1395
+ d = c + @local_func(1, 7)
1396
+ """,
1397
+ inplace=True,
1398
+ )
1399
+ tm.assert_frame_equal(expected, df)
1400
+ assert answer is None
1401
+
1402
+ def test_multi_line_expression_callable_local_variable_with_kwargs(self):
1403
+ # 26426
1404
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
1405
+
1406
+ def local_func(a, b):
1407
+ return b
1408
+
1409
+ expected = df.copy()
1410
+ expected["c"] = expected["a"] * local_func(b=7, a=1)
1411
+ expected["d"] = expected["c"] + local_func(b=7, a=1)
1412
+ answer = df.eval(
1413
+ """
1414
+ c = a * @local_func(b=7, a=1)
1415
+ d = c + @local_func(b=7, a=1)
1416
+ """,
1417
+ inplace=True,
1418
+ )
1419
+ tm.assert_frame_equal(expected, df)
1420
+ assert answer is None
1421
+
1422
+ def test_assignment_in_query(self):
1423
+ # GH 8664
1424
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
1425
+ df_orig = df.copy()
1426
+ msg = "cannot assign without a target object"
1427
+ with pytest.raises(ValueError, match=msg):
1428
+ df.query("a = 1")
1429
+ tm.assert_frame_equal(df, df_orig)
1430
+
1431
+ def test_query_inplace(self):
1432
+ # see gh-11149
1433
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
1434
+ expected = df.copy()
1435
+ expected = expected[expected["a"] == 2]
1436
+ df.query("a == 2", inplace=True)
1437
+ tm.assert_frame_equal(expected, df)
1438
+
1439
+ df = {}
1440
+ expected = {"a": 3}
1441
+
1442
+ self.eval("a = 1 + 2", target=df, inplace=True)
1443
+ tm.assert_dict_equal(df, expected)
1444
+
1445
+ @pytest.mark.parametrize("invalid_target", [1, "cat", [1, 2], np.array([]), (1, 3)])
1446
+ def test_cannot_item_assign(self, invalid_target):
1447
+ msg = "Cannot assign expression output to target"
1448
+ expression = "a = 1 + 2"
1449
+
1450
+ with pytest.raises(ValueError, match=msg):
1451
+ self.eval(expression, target=invalid_target, inplace=True)
1452
+
1453
+ if hasattr(invalid_target, "copy"):
1454
+ with pytest.raises(ValueError, match=msg):
1455
+ self.eval(expression, target=invalid_target, inplace=False)
1456
+
1457
+ @pytest.mark.parametrize("invalid_target", [1, "cat", (1, 3)])
1458
+ def test_cannot_copy_item(self, invalid_target):
1459
+ msg = "Cannot return a copy of the target"
1460
+ expression = "a = 1 + 2"
1461
+
1462
+ with pytest.raises(ValueError, match=msg):
1463
+ self.eval(expression, target=invalid_target, inplace=False)
1464
+
1465
+ @pytest.mark.parametrize("target", [1, "cat", [1, 2], np.array([]), (1, 3), {1: 2}])
1466
+ def test_inplace_no_assignment(self, target):
1467
+ expression = "1 + 2"
1468
+
1469
+ assert self.eval(expression, target=target, inplace=False) == 3
1470
+
1471
+ msg = "Cannot operate inplace if there is no assignment"
1472
+ with pytest.raises(ValueError, match=msg):
1473
+ self.eval(expression, target=target, inplace=True)
1474
+
1475
+ def test_basic_period_index_boolean_expression(self):
1476
+ df = DataFrame(
1477
+ np.random.default_rng(2).standard_normal((2, 2)),
1478
+ columns=period_range("2020-01-01", freq="D", periods=2),
1479
+ )
1480
+ e = df < 2
1481
+ r = self.eval("df < 2", local_dict={"df": df})
1482
+ x = df < 2
1483
+
1484
+ tm.assert_frame_equal(r, e)
1485
+ tm.assert_frame_equal(x, e)
1486
+
1487
+ def test_basic_period_index_subscript_expression(self):
1488
+ df = DataFrame(
1489
+ np.random.default_rng(2).standard_normal((2, 2)),
1490
+ columns=period_range("2020-01-01", freq="D", periods=2),
1491
+ )
1492
+ r = self.eval("df[df < 2 + 3]", local_dict={"df": df})
1493
+ e = df[df < 2 + 3]
1494
+ tm.assert_frame_equal(r, e)
1495
+
1496
+ def test_nested_period_index_subscript_expression(self):
1497
+ df = DataFrame(
1498
+ np.random.default_rng(2).standard_normal((2, 2)),
1499
+ columns=period_range("2020-01-01", freq="D", periods=2),
1500
+ )
1501
+ r = self.eval("df[df[df < 2] < 2] + df * 2", local_dict={"df": df})
1502
+ e = df[df[df < 2] < 2] + df * 2
1503
+ tm.assert_frame_equal(r, e)
1504
+
1505
+ def test_date_boolean(self, engine, parser):
1506
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
1507
+ df["dates1"] = date_range("1/1/2012", periods=5)
1508
+ res = self.eval(
1509
+ "df.dates1 < 20130101",
1510
+ local_dict={"df": df},
1511
+ engine=engine,
1512
+ parser=parser,
1513
+ )
1514
+ expec = df.dates1 < "20130101"
1515
+ tm.assert_series_equal(res, expec, check_names=False)
1516
+
+    def test_simple_in_ops(self, engine, parser):
+        if parser != "python":
+            res = pd.eval("1 in [1, 2]", engine=engine, parser=parser)
+            assert res
+
+            res = pd.eval("2 in (1, 2)", engine=engine, parser=parser)
+            assert res
+
+            res = pd.eval("3 in (1, 2)", engine=engine, parser=parser)
+            assert not res
+
+            res = pd.eval("3 not in (1, 2)", engine=engine, parser=parser)
+            assert res
+
+            res = pd.eval("[3] not in (1, 2)", engine=engine, parser=parser)
+            assert res
+
+            res = pd.eval("[3] in ([3], 2)", engine=engine, parser=parser)
+            assert res
+
+            res = pd.eval("[[3]] in [[[3]], 2]", engine=engine, parser=parser)
+            assert res
+
+            res = pd.eval("(3,) in [(3,), 2]", engine=engine, parser=parser)
+            assert res
+
+            res = pd.eval("(3,) not in [(3,), 2]", engine=engine, parser=parser)
+            assert not res
+
+            res = pd.eval("[(3,)] in [[(3,)], 2]", engine=engine, parser=parser)
+            assert res
+        else:
+            msg = "'In' nodes are not implemented"
+            with pytest.raises(NotImplementedError, match=msg):
+                pd.eval("1 in [1, 2]", engine=engine, parser=parser)
+            with pytest.raises(NotImplementedError, match=msg):
+                pd.eval("2 in (1, 2)", engine=engine, parser=parser)
+            with pytest.raises(NotImplementedError, match=msg):
+                pd.eval("3 in (1, 2)", engine=engine, parser=parser)
+            with pytest.raises(NotImplementedError, match=msg):
+                pd.eval("[(3,)] in (1, 2, [(3,)])", engine=engine, parser=parser)
+            msg = "'NotIn' nodes are not implemented"
+            with pytest.raises(NotImplementedError, match=msg):
+                pd.eval("3 not in (1, 2)", engine=engine, parser=parser)
+            with pytest.raises(NotImplementedError, match=msg):
+                pd.eval("[3] not in (1, 2, [[3]])", engine=engine, parser=parser)
+
+    def test_check_many_exprs(self, engine, parser):
+        a = 1  # noqa: F841
+        expr = " * ".join("a" * 33)
+        expected = 1
+        res = pd.eval(expr, engine=engine, parser=parser)
+        assert res == expected
+
+    @pytest.mark.parametrize(
+        "expr",
+        [
+            "df > 2 and df > 3",
+            "df > 2 or df > 3",
+            "not df > 2",
+        ],
+    )
+    def test_fails_and_or_not(self, expr, engine, parser):
+        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
+        if parser == "python":
+            msg = "'BoolOp' nodes are not implemented"
+            if "not" in expr:
+                msg = "'Not' nodes are not implemented"
+
+            with pytest.raises(NotImplementedError, match=msg):
+                pd.eval(
+                    expr,
+                    local_dict={"df": df},
+                    parser=parser,
+                    engine=engine,
+                )
+        else:
+            # smoke-test, should not raise
+            pd.eval(
+                expr,
+                local_dict={"df": df},
+                parser=parser,
+                engine=engine,
+            )
+
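+    # With parser="python", a scalar-only `&`/`|` boolean op is refused, while
+    # parser="pandas" evaluates the same mixed frame/scalar expression.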
+    @pytest.mark.parametrize("char", ["|", "&"])
+    def test_fails_ampersand_pipe(self, char, engine, parser):
+        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))  # noqa: F841
+        ex = f"(df + 2)[df > 1] > 0 {char} (df > 0)"
+        if parser == "python":
+            msg = "cannot evaluate scalar only bool ops"
+            with pytest.raises(NotImplementedError, match=msg):
+                pd.eval(ex, parser=parser, engine=engine)
+        else:
+            # smoke-test, should not raise
+            pd.eval(ex, parser=parser, engine=engine)
+
+
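+# Math-function support inside eval/query expressions; unary functions are
+# only implemented for the numexpr engine (hence the skipif below).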
+class TestMath:
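+    # pd.eval wrapper that bumps the stack `level` so variable lookup happens
+    # in the calling test's frame rather than in this helper's frame.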
+    def eval(self, *args, **kwargs):
+        kwargs["level"] = kwargs.pop("level", 0) + 1
+        return pd.eval(*args, **kwargs)
+
+    @pytest.mark.skipif(
+        not NUMEXPR_INSTALLED, reason="Unary ops only implemented for numexpr"
+    )
+    @pytest.mark.parametrize("fn", _unary_math_ops)
+    def test_unary_functions(self, fn):
+        df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)})
+        a = df.a
+
+        expr = f"{fn}(a)"
+        got = self.eval(expr)
+        with np.errstate(all="ignore"):
+            expect = getattr(np, fn)(a)
+        tm.assert_series_equal(got, expect, check_names=False)
+
+    @pytest.mark.parametrize("fn", _binary_math_ops)
+    def test_binary_functions(self, fn):
+        df = DataFrame(
+            {
+                "a": np.random.default_rng(2).standard_normal(10),
+                "b": np.random.default_rng(2).standard_normal(10),
+            }
+        )
+        a = df.a
+        b = df.b
+
+        expr = f"{fn}(a, b)"
+        got = self.eval(expr)
+        with np.errstate(all="ignore"):
+            expect = getattr(np, fn)(a, b)
+        tm.assert_almost_equal(got, expect, check_names=False)
+
+    def test_df_use_case(self, engine, parser):
+        df = DataFrame(
+            {
+                "a": np.random.default_rng(2).standard_normal(10),
+                "b": np.random.default_rng(2).standard_normal(10),
+            }
+        )
+        df.eval(
+            "e = arctan2(sin(a), b)",
+            engine=engine,
+            parser=parser,
+            inplace=True,
+        )
+        got = df.e
+        expect = np.arctan2(np.sin(df.a), df.b)
+        tm.assert_series_equal(got, expect, check_names=False)
+
+    def test_df_arithmetic_subexpression(self, engine, parser):
+        df = DataFrame(
+            {
+                "a": np.random.default_rng(2).standard_normal(10),
+                "b": np.random.default_rng(2).standard_normal(10),
+            }
+        )
+        df.eval("e = sin(a + b)", engine=engine, parser=parser, inplace=True)
+        got = df.e
+        expect = np.sin(df.a + df.b)
+        tm.assert_series_equal(got, expect, check_names=False)
+
+    @pytest.mark.parametrize(
+        "dtype, expect_dtype",
+        [
+            (np.int32, np.float64),
+            (np.int64, np.float64),
+            (np.float32, np.float32),
+            (np.float64, np.float64),
+            pytest.param(np.complex128, np.complex128, marks=td.skip_if_windows),
+        ],
+    )
+    def test_result_types(self, dtype, expect_dtype, engine, parser):
+        # xref https://github.com/pandas-dev/pandas/issues/12293
+        # this fails on Windows, apparently a floating point precision issue
+
+        # Did not test complex64 because DataFrame is converting it to
+        # complex128. Due to https://github.com/pandas-dev/pandas/issues/10952
+        df = DataFrame(
+            {"a": np.random.default_rng(2).standard_normal(10).astype(dtype)}
+        )
+        assert df.a.dtype == dtype
+        df.eval("b = sin(a)", engine=engine, parser=parser, inplace=True)
+        got = df.b
+        expect = np.sin(df.a)
+        assert expect.dtype == got.dtype
+        assert expect_dtype == got.dtype
+        tm.assert_series_equal(got, expect, check_names=False)
+
+    def test_undefined_func(self, engine, parser):
+        df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)})
+        msg = '"mysin" is not a supported function'
+
+        with pytest.raises(ValueError, match=msg):
+            df.eval("mysin(a)", engine=engine, parser=parser)
+
+    def test_keyword_arg(self, engine, parser):
+        df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)})
+        msg = 'Function "sin" does not support keyword arguments'
+
+        with pytest.raises(TypeError, match=msg):
+            df.eval("sin(x=a)", engine=engine, parser=parser)
+
+
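+# Module-level global used below by TestScope to exercise global-scope
+# variable resolution in pd.eval.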
+_var_s = np.random.default_rng(2).standard_normal(10)
+
+
+class TestScope:
+    def test_global_scope(self, engine, parser):
+        e = "_var_s * 2"
+        tm.assert_numpy_array_equal(
+            _var_s * 2, pd.eval(e, engine=engine, parser=parser)
+        )
+
+    def test_no_new_locals(self, engine, parser):
+        x = 1
+        lcls = locals().copy()
+        pd.eval("x + 1", local_dict=lcls, engine=engine, parser=parser)
+        lcls2 = locals().copy()
+        lcls2.pop("lcls")
+        assert lcls == lcls2
+
+    def test_no_new_globals(self, engine, parser):
+        x = 1  # noqa: F841
+        gbls = globals().copy()
+        pd.eval("x + 1", engine=engine, parser=parser)
+        gbls2 = globals().copy()
+        assert gbls == gbls2
+
+    def test_empty_locals(self, engine, parser):
+        # GH 47084
+        x = 1  # noqa: F841
+        msg = "name 'x' is not defined"
+        with pytest.raises(UndefinedVariableError, match=msg):
+            pd.eval("x + 1", engine=engine, parser=parser, local_dict={})
+
+    def test_empty_globals(self, engine, parser):
+        # GH 47084
+        msg = "name '_var_s' is not defined"
+        e = "_var_s * 2"
+        with pytest.raises(UndefinedVariableError, match=msg):
+            pd.eval(e, engine=engine, parser=parser, global_dict={})
+
+
+@td.skip_if_no("numexpr")
+def test_invalid_engine():
+    msg = "Invalid engine 'asdf' passed"
+    with pytest.raises(KeyError, match=msg):
+        pd.eval("x + y", local_dict={"x": 1, "y": 2}, engine="asdf")
+
+
+@td.skip_if_no("numexpr")
+@pytest.mark.parametrize(
+    ("use_numexpr", "expected"),
+    (
+        (True, "numexpr"),
+        (False, "python"),
+    ),
+)
+def test_numexpr_option_respected(use_numexpr, expected):
+    # GH 32556
+    from pandas.core.computation.eval import _check_engine
+
+    with pd.option_context("compute.use_numexpr", use_numexpr):
+        result = _check_engine(None)
+        assert result == expected
+
+
+@td.skip_if_no("numexpr")
+def test_numexpr_option_incompatible_op():
+    # GH 32556
+    with pd.option_context("compute.use_numexpr", False):
+        df = DataFrame(
+            {"A": [True, False, True, False, None, None], "B": [1, 2, 3, 4, 5, 6]}
+        )
+        result = df.query("A.isnull()")
+        expected = DataFrame({"A": [None, None], "B": [5, 6]}, index=[4, 5])
+        tm.assert_frame_equal(result, expected)
+
+
+@td.skip_if_no("numexpr")
+def test_invalid_parser():
+    msg = "Invalid parser 'asdf' passed"
+    with pytest.raises(KeyError, match=msg):
+        pd.eval("x + y", local_dict={"x": 1, "y": 2}, parser="asdf")
+
+
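+# Mapping used by test_disallowed_nodes: every visitor class, including the
+# PyTables one, must raise NotImplementedError for its unsupported AST nodes.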
+_parsers: dict[str, type[BaseExprVisitor]] = {
+    "python": PythonExprVisitor,
+    "pytables": pytables.PyTablesExprVisitor,
+    "pandas": PandasExprVisitor,
+}
+
+
+@pytest.mark.parametrize("engine", ENGINES)
+@pytest.mark.parametrize("parser", _parsers)
+def test_disallowed_nodes(engine, parser):
+    VisitorClass = _parsers[parser]
+    inst = VisitorClass("x + 1", engine, parser)
+
+    for ops in VisitorClass.unsupported_nodes:
+        msg = "nodes are not implemented"
+        with pytest.raises(NotImplementedError, match=msg):
+            getattr(inst, ops)()
+
+
+def test_syntax_error_exprs(engine, parser):
+    e = "s +"
+    with pytest.raises(SyntaxError, match="invalid syntax"):
+        pd.eval(e, engine=engine, parser=parser)
+
+
+def test_name_error_exprs(engine, parser):
+    e = "s + t"
+    msg = "name 's' is not defined"
+    with pytest.raises(NameError, match=msg):
+        pd.eval(e, engine=engine, parser=parser)
+
+
+@pytest.mark.parametrize("express", ["a + @b", "@a + b", "@a + @b"])
+def test_invalid_local_variable_reference(engine, parser, express):
+    a, b = 1, 2  # noqa: F841
+
+    if parser != "pandas":
+        with pytest.raises(SyntaxError, match="The '@' prefix is only"):
+            pd.eval(express, engine=engine, parser=parser)
+    else:
+        with pytest.raises(SyntaxError, match="The '@' prefix is not"):
+            pd.eval(express, engine=engine, parser=parser)
+
+
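+# numexpr reserves the names of its built-in functions (e.g. "sin"), so a
+# local variable shadowing one raises NumExprClobberingError under that engine.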
+def test_numexpr_builtin_raises(engine, parser):
+    sin, dotted_line = 1, 2
+    if engine == "numexpr":
+        msg = "Variables in expression .+"
+        with pytest.raises(NumExprClobberingError, match=msg):
+            pd.eval("sin + dotted_line", engine=engine, parser=parser)
+    else:
+        res = pd.eval("sin + dotted_line", engine=engine, parser=parser)
+        assert res == sin + dotted_line
+
+
+def test_bad_resolver_raises(engine, parser):
+    cannot_resolve = 42, 3.0
+    with pytest.raises(TypeError, match="Resolver of type .+"):
+        pd.eval("1 + 2", resolvers=cannot_resolve, engine=engine, parser=parser)
+
+
+def test_empty_string_raises(engine, parser):
+    # GH 13139
+    with pytest.raises(ValueError, match="expr cannot be an empty string"):
+        pd.eval("", engine=engine, parser=parser)
+
+
+def test_more_than_one_expression_raises(engine, parser):
+    with pytest.raises(SyntaxError, match="only a single expression is allowed"):
+        pd.eval("1 + 1; 2 + 2", engine=engine, parser=parser)
+
+
+@pytest.mark.parametrize("cmp", ("and", "or"))
+@pytest.mark.parametrize("lhs", (int, float))
+@pytest.mark.parametrize("rhs", (int, float))
+def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
+    gen = {
+        int: lambda: np.random.default_rng(2).integers(10),
+        float: np.random.default_rng(2).standard_normal,
+    }
+
+    mid = gen[lhs]()  # noqa: F841
+    lhs = gen[lhs]()
+    rhs = gen[rhs]()
+
+    ex1 = f"lhs {cmp} mid {cmp} rhs"
+    ex2 = f"lhs {cmp} mid and mid {cmp} rhs"
+    ex3 = f"(lhs {cmp} mid) & (mid {cmp} rhs)"
+    for ex in (ex1, ex2, ex3):
+        msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not"
+        with pytest.raises(NotImplementedError, match=msg):
+            pd.eval(ex, engine=engine, parser=parser)
+
+
+@pytest.mark.parametrize(
+    "other",
+    [
+        "'x'",
+        "...",
+    ],
+)
+def test_equals_various(other):
+    df = DataFrame({"A": ["a", "b", "c"]}, dtype=object)
+    result = df.eval(f"A == {other}")
+    expected = Series([False, False, False], name="A")
+    if USE_NUMEXPR:
+        # https://github.com/pandas-dev/pandas/issues/10239
+        # lose name with numexpr engine. Remove when that's fixed.
+        expected.name = None
+    tm.assert_series_equal(result, expected)
+
+
+def test_inf(engine, parser):
+    s = "inf + 1"
+    expected = np.inf
+    result = pd.eval(s, engine=engine, parser=parser)
+    assert result == expected
+
+
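+# Backtick quoting lets query()/eval() refer to column labels that are not
+# valid Python identifiers, such as names containing units (GH 42826).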
+@pytest.mark.parametrize("column", ["Temp(°C)", "Capacitance(μF)"])
+def test_query_token(engine, column):
+    # See: https://github.com/pandas-dev/pandas/pull/42826
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((5, 2)), columns=[column, "b"]
+    )
+    expected = df[df[column] > 5]
+    query_string = f"`{column}` > 5"
+    result = df.query(query_string, engine=engine)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_negate_lt_eq_le(engine, parser):
+    df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"])
+    expected = df[~(df.cat > 0)]
+
+    result = df.query("~(cat > 0)", engine=engine, parser=parser)
+    tm.assert_frame_equal(result, expected)
+
+    if parser == "python":
+        msg = "'Not' nodes are not implemented"
+        with pytest.raises(NotImplementedError, match=msg):
+            df.query("not (cat > 0)", engine=engine, parser=parser)
+    else:
+        result = df.query("not (cat > 0)", engine=engine, parser=parser)
+        tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "column",
+    DEFAULT_GLOBALS.keys(),
+)
+def test_eval_no_support_column_name(request, column):
+    # GH 44603
+    if column in ["True", "False", "inf", "Inf"]:
+        request.applymarker(
+            pytest.mark.xfail(
+                raises=KeyError,
+                reason=f"GH 47859 DataFrame eval not supported with {column}",
+            )
+        )
+
+    df = DataFrame(
+        np.random.default_rng(2).integers(0, 100, size=(10, 2)),
+        columns=[column, "col1"],
+    )
+    expected = df[df[column] > 6]
+    result = df.query(f"{column}>6")
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_set_inplace(using_copy_on_write, warn_copy_on_write):
+    # https://github.com/pandas-dev/pandas/issues/47449
+    # Ensure we don't only update the DataFrame inplace, but also the actual
+    # column values, such that references to this column also get updated
+    df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
+    result_view = df[:]
+    ser = df["A"]
+    with tm.assert_cow_warning(warn_copy_on_write):
+        df.eval("A = B + C", inplace=True)
+    expected = DataFrame({"A": [11, 13, 15], "B": [4, 5, 6], "C": [7, 8, 9]})
+    tm.assert_frame_equal(df, expected)
+    if not using_copy_on_write:
+        tm.assert_series_equal(ser, expected["A"])
+        tm.assert_series_equal(result_view["A"], expected["A"])
+    else:
+        expected = Series([1, 2, 3], name="A")
+        tm.assert_series_equal(ser, expected)
+        tm.assert_series_equal(result_view["A"], expected)
+
+
+class TestValidate:
+    @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
+    def test_validate_bool_args(self, value):
+        msg = 'For argument "inplace" expected type bool, received type'
+        with pytest.raises(ValueError, match=msg):
+            pd.eval("2+2", inplace=value)
emu3/lib/python3.10/site-packages/pandas/tests/series/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (169 Bytes). View file
 
emu3/lib/python3.10/site-packages/pandas/tests/series/__pycache__/test_api.cpython-310.pyc ADDED
Binary file (10.6 kB). View file