diff --git a/emu3/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e8bca2a7c77516e4241023c51bc64e201f2f133
Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/emu3/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0ae6076354742fa446636fc4798ac20968a4c5a
Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc differ
diff --git a/emu3/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6c23252fcadd69cad9e0caf0d2c3de67113294f
Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc differ
diff --git a/emu3/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf44f525b53315e0a0fee4e4e8d5499f7ec1c793
Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc differ
diff --git a/emu3/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d74272bc59119b65f91f619d54f3f40cdf07da5
Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc differ
diff --git a/emu3/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3074c63e58b25e2fbff9d5eebe377803c3410dc9
Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc differ
diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/__init__.py b/emu3/lib/python3.10/site-packages/pandas/_config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..97784c924dab46af4f40c291ef293b07b2997684
--- /dev/null
+++ b/emu3/lib/python3.10/site-packages/pandas/_config/__init__.py
@@ -0,0 +1,57 @@
+"""
+pandas._config is considered explicitly upstream of everything else in pandas,
+should have no intra-pandas dependencies.
+
+importing `dates` and `display` ensures that keys needed by _libs
+are initialized.
+""" +__all__ = [ + "config", + "detect_console_encoding", + "get_option", + "set_option", + "reset_option", + "describe_option", + "option_context", + "options", + "using_copy_on_write", + "warn_copy_on_write", +] +from pandas._config import config +from pandas._config import dates # pyright: ignore[reportUnusedImport] # noqa: F401 +from pandas._config.config import ( + _global_config, + describe_option, + get_option, + option_context, + options, + reset_option, + set_option, +) +from pandas._config.display import detect_console_encoding + + +def using_copy_on_write() -> bool: + _mode_options = _global_config["mode"] + return ( + _mode_options["copy_on_write"] is True + and _mode_options["data_manager"] == "block" + ) + + +def warn_copy_on_write() -> bool: + _mode_options = _global_config["mode"] + return ( + _mode_options["copy_on_write"] == "warn" + and _mode_options["data_manager"] == "block" + ) + + +def using_nullable_dtypes() -> bool: + _mode_options = _global_config["mode"] + return _mode_options["nullable_dtypes"] + + +def using_pyarrow_string_dtype() -> bool: + _mode_options = _global_config["future"] + return _mode_options["infer_string"] diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0edf7c2a246d39ce4891d5e1bc3040f6bfdd35fa Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d47ed83ba8ea34038fad8f4bf88f031b59c6e272 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21961335a0077c7db73d8bb81b4f6c0fe8bfef67 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0b27c88842c16ee0592059a0adef389f087899c Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f527f96b05929ea19bb8bb4bd30f0e67bfaa4059 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/config.py b/emu3/lib/python3.10/site-packages/pandas/_config/config.py new file mode 100644 index 0000000000000000000000000000000000000000..c391939d22491099652e13ad81e83b201f140b60 --- /dev/null +++ 
b/emu3/lib/python3.10/site-packages/pandas/_config/config.py @@ -0,0 +1,948 @@ +""" +The config module holds package-wide configurables and provides +a uniform API for working with them. + +Overview +======== + +This module supports the following requirements: +- options are referenced using keys in dot.notation, e.g. "x.y.option - z". +- keys are case-insensitive. +- functions should accept partial/regex keys, when unambiguous. +- options can be registered by modules at import time. +- options can be registered at init-time (via core.config_init) +- options have a default value, and (optionally) a description and + validation function associated with them. +- options can be deprecated, in which case referencing them + should produce a warning. +- deprecated options can optionally be rerouted to a replacement + so that accessing a deprecated option reroutes to a differently + named option. +- options can be reset to their default value. +- all option can be reset to their default value at once. +- all options in a certain sub - namespace can be reset at once. +- the user can set / get / reset or ask for the description of an option. +- a developer can register and mark an option as deprecated. +- you can register a callback to be invoked when the option value + is set or reset. Changing the stored value is considered misuse, but + is not verboten. + +Implementation +============== + +- Data is stored using nested dictionaries, and should be accessed + through the provided API. + +- "Registered options" and "Deprecated options" have metadata associated + with them, which are stored in auxiliary dictionaries keyed on the + fully-qualified key, e.g. "x.y.z.option". + +- the config_init module is imported by the package's __init__.py file. + placing any register_option() calls there will ensure those options + are available as soon as pandas is loaded. If you use register_option + in a module, it will only be available after that module is imported, + which you should be aware of. + +- `config_prefix` is a context_manager (for use with the `with` keyword) + which can save developers some typing, see the docstring. + +""" + +from __future__ import annotations + +from contextlib import ( + ContextDecorator, + contextmanager, +) +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + NamedTuple, + cast, +) +import warnings + +from pandas._typing import ( + F, + T, +) +from pandas.util._exceptions import find_stack_level + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Iterable, + ) + + +class DeprecatedOption(NamedTuple): + key: str + msg: str | None + rkey: str | None + removal_ver: str | None + + +class RegisteredOption(NamedTuple): + key: str + defval: object + doc: str + validator: Callable[[object], Any] | None + cb: Callable[[str], Any] | None + + +# holds deprecated option metadata +_deprecated_options: dict[str, DeprecatedOption] = {} + +# holds registered option metadata +_registered_options: dict[str, RegisteredOption] = {} + +# holds the current values for registered options +_global_config: dict[str, Any] = {} + +# keys which have a special meaning +_reserved_keys: list[str] = ["all"] + + +class OptionError(AttributeError, KeyError): + """ + Exception raised for pandas.options. + + Backwards compatible with KeyError checks. 
+
+    Examples
+    --------
+    >>> pd.options.context
+    Traceback (most recent call last):
+    OptionError: No such option
+    """
+
+
+#
+# User API
+
+
+def _get_single_key(pat: str, silent: bool) -> str:
+    keys = _select_options(pat)
+    if len(keys) == 0:
+        if not silent:
+            _warn_if_deprecated(pat)
+        raise OptionError(f"No such keys(s): {repr(pat)}")
+    if len(keys) > 1:
+        raise OptionError("Pattern matched multiple keys")
+    key = keys[0]
+
+    if not silent:
+        _warn_if_deprecated(key)
+
+    key = _translate_key(key)
+
+    return key
+
+
+def _get_option(pat: str, silent: bool = False) -> Any:
+    key = _get_single_key(pat, silent)
+
+    # walk the nested dict
+    root, k = _get_root(key)
+    return root[k]
+
+
+def _set_option(*args, **kwargs) -> None:
+    # must at least 1 arg deal with constraints later
+    nargs = len(args)
+    if not nargs or nargs % 2 != 0:
+        raise ValueError("Must provide an even number of non-keyword arguments")
+
+    # default to false
+    silent = kwargs.pop("silent", False)
+
+    if kwargs:
+        kwarg = next(iter(kwargs.keys()))
+        raise TypeError(f'_set_option() got an unexpected keyword argument "{kwarg}"')
+
+    for k, v in zip(args[::2], args[1::2]):
+        key = _get_single_key(k, silent)
+
+        o = _get_registered_option(key)
+        if o and o.validator:
+            o.validator(v)
+
+        # walk the nested dict
+        root, k_root = _get_root(key)
+        root[k_root] = v
+
+        if o.cb:
+            if silent:
+                with warnings.catch_warnings(record=True):
+                    o.cb(key)
+            else:
+                o.cb(key)
+
+
+def _describe_option(pat: str = "", _print_desc: bool = True) -> str | None:
+    keys = _select_options(pat)
+    if len(keys) == 0:
+        raise OptionError("No such keys(s)")
+
+    s = "\n".join([_build_option_description(k) for k in keys])
+
+    if _print_desc:
+        print(s)
+        return None
+    return s
+
+
+def _reset_option(pat: str, silent: bool = False) -> None:
+    keys = _select_options(pat)
+
+    if len(keys) == 0:
+        raise OptionError("No such keys(s)")
+
+    if len(keys) > 1 and len(pat) < 4 and pat != "all":
+        raise ValueError(
+            "You must specify at least 4 characters when "
+            "resetting multiple keys, use the special keyword "
+            '"all" to reset all the options to their default value'
+        )
+
+    for k in keys:
+        _set_option(k, _registered_options[k].defval, silent=silent)
+
+
+def get_default_val(pat: str):
+    key = _get_single_key(pat, silent=True)
+    return _get_registered_option(key).defval
+
+
+class DictWrapper:
+    """provide attribute-style access to a nested dict"""
+
+    d: dict[str, Any]
+
+    def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
+        object.__setattr__(self, "d", d)
+        object.__setattr__(self, "prefix", prefix)
+
+    def __setattr__(self, key: str, val: Any) -> None:
+        prefix = object.__getattribute__(self, "prefix")
+        if prefix:
+            prefix += "."
+        prefix += key
+        # you can't set new keys
+        # can you can't overwrite subtrees
+        if key in self.d and not isinstance(self.d[key], dict):
+            _set_option(prefix, val)
+        else:
+            raise OptionError("You can only set the value of existing options")
+
+    def __getattr__(self, key: str):
+        prefix = object.__getattribute__(self, "prefix")
+        if prefix:
+            prefix += "."
+        prefix += key
+        try:
+            v = object.__getattribute__(self, "d")[key]
+        except KeyError as err:
+            raise OptionError("No such option") from err
+        if isinstance(v, dict):
+            return DictWrapper(v, prefix)
+        else:
+            return _get_option(prefix)
+
+    def __dir__(self) -> list[str]:
+        return list(self.d.keys())
+
+
+# For user convenience, we'd like to have the available options described
+# in the docstring. For dev convenience we'd like to generate the docstrings
+# dynamically instead of maintaining them by hand. To this, we use the
+# class below which wraps functions inside a callable, and converts
+# __doc__ into a property function. The doctsrings below are templates
+# using the py2.6+ advanced formatting syntax to plug in a concise list
+# of options, and option descriptions.
+
+
+class CallableDynamicDoc(Generic[T]):
+    def __init__(self, func: Callable[..., T], doc_tmpl: str) -> None:
+        self.__doc_tmpl__ = doc_tmpl
+        self.__func__ = func
+
+    def __call__(self, *args, **kwds) -> T:
+        return self.__func__(*args, **kwds)
+
+    # error: Signature of "__doc__" incompatible with supertype "object"
+    @property
+    def __doc__(self) -> str:  # type: ignore[override]
+        opts_desc = _describe_option("all", _print_desc=False)
+        opts_list = pp_options_list(list(_registered_options.keys()))
+        return self.__doc_tmpl__.format(opts_desc=opts_desc, opts_list=opts_list)
+
+
+_get_option_tmpl = """
+get_option(pat)
+
+Retrieves the value of the specified option.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str
+    Regexp which should match a single option.
+    Note: partial matches are supported for convenience, but unless you use the
+    full option name (e.g. x.y.z.option_name), your code may break in future
+    versions if new options with similar names are introduced.
+
+Returns
+-------
+result : the value of the option
+
+Raises
+------
+OptionError : if no such option exists
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with its descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.get_option('display.max_columns')  # doctest: +SKIP
+4
+"""
+
+_set_option_tmpl = """
+set_option(pat, value)
+
+Sets the value of the specified option.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str
+    Regexp which should match a single option.
+    Note: partial matches are supported for convenience, but unless you use the
+    full option name (e.g. x.y.z.option_name), your code may break in future
+    versions if new options with similar names are introduced.
+value : object
+    New value of option.
+
+Returns
+-------
+None
+
+Raises
+------
+OptionError if no such option exists
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with its descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.set_option('display.max_columns', 4)
+>>> df = pd.DataFrame([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
+>>> df
+   0  1  ...  3   4
+0  1  2  ...  4   5
+1  6  7  ...  9  10
+[2 rows x 5 columns]
+>>> pd.reset_option('display.max_columns')
+"""
+
+_describe_option_tmpl = """
+describe_option(pat, _print_desc=False)
+
+Prints the description for one or more registered options.
+
+Call with no arguments to get a listing for all registered options.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str
+    Regexp pattern. All matching keys will have their description displayed.
+_print_desc : bool, default True
+    If True (default) the description(s) will be printed to stdout.
+    Otherwise, the description(s) will be returned as a unicode string
+    (for testing).
+
+Returns
+-------
+None by default, the description(s) as a unicode string if _print_desc
+is False
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with its descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.describe_option('display.max_columns')  # doctest: +SKIP
+display.max_columns : int
+    If max_cols is exceeded, switch to truncate view...
+"""
+
+_reset_option_tmpl = """
+reset_option(pat)
+
+Reset one or more options to their default value.
+
+Pass "all" as argument to reset all options.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str/regex
+    If specified only options matching `prefix*` will be reset.
+    Note: partial matches are supported for convenience, but unless you
+    use the full option name (e.g. x.y.z.option_name), your code may break
+    in future versions if new options with similar names are introduced.
+
+Returns
+-------
+None
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with its descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.reset_option('display.max_columns')  # doctest: +SKIP
+"""
+
+# bind the functions with their docstrings into a Callable
+# and use that as the functions exposed in pd.api
+get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
+set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
+reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
+describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
+options = DictWrapper(_global_config)
+
+#
+# Functions for use by pandas developers, in addition to User - api
+
+
+class option_context(ContextDecorator):
+    """
+    Context manager to temporarily set options in the `with` statement context.
+
+    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
+
+    Examples
+    --------
+    >>> from pandas import option_context
+    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
+    ...     pass
+    """
+
+    def __init__(self, *args) -> None:
+        if len(args) % 2 != 0 or len(args) < 2:
+            raise ValueError(
+                "Need to invoke as option_context(pat, val, [(pat, val), ...])."
+            )
+
+        self.ops = list(zip(args[::2], args[1::2]))
+
+    def __enter__(self) -> None:
+        self.undo = [(pat, _get_option(pat)) for pat, val in self.ops]
+
+        for pat, val in self.ops:
+            _set_option(pat, val, silent=True)
+
+    def __exit__(self, *args) -> None:
+        if self.undo:
+            for pat, val in self.undo:
+                _set_option(pat, val, silent=True)
+
+
+def register_option(
+    key: str,
+    defval: object,
+    doc: str = "",
+    validator: Callable[[object], Any] | None = None,
+    cb: Callable[[str], Any] | None = None,
+) -> None:
+    """
+    Register an option in the package-wide pandas config object
+
+    Parameters
+    ----------
+    key : str
+        Fully-qualified key, e.g. "x.y.option - z".
+    defval : object
+        Default value of the option.
+    doc : str
+        Description of the option.
+    validator : Callable, optional
+        Function of a single argument, should raise `ValueError` if
+        called with a value which is not a legal value for the option.
+    cb
+        a function of a single argument "key", which is called
+        immediately after an option value is set/reset. key is
+        the full name of the option.
+
+    Raises
+    ------
+    ValueError if `validator` is specified and `defval` is not a valid value.
+ + """ + import keyword + import tokenize + + key = key.lower() + + if key in _registered_options: + raise OptionError(f"Option '{key}' has already been registered") + if key in _reserved_keys: + raise OptionError(f"Option '{key}' is a reserved key") + + # the default value should be legal + if validator: + validator(defval) + + # walk the nested dict, creating dicts as needed along the path + path = key.split(".") + + for k in path: + if not re.match("^" + tokenize.Name + "$", k): + raise ValueError(f"{k} is not a valid identifier") + if keyword.iskeyword(k): + raise ValueError(f"{k} is a python keyword") + + cursor = _global_config + msg = "Path prefix to option '{option}' is already an option" + + for i, p in enumerate(path[:-1]): + if not isinstance(cursor, dict): + raise OptionError(msg.format(option=".".join(path[:i]))) + if p not in cursor: + cursor[p] = {} + cursor = cursor[p] + + if not isinstance(cursor, dict): + raise OptionError(msg.format(option=".".join(path[:-1]))) + + cursor[path[-1]] = defval # initialize + + # save the option metadata + _registered_options[key] = RegisteredOption( + key=key, defval=defval, doc=doc, validator=validator, cb=cb + ) + + +def deprecate_option( + key: str, + msg: str | None = None, + rkey: str | None = None, + removal_ver: str | None = None, +) -> None: + """ + Mark option `key` as deprecated, if code attempts to access this option, + a warning will be produced, using `msg` if given, or a default message + if not. + if `rkey` is given, any access to the key will be re-routed to `rkey`. + + Neither the existence of `key` nor that if `rkey` is checked. If they + do not exist, any subsequence access will fail as usual, after the + deprecation warning is given. + + Parameters + ---------- + key : str + Name of the option to be deprecated. + must be a fully-qualified option name (e.g "x.y.z.rkey"). + msg : str, optional + Warning message to output when the key is referenced. + if no message is given a default message will be emitted. + rkey : str, optional + Name of an option to reroute access to. + If specified, any referenced `key` will be + re-routed to `rkey` including set/get/reset. + rkey must be a fully-qualified option name (e.g "x.y.z.rkey"). + used by the default message if no `msg` is specified. + removal_ver : str, optional + Specifies the version in which this option will + be removed. used by the default message if no `msg` is specified. + + Raises + ------ + OptionError + If the specified key has already been deprecated. 
+ """ + key = key.lower() + + if key in _deprecated_options: + raise OptionError(f"Option '{key}' has already been defined as deprecated.") + + _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver) + + +# +# functions internal to the module + + +def _select_options(pat: str) -> list[str]: + """ + returns a list of keys matching `pat` + + if pat=="all", returns all registered options + """ + # short-circuit for exact key + if pat in _registered_options: + return [pat] + + # else look through all of them + keys = sorted(_registered_options.keys()) + if pat == "all": # reserved key + return keys + + return [k for k in keys if re.search(pat, k, re.I)] + + +def _get_root(key: str) -> tuple[dict[str, Any], str]: + path = key.split(".") + cursor = _global_config + for p in path[:-1]: + cursor = cursor[p] + return cursor, path[-1] + + +def _is_deprecated(key: str) -> bool: + """Returns True if the given option has been deprecated""" + key = key.lower() + return key in _deprecated_options + + +def _get_deprecated_option(key: str): + """ + Retrieves the metadata for a deprecated option, if `key` is deprecated. + + Returns + ------- + DeprecatedOption (namedtuple) if key is deprecated, None otherwise + """ + try: + d = _deprecated_options[key] + except KeyError: + return None + else: + return d + + +def _get_registered_option(key: str): + """ + Retrieves the option metadata if `key` is a registered option. + + Returns + ------- + RegisteredOption (namedtuple) if key is deprecated, None otherwise + """ + return _registered_options.get(key) + + +def _translate_key(key: str) -> str: + """ + if key id deprecated and a replacement key defined, will return the + replacement key, otherwise returns `key` as - is + """ + d = _get_deprecated_option(key) + if d: + return d.rkey or key + else: + return key + + +def _warn_if_deprecated(key: str) -> bool: + """ + Checks if `key` is a deprecated option and if so, prints a warning. + + Returns + ------- + bool - True if `key` is deprecated, False otherwise. + """ + d = _get_deprecated_option(key) + if d: + if d.msg: + warnings.warn( + d.msg, + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + msg = f"'{key}' is deprecated" + if d.removal_ver: + msg += f" and will be removed in {d.removal_ver}" + if d.rkey: + msg += f", please use '{d.rkey}' instead." + else: + msg += ", please refrain from using it." + + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + return True + return False + + +def _build_option_description(k: str) -> str: + """Builds a formatted description of a registered option and prints it""" + o = _get_registered_option(k) + d = _get_deprecated_option(k) + + s = f"{k} " + + if o.doc: + s += "\n".join(o.doc.strip().split("\n")) + else: + s += "No description available." + + if o: + s += f"\n [default: {o.defval}] [currently: {_get_option(k, True)}]" + + if d: + rkey = d.rkey or "" + s += "\n (Deprecated" + s += f", use `{rkey}` instead." 
+ s += ")" + + return s + + +def pp_options_list(keys: Iterable[str], width: int = 80, _print: bool = False): + """Builds a concise listing of available options, grouped by prefix""" + from itertools import groupby + from textwrap import wrap + + def pp(name: str, ks: Iterable[str]) -> list[str]: + pfx = "- " + name + ".[" if name else "" + ls = wrap( + ", ".join(ks), + width, + initial_indent=pfx, + subsequent_indent=" ", + break_long_words=False, + ) + if ls and ls[-1] and name: + ls[-1] = ls[-1] + "]" + return ls + + ls: list[str] = [] + singles = [x for x in sorted(keys) if x.find(".") < 0] + if singles: + ls += pp("", singles) + keys = [x for x in keys if x.find(".") >= 0] + + for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]): + ks = [x[len(k) + 1 :] for x in list(g)] + ls += pp(k, ks) + s = "\n".join(ls) + if _print: + print(s) + else: + return s + + +# +# helpers + + +@contextmanager +def config_prefix(prefix: str) -> Generator[None, None, None]: + """ + contextmanager for multiple invocations of API with a common prefix + + supported API functions: (register / get / set )__option + + Warning: This is not thread - safe, and won't work properly if you import + the API functions into your module using the "from x import y" construct. + + Example + ------- + import pandas._config.config as cf + with cf.config_prefix("display.font"): + cf.register_option("color", "red") + cf.register_option("size", " 5 pt") + cf.set_option(size, " 6 pt") + cf.get_option(size) + ... + + etc' + + will register options "display.font.color", "display.font.size", set the + value of "display.font.size"... and so on. + """ + # Note: reset_option relies on set_option, and on key directly + # it does not fit in to this monkey-patching scheme + + global register_option, get_option, set_option + + def wrap(func: F) -> F: + def inner(key: str, *args, **kwds): + pkey = f"{prefix}.{key}" + return func(pkey, *args, **kwds) + + return cast(F, inner) + + _register_option = register_option + _get_option = get_option + _set_option = set_option + set_option = wrap(set_option) + get_option = wrap(get_option) + register_option = wrap(register_option) + try: + yield + finally: + set_option = _set_option + get_option = _get_option + register_option = _register_option + + +# These factories and methods are handy for use as the validator +# arg in register_option + + +def is_type_factory(_type: type[Any]) -> Callable[[Any], None]: + """ + + Parameters + ---------- + `_type` - a type to be compared against (e.g. 
type(x) == `_type`) + + Returns + ------- + validator - a function of a single argument x , which raises + ValueError if type(x) is not equal to `_type` + + """ + + def inner(x) -> None: + if type(x) != _type: + raise ValueError(f"Value must have type '{_type}'") + + return inner + + +def is_instance_factory(_type) -> Callable[[Any], None]: + """ + + Parameters + ---------- + `_type` - the type to be checked against + + Returns + ------- + validator - a function of a single argument x , which raises + ValueError if x is not an instance of `_type` + + """ + if isinstance(_type, (tuple, list)): + _type = tuple(_type) + type_repr = "|".join(map(str, _type)) + else: + type_repr = f"'{_type}'" + + def inner(x) -> None: + if not isinstance(x, _type): + raise ValueError(f"Value must be an instance of {type_repr}") + + return inner + + +def is_one_of_factory(legal_values) -> Callable[[Any], None]: + callables = [c for c in legal_values if callable(c)] + legal_values = [c for c in legal_values if not callable(c)] + + def inner(x) -> None: + if x not in legal_values: + if not any(c(x) for c in callables): + uvals = [str(lval) for lval in legal_values] + pp_values = "|".join(uvals) + msg = f"Value must be one of {pp_values}" + if len(callables): + msg += " or a callable" + raise ValueError(msg) + + return inner + + +def is_nonnegative_int(value: object) -> None: + """ + Verify that value is None or a positive int. + + Parameters + ---------- + value : None or int + The `value` to be checked. + + Raises + ------ + ValueError + When the value is not None or is a negative integer + """ + if value is None: + return + + elif isinstance(value, int): + if value >= 0: + return + + msg = "Value must be a nonnegative integer or None" + raise ValueError(msg) + + +# common type validators, for convenience +# usage: register_option(... , validator = is_int) +is_int = is_type_factory(int) +is_bool = is_type_factory(bool) +is_float = is_type_factory(float) +is_str = is_type_factory(str) +is_text = is_instance_factory((str, bytes)) + + +def is_callable(obj) -> bool: + """ + + Parameters + ---------- + `obj` - the object to be checked + + Returns + ------- + validator - returns True if object is callable + raises ValueError otherwise. 
+ + """ + if not callable(obj): + raise ValueError("Value must be a callable") + return True diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/dates.py b/emu3/lib/python3.10/site-packages/pandas/_config/dates.py new file mode 100644 index 0000000000000000000000000000000000000000..b37831f96eb73bf2f128929a1769db6c141eebad --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/_config/dates.py @@ -0,0 +1,25 @@ +""" +config for datetime formatting +""" +from __future__ import annotations + +from pandas._config import config as cf + +pc_date_dayfirst_doc = """ +: boolean + When True, prints and parses dates with the day first, eg 20/01/2005 +""" + +pc_date_yearfirst_doc = """ +: boolean + When True, prints and parses dates with the year first, eg 2005/01/20 +""" + +with cf.config_prefix("display"): + # Needed upstream of `_libs` because these are used in tslibs.parsing + cf.register_option( + "date_dayfirst", False, pc_date_dayfirst_doc, validator=cf.is_bool + ) + cf.register_option( + "date_yearfirst", False, pc_date_yearfirst_doc, validator=cf.is_bool + ) diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/display.py b/emu3/lib/python3.10/site-packages/pandas/_config/display.py new file mode 100644 index 0000000000000000000000000000000000000000..df2c3ad36c855d77c33d80c78c3d83ab3c09d5f9 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/_config/display.py @@ -0,0 +1,62 @@ +""" +Unopinionated display configuration. +""" + +from __future__ import annotations + +import locale +import sys + +from pandas._config import config as cf + +# ----------------------------------------------------------------------------- +# Global formatting options +_initial_defencoding: str | None = None + + +def detect_console_encoding() -> str: + """ + Try to find the most capable encoding supported by the console. + slightly modified from the way IPython handles the same issue. + """ + global _initial_defencoding + + encoding = None + try: + encoding = sys.stdout.encoding or sys.stdin.encoding + except (AttributeError, OSError): + pass + + # try again for something better + if not encoding or "ascii" in encoding.lower(): + try: + encoding = locale.getpreferredencoding() + except locale.Error: + # can be raised by locale.setlocale(), which is + # called by getpreferredencoding + # (on some systems, see stdlib locale docs) + pass + + # when all else fails. this will usually be "ascii" + if not encoding or "ascii" in encoding.lower(): + encoding = sys.getdefaultencoding() + + # GH#3360, save the reported defencoding at import time + # MPL backends may change it. Make available for debugging. + if not _initial_defencoding: + _initial_defencoding = sys.getdefaultencoding() + + return encoding + + +pc_encoding_doc = """ +: str/unicode + Defaults to the detected encoding of the console. + Specifies the encoding to be used for strings returned by to_string, + these are generally strings meant to be displayed on the console. +""" + +with cf.config_prefix("display"): + cf.register_option( + "encoding", detect_console_encoding(), pc_encoding_doc, validator=cf.is_text + ) diff --git a/emu3/lib/python3.10/site-packages/pandas/_config/localization.py b/emu3/lib/python3.10/site-packages/pandas/_config/localization.py new file mode 100644 index 0000000000000000000000000000000000000000..5c1a0ff1395334a55baa6c5d77a71635872fe824 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/_config/localization.py @@ -0,0 +1,172 @@ +""" +Helpers for configuring locale settings. 
+
+Name `localization` is chosen to avoid overlap with builtin `locale` module.
+"""
+from __future__ import annotations
+
+from contextlib import contextmanager
+import locale
+import platform
+import re
+import subprocess
+from typing import TYPE_CHECKING
+
+from pandas._config.config import options
+
+if TYPE_CHECKING:
+    from collections.abc import Generator
+
+
+@contextmanager
+def set_locale(
+    new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
+) -> Generator[str | tuple[str, str], None, None]:
+    """
+    Context manager for temporarily setting a locale.
+
+    Parameters
+    ----------
+    new_locale : str or tuple
+        A string of the form <language_country>.<encoding>. For example to set
+        the current locale to US English with a UTF8 encoding, you would pass
+        "en_US.UTF-8".
+    lc_var : int, default `locale.LC_ALL`
+        The category of the locale being set.
+
+    Notes
+    -----
+    This is useful when you want to run a particular block of code under a
+    particular locale, without globally setting the locale. This probably isn't
+    thread-safe.
+    """
+    # getlocale is not always compliant with setlocale, use setlocale. GH#46595
+    current_locale = locale.setlocale(lc_var)
+
+    try:
+        locale.setlocale(lc_var, new_locale)
+        normalized_code, normalized_encoding = locale.getlocale()
+        if normalized_code is not None and normalized_encoding is not None:
+            yield f"{normalized_code}.{normalized_encoding}"
+        else:
+            yield new_locale
+    finally:
+        locale.setlocale(lc_var, current_locale)
+
+
+def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
+    """
+    Check to see if we can set a locale, and subsequently get the locale,
+    without raising an Exception.
+
+    Parameters
+    ----------
+    lc : str
+        The locale to attempt to set.
+    lc_var : int, default `locale.LC_ALL`
+        The category of the locale being set.
+
+    Returns
+    -------
+    bool
+        Whether the passed locale can be set
+    """
+    try:
+        with set_locale(lc, lc_var=lc_var):
+            pass
+    except (ValueError, locale.Error):
+        # horrible name for a Exception subclass
+        return False
+    else:
+        return True
+
+
+def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]:
+    """
+    Return a list of normalized locales that do not throw an ``Exception``
+    when set.
+
+    Parameters
+    ----------
+    locales : str
+        A string where each locale is separated by a newline.
+    normalize : bool
+        Whether to call ``locale.normalize`` on each locale.
+
+    Returns
+    -------
+    valid_locales : list
+        A list of valid locales.
+    """
+    return [
+        loc
+        for loc in (
+            locale.normalize(loc.strip()) if normalize else loc.strip()
+            for loc in locales
+        )
+        if can_set_locale(loc)
+    ]
+
+
+def get_locales(
+    prefix: str | None = None,
+    normalize: bool = True,
+) -> list[str]:
+    """
+    Get all the locales that are available on the system.
+
+    Parameters
+    ----------
+    prefix : str
+        If not ``None`` then return only those locales with the prefix
+        provided. For example to get all English language locales (those that
+        start with ``"en"``), pass ``prefix="en"``.
+    normalize : bool
+        Call ``locale.normalize`` on the resulting list of available locales.
+        If ``True``, only locales that can be set without throwing an
+        ``Exception`` are returned.
+
+    Returns
+    -------
+    locales : list of strings
+        A list of locale strings that can be set with ``locale.setlocale()``.
+        For example::
+
+            locale.setlocale(locale.LC_ALL, locale_string)
+
+    On error will return an empty list (no locale available, e.g. Windows)
+
+    """
+    if platform.system() in ("Linux", "Darwin"):
+        raw_locales = subprocess.check_output(["locale", "-a"])
+    else:
+        # Other platforms e.g. windows platforms don't define "locale -a"
+        #  Note: is_platform_windows causes circular import here
+        return []
+
+    try:
+        # raw_locales is "\n" separated list of locales
+        # it may contain non-decodable parts, so split
+        # extract what we can and then rejoin.
+        split_raw_locales = raw_locales.split(b"\n")
+        out_locales = []
+        for x in split_raw_locales:
+            try:
+                out_locales.append(str(x, encoding=options.display.encoding))
+            except UnicodeError:
+                # 'locale -a' is used to populated 'raw_locales' and on
+                # Redhat 7 Linux (and maybe others) prints locale names
+                # using windows-1252 encoding. Bug only triggered by
+                # a few special characters and when there is an
+                # extensive list of installed locales.
+                out_locales.append(str(x, encoding="windows-1252"))
+
+    except TypeError:
+        pass
+
+    if prefix is None:
+        return _valid_locales(out_locales, normalize)
+
+    pattern = re.compile(f"{prefix}.*")
+    found = pattern.findall("\n".join(out_locales))
+    return _valid_locales(found, normalize)
diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/__init__.py b/emu3/lib/python3.10/site-packages/pandas/_testing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..87d419e2db8dd017688415dc4d97d0d940914123
--- /dev/null
+++ b/emu3/lib/python3.10/site-packages/pandas/_testing/__init__.py
@@ -0,0 +1,639 @@
+from __future__ import annotations
+
+from decimal import Decimal
+import operator
+import os
+from sys import byteorder
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+    ContextManager,
+    cast,
+)
+import warnings
+
+import numpy as np
+
+from pandas._config.localization import (
+    can_set_locale,
+    get_locales,
+    set_locale,
+)
+
+from pandas.compat import pa_version_under10p1
+
+from pandas.core.dtypes.common import is_string_dtype
+
+import pandas as pd
+from pandas import (
+    ArrowDtype,
+    DataFrame,
+    Index,
+    MultiIndex,
+    RangeIndex,
+    Series,
+)
+from pandas._testing._io import (
+    round_trip_localpath,
+    round_trip_pathlib,
+    round_trip_pickle,
+    write_to_compressed,
+)
+from pandas._testing._warnings import (
+    assert_produces_warning,
+    maybe_produces_warning,
+)
+from pandas._testing.asserters import (
+    assert_almost_equal,
+    assert_attr_equal,
+    assert_categorical_equal,
+    assert_class_equal,
+    assert_contains_all,
+    assert_copy,
+    assert_datetime_array_equal,
+    assert_dict_equal,
+    assert_equal,
+    assert_extension_array_equal,
+    assert_frame_equal,
+    assert_index_equal,
+    assert_indexing_slices_equivalent,
+    assert_interval_array_equal,
+    assert_is_sorted,
+    assert_is_valid_plot_return_object,
+    assert_metadata_equivalent,
+    assert_numpy_array_equal,
+    assert_period_array_equal,
+    assert_series_equal,
+    assert_sp_array_equal,
+    assert_timedelta_array_equal,
+    raise_assert_detail,
+)
+from pandas._testing.compat import (
+    get_dtype,
+    get_obj,
+)
+from pandas._testing.contexts import (
+    assert_cow_warning,
+    decompress_file,
+    ensure_clean,
+    raises_chained_assignment_error,
+    set_timezone,
+    use_numexpr,
+    with_csv_dialect,
+)
+from pandas.core.arrays import (
+    BaseMaskedArray,
+    ExtensionArray,
+    NumpyExtensionArray,
+)
+from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+from pandas.core.construction import extract_array
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        Dtype,
+        NpDtype,
+    )
+
+    from pandas.core.arrays import ArrowExtensionArray
+
+UNSIGNED_INT_NUMPY_DTYPES: list[NpDtype] = ["uint8", "uint16", "uint32", "uint64"]
+UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
+SIGNED_INT_NUMPY_DTYPES: list[NpDtype] = [int, "int8", "int16", "int32", "int64"]
+SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
+ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES
+ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
+ALL_INT_DTYPES: list[Dtype] = [*ALL_INT_NUMPY_DTYPES, *ALL_INT_EA_DTYPES]
+
+FLOAT_NUMPY_DTYPES: list[NpDtype] = [float, "float32", "float64"]
+FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
+ALL_FLOAT_DTYPES: list[Dtype] = [*FLOAT_NUMPY_DTYPES, *FLOAT_EA_DTYPES]
+
+COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
+STRING_DTYPES: list[Dtype] = [str, "str", "U"]
+COMPLEX_FLOAT_DTYPES: list[Dtype] = [*COMPLEX_DTYPES, *FLOAT_NUMPY_DTYPES]
+
+DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
+TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
+
+BOOL_DTYPES: list[Dtype] = [bool, "bool"]
+BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
+OBJECT_DTYPES: list[Dtype] = [object, "object"]
+
+ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
+ALL_REAL_EXTENSION_DTYPES = FLOAT_EA_DTYPES + ALL_INT_EA_DTYPES
+ALL_REAL_DTYPES: list[Dtype] = [*ALL_REAL_NUMPY_DTYPES, *ALL_REAL_EXTENSION_DTYPES]
+ALL_NUMERIC_DTYPES: list[Dtype] = [*ALL_REAL_DTYPES, *COMPLEX_DTYPES]
+
+ALL_NUMPY_DTYPES = (
+    ALL_REAL_NUMPY_DTYPES
+    + COMPLEX_DTYPES
+    + STRING_DTYPES
+    + DATETIME64_DTYPES
+    + TIMEDELTA64_DTYPES
+    + BOOL_DTYPES
+    + OBJECT_DTYPES
+    + BYTES_DTYPES
+)
+
+NARROW_NP_DTYPES = [
+    np.float16,
+    np.float32,
+    np.int8,
+    np.int16,
+    np.int32,
+    np.uint8,
+    np.uint16,
+    np.uint32,
+]
+
+PYTHON_DATA_TYPES = [
+    str,
+    int,
+    float,
+    complex,
+    list,
+    tuple,
+    range,
+    dict,
+    set,
+    frozenset,
+    bool,
+    bytes,
+    bytearray,
+    memoryview,
+]
+
+ENDIAN = {"little": "<", "big": ">"}[byteorder]
+
+NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
+NP_NAT_OBJECTS = [
+    cls("NaT", unit)
+    for cls in [np.datetime64, np.timedelta64]
+    for unit in [
+        "Y",
+        "M",
+        "W",
+        "D",
+        "h",
+        "m",
+        "s",
+        "ms",
+        "us",
+        "ns",
+        "ps",
+        "fs",
+        "as",
+    ]
+]
+
+if not pa_version_under10p1:
+    import pyarrow as pa
+
+    UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
+    SIGNED_INT_PYARROW_DTYPES = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
+    ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES
+    ALL_INT_PYARROW_DTYPES_STR_REPR = [
+        str(ArrowDtype(typ)) for typ in ALL_INT_PYARROW_DTYPES
+    ]
+
+    # pa.float16 doesn't seem supported
+    # https://github.com/apache/arrow/blob/master/python/pyarrow/src/arrow/python/helpers.cc#L86
+    FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()]
+    FLOAT_PYARROW_DTYPES_STR_REPR = [
+        str(ArrowDtype(typ)) for typ in FLOAT_PYARROW_DTYPES
+    ]
+    DECIMAL_PYARROW_DTYPES = [pa.decimal128(7, 3)]
+    STRING_PYARROW_DTYPES = [pa.string()]
+    BINARY_PYARROW_DTYPES = [pa.binary()]
+
+    TIME_PYARROW_DTYPES = [
+        pa.time32("s"),
+        pa.time32("ms"),
+        pa.time64("us"),
+        pa.time64("ns"),
+    ]
+    DATE_PYARROW_DTYPES = [pa.date32(), pa.date64()]
+    DATETIME_PYARROW_DTYPES = [
+        pa.timestamp(unit=unit, tz=tz)
+        for unit in ["s", "ms", "us", "ns"]
+        for tz in [None, "UTC", "US/Pacific", "US/Eastern"]
+    ]
+    TIMEDELTA_PYARROW_DTYPES = [pa.duration(unit) for unit in ["s", "ms", "us", "ns"]]
+
+    BOOL_PYARROW_DTYPES = [pa.bool_()]
+
+    # TODO: Add container like pyarrow types:
+    #  https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions
+    ALL_PYARROW_DTYPES = (
+        ALL_INT_PYARROW_DTYPES
+        + FLOAT_PYARROW_DTYPES
+        + DECIMAL_PYARROW_DTYPES
+        + STRING_PYARROW_DTYPES
+        + BINARY_PYARROW_DTYPES
+        + TIME_PYARROW_DTYPES
+        + DATE_PYARROW_DTYPES
+        + DATETIME_PYARROW_DTYPES
+        + TIMEDELTA_PYARROW_DTYPES
+        + BOOL_PYARROW_DTYPES
+    )
+    ALL_REAL_PYARROW_DTYPES_STR_REPR = (
+        ALL_INT_PYARROW_DTYPES_STR_REPR + FLOAT_PYARROW_DTYPES_STR_REPR
+    )
+else:
+    FLOAT_PYARROW_DTYPES_STR_REPR = []
+    ALL_INT_PYARROW_DTYPES_STR_REPR = []
+    ALL_PYARROW_DTYPES = []
+    ALL_REAL_PYARROW_DTYPES_STR_REPR = []
+
+ALL_REAL_NULLABLE_DTYPES = (
+    FLOAT_NUMPY_DTYPES + ALL_REAL_EXTENSION_DTYPES + ALL_REAL_PYARROW_DTYPES_STR_REPR
+)
+
+arithmetic_dunder_methods = [
+    "__add__",
+    "__radd__",
+    "__sub__",
+    "__rsub__",
+    "__mul__",
+    "__rmul__",
+    "__floordiv__",
+    "__rfloordiv__",
+    "__truediv__",
+    "__rtruediv__",
+    "__pow__",
+    "__rpow__",
+    "__mod__",
+    "__rmod__",
+]
+
+comparison_dunder_methods = ["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"]
+
+
+# -----------------------------------------------------------------------------
+# Comparators
+
+
+def box_expected(expected, box_cls, transpose: bool = True):
+    """
+    Helper function to wrap the expected output of a test in a given box_class.
+
+    Parameters
+    ----------
+    expected : np.ndarray, Index, Series
+    box_cls : {Index, Series, DataFrame}
+
+    Returns
+    -------
+    subclass of box_cls
+    """
+    if box_cls is pd.array:
+        if isinstance(expected, RangeIndex):
+            # pd.array would return an IntegerArray
+            expected = NumpyExtensionArray(np.asarray(expected._values))
+        else:
+            expected = pd.array(expected, copy=False)
+    elif box_cls is Index:
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+            expected = Index(expected)
+    elif box_cls is Series:
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+            expected = Series(expected)
+    elif box_cls is DataFrame:
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning)
+            expected = Series(expected).to_frame()
+        if transpose:
+            # for vector operations, we need a DataFrame to be a single-row,
+            #  not a single-column, in order to operate against non-DataFrame
+            #  vectors of the same length. But convert to two rows to avoid
+            #  single-row special cases in datetime arithmetic
+            expected = expected.T
+            expected = pd.concat([expected] * 2, ignore_index=True)
+    elif box_cls is np.ndarray or box_cls is np.array:
+        expected = np.array(expected)
+    elif box_cls is to_array:
+        expected = to_array(expected)
+    else:
+        raise NotImplementedError(box_cls)
+    return expected
+
+
+def to_array(obj):
+    """
+    Similar to pd.array, but does not cast numpy dtypes to nullable dtypes.
+    """
+    # temporary implementation until we get pd.array in place
+    dtype = getattr(obj, "dtype", None)
+
+    if dtype is None:
+        return np.asarray(obj)
+
+    return extract_array(obj, extract_numpy=True)
+
+
+class SubclassedSeries(Series):
+    _metadata = ["testattr", "name"]
+
+    @property
+    def _constructor(self):
+        # For testing, those properties return a generic callable, and not
+        # the actual class. In this case that is equivalent, but it is to
+        # ensure we don't rely on the property returning a class
+        # See https://github.com/pandas-dev/pandas/pull/46018 and
+        # https://github.com/pandas-dev/pandas/issues/32638 and linked issues
+        return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
+
+    @property
+    def _constructor_expanddim(self):
+        return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
+
+
+class SubclassedDataFrame(DataFrame):
+    _metadata = ["testattr"]
+
+    @property
+    def _constructor(self):
+        return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
+
+    @property
+    def _constructor_sliced(self):
+        return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
+
+
+def convert_rows_list_to_csv_str(rows_list: list[str]) -> str:
+    """
+    Convert list of CSV rows to single CSV-formatted string for current OS.
+
+    This method is used for creating expected value of to_csv() method.
+
+    Parameters
+    ----------
+    rows_list : List[str]
+        Each element represents the row of csv.
+
+    Returns
+    -------
+    str
+        Expected output of to_csv() in current OS.
+    """
+    sep = os.linesep
+    return sep.join(rows_list) + sep
+
+
+def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
+    """
+    Helper function to mark pytest.raises that have an external error message.
+
+    Parameters
+    ----------
+    expected_exception : Exception
+        Expected error to raise.
+
+    Returns
+    -------
+    Callable
+        Regular `pytest.raises` function with `match` equal to `None`.
+    """
+    import pytest
+
+    return pytest.raises(expected_exception, match=None)
+
+
+cython_table = pd.core.common._cython_table.items()
+
+
+def get_cython_table_params(ndframe, func_names_and_expected):
+    """
+    Combine frame, functions from com._cython_table
+    keys and expected result.
+
+    Parameters
+    ----------
+    ndframe : DataFrame or Series
+    func_names_and_expected : Sequence of two items
+        The first item is a name of a NDFrame method ('sum', 'prod') etc.
+        The second item is the expected return value.
+
+    Returns
+    -------
+    list
+        List of three items (DataFrame, function, expected result)
+    """
+    results = []
+    for func_name, expected in func_names_and_expected:
+        results.append((ndframe, func_name, expected))
+        results += [
+            (ndframe, func, expected)
+            for func, name in cython_table
+            if name == func_name
+        ]
+    return results
+
+
+def get_op_from_name(op_name: str) -> Callable:
+    """
+    The operator function for a given op name.
+
+    Parameters
+    ----------
+    op_name : str
+        The op name, in form of "add" or "__add__".
+
+    Returns
+    -------
+    function
+        A function performing the operation.
+    """
+    short_opname = op_name.strip("_")
+    try:
+        op = getattr(operator, short_opname)
+    except AttributeError:
+        # Assume it is the reverse operator
+        rop = getattr(operator, short_opname[1:])
+        op = lambda x, y: rop(y, x)
+
+    return op
+
+
+# -----------------------------------------------------------------------------
+# Indexing test helpers
+
+
+def getitem(x):
+    return x
+
+
+def setitem(x):
+    return x
+
+
+def loc(x):
+    return x.loc
+
+
+def iloc(x):
+    return x.iloc
+
+
+def at(x):
+    return x.at
+
+
+def iat(x):
+    return x.iat
+
+
+# -----------------------------------------------------------------------------
+
+_UNITS = ["s", "ms", "us", "ns"]
+
+
+def get_finest_unit(left: str, right: str):
+    """
+    Find the higher of two datetime64 units.
+ """ + if _UNITS.index(left) >= _UNITS.index(right): + return left + return right + + +def shares_memory(left, right) -> bool: + """ + Pandas-compat for np.shares_memory. + """ + if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): + return np.shares_memory(left, right) + elif isinstance(left, np.ndarray): + # Call with reversed args to get to unpacking logic below. + return shares_memory(right, left) + + if isinstance(left, RangeIndex): + return False + if isinstance(left, MultiIndex): + return shares_memory(left._codes, right) + if isinstance(left, (Index, Series)): + return shares_memory(left._values, right) + + if isinstance(left, NDArrayBackedExtensionArray): + return shares_memory(left._ndarray, right) + if isinstance(left, pd.core.arrays.SparseArray): + return shares_memory(left.sp_values, right) + if isinstance(left, pd.core.arrays.IntervalArray): + return shares_memory(left._left, right) or shares_memory(left._right, right) + + if ( + isinstance(left, ExtensionArray) + and is_string_dtype(left.dtype) + and left.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined] + ): + # https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669 + left = cast("ArrowExtensionArray", left) + if ( + isinstance(right, ExtensionArray) + and is_string_dtype(right.dtype) + and right.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined] + ): + right = cast("ArrowExtensionArray", right) + left_pa_data = left._pa_array + right_pa_data = right._pa_array + left_buf1 = left_pa_data.chunk(0).buffers()[1] + right_buf1 = right_pa_data.chunk(0).buffers()[1] + return left_buf1 == right_buf1 + + if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray): + # By convention, we'll say these share memory if they share *either* + # the _data or the _mask + return np.shares_memory(left._data, right._data) or np.shares_memory( + left._mask, right._mask + ) + + if isinstance(left, DataFrame) and len(left._mgr.arrays) == 1: + arr = left._mgr.arrays[0] + return shares_memory(arr, right) + + raise NotImplementedError(type(left), type(right)) + + +__all__ = [ + "ALL_INT_EA_DTYPES", + "ALL_INT_NUMPY_DTYPES", + "ALL_NUMPY_DTYPES", + "ALL_REAL_NUMPY_DTYPES", + "assert_almost_equal", + "assert_attr_equal", + "assert_categorical_equal", + "assert_class_equal", + "assert_contains_all", + "assert_copy", + "assert_datetime_array_equal", + "assert_dict_equal", + "assert_equal", + "assert_extension_array_equal", + "assert_frame_equal", + "assert_index_equal", + "assert_indexing_slices_equivalent", + "assert_interval_array_equal", + "assert_is_sorted", + "assert_is_valid_plot_return_object", + "assert_metadata_equivalent", + "assert_numpy_array_equal", + "assert_period_array_equal", + "assert_produces_warning", + "assert_series_equal", + "assert_sp_array_equal", + "assert_timedelta_array_equal", + "assert_cow_warning", + "at", + "BOOL_DTYPES", + "box_expected", + "BYTES_DTYPES", + "can_set_locale", + "COMPLEX_DTYPES", + "convert_rows_list_to_csv_str", + "DATETIME64_DTYPES", + "decompress_file", + "ENDIAN", + "ensure_clean", + "external_error_raised", + "FLOAT_EA_DTYPES", + "FLOAT_NUMPY_DTYPES", + "get_cython_table_params", + "get_dtype", + "getitem", + "get_locales", + "get_finest_unit", + "get_obj", + "get_op_from_name", + "iat", + "iloc", + "loc", + "maybe_produces_warning", + "NARROW_NP_DTYPES", + "NP_NAT_OBJECTS", + "NULL_OBJECTS", + "OBJECT_DTYPES", + "raise_assert_detail", + "raises_chained_assignment_error", + "round_trip_localpath", + 
"round_trip_pathlib", + "round_trip_pickle", + "setitem", + "set_locale", + "set_timezone", + "shares_memory", + "SIGNED_INT_EA_DTYPES", + "SIGNED_INT_NUMPY_DTYPES", + "STRING_DTYPES", + "SubclassedDataFrame", + "SubclassedSeries", + "TIMEDELTA64_DTYPES", + "to_array", + "UNSIGNED_INT_EA_DTYPES", + "UNSIGNED_INT_NUMPY_DTYPES", + "use_numexpr", + "with_csv_dialect", + "write_to_compressed", +] diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..327a7c5bf530f3f8e47c4812fa992d58118e2493 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a58cb78810c5856ef6520eb28d8b7a9cc61832d Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20e7cc602ce911453e2750f4775998f0cb44c8c3 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5271a01efdb70995363c6fa5153e3c68f85dd211 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2ce8c3c2e46869a2f08385560b0a9f833c0583d Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3177d96d8d13078b59210d97ecb9a0ad76d3aecd Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7464cee6ca0bac53c8f478bf3b299b10af5efa14 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py b/emu3/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py new file mode 100644 index 
0000000000000000000000000000000000000000..084ca9c306d192a2543108249dbc345d1259be01 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py @@ -0,0 +1,93 @@ +""" +Hypothesis data generator helpers. +""" +from datetime import datetime + +from hypothesis import strategies as st +from hypothesis.extra.dateutil import timezones as dateutil_timezones +from hypothesis.extra.pytz import timezones as pytz_timezones + +from pandas.compat import is_platform_windows + +import pandas as pd + +from pandas.tseries.offsets import ( + BMonthBegin, + BMonthEnd, + BQuarterBegin, + BQuarterEnd, + BYearBegin, + BYearEnd, + MonthBegin, + MonthEnd, + QuarterBegin, + QuarterEnd, + YearBegin, + YearEnd, +) + +OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3) + +OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3) + +OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3) + +OPTIONAL_DICTS = st.lists( + st.one_of(st.none(), st.dictionaries(st.text(), st.integers())), + max_size=10, + min_size=3, +) + +OPTIONAL_LISTS = st.lists( + st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)), + max_size=10, + min_size=3, +) + +OPTIONAL_ONE_OF_ALL = st.one_of( + OPTIONAL_DICTS, OPTIONAL_FLOATS, OPTIONAL_INTS, OPTIONAL_LISTS, OPTIONAL_TEXT +) + +if is_platform_windows(): + DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1)) +else: + DATETIME_NO_TZ = st.datetimes() + +DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes( + min_value=pd.Timestamp( + 1900, 1, 1 + ).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues] + max_value=pd.Timestamp( + 1900, 1, 1 + ).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues] + timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()), +) + +DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes( + min_value=pd.Timestamp.min.to_pydatetime(warn=False), + max_value=pd.Timestamp.max.to_pydatetime(warn=False), +) + +INT_NEG_999_TO_POS_999 = st.integers(-999, 999) + +# The strategy for each type is registered in conftest.py, as they don't carry +# enough runtime information (e.g. type hints) to infer how to build them. +YQM_OFFSET = st.one_of( + *map( + st.from_type, + [ + MonthBegin, + MonthEnd, + BMonthBegin, + BMonthEnd, + QuarterBegin, + QuarterEnd, + BQuarterBegin, + BQuarterEnd, + YearBegin, + YearEnd, + BYearBegin, + BYearEnd, + ], + ) +) diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/_io.py b/emu3/lib/python3.10/site-packages/pandas/_testing/_io.py new file mode 100644 index 0000000000000000000000000000000000000000..95977edb600ade42a8f8a1fada2b5085cee1da56 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/_testing/_io.py @@ -0,0 +1,170 @@ +from __future__ import annotations + +import gzip +import io +import pathlib +import tarfile +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) +import uuid +import zipfile + +from pandas.compat import ( + get_bz2_file, + get_lzma_file, +) +from pandas.compat._optional import import_optional_dependency + +import pandas as pd +from pandas._testing.contexts import ensure_clean + +if TYPE_CHECKING: + from pandas._typing import ( + FilePath, + ReadPickleBuffer, + ) + + from pandas import ( + DataFrame, + Series, + ) + +# ------------------------------------------------------------------ +# File-IO + + +def round_trip_pickle( + obj: Any, path: FilePath | ReadPickleBuffer | None = None +) -> DataFrame | Series: + """ + Pickle an object and then read it again. 
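The module-level strategies in _hypothesis.py above are meant to be dropped straight into @given decorators. An illustrative sketch (the test name and the object-dtype round-trip are invented for the example):

import pandas as pd
from hypothesis import given

from pandas._testing._hypothesis import OPTIONAL_INTS

@given(vals=OPTIONAL_INTS)
def test_optional_ints(vals):
    # Each draw is a list of 3-10 integers and/or None.
    ser = pd.Series(vals, dtype=object)
    assert ser.isna().sum() == sum(v is None for v in vals)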
+ + Parameters + ---------- + obj : any object + The object to pickle and then re-read. + path : str, path object or file-like object, default None + The path where the pickled object is written and then read. + + Returns + ------- + pandas object + The original object that was pickled and then re-read. + """ + _path = path + if _path is None: + _path = f"__{uuid.uuid4()}__.pickle" + with ensure_clean(_path) as temp_path: + pd.to_pickle(obj, temp_path) + return pd.read_pickle(temp_path) + + +def round_trip_pathlib(writer, reader, path: str | None = None): + """ + Write an object to file specified by a pathlib.Path and read it back + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + pandas object + The original object that was serialized and then re-read. + """ + Path = pathlib.Path + if path is None: + path = "___pathlib___" + with ensure_clean(path) as path: + writer(Path(path)) # type: ignore[arg-type] + obj = reader(Path(path)) # type: ignore[arg-type] + return obj + + +def round_trip_localpath(writer, reader, path: str | None = None): + """ + Write an object to file specified by a py.path LocalPath and read it back. + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + pandas object + The original object that was serialized and then re-read. + """ + import pytest + + LocalPath = pytest.importorskip("py.path").local + if path is None: + path = "___localpath___" + with ensure_clean(path) as path: + writer(LocalPath(path)) + obj = reader(LocalPath(path)) + return obj + + +def write_to_compressed(compression, path, data, dest: str = "test") -> None: + """ + Write data to a compressed file. + + Parameters + ---------- + compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd'} + The compression type to use. + path : str + The file path to write the data. + data : str + The data to write. + dest : str, default "test" + The destination file (for ZIP only) + + Raises + ------ + ValueError : An invalid compression value was passed in. + """ + args: tuple[Any, ...] 
= (data,) + mode = "wb" + method = "write" + compress_method: Callable + + if compression == "zip": + compress_method = zipfile.ZipFile + mode = "w" + args = (dest, data) + method = "writestr" + elif compression == "tar": + compress_method = tarfile.TarFile + mode = "w" + file = tarfile.TarInfo(name=dest) + bytes = io.BytesIO(data) + file.size = len(data) + args = (file, bytes) + method = "addfile" + elif compression == "gzip": + compress_method = gzip.GzipFile + elif compression == "bz2": + compress_method = get_bz2_file() + elif compression == "zstd": + compress_method = import_optional_dependency("zstandard").open + elif compression == "xz": + compress_method = get_lzma_file() + else: + raise ValueError(f"Unrecognized compression type: {compression}") + + with compress_method(path, mode=mode) as f: + getattr(f, method)(*args)
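Taken together, the IO helpers above round-trip an object through disk and clean up after themselves. A usage sketch (file names are arbitrary; ensure_clean is the companion context manager defined in pandas._testing.contexts later in this diff):

import gzip
import pandas as pd
import pandas._testing as tm

df = pd.DataFrame({"a": [1, 2, 3]})

# Pickle round-trip should hand back an equal frame.
tm.assert_frame_equal(tm.round_trip_pickle(df), df)

# write_to_compressed dispatches on the compression name; gzip shown here.
with tm.ensure_clean("example.gz") as path:
    tm.write_to_compressed("gzip", path, b"col1,col2\n1,2\n")
    with gzip.open(path, "rb") as fh:
        assert fh.read() == b"col1,col2\n1,2\n"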
diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/_warnings.py b/emu3/lib/python3.10/site-packages/pandas/_testing/_warnings.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a287942f2dac5ddbaf49168db280ec2ba3f2c4 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/_testing/_warnings.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +from contextlib import ( + contextmanager, + nullcontext, +) +import inspect +import re +import sys +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) +import warnings + +from pandas.compat import PY311 + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Sequence, + ) + + +@contextmanager +def assert_produces_warning( + expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None = Warning, + filter_level: Literal[ + "error", "ignore", "always", "default", "module", "once" + ] = "always", + check_stacklevel: bool = True, + raise_on_extra_warnings: bool = True, + match: str | None = None, +) -> Generator[list[warnings.WarningMessage], None, None]: + """ + Context manager for running code expected to either raise a specific warning, + multiple specific warnings, or not raise any warnings. Verifies that the code + raises the expected warning(s), and that it does not raise any other unexpected + warnings. It is basically a wrapper around ``warnings.catch_warnings``. + + Parameters + ---------- + expected_warning : {Warning, False, tuple[Warning, ...], None}, default Warning + The type of Exception raised. ``exception.Warning`` is the base + class for all warnings. To raise multiple types of exceptions, + pass them as a tuple. To check that no warning is returned, + specify ``False`` or ``None``. + filter_level : str or None, default "always" + Specifies whether warnings are ignored, displayed, or turned + into errors. + Valid values are: + + * "error" - turns matching warnings into exceptions + * "ignore" - discard the warning + * "always" - always emit a warning + * "default" - print the warning the first time it is generated + from each location + * "module" - print the warning the first time it is generated + from each module + * "once" - print the warning the first time it is generated + + check_stacklevel : bool, default True + If True, displays the line that called the function containing + the warning to show where the function is called. Otherwise, the + line that implements the function is displayed. + raise_on_extra_warnings : bool, default True + Whether extra warnings not of the type `expected_warning` should + cause the test to fail. + match : str, optional + Match warning message. + + Examples + -------- + >>> import warnings + >>> with assert_produces_warning(): + ... warnings.warn(UserWarning()) + ... + >>> with assert_produces_warning(False): + ... warnings.warn(RuntimeWarning()) + ... + Traceback (most recent call last): + ... + AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. + >>> with assert_produces_warning(UserWarning): + ... warnings.warn(RuntimeWarning()) + Traceback (most recent call last): + ... + AssertionError: Did not see expected warning of class 'UserWarning'. + + .. warning:: This is *not* thread-safe. + """ + __tracebackhide__ = True + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter(filter_level) + try: + yield w + finally: + if expected_warning: + expected_warning = cast(type[Warning], expected_warning) + _assert_caught_expected_warning( + caught_warnings=w, + expected_warning=expected_warning, + match=match, + check_stacklevel=check_stacklevel, + ) + if raise_on_extra_warnings: + _assert_caught_no_extra_warnings( + caught_warnings=w, + expected_warning=expected_warning, + ) + + +def maybe_produces_warning(warning: type[Warning], condition: bool, **kwargs): + """ + Return a context manager that possibly checks a warning based on the condition. + """ + if condition: + return assert_produces_warning(warning, **kwargs) + else: + return nullcontext()
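maybe_produces_warning above is the conditional wrapper: when the condition is false it degrades to a no-op nullcontext. A sketch (the condition flag stands in for a platform or version check in a real test):

import warnings
import pandas._testing as tm

condition = True  # assumed stand-in for e.g. a platform/version flag

with tm.maybe_produces_warning(FutureWarning, condition, match="deprecated"):
    if condition:
        warnings.warn("this API is deprecated", FutureWarning)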
+ + +def _assert_caught_expected_warning( + *, + caught_warnings: Sequence[warnings.WarningMessage], + expected_warning: type[Warning], + match: str | None, + check_stacklevel: bool, +) -> None: + """Assert that there was the expected warning among the caught warnings.""" + saw_warning = False + matched_message = False + unmatched_messages = [] + + for actual_warning in caught_warnings: + if issubclass(actual_warning.category, expected_warning): + saw_warning = True + + if check_stacklevel: + _assert_raised_with_correct_stacklevel(actual_warning) + + if match is not None: + if re.search(match, str(actual_warning.message)): + matched_message = True + else: + unmatched_messages.append(actual_warning.message) + + if not saw_warning: + raise AssertionError( + f"Did not see expected warning of class " + f"{repr(expected_warning.__name__)}" + ) + + if match and not matched_message: + raise AssertionError( + f"Did not see warning {repr(expected_warning.__name__)} " + f"matching '{match}'. The emitted warning messages are " + f"{unmatched_messages}" + ) + + +def _assert_caught_no_extra_warnings( + *, + caught_warnings: Sequence[warnings.WarningMessage], + expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None, +) -> None: + """Assert that no extra warnings apart from the expected ones are caught.""" + extra_warnings = [] + + for actual_warning in caught_warnings: + if _is_unexpected_warning(actual_warning, expected_warning): + # GH#38630 pytest.filterwarnings does not suppress these. + if actual_warning.category == ResourceWarning: + # GH 44732: Don't make the CI flaky by filtering SSL-related + # ResourceWarning from dependencies + if "unclosed <ssl.SSLSocket" in str(actual_warning.message): + continue + # GH 44844: Matplotlib leaves font files open upon import. + # Don't make CI flaky if ResourceWarning raised due to these + # open files. + if any("matplotlib" in mod for mod in sys.modules): + continue + if PY311 and actual_warning.category == EncodingWarning: + # EncodingWarnings are checked in the CI; ignore + # EncodingWarnings from other libraries + continue + extra_warnings.append( + ( + actual_warning.category.__name__, + actual_warning.message, + actual_warning.filename, + actual_warning.lineno, + ) + ) + + if extra_warnings: + raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}") + + +def _is_unexpected_warning( + actual_warning: warnings.WarningMessage, + expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None, +) -> bool: + """Check if the actual warning issued is unexpected.""" + if actual_warning and not expected_warning: + return True + expected_warning = cast(type[Warning], expected_warning) + return bool(not issubclass(actual_warning.category, expected_warning)) + + +def _assert_raised_with_correct_stacklevel( + actual_warning: warnings.WarningMessage, +) -> None: + # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow + frame = inspect.currentframe() + for _ in range(4): + frame = frame.f_back # type: ignore[union-attr] + try: + caller_filename = inspect.getfile(frame) # type: ignore[arg-type] + finally: + # See note in + # https://docs.python.org/3/library/inspect.html#inspect.Traceback + del frame + msg = ( + "Warning not set with correct stacklevel. " + f"File where warning is raised: {actual_warning.filename} != " + f"{caller_filename}. Warning message: {actual_warning.message}" + ) + assert actual_warning.filename == caller_filename, msg diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/asserters.py b/emu3/lib/python3.10/site-packages/pandas/_testing/asserters.py new file mode 100644 index 0000000000000000000000000000000000000000..41d2a7344a4edf2e05664eb599b0049d2c696e4c --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/_testing/asserters.py @@ -0,0 +1,1435 @@ +from __future__ import annotations + +import operator +from typing import ( + TYPE_CHECKING, + Literal, + NoReturn, + cast, +) + +import numpy as np + +from pandas._libs import lib +from pandas._libs.missing import is_matching_na +from pandas._libs.sparse import SparseIndex +import pandas._libs.testing as _testing +from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions + +from pandas.core.dtypes.common import ( + is_bool, + is_float_dtype, + is_integer_dtype, + is_number, + is_numeric_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + NumpyEADtype, +) +from pandas.core.dtypes.missing import array_equivalent + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + IntervalDtype, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, +) +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + IntervalArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin +from pandas.core.arrays.string_ import StringDtype +from pandas.core.indexes.api import safe_sort_index + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas._typing import DtypeObj + + +def assert_almost_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = "equiv", + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + **kwargs, +) -> None: + """ + Check that the left and right objects are approximately equal. + + By approximately equal, we refer to objects that are numbers or that + contain numbers which may be equivalent to specific levels of precision. + + Parameters + ---------- + left : object + right : object + check_dtype : bool or {'equiv'}, default 'equiv' + Check dtype if both a and b are the same type. If 'equiv' is passed in, + then `RangeIndex` and `Index` with int64 dtype are also considered + equivalent when doing type checking.
+ rtol : float, default 1e-5 + Relative tolerance. + atol : float, default 1e-8 + Absolute tolerance. + """ + if isinstance(left, Index): + assert_index_equal( + left, + right, + check_exact=False, + exact=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + elif isinstance(left, Series): + assert_series_equal( + left, + right, + check_exact=False, + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + elif isinstance(left, DataFrame): + assert_frame_equal( + left, + right, + check_exact=False, + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + else: + # Other sequences. + if check_dtype: + if is_number(left) and is_number(right): + # Do not compare numeric classes, like np.float64 and float. + pass + elif is_bool(left) and is_bool(right): + # Do not compare bool classes, like np.bool_ and bool. + pass + else: + if isinstance(left, np.ndarray) or isinstance(right, np.ndarray): + obj = "numpy array" + else: + obj = "Input" + assert_class_equal(left, right, obj=obj) + + # if we have "equiv", this becomes True + _testing.assert_almost_equal( + left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs + ) + + +def _check_isinstance(left, right, cls) -> None: + """ + Helper method for our assert_* methods that ensures that + the two objects being compared have the right type before + proceeding with the comparison. + + Parameters + ---------- + left : The first object being compared. + right : The second object being compared. + cls : The class type to check against. + + Raises + ------ + AssertionError : Either `left` or `right` is not an instance of `cls`. + """ + cls_name = cls.__name__ + + if not isinstance(left, cls): + raise AssertionError( + f"{cls_name} Expected type {cls}, found {type(left)} instead" + ) + if not isinstance(right, cls): + raise AssertionError( + f"{cls_name} Expected type {cls}, found {type(right)} instead" + ) + + +def assert_dict_equal(left, right, compare_keys: bool = True) -> None: + _check_isinstance(left, right, dict) + _testing.assert_dict_equal(left, right, compare_keys=compare_keys) + + +def assert_index_equal( + left: Index, + right: Index, + exact: bool | str = "equiv", + check_names: bool = True, + check_exact: bool = True, + check_categorical: bool = True, + check_order: bool = True, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + obj: str = "Index", +) -> None: + """ + Check that left and right Index are equal. + + Parameters + ---------- + left : Index + right : Index + exact : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. If 'equiv', then RangeIndex can be substituted for + Index with an int64 dtype as well. + check_names : bool, default True + Whether to check the names attribute. + check_exact : bool, default True + Whether to compare number exactly. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_order : bool, default True + Whether to compare the order of index entries as well as their values. + If True, both indexes must contain the same elements, in the same order. + If False, both indexes must contain the same elements, but in any order. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'Index' + Specify object name being compared, internally used to show appropriate + assertion message. 
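assert_almost_equal above dispatches to the Index/Series/DataFrame asserters and only falls back to scalar comparison for plain numbers. A scalar sketch of the tolerance behavior, for illustration only:

import pandas._testing as tm

# A relative difference of ~1e-6 is inside the default rtol of 1e-5.
tm.assert_almost_equal(1.000001, 1.000002)

# Tightening the tolerances makes the same comparison raise.
try:
    tm.assert_almost_equal(1.000001, 1.000002, rtol=1e-9, atol=0)
except AssertionError:
    pass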
+ + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Index([1, 2, 3]) + >>> b = pd.Index([1, 2, 3]) + >>> tm.assert_index_equal(a, b) + """ + __tracebackhide__ = True + + def _check_types(left, right, obj: str = "Index") -> None: + if not exact: + return + + assert_class_equal(left, right, exact=exact, obj=obj) + assert_attr_equal("inferred_type", left, right, obj=obj) + + # Skip exact dtype checking when `check_categorical` is False + if isinstance(left.dtype, CategoricalDtype) and isinstance( + right.dtype, CategoricalDtype + ): + if check_categorical: + assert_attr_equal("dtype", left, right, obj=obj) + assert_index_equal(left.categories, right.categories, exact=exact) + return + + assert_attr_equal("dtype", left, right, obj=obj) + + # instance validation + _check_isinstance(left, right, Index) + + # class / dtype comparison + _check_types(left, right, obj=obj) + + # level comparison + if left.nlevels != right.nlevels: + msg1 = f"{obj} levels are different" + msg2 = f"{left.nlevels}, {left}" + msg3 = f"{right.nlevels}, {right}" + raise_assert_detail(obj, msg1, msg2, msg3) + + # length comparison + if len(left) != len(right): + msg1 = f"{obj} length are different" + msg2 = f"{len(left)}, {left}" + msg3 = f"{len(right)}, {right}" + raise_assert_detail(obj, msg1, msg2, msg3) + + # If order doesn't matter then sort the index entries + if not check_order: + left = safe_sort_index(left) + right = safe_sort_index(right) + + # MultiIndex special comparison for little-friendly error messages + if isinstance(left, MultiIndex): + right = cast(MultiIndex, right) + + for level in range(left.nlevels): + lobj = f"MultiIndex level [{level}]" + try: + # try comparison on levels/codes to avoid densifying MultiIndex + assert_index_equal( + left.levels[level], + right.levels[level], + exact=exact, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + rtol=rtol, + atol=atol, + obj=lobj, + ) + assert_numpy_array_equal(left.codes[level], right.codes[level]) + except AssertionError: + llevel = left.get_level_values(level) + rlevel = right.get_level_values(level) + + assert_index_equal( + llevel, + rlevel, + exact=exact, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + rtol=rtol, + atol=atol, + obj=lobj, + ) + # get_level_values may change dtype + _check_types(left.levels[level], right.levels[level], obj=obj) + + # skip exact index checking when `check_categorical` is False + elif check_exact and check_categorical: + if not left.equals(right): + mismatch = left._values != right._values + + if not isinstance(mismatch, np.ndarray): + mismatch = cast("ExtensionArray", mismatch).fillna(True) + + diff = np.sum(mismatch.astype(int)) * 100.0 / len(left) + msg = f"{obj} values are different ({np.round(diff, 5)} %)" + raise_assert_detail(obj, msg, left, right) + else: + # if we have "equiv", this becomes True + exact_bool = bool(exact) + _testing.assert_almost_equal( + left.values, + right.values, + rtol=rtol, + atol=atol, + check_dtype=exact_bool, + obj=obj, + lobj=left, + robj=right, + ) + + # metadata comparison + if check_names: + assert_attr_equal("names", left, right, obj=obj) + if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex): + assert_attr_equal("dtype", left, right, obj=obj) + if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex): + assert_interval_array_equal(left._values, right._values) + + if check_categorical: + if isinstance(left.dtype, CategoricalDtype) or 
isinstance( + right.dtype, CategoricalDtype + ) + assert_categorical_equal(left._values, right._values, obj=f"{obj} category") + + +def assert_class_equal( + left, right, exact: bool | str = True, obj: str = "Input" +) -> None: + """ + Checks classes are equal. + """ + __tracebackhide__ = True + + def repr_class(x): + if isinstance(x, Index): + # return Index as it is to include values in the error message + return x + + return type(x).__name__ + + def is_class_equiv(idx: Index) -> bool: + """Classes that are a RangeIndex (sub-)instance or exactly an `Index`. + + This only checks class equivalence. There is a separate check that the + dtype is int64. + """ + return type(idx) is Index or isinstance(idx, RangeIndex) + + if type(left) == type(right): + return + + if exact == "equiv": + if is_class_equiv(left) and is_class_equiv(right): + return + + msg = f"{obj} classes are different" + raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) + + +def assert_attr_equal(attr: str, left, right, obj: str = "Attributes") -> None: + """ + Check attributes are equal. Both objects must have the attribute. + + Parameters + ---------- + attr : str + Attribute name being compared. + left : object + right : object + obj : str, default 'Attributes' + Specify object name being compared, internally used to show appropriate + assertion message + """ + __tracebackhide__ = True + + left_attr = getattr(left, attr) + right_attr = getattr(right, attr) + + if left_attr is right_attr or is_matching_na(left_attr, right_attr): + # e.g. both np.nan, both NaT, both pd.NA, ... + return None + + try: + result = left_attr == right_attr + except TypeError: + # datetimetz on rhs may raise TypeError + result = False + if (left_attr is pd.NA) ^ (right_attr is pd.NA): + result = False + elif not isinstance(result, bool): + result = result.all() + + if not result: + msg = f'Attribute "{attr}" are different' + raise_assert_detail(obj, msg, left_attr, right_attr) + return None + + +def assert_is_valid_plot_return_object(objs) -> None: + from matplotlib.artist import Artist + from matplotlib.axes import Axes + + if isinstance(objs, (Series, np.ndarray)): + if isinstance(objs, Series): + objs = objs._values + for el in objs.ravel(): + msg = ( + "one of 'objs' is not a matplotlib Axes instance, " + f"type encountered {repr(type(el).__name__)}" + ) + assert isinstance(el, (Axes, dict)), msg + else: + msg = ( + "objs is neither an ndarray of Artist instances nor a single " + "Artist instance, tuple, or dict, 'objs' is a " + f"{repr(type(objs).__name__)}" + ) + assert isinstance(objs, (Artist, tuple, dict)), msg + + +def assert_is_sorted(seq) -> None: + """Assert that the sequence is sorted.""" + if isinstance(seq, (Index, Series)): + seq = seq.values + # sorting does not change precisions + if isinstance(seq, np.ndarray): + assert_numpy_array_equal(seq, np.sort(np.array(seq))) + else: + assert_extension_array_equal(seq, seq[seq.argsort()])
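For the attribute and sortedness asserters above, usage looks like the following sketch (values are arbitrary):

import numpy as np
import pandas as pd
import pandas._testing as tm

# Compares the named attribute on both objects (here both dtypes are int64).
tm.assert_attr_equal("dtype", pd.Index([1, 2]), pd.Index([3, 4]))

# Sortedness check: passes on ordered input, raises otherwise.
tm.assert_is_sorted(np.array([1, 2, 3]))
try:
    tm.assert_is_sorted(np.array([3, 1, 2]))
except AssertionError:
    pass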
+ + +def assert_categorical_equal( + left, + right, + check_dtype: bool = True, + check_category_order: bool = True, + obj: str = "Categorical", +) -> None: + """ + Test that Categoricals are equivalent. + + Parameters + ---------- + left : Categorical + right : Categorical + check_dtype : bool, default True + Check that the integer dtype of the codes is the same. + check_category_order : bool, default True + Whether the order of the categories should be compared, which + implies identical integer codes. If False, only the resulting + values are compared. The ordered attribute is + checked regardless. + obj : str, default 'Categorical' + Specify object name being compared, internally used to show appropriate + assertion message. + """ + _check_isinstance(left, right, Categorical) + + exact: bool | str + if isinstance(left.categories, RangeIndex) or isinstance( + right.categories, RangeIndex + ): + exact = "equiv" + else: + # We still want to require exact matches for Index + exact = True + + if check_category_order: + assert_index_equal( + left.categories, right.categories, obj=f"{obj}.categories", exact=exact + ) + assert_numpy_array_equal( + left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes" + ) + else: + try: + lc = left.categories.sort_values() + rc = right.categories.sort_values() + except TypeError: + # e.g. '<' not supported between instances of 'int' and 'str' + lc, rc = left.categories, right.categories + assert_index_equal(lc, rc, obj=f"{obj}.categories", exact=exact) + assert_index_equal( + left.categories.take(left.codes), + right.categories.take(right.codes), + obj=f"{obj}.values", + exact=exact, + ) + + assert_attr_equal("ordered", left, right, obj=obj) + + +def assert_interval_array_equal( + left, right, exact: bool | Literal["equiv"] = "equiv", obj: str = "IntervalArray" +) -> None: + """ + Test that two IntervalArrays are equivalent. + + Parameters + ---------- + left, right : IntervalArray + The IntervalArrays to compare. + exact : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. If 'equiv', then RangeIndex can be substituted for + Index with an int64 dtype as well. + obj : str, default 'IntervalArray' + Specify object name being compared, internally used to show appropriate + assertion message + """ + _check_isinstance(left, right, IntervalArray) + + kwargs = {} + if left._left.dtype.kind in "mM": + # We have a DatetimeArray or TimedeltaArray + kwargs["check_freq"] = False + + assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs) + assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs) + + assert_attr_equal("closed", left, right, obj=obj) + + +def assert_period_array_equal(left, right, obj: str = "PeriodArray") -> None: + _check_isinstance(left, right, PeriodArray) + + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + assert_attr_equal("dtype", left, right, obj=obj) + + +def assert_datetime_array_equal( + left, right, obj: str = "DatetimeArray", check_freq: bool = True +) -> None: + __tracebackhide__ = True + _check_isinstance(left, right, DatetimeArray) + + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + if check_freq: + assert_attr_equal("freq", left, right, obj=obj) + assert_attr_equal("tz", left, right, obj=obj) + + +def assert_timedelta_array_equal( + left, right, obj: str = "TimedeltaArray", check_freq: bool = True +) -> None: + __tracebackhide__ = True + _check_isinstance(left, right, TimedeltaArray) + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + if check_freq: + assert_attr_equal("freq", left, right, obj=obj) + + +def raise_assert_detail( + obj, message, left, right, diff=None, first_diff=None, index_values=None +) -> NoReturn: + __tracebackhide__ = True + + msg = f"""{obj} are different + +{message}""" + + if isinstance(index_values, Index): + index_values = np.asarray(index_values) + + if isinstance(index_values, np.ndarray): + msg += f"\n[index]: {pprint_thing(index_values)}" + + if isinstance(left,
np.ndarray): + left = pprint_thing(left) + elif isinstance(left, (CategoricalDtype, NumpyEADtype, StringDtype)): + left = repr(left) + + if isinstance(right, np.ndarray): + right = pprint_thing(right) + elif isinstance(right, (CategoricalDtype, NumpyEADtype, StringDtype)): + right = repr(right) + + msg += f""" +[left]: {left} +[right]: {right}""" + + if diff is not None: + msg += f"\n[diff]: {diff}" + + if first_diff is not None: + msg += f"\n{first_diff}" + + raise AssertionError(msg) + + +def assert_numpy_array_equal( + left, + right, + strict_nan: bool = False, + check_dtype: bool | Literal["equiv"] = True, + err_msg=None, + check_same=None, + obj: str = "numpy array", + index_values=None, +) -> None: + """ + Check that 'np.ndarray' is equivalent. + + Parameters + ---------- + left, right : numpy.ndarray or iterable + The two arrays to be compared. + strict_nan : bool, default False + If True, consider NaN and None to be different. + check_dtype : bool, default True + Check dtype if both a and b are np.ndarray. + err_msg : str, default None + If provided, used as assertion message. + check_same : None|'copy'|'same', default None + Ensure left and right refer/do not refer to the same memory area. + obj : str, default 'numpy array' + Specify object name being compared, internally used to show appropriate + assertion message. + index_values : Index | numpy.ndarray, default None + optional index (shared by both left and right), used in output. + """ + __tracebackhide__ = True + + # instance validation + # Show a detailed error message when classes are different + assert_class_equal(left, right, obj=obj) + # both classes must be an np.ndarray + _check_isinstance(left, right, np.ndarray) + + def _get_base(obj): + return obj.base if getattr(obj, "base", None) is not None else obj + + left_base = _get_base(left) + right_base = _get_base(right) + + if check_same == "same": + if left_base is not right_base: + raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}") + elif check_same == "copy": + if left_base is right_base: + raise AssertionError(f"{repr(left_base)} is {repr(right_base)}") + + def _raise(left, right, err_msg) -> NoReturn: + if err_msg is None: + if left.shape != right.shape: + raise_assert_detail( + obj, f"{obj} shapes are different", left.shape, right.shape + ) + + diff = 0 + for left_arr, right_arr in zip(left, right): + # count up differences + if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan): + diff += 1 + + diff = diff * 100.0 / left.size + msg = f"{obj} values are different ({np.round(diff, 5)} %)" + raise_assert_detail(obj, msg, left, right, index_values=index_values) + + raise AssertionError(err_msg) + + # compare shape and values + if not array_equivalent(left, right, strict_nan=strict_nan): + _raise(left, right, err_msg) + + if check_dtype: + if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): + assert_attr_equal("dtype", left, right, obj=obj) + + +def assert_extension_array_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + index_values=None, + check_exact: bool | lib.NoDefault = lib.no_default, + rtol: float | lib.NoDefault = lib.no_default, + atol: float | lib.NoDefault = lib.no_default, + obj: str = "ExtensionArray", +) -> None: + """ + Check that left and right ExtensionArrays are equal. + + Parameters + ---------- + left, right : ExtensionArray + The two arrays to compare. + check_dtype : bool, default True + Whether to check if the ExtensionArray dtypes are identical. 
+ index_values : Index | numpy.ndarray, default None + Optional index (shared by both left and right), used in output. + check_exact : bool, default False + Whether to compare number exactly. + + .. versionchanged:: 2.2.0 + + Defaults to True for integer dtypes if none of + ``check_exact``, ``rtol`` and ``atol`` are specified. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'ExtensionArray' + Specify object name being compared, internally used to show appropriate + assertion message. + + .. versionadded:: 2.0.0 + + Notes + ----- + Missing values are checked separately from valid values. + A mask of missing values is computed for each and checked to match. + The remaining all-valid values are cast to object dtype and checked. + + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Series([1, 2, 3, 4]) + >>> b, c = a.array, a.array + >>> tm.assert_extension_array_equal(b, c) + """ + if ( + check_exact is lib.no_default + and rtol is lib.no_default + and atol is lib.no_default + ): + check_exact = ( + is_numeric_dtype(left.dtype) + and not is_float_dtype(left.dtype) + or is_numeric_dtype(right.dtype) + and not is_float_dtype(right.dtype) + ) + elif check_exact is lib.no_default: + check_exact = False + + rtol = rtol if rtol is not lib.no_default else 1.0e-5 + atol = atol if atol is not lib.no_default else 1.0e-8 + + assert isinstance(left, ExtensionArray), "left is not an ExtensionArray" + assert isinstance(right, ExtensionArray), "right is not an ExtensionArray" + if check_dtype: + assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}") + + if ( + isinstance(left, DatetimeLikeArrayMixin) + and isinstance(right, DatetimeLikeArrayMixin) + and type(right) == type(left) + ): + # GH 52449 + if not check_dtype and left.dtype.kind in "mM": + if not isinstance(left.dtype, np.dtype): + l_unit = cast(DatetimeTZDtype, left.dtype).unit + else: + l_unit = np.datetime_data(left.dtype)[0] + if not isinstance(right.dtype, np.dtype): + r_unit = cast(DatetimeTZDtype, right.dtype).unit + else: + r_unit = np.datetime_data(right.dtype)[0] + if ( + l_unit != r_unit + and compare_mismatched_resolutions( + left._ndarray, right._ndarray, operator.eq + ).all() + ): + return + # Avoid slow object-dtype comparisons + # np.asarray for case where we have a np.MaskedArray + assert_numpy_array_equal( + np.asarray(left.asi8), + np.asarray(right.asi8), + index_values=index_values, + obj=obj, + ) + return + + left_na = np.asarray(left.isna()) + right_na = np.asarray(right.isna()) + assert_numpy_array_equal( + left_na, right_na, obj=f"{obj} NA mask", index_values=index_values + ) + + left_valid = left[~left_na].to_numpy(dtype=object) + right_valid = right[~right_na].to_numpy(dtype=object) + if check_exact: + assert_numpy_array_equal( + left_valid, right_valid, obj=obj, index_values=index_values + ) + else: + _testing.assert_almost_equal( + left_valid, + right_valid, + check_dtype=bool(check_dtype), + rtol=rtol, + atol=atol, + obj=obj, + index_values=index_values, + ) + + +# This could be refactored to use the NDFrame.equals method +def assert_series_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + check_index_type: bool | Literal["equiv"] = "equiv", + check_series_type: bool = True, + check_names: bool = True, + check_exact: bool | lib.NoDefault = lib.no_default, + check_datetimelike_compat: bool = False, + check_categorical: 
bool = True, + check_category_order: bool = True, + check_freq: bool = True, + check_flags: bool = True, + rtol: float | lib.NoDefault = lib.no_default, + atol: float | lib.NoDefault = lib.no_default, + obj: str = "Series", + *, + check_index: bool = True, + check_like: bool = False, +) -> None: + """ + Check that left and right Series are equal. + + Parameters + ---------- + left : Series + right : Series + check_dtype : bool, default True + Whether to check the Series dtype is identical. + check_index_type : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. + check_series_type : bool, default True + Whether to check the Series class is identical. + check_names : bool, default True + Whether to check the Series and Index names attribute. + check_exact : bool, default False + Whether to compare number exactly. + + .. versionchanged:: 2.2.0 + + Defaults to True for integer dtypes if none of + ``check_exact``, ``rtol`` and ``atol`` are specified. + check_datetimelike_compat : bool, default False + Compare datetime-like which is comparable ignoring dtype. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_category_order : bool, default True + Whether to compare category order of internal Categoricals. + check_freq : bool, default True + Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'Series' + Specify object name being compared, internally used to show appropriate + assertion message. + check_index : bool, default True + Whether to check index equivalence. If False, then compare only values. + + .. versionadded:: 1.3.0 + check_like : bool, default False + If True, ignore the order of the index. Must be False if check_index is False. + Note: same labels must be with the same data. + + .. 
versionadded:: 1.5.0 + + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Series([1, 2, 3, 4]) + >>> b = pd.Series([1, 2, 3, 4]) + >>> tm.assert_series_equal(a, b) + """ + __tracebackhide__ = True + check_exact_index = False if check_exact is lib.no_default else check_exact + if ( + check_exact is lib.no_default + and rtol is lib.no_default + and atol is lib.no_default + ): + check_exact = ( + is_numeric_dtype(left.dtype) + and not is_float_dtype(left.dtype) + or is_numeric_dtype(right.dtype) + and not is_float_dtype(right.dtype) + ) + elif check_exact is lib.no_default: + check_exact = False + + rtol = rtol if rtol is not lib.no_default else 1.0e-5 + atol = atol if atol is not lib.no_default else 1.0e-8 + + if not check_index and check_like: + raise ValueError("check_like must be False if check_index is False") + + # instance validation + _check_isinstance(left, right, Series) + + if check_series_type: + assert_class_equal(left, right, obj=obj) + + # length comparison + if len(left) != len(right): + msg1 = f"{len(left)}, {left.index}" + msg2 = f"{len(right)}, {right.index}" + raise_assert_detail(obj, "Series length are different", msg1, msg2) + + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + + if check_index: + # GH #38183 + assert_index_equal( + left.index, + right.index, + exact=check_index_type, + check_names=check_names, + check_exact=check_exact_index, + check_categorical=check_categorical, + check_order=not check_like, + rtol=rtol, + atol=atol, + obj=f"{obj}.index", + ) + + if check_like: + left = left.reindex_like(right) + + if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)): + lidx = left.index + ridx = right.index + assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq) + + if check_dtype: + # We want to skip exact dtype checking when `check_categorical` + # is False. We'll still raise if only one is a `Categorical`, + # regardless of `check_categorical` + if ( + isinstance(left.dtype, CategoricalDtype) + and isinstance(right.dtype, CategoricalDtype) + and not check_categorical + ): + pass + else: + assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}") + if check_exact: + left_values = left._values + right_values = right._values + # Only check exact if dtype is numeric + if isinstance(left_values, ExtensionArray) and isinstance( + right_values, ExtensionArray + ): + assert_extension_array_equal( + left_values, + right_values, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + else: + # convert both to NumPy if not, check_dtype would raise earlier + lv, rv = left_values, right_values + if isinstance(left_values, ExtensionArray): + lv = left_values.to_numpy() + if isinstance(right_values, ExtensionArray): + rv = right_values.to_numpy() + assert_numpy_array_equal( + lv, + rv, + check_dtype=check_dtype, + obj=str(obj), + index_values=left.index, + ) + elif check_datetimelike_compat and ( + needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype) + ): + # we want to check only if we have compat dtypes + # e.g. integer and M|m are NOT compat, but we can simply check + # the values in that case + + # datetimelike may have different objects (e.g. datetime.datetime + # vs Timestamp) but will compare equal + if not Index(left._values).equals(Index(right._values)): + msg = ( + f"[datetimelike_compat=True] {left._values} " + f"is not equal to {right._values}." 
+ ) + raise AssertionError(msg) + elif isinstance(left.dtype, IntervalDtype) and isinstance( + right.dtype, IntervalDtype + ): + assert_interval_array_equal(left.array, right.array) + elif isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, CategoricalDtype + ): + _testing.assert_almost_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=bool(check_dtype), + obj=str(obj), + index_values=left.index, + ) + elif isinstance(left.dtype, ExtensionDtype) and isinstance( + right.dtype, ExtensionDtype + ): + assert_extension_array_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + elif is_extension_array_dtype_and_needs_i8_conversion( + left.dtype, right.dtype + ) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype): + assert_extension_array_equal( + left._values, + right._values, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype): + # DatetimeArray or TimedeltaArray + assert_extension_array_equal( + left._values, + right._values, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + else: + _testing.assert_almost_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=bool(check_dtype), + obj=str(obj), + index_values=left.index, + ) + + # metadata comparison + if check_names: + assert_attr_equal("name", left, right, obj=obj) + + if check_categorical: + if isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, CategoricalDtype + ): + assert_categorical_equal( + left._values, + right._values, + obj=f"{obj} category", + check_category_order=check_category_order, + ) + + +# This could be refactored to use the NDFrame.equals method +def assert_frame_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + check_index_type: bool | Literal["equiv"] = "equiv", + check_column_type: bool | Literal["equiv"] = "equiv", + check_frame_type: bool = True, + check_names: bool = True, + by_blocks: bool = False, + check_exact: bool | lib.NoDefault = lib.no_default, + check_datetimelike_compat: bool = False, + check_categorical: bool = True, + check_like: bool = False, + check_freq: bool = True, + check_flags: bool = True, + rtol: float | lib.NoDefault = lib.no_default, + atol: float | lib.NoDefault = lib.no_default, + obj: str = "DataFrame", +) -> None: + """ + Check that left and right DataFrame are equal. + + This function is intended to compare two DataFrames and output any + differences. It is mostly intended for use in unit tests. + Additional parameters allow varying the strictness of the + equality checks performed. + + Parameters + ---------- + left : DataFrame + First DataFrame to compare. + right : DataFrame + Second DataFrame to compare. + check_dtype : bool, default True + Whether to check the DataFrame dtype is identical. + check_index_type : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. + check_column_type : bool or {'equiv'}, default 'equiv' + Whether to check the columns class, dtype and inferred_type + are identical. Is passed as the ``exact`` argument of + :func:`assert_index_equal`. + check_frame_type : bool, default True + Whether to check the DataFrame class is identical. 
+ check_names : bool, default True + Whether to check that the `names` attribute for both the `index` + and `column` attributes of the DataFrame is identical. + by_blocks : bool, default False + Specify how to compare internal data. If False, compare by columns. + If True, compare by blocks. + check_exact : bool, default False + Whether to compare number exactly. + + .. versionchanged:: 2.2.0 + + Defaults to True for integer dtypes if none of + ``check_exact``, ``rtol`` and ``atol`` are specified. + check_datetimelike_compat : bool, default False + Compare datetime-like which is comparable ignoring dtype. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_like : bool, default False + If True, ignore the order of index & columns. + Note: index labels must match their respective rows + (same as in columns) - same labels must be with the same data. + check_freq : bool, default True + Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'DataFrame' + Specify object name being compared, internally used to show appropriate + assertion message. + + See Also + -------- + assert_series_equal : Equivalent method for asserting Series equality. + DataFrame.equals : Check DataFrame equality. + + Examples + -------- + This example shows comparing two DataFrames that are equal + but with columns of differing dtypes. + + >>> from pandas.testing import assert_frame_equal + >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) + >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]}) + + df1 equals itself. + + >>> assert_frame_equal(df1, df1) + + df1 differs from df2 as column 'b' is of a different type. + + >>> assert_frame_equal(df1, df2) + Traceback (most recent call last): + ... + AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different + + Attribute "dtype" are different + [left]: int64 + [right]: float64 + + Ignore differing dtypes in columns with check_dtype. 
+ + >>> assert_frame_equal(df1, df2, check_dtype=False) + """ + __tracebackhide__ = True + _rtol = rtol if rtol is not lib.no_default else 1.0e-5 + _atol = atol if atol is not lib.no_default else 1.0e-8 + _check_exact = check_exact if check_exact is not lib.no_default else False + + # instance validation + _check_isinstance(left, right, DataFrame) + + if check_frame_type: + assert isinstance(left, type(right)) + # assert_class_equal(left, right, obj=obj) + + # shape comparison + if left.shape != right.shape: + raise_assert_detail( + obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}" + ) + + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + + # index comparison + assert_index_equal( + left.index, + right.index, + exact=check_index_type, + check_names=check_names, + check_exact=_check_exact, + check_categorical=check_categorical, + check_order=not check_like, + rtol=_rtol, + atol=_atol, + obj=f"{obj}.index", + ) + + # column comparison + assert_index_equal( + left.columns, + right.columns, + exact=check_column_type, + check_names=check_names, + check_exact=_check_exact, + check_categorical=check_categorical, + check_order=not check_like, + rtol=_rtol, + atol=_atol, + obj=f"{obj}.columns", + ) + + if check_like: + left = left.reindex_like(right) + + # compare by blocks + if by_blocks: + rblocks = right._to_dict_of_blocks() + lblocks = left._to_dict_of_blocks() + for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))): + assert dtype in lblocks + assert dtype in rblocks + assert_frame_equal( + lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj + ) + + # compare by columns + else: + for i, col in enumerate(left.columns): + # We have already checked that columns match, so we can do + # fast location-based lookups + lcol = left._ixs(i, axis=1) + rcol = right._ixs(i, axis=1) + + # GH #38183 + # use check_index=False, because we do not want to run + # assert_index_equal for each column, + # as we already checked it for the whole dataframe before. + assert_series_equal( + lcol, + rcol, + check_dtype=check_dtype, + check_index_type=check_index_type, + check_exact=check_exact, + check_names=check_names, + check_datetimelike_compat=check_datetimelike_compat, + check_categorical=check_categorical, + check_freq=check_freq, + obj=f'{obj}.iloc[:, {i}] (column name="{col}")', + rtol=rtol, + atol=atol, + check_index=False, + check_flags=False, + ) + + +def assert_equal(left, right, **kwargs) -> None: + """ + Wrapper for tm.assert_*_equal to dispatch to the appropriate test function. + + Parameters + ---------- + left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray + The two items to be compared. + **kwargs + All keyword arguments are passed through to the underlying assert method. 
+ """ + __tracebackhide__ = True + + if isinstance(left, Index): + assert_index_equal(left, right, **kwargs) + if isinstance(left, (DatetimeIndex, TimedeltaIndex)): + assert left.freq == right.freq, (left.freq, right.freq) + elif isinstance(left, Series): + assert_series_equal(left, right, **kwargs) + elif isinstance(left, DataFrame): + assert_frame_equal(left, right, **kwargs) + elif isinstance(left, IntervalArray): + assert_interval_array_equal(left, right, **kwargs) + elif isinstance(left, PeriodArray): + assert_period_array_equal(left, right, **kwargs) + elif isinstance(left, DatetimeArray): + assert_datetime_array_equal(left, right, **kwargs) + elif isinstance(left, TimedeltaArray): + assert_timedelta_array_equal(left, right, **kwargs) + elif isinstance(left, ExtensionArray): + assert_extension_array_equal(left, right, **kwargs) + elif isinstance(left, np.ndarray): + assert_numpy_array_equal(left, right, **kwargs) + elif isinstance(left, str): + assert kwargs == {} + assert left == right + else: + assert kwargs == {} + assert_almost_equal(left, right) + + +def assert_sp_array_equal(left, right) -> None: + """ + Check that the left and right SparseArray are equal. + + Parameters + ---------- + left : SparseArray + right : SparseArray + """ + _check_isinstance(left, right, pd.arrays.SparseArray) + + assert_numpy_array_equal(left.sp_values, right.sp_values) + + # SparseIndex comparison + assert isinstance(left.sp_index, SparseIndex) + assert isinstance(right.sp_index, SparseIndex) + + left_index = left.sp_index + right_index = right.sp_index + + if not left_index.equals(right_index): + raise_assert_detail( + "SparseArray.index", "index are not equal", left_index, right_index + ) + else: + # Just ensure a + pass + + assert_attr_equal("fill_value", left, right) + assert_attr_equal("dtype", left, right) + assert_numpy_array_equal(left.to_dense(), right.to_dense()) + + +def assert_contains_all(iterable, dic) -> None: + for k in iterable: + assert k in dic, f"Did not contain item: {repr(k)}" + + +def assert_copy(iter1, iter2, **eql_kwargs) -> None: + """ + iter1, iter2: iterables that produce elements + comparable with assert_almost_equal + + Checks that the elements are equal, but not + the same object. (Does not check that items + in sequences are also not the same object) + """ + for elem1, elem2 in zip(iter1, iter2): + assert_almost_equal(elem1, elem2, **eql_kwargs) + msg = ( + f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be " + "different objects, but they were the same object." + ) + assert elem1 is not elem2, msg + + +def is_extension_array_dtype_and_needs_i8_conversion( + left_dtype: DtypeObj, right_dtype: DtypeObj +) -> bool: + """ + Checks that we have the combination of an ExtensionArraydtype and + a dtype that should be converted to int64 + + Returns + ------- + bool + + Related to issue #37609 + """ + return isinstance(left_dtype, ExtensionDtype) and needs_i8_conversion(right_dtype) + + +def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice) -> None: + """ + Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable, + ser[l_slc]. + """ + expected = ser.iloc[i_slc] + + assert_series_equal(ser.loc[l_slc], expected) + + if not is_integer_dtype(ser.index): + # For integer indices, .loc and plain getitem are position-based. 
+ + +def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice) -> None: + """ + Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable, + ser[l_slc]. + """ + expected = ser.iloc[i_slc] + + assert_series_equal(ser.loc[l_slc], expected) + + if not is_integer_dtype(ser.index): + # For integer indices, plain getitem slices positionally rather than + # by label, so only compare ser[l_slc] for non-integer indexes. + assert_series_equal(ser[l_slc], expected) + + +def assert_metadata_equivalent( + left: DataFrame | Series, right: DataFrame | Series | None = None +) -> None: + """ + Check that ._metadata attributes are equivalent. + """ + for attr in left._metadata: + val = getattr(left, attr, None) + if right is None: + assert val is None + else: + assert val == getattr(right, attr, None) diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/compat.py b/emu3/lib/python3.10/site-packages/pandas/_testing/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..cc352ba7b8f2f5a5548d4d5749d3b48ac838aced --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/_testing/compat.py @@ -0,0 +1,29 @@ +""" +Helpers for sharing tests between DataFrame/Series +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas import DataFrame + +if TYPE_CHECKING: + from pandas._typing import DtypeObj + + +def get_dtype(obj) -> DtypeObj: + if isinstance(obj, DataFrame): + # Note: we are assuming only one column + return obj.dtypes.iat[0] + else: + return obj.dtype + + +def get_obj(df: DataFrame, klass): + """ + For sharing tests using frame_or_series, either return the DataFrame + unchanged or return its first column as a Series. + """ + if klass is DataFrame: + return df + return df._ixs(0, axis=1) diff --git a/emu3/lib/python3.10/site-packages/pandas/_testing/contexts.py b/emu3/lib/python3.10/site-packages/pandas/_testing/contexts.py new file mode 100644 index 0000000000000000000000000000000000000000..eb6e4a917889aef221b2fc08eb2723c4fe568e04 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/_testing/contexts.py @@ -0,0 +1,257 @@ +from __future__ import annotations + +from contextlib import contextmanager +import os +from pathlib import Path +import tempfile +from typing import ( + IO, + TYPE_CHECKING, + Any, +) +import uuid + +from pandas._config import using_copy_on_write + +from pandas.compat import PYPY +from pandas.errors import ChainedAssignmentError + +from pandas import set_option + +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from collections.abc import Generator + + from pandas._typing import ( + BaseBuffer, + CompressionOptions, + FilePath, + ) + + +@contextmanager +def decompress_file( + path: FilePath | BaseBuffer, compression: CompressionOptions +) -> Generator[IO[bytes], None, None]: + """ + Open a compressed file and return a file object. + + Parameters + ---------- + path : str + The path where the file is read from. + + compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None} + Name of the decompression to use + + Returns + ------- + file object + """ + with get_handle(path, "rb", compression=compression, is_text=False) as handle: + yield handle.handle + + +@contextmanager
def set_timezone(tz: str) -> Generator[None, None, None]: + """ + Context manager for temporarily setting a timezone. + + Parameters + ---------- + tz : str + A string representing a valid timezone. + + Examples + -------- + >>> from datetime import datetime + >>> from dateutil.tz import tzlocal + >>> tzlocal().tzname(datetime(2021, 1, 1)) # doctest: +SKIP + 'IST' + + >>> with set_timezone('US/Eastern'): + ... tzlocal().tzname(datetime(2021, 1, 1)) + ...
+ 'EST' + """ + import time + + def setTZ(tz) -> None: + if tz is None: + try: + del os.environ["TZ"] + except KeyError: + pass + else: + os.environ["TZ"] = tz + time.tzset() + + orig_tz = os.environ.get("TZ") + setTZ(tz) + try: + yield + finally: + setTZ(orig_tz) + + +@contextmanager +def ensure_clean( + filename=None, return_filelike: bool = False, **kwargs: Any +) -> Generator[Any, None, None]: + """ + Gets a temporary path and agrees to remove on close. + + This implementation does not use tempfile.mkstemp to avoid having a file handle. + If the code using the returned path wants to delete the file itself, windows + requires that no program has a file handle to it. + + Parameters + ---------- + filename : str (optional) + suffix of the created file. + return_filelike : bool (default False) + if True, returns a file-like which is *always* cleaned. Necessary for + savefig and other functions which want to append extensions. + **kwargs + Additional keywords are passed to open(). + + """ + folder = Path(tempfile.gettempdir()) + + if filename is None: + filename = "" + filename = str(uuid.uuid4()) + filename + path = folder / filename + + path.touch() + + handle_or_str: str | IO = str(path) + encoding = kwargs.pop("encoding", None) + if return_filelike: + kwargs.setdefault("mode", "w+b") + if encoding is None and "b" not in kwargs["mode"]: + encoding = "utf-8" + handle_or_str = open(path, encoding=encoding, **kwargs) + + try: + yield handle_or_str + finally: + if not isinstance(handle_or_str, str): + handle_or_str.close() + if path.is_file(): + path.unlink() + + +@contextmanager +def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]: + """ + Context manager to temporarily register a CSV dialect for parsing CSV. + + Parameters + ---------- + name : str + The name of the dialect. + kwargs : mapping + The parameters for the dialect. + + Raises + ------ + ValueError : the name of the dialect conflicts with a builtin one. + + See Also + -------- + csv : Python's CSV library. 
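+
+    Examples
+    --------
+    Illustrative usage; the dialect name ``"pipes"`` is arbitrary.
+
+    >>> import csv
+    >>> with with_csv_dialect("pipes", delimiter="|"):
+    ...     csv.get_dialect("pipes").delimiter
+    '|'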
+ """ + import csv + + _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"} + + if name in _BUILTIN_DIALECTS: + raise ValueError("Cannot override builtin dialect.") + + csv.register_dialect(name, **kwargs) + try: + yield + finally: + csv.unregister_dialect(name) + + +@contextmanager +def use_numexpr(use, min_elements=None) -> Generator[None, None, None]: + from pandas.core.computation import expressions as expr + + if min_elements is None: + min_elements = expr._MIN_ELEMENTS + + olduse = expr.USE_NUMEXPR + oldmin = expr._MIN_ELEMENTS + set_option("compute.use_numexpr", use) + expr._MIN_ELEMENTS = min_elements + try: + yield + finally: + expr._MIN_ELEMENTS = oldmin + set_option("compute.use_numexpr", olduse) + + +def raises_chained_assignment_error(warn=True, extra_warnings=(), extra_match=()): + from pandas._testing import assert_produces_warning + + if not warn: + from contextlib import nullcontext + + return nullcontext() + + if PYPY and not extra_warnings: + from contextlib import nullcontext + + return nullcontext() + elif PYPY and extra_warnings: + return assert_produces_warning( + extra_warnings, + match="|".join(extra_match), + ) + else: + if using_copy_on_write(): + warning = ChainedAssignmentError + match = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment" + ) + else: + warning = FutureWarning # type: ignore[assignment] + # TODO update match + match = "ChainedAssignmentError" + if extra_warnings: + warning = (warning, *extra_warnings) # type: ignore[assignment] + return assert_produces_warning( + warning, + match="|".join((match, *extra_match)), + ) + + +def assert_cow_warning(warn=True, match=None, **kwargs): + """ + Assert that a warning is raised in the CoW warning mode. + + Parameters + ---------- + warn : bool, default True + By default, check that a warning is raised. Can be turned off by passing False. + match : str + The warning message to match against, if different from the default. 
+ kwargs + Passed through to assert_produces_warning + """ + from pandas._testing import assert_produces_warning + + if not warn: + from contextlib import nullcontext + + return nullcontext() + + if not match: + match = "Setting a value on a view" + + return assert_produces_warning(FutureWarning, match=match, **kwargs) diff --git a/emu3/lib/python3.10/site-packages/pandas/api/__init__.py b/emu3/lib/python3.10/site-packages/pandas/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a0d42b6541fdf8817b996ef9804db8a87a2bcd2c --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/api/__init__.py @@ -0,0 +1,16 @@ +""" public toolkit API """ +from pandas.api import ( + extensions, + indexers, + interchange, + types, + typing, +) + +__all__ = [ + "interchange", + "extensions", + "indexers", + "types", + "typing", +] diff --git a/emu3/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59007f2f8e942b8698e964f33dc0447f3cca4f41 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/api/types/__init__.py b/emu3/lib/python3.10/site-packages/pandas/api/types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c601086bb9f86a633d6a3f2245779fea9428bf95 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/api/types/__init__.py @@ -0,0 +1,23 @@ +""" +Public toolkit API. +""" + +from pandas._libs.lib import infer_dtype + +from pandas.core.dtypes.api import * # noqa: F403 +from pandas.core.dtypes.concat import union_categoricals +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + IntervalDtype, + PeriodDtype, +) + +__all__ = [ + "infer_dtype", + "union_categoricals", + "CategoricalDtype", + "DatetimeTZDtype", + "IntervalDtype", + "PeriodDtype", +] diff --git a/emu3/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..703bd99b9848229758080f29c06d2d90b51163e9 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/api/typing/__init__.py b/emu3/lib/python3.10/site-packages/pandas/api/typing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b5d2cb06b523508b15025be804a66daaaaf7a45 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/api/typing/__init__.py @@ -0,0 +1,55 @@ +""" +Public API classes that store intermediate results useful for type-hinting. 
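+
+For example (an illustrative sketch), ``DataFrame.groupby`` returns a
+``DataFrameGroupBy``, which can be imported from here for annotations::
+
+    from pandas.api.typing import DataFrameGroupBy
+
+    def summarize(gb: DataFrameGroupBy) -> None: ...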
+""" + +from pandas._libs import NaTType +from pandas._libs.missing import NAType + +from pandas.core.groupby import ( + DataFrameGroupBy, + SeriesGroupBy, +) +from pandas.core.resample import ( + DatetimeIndexResamplerGroupby, + PeriodIndexResamplerGroupby, + Resampler, + TimedeltaIndexResamplerGroupby, + TimeGrouper, +) +from pandas.core.window import ( + Expanding, + ExpandingGroupby, + ExponentialMovingWindow, + ExponentialMovingWindowGroupby, + Rolling, + RollingGroupby, + Window, +) + +# TODO: Can't import Styler without importing jinja2 +# from pandas.io.formats.style import Styler +from pandas.io.json._json import JsonReader +from pandas.io.stata import StataReader + +__all__ = [ + "DataFrameGroupBy", + "DatetimeIndexResamplerGroupby", + "Expanding", + "ExpandingGroupby", + "ExponentialMovingWindow", + "ExponentialMovingWindowGroupby", + "JsonReader", + "NaTType", + "NAType", + "PeriodIndexResamplerGroupby", + "Resampler", + "Rolling", + "RollingGroupby", + "SeriesGroupBy", + "StataReader", + # See TODO above + # "Styler", + "TimedeltaIndexResamplerGroupby", + "TimeGrouper", + "Window", +] diff --git a/emu3/lib/python3.10/site-packages/pandas/compat/__init__.py b/emu3/lib/python3.10/site-packages/pandas/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5ada6d705172feeeea4e7c8fbe0b33f8ab495223 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/compat/__init__.py @@ -0,0 +1,199 @@ +""" +compat +====== + +Cross-compatible functions for different versions of Python. + +Other items: +* platform checker +""" +from __future__ import annotations + +import os +import platform +import sys +from typing import TYPE_CHECKING + +from pandas.compat._constants import ( + IS64, + ISMUSL, + PY310, + PY311, + PY312, + PYPY, +) +import pandas.compat.compressors +from pandas.compat.numpy import is_numpy_dev +from pandas.compat.pyarrow import ( + pa_version_under10p1, + pa_version_under11p0, + pa_version_under13p0, + pa_version_under14p0, + pa_version_under14p1, + pa_version_under16p0, + pa_version_under17p0, +) + +if TYPE_CHECKING: + from pandas._typing import F + + +def set_function_name(f: F, name: str, cls: type) -> F: + """ + Bind the name/qualname attributes of the function. + """ + f.__name__ = name + f.__qualname__ = f"{cls.__name__}.{name}" + f.__module__ = cls.__module__ + return f + + +def is_platform_little_endian() -> bool: + """ + Checking if the running platform is little endian. + + Returns + ------- + bool + True if the running platform is little endian. + """ + return sys.byteorder == "little" + + +def is_platform_windows() -> bool: + """ + Checking if the running platform is windows. + + Returns + ------- + bool + True if the running platform is windows. + """ + return sys.platform in ["win32", "cygwin"] + + +def is_platform_linux() -> bool: + """ + Checking if the running platform is linux. + + Returns + ------- + bool + True if the running platform is linux. + """ + return sys.platform == "linux" + + +def is_platform_mac() -> bool: + """ + Checking if the running platform is mac. + + Returns + ------- + bool + True if the running platform is mac. + """ + return sys.platform == "darwin" + + +def is_platform_arm() -> bool: + """ + Checking if the running platform use ARM architecture. + + Returns + ------- + bool + True if the running platform uses ARM architecture. 
+ """ + return platform.machine() in ("arm64", "aarch64") or platform.machine().startswith( + "armv" + ) + + +def is_platform_power() -> bool: + """ + Checking if the running platform use Power architecture. + + Returns + ------- + bool + True if the running platform uses ARM architecture. + """ + return platform.machine() in ("ppc64", "ppc64le") + + +def is_ci_environment() -> bool: + """ + Checking if running in a continuous integration environment by checking + the PANDAS_CI environment variable. + + Returns + ------- + bool + True if the running in a continuous integration environment. + """ + return os.environ.get("PANDAS_CI", "0") == "1" + + +def get_lzma_file() -> type[pandas.compat.compressors.LZMAFile]: + """ + Importing the `LZMAFile` class from the `lzma` module. + + Returns + ------- + class + The `LZMAFile` class from the `lzma` module. + + Raises + ------ + RuntimeError + If the `lzma` module was not imported correctly, or didn't exist. + """ + if not pandas.compat.compressors.has_lzma: + raise RuntimeError( + "lzma module not available. " + "A Python re-install with the proper dependencies, " + "might be required to solve this issue." + ) + return pandas.compat.compressors.LZMAFile + + +def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]: + """ + Importing the `BZ2File` class from the `bz2` module. + + Returns + ------- + class + The `BZ2File` class from the `bz2` module. + + Raises + ------ + RuntimeError + If the `bz2` module was not imported correctly, or didn't exist. + """ + if not pandas.compat.compressors.has_bz2: + raise RuntimeError( + "bz2 module not available. " + "A Python re-install with the proper dependencies, " + "might be required to solve this issue." + ) + return pandas.compat.compressors.BZ2File + + +__all__ = [ + "is_numpy_dev", + "pa_version_under10p1", + "pa_version_under11p0", + "pa_version_under13p0", + "pa_version_under14p0", + "pa_version_under14p1", + "pa_version_under16p0", + "pa_version_under17p0", + "IS64", + "ISMUSL", + "PY310", + "PY311", + "PY312", + "PYPY", +] diff --git a/emu3/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcc5edc79b8e9447c1bf475f725ea8f013541c52 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/compat/_constants.py b/emu3/lib/python3.10/site-packages/pandas/compat/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..7bc3fbaaefebf69d8ebd622406dc9357237add1a --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/compat/_constants.py @@ -0,0 +1,30 @@ +""" +_constants +====== + +Constants relevant for the Python implementation. 
+""" + +from __future__ import annotations + +import platform +import sys +import sysconfig + +IS64 = sys.maxsize > 2**32 + +PY310 = sys.version_info >= (3, 10) +PY311 = sys.version_info >= (3, 11) +PY312 = sys.version_info >= (3, 12) +PYPY = platform.python_implementation() == "PyPy" +ISMUSL = "musl" in (sysconfig.get_config_var("HOST_GNU_TYPE") or "") +REF_COUNT = 2 if PY311 else 3 + +__all__ = [ + "IS64", + "ISMUSL", + "PY310", + "PY311", + "PY312", + "PYPY", +] diff --git a/emu3/lib/python3.10/site-packages/pandas/compat/_optional.py b/emu3/lib/python3.10/site-packages/pandas/compat/_optional.py new file mode 100644 index 0000000000000000000000000000000000000000..2bc6cd46f09a7e4b103658f9c2ec9a69d93d00b9 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/compat/_optional.py @@ -0,0 +1,168 @@ +from __future__ import annotations + +import importlib +import sys +from typing import TYPE_CHECKING +import warnings + +from pandas.util._exceptions import find_stack_level + +from pandas.util.version import Version + +if TYPE_CHECKING: + import types + +# Update install.rst & setup.cfg when updating versions! + +VERSIONS = { + "adbc-driver-postgresql": "0.8.0", + "adbc-driver-sqlite": "0.8.0", + "bs4": "4.11.2", + "blosc": "1.21.3", + "bottleneck": "1.3.6", + "dataframe-api-compat": "0.1.7", + "fastparquet": "2022.12.0", + "fsspec": "2022.11.0", + "html5lib": "1.1", + "hypothesis": "6.46.1", + "gcsfs": "2022.11.0", + "jinja2": "3.1.2", + "lxml.etree": "4.9.2", + "matplotlib": "3.6.3", + "numba": "0.56.4", + "numexpr": "2.8.4", + "odfpy": "1.4.1", + "openpyxl": "3.1.0", + "pandas_gbq": "0.19.0", + "psycopg2": "2.9.6", # (dt dec pq3 ext lo64) + "pymysql": "1.0.2", + "pyarrow": "10.0.1", + "pyreadstat": "1.2.0", + "pytest": "7.3.2", + "python-calamine": "0.1.7", + "pyxlsb": "1.0.10", + "s3fs": "2022.11.0", + "scipy": "1.10.0", + "sqlalchemy": "2.0.0", + "tables": "3.8.0", + "tabulate": "0.9.0", + "xarray": "2022.12.0", + "xlrd": "2.0.1", + "xlsxwriter": "3.0.5", + "zstandard": "0.19.0", + "tzdata": "2022.7", + "qtpy": "2.3.0", + "pyqt5": "5.15.9", +} + +# A mapping from import name to package name (on PyPI) for packages where +# these two names are different. + +INSTALL_MAPPING = { + "bs4": "beautifulsoup4", + "bottleneck": "Bottleneck", + "jinja2": "Jinja2", + "lxml.etree": "lxml", + "odf": "odfpy", + "pandas_gbq": "pandas-gbq", + "python_calamine": "python-calamine", + "sqlalchemy": "SQLAlchemy", + "tables": "pytables", +} + + +def get_version(module: types.ModuleType) -> str: + version = getattr(module, "__version__", None) + + if version is None: + raise ImportError(f"Can't determine version for {module.__name__}") + if module.__name__ == "psycopg2": + # psycopg2 appends " (dt dec pq3 ext lo64)" to it's version + version = version.split()[0] + return version + + +def import_optional_dependency( + name: str, + extra: str = "", + errors: str = "raise", + min_version: str | None = None, +): + """ + Import an optional dependency. + + By default, if a dependency is missing an ImportError with a nice + message will be raised. If a dependency is present, but too old, + we raise. + + Parameters + ---------- + name : str + The module name. + extra : str + Additional text to include in the ImportError message. + errors : str {'raise', 'warn', 'ignore'} + What to do when a dependency is not found or its version is too old. + + * raise : Raise an ImportError + * warn : Only applicable when a module's version is to old. 
+ Warns that the version is too old and returns None + * ignore: If the module is not installed, return None, otherwise, + return the module, even if the version is too old. + It's expected that users validate the version locally when + using ``errors="ignore"`` (see. ``io/html.py``) + min_version : str, default None + Specify a minimum version that is different from the global pandas + minimum version required. + Returns + ------- + maybe_module : Optional[ModuleType] + The imported module, when found and the version is correct. + None is returned when the package is not found and `errors` + is False, or when the package's version is too old and `errors` + is ``'warn'`` or ``'ignore'``. + """ + assert errors in {"warn", "raise", "ignore"} + + package_name = INSTALL_MAPPING.get(name) + install_name = package_name if package_name is not None else name + + msg = ( + f"Missing optional dependency '{install_name}'. {extra} " + f"Use pip or conda to install {install_name}." + ) + try: + module = importlib.import_module(name) + except ImportError: + if errors == "raise": + raise ImportError(msg) + return None + + # Handle submodules: if we have submodule, grab parent module from sys.modules + parent = name.split(".")[0] + if parent != name: + install_name = parent + module_to_get = sys.modules[install_name] + else: + module_to_get = module + minimum_version = min_version if min_version is not None else VERSIONS.get(parent) + if minimum_version: + version = get_version(module_to_get) + if version and Version(version) < Version(minimum_version): + msg = ( + f"Pandas requires version '{minimum_version}' or newer of '{parent}' " + f"(version '{version}' currently installed)." + ) + if errors == "warn": + warnings.warn( + msg, + UserWarning, + stacklevel=find_stack_level(), + ) + return None + elif errors == "raise": + raise ImportError(msg) + else: + return None + + return module diff --git a/emu3/lib/python3.10/site-packages/pandas/compat/compressors.py b/emu3/lib/python3.10/site-packages/pandas/compat/compressors.py new file mode 100644 index 0000000000000000000000000000000000000000..1f31e34c092c9672559ca2f5194cb1da7083d03b --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/compat/compressors.py @@ -0,0 +1,77 @@ +""" +Patched ``BZ2File`` and ``LZMAFile`` to handle pickle protocol 5. +""" + +from __future__ import annotations + +from pickle import PickleBuffer + +from pandas.compat._constants import PY310 + +try: + import bz2 + + has_bz2 = True +except ImportError: + has_bz2 = False + +try: + import lzma + + has_lzma = True +except ImportError: + has_lzma = False + + +def flatten_buffer( + b: bytes | bytearray | memoryview | PickleBuffer, +) -> bytes | bytearray | memoryview: + """ + Return some 1-D `uint8` typed buffer. + + Coerces anything that does not match that description to one that does + without copying if possible (otherwise will copy). + """ + + if isinstance(b, (bytes, bytearray)): + return b + + if not isinstance(b, PickleBuffer): + b = PickleBuffer(b) + + try: + # coerce to 1-D `uint8` C-contiguous `memoryview` zero-copy + return b.raw() + except BufferError: + # perform in-memory copy if buffer is not contiguous + return memoryview(b).tobytes("A") + + +if has_bz2: + + class BZ2File(bz2.BZ2File): + if not PY310: + + def write(self, b) -> int: + # Workaround issue where `bz2.BZ2File` expects `len` + # to return the number of bytes in `b` by converting + # `b` into something that meets that constraint with + # minimal copying. + # + # Note: This is fixed in Python 3.10. 
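+                #
+                # Illustrative effect (assumed inputs, not executed here):
+                #   flatten_buffer(bytearray(b"abc"))  -> bytearray(b"abc")
+                #   flatten_buffer(memoryview(b"abc")) -> 1-D uint8 buffer of len 3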
+ return super().write(flatten_buffer(b)) + + +if has_lzma: + + class LZMAFile(lzma.LZMAFile): + if not PY310: + + def write(self, b) -> int: + # Workaround issue where `lzma.LZMAFile` expects `len` + # to return the number of bytes in `b` by converting + # `b` into something that meets that constraint with + # minimal copying. + # + # Note: This is fixed in Python 3.10. + return super().write(flatten_buffer(b)) diff --git a/emu3/lib/python3.10/site-packages/pandas/compat/pickle_compat.py b/emu3/lib/python3.10/site-packages/pandas/compat/pickle_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..cd98087c06c18634304c29d88837017a6952a4fc --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/compat/pickle_compat.py @@ -0,0 +1,262 @@ +""" +Support pre-0.12 series pickle compatibility. +""" +from __future__ import annotations + +import contextlib +import copy +import io +import pickle as pkl +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.tslibs import BaseOffset + +from pandas import Index +from pandas.core.arrays import ( + DatetimeArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.internals import BlockManager + +if TYPE_CHECKING: + from collections.abc import Generator + + +def load_reduce(self) -> None: + stack = self.stack + args = stack.pop() + func = stack[-1] + + try: + stack[-1] = func(*args) + return + except TypeError as err: + # If we have a deprecated function, + # try to replace and try again. + + msg = "_reconstruct: First argument must be a sub-type of ndarray" + + if msg in str(err): + try: + cls = args[0] + stack[-1] = object.__new__(cls) + return + except TypeError: + pass + elif args and isinstance(args[0], type) and issubclass(args[0], BaseOffset): + # TypeError: object.__new__(Day) is not safe, use Day.__new__() + cls = args[0] + stack[-1] = cls.__new__(*args) + return + elif args and issubclass(args[0], PeriodArray): + cls = args[0] + stack[-1] = NDArrayBacked.__new__(*args) + return + + raise + + +# If classes are moved, provide compat here. 
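+#
+# Each key and value is an (import_module, class_name) pair; find_class()
+# below consults this map before falling back to the standard lookup. For
+# example, a legacy pickle referencing ("pandas.indexes.base", "Index")
+# resolves to ("pandas.core.indexes.base", "Index").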
+_class_locations_map = {
+    ("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
+    # 15477
+    ("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
+    # Re-routing unpickle block logic to go through _unpickle_block instead
+    # for pandas <= 1.3.5
+    ("pandas.core.internals.blocks", "new_block"): (
+        "pandas._libs.internals",
+        "_unpickle_block",
+    ),
+    ("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
+    ("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
+    # 10890
+    ("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
+    ("pandas.sparse.series", "SparseTimeSeries"): (
+        "pandas.core.sparse.series",
+        "SparseSeries",
+    ),
+    # 12588, extensions moving
+    ("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
+    ("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
+    # 18543 moving period
+    ("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
+    ("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
+    # 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
+    ("pandas.tslib", "__nat_unpickle"): (
+        "pandas._libs.tslibs.nattype",
+        "__nat_unpickle",
+    ),
+    ("pandas._libs.tslib", "__nat_unpickle"): (
+        "pandas._libs.tslibs.nattype",
+        "__nat_unpickle",
+    ),
+    # 15998 top-level dirs moving
+    ("pandas.sparse.array", "SparseArray"): (
+        "pandas.core.arrays.sparse",
+        "SparseArray",
+    ),
+    ("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
+    ("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
+    ("pandas.indexes.numeric", "Int64Index"): (
+        "pandas.core.indexes.base",
+        "Index",  # updated in 50775
+    ),
+    ("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
+    ("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
+    ("pandas.tseries.index", "_new_DatetimeIndex"): (
+        "pandas.core.indexes.datetimes",
+        "_new_DatetimeIndex",
+    ),
+    ("pandas.tseries.index", "DatetimeIndex"): (
+        "pandas.core.indexes.datetimes",
+        "DatetimeIndex",
+    ),
+    ("pandas.tseries.period", "PeriodIndex"): (
+        "pandas.core.indexes.period",
+        "PeriodIndex",
+    ),
+    # 19269, arrays moving
+    ("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
+    # 19939, add timedeltaindex, float64index compat from 15998 move
+    ("pandas.tseries.tdi", "TimedeltaIndex"): (
+        "pandas.core.indexes.timedeltas",
+        "TimedeltaIndex",
+    ),
+    ("pandas.indexes.numeric", "Float64Index"): (
+        "pandas.core.indexes.base",
+        "Index",  # updated in 50775
+    ),
+    # 50775, remove Int64Index, UInt64Index & Float64Index from codebase
+    ("pandas.core.indexes.numeric", "Int64Index"): (
+        "pandas.core.indexes.base",
+        "Index",
+    ),
+    ("pandas.core.indexes.numeric", "UInt64Index"): (
+        "pandas.core.indexes.base",
+        "Index",
+    ),
+    ("pandas.core.indexes.numeric", "Float64Index"): (
+        "pandas.core.indexes.base",
+        "Index",
+    ),
+    ("pandas.core.arrays.sparse.dtype", "SparseDtype"): (
+        "pandas.core.dtypes.dtypes",
+        "SparseDtype",
+    ),
+}
+
+
+# Our Unpickler subclass overrides some methods and dispatch entries for
+# compat; it relies on a non-public class of the pickle module.
+
+
+class Unpickler(pkl._Unpickler):
+    def find_class(self, module, name):
+        # override superclass
+        key = (module, name)
+        module, name = _class_locations_map.get(key, key)
+        return super().find_class(module, name)
+
+
+Unpickler.dispatch = copy.copy(Unpickler.dispatch)
+Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
+
+
+def load_newobj(self) -> None:
+    args = self.stack.pop()
+    cls = self.stack[-1]
+
+    # compat
+    if issubclass(cls, Index):
+        obj = object.__new__(cls)
+    elif issubclass(cls, DatetimeArray) and not args:
+        arr = np.array([], dtype="M8[ns]")
+        obj = cls.__new__(cls, arr, arr.dtype)
+    elif issubclass(cls, TimedeltaArray) and not args:
+        arr = np.array([], dtype="m8[ns]")
+        obj = cls.__new__(cls, arr, arr.dtype)
+    elif cls is BlockManager and not args:
+        obj = cls.__new__(cls, (), [], False)
+    else:
+        obj = cls.__new__(cls, *args)
+
+    self.stack[-1] = obj
+
+
+Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
+
+
+def load_newobj_ex(self) -> None:
+    kwargs = self.stack.pop()
+    args = self.stack.pop()
+    cls = self.stack.pop()
+
+    # compat
+    if issubclass(cls, Index):
+        obj = object.__new__(cls)
+    else:
+        obj = cls.__new__(cls, *args, **kwargs)
+    self.append(obj)
+
+
+try:
+    Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
+except (AttributeError, KeyError):
+    pass
+
+
+def load(fh, encoding: str | None = None, is_verbose: bool = False):
+    """
+    Load a pickle with a provided encoding.
+
+    Parameters
+    ----------
+    fh : a filelike object
+    encoding : an optional encoding
+    is_verbose : show exception output
+    """
+    try:
+        fh.seek(0)
+        if encoding is not None:
+            up = Unpickler(fh, encoding=encoding)
+        else:
+            up = Unpickler(fh)
+        # "Unpickler" has no attribute "is_verbose"  [attr-defined]
+        up.is_verbose = is_verbose  # type: ignore[attr-defined]
+
+        return up.load()
+    except (ValueError, TypeError):
+        raise
+
+
+def loads(
+    bytes_object: bytes,
+    *,
+    fix_imports: bool = True,
+    encoding: str = "ASCII",
+    errors: str = "strict",
+):
+    """
+    Analogous to pickle._loads.
+    """
+    fd = io.BytesIO(bytes_object)
+    return Unpickler(
+        fd, fix_imports=fix_imports, encoding=encoding, errors=errors
+    ).load()
+
+
+@contextlib.contextmanager
+def patch_pickle() -> Generator[None, None, None]:
+    """
+    Temporarily patch pickle to use our unpickler.
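+
+    Examples
+    --------
+    Sketch only; ``legacy_bytes`` is a placeholder for bytes written by an
+    older pandas.
+
+    >>> import pickle
+    >>> with patch_pickle():
+    ...     obj = pickle.loads(legacy_bytes)  # doctest: +SKIP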
+ """ + orig_loads = pkl.loads + try: + setattr(pkl, "loads", loads) + yield + finally: + setattr(pkl, "loads", orig_loads) diff --git a/emu3/lib/python3.10/site-packages/pandas/compat/pyarrow.py b/emu3/lib/python3.10/site-packages/pandas/compat/pyarrow.py new file mode 100644 index 0000000000000000000000000000000000000000..457d26766520d6fe081b5b90145fcd832baee6a1 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/compat/pyarrow.py @@ -0,0 +1,29 @@ +""" support pyarrow compatibility across versions """ + +from __future__ import annotations + +from pandas.util.version import Version + +try: + import pyarrow as pa + + _palv = Version(Version(pa.__version__).base_version) + pa_version_under10p1 = _palv < Version("10.0.1") + pa_version_under11p0 = _palv < Version("11.0.0") + pa_version_under12p0 = _palv < Version("12.0.0") + pa_version_under13p0 = _palv < Version("13.0.0") + pa_version_under14p0 = _palv < Version("14.0.0") + pa_version_under14p1 = _palv < Version("14.0.1") + pa_version_under15p0 = _palv < Version("15.0.0") + pa_version_under16p0 = _palv < Version("16.0.0") + pa_version_under17p0 = _palv < Version("17.0.0") +except ImportError: + pa_version_under10p1 = True + pa_version_under11p0 = True + pa_version_under12p0 = True + pa_version_under13p0 = True + pa_version_under14p0 = True + pa_version_under14p1 = True + pa_version_under15p0 = True + pa_version_under16p0 = True + pa_version_under17p0 = True diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/computation/__init__.py b/emu3/lib/python3.10/site-packages/pandas/tests/computation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c47355e70a3a9c59fe1ad474b086e2bb2da95056 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a872af8fe334bca21295cfa5d79f293a66219ee Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_eval.cpython-310.pyc b/emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_eval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..750010da9fb7c245baecd771db87a779ab92c41c Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_eval.cpython-310.pyc differ diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/computation/test_compat.py b/emu3/lib/python3.10/site-packages/pandas/tests/computation/test_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..856a5b3a22a95d35cc577050f52d762b065e3ddf --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/tests/computation/test_compat.py @@ -0,0 +1,32 @@ +import pytest + +from pandas.compat._optional import VERSIONS + +import pandas as pd +from 
pandas.core.computation import expr +from pandas.core.computation.engines import ENGINES +from pandas.util.version import Version + + +def test_compat(): + # test we have compat with our version of numexpr + + from pandas.core.computation.check import NUMEXPR_INSTALLED + + ne = pytest.importorskip("numexpr") + + ver = ne.__version__ + if Version(ver) < Version(VERSIONS["numexpr"]): + assert not NUMEXPR_INSTALLED + else: + assert NUMEXPR_INSTALLED + + +@pytest.mark.parametrize("engine", ENGINES) +@pytest.mark.parametrize("parser", expr.PARSERS) +def test_invalid_numexpr_version(engine, parser): + if engine == "numexpr": + pytest.importorskip("numexpr") + a, b = 1, 2 # noqa: F841 + res = pd.eval("a + b", engine=engine, parser=parser) + assert res == 3 diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py b/emu3/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..e8fad6b8cbd63a042d69de8a6a08c1ca1f35b12e --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/tests/computation/test_eval.py @@ -0,0 +1,2001 @@ +from __future__ import annotations + +from functools import reduce +from itertools import product +import operator + +import numpy as np +import pytest + +from pandas.compat import PY312 +from pandas.errors import ( + NumExprClobberingError, + PerformanceWarning, + UndefinedVariableError, +) +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import ( + is_bool, + is_float, + is_list_like, + is_scalar, +) + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.computation import ( + expr, + pytables, +) +from pandas.core.computation.engines import ENGINES +from pandas.core.computation.expr import ( + BaseExprVisitor, + PandasExprVisitor, + PythonExprVisitor, +) +from pandas.core.computation.expressions import ( + NUMEXPR_INSTALLED, + USE_NUMEXPR, +) +from pandas.core.computation.ops import ( + ARITH_OPS_SYMS, + SPECIAL_CASE_ARITH_OPS_SYMS, + _binary_math_ops, + _binary_ops_dict, + _unary_math_ops, +) +from pandas.core.computation.scope import DEFAULT_GLOBALS + + +@pytest.fixture( + params=( + pytest.param( + engine, + marks=[ + pytest.mark.skipif( + engine == "numexpr" and not USE_NUMEXPR, + reason=f"numexpr enabled->{USE_NUMEXPR}, " + f"installed->{NUMEXPR_INSTALLED}", + ), + td.skip_if_no("numexpr"), + ], + ) + for engine in ENGINES + ) +) +def engine(request): + return request.param + + +@pytest.fixture(params=expr.PARSERS) +def parser(request): + return request.param + + +def _eval_single_bin(lhs, cmp1, rhs, engine): + c = _binary_ops_dict[cmp1] + if ENGINES[engine].has_neg_frac: + try: + return c(lhs, rhs) + except ValueError as e: + if str(e).startswith( + "negative number cannot be raised to a fractional power" + ): + return np.nan + raise + return c(lhs, rhs) + + +# TODO: using range(5) here is a kludge +@pytest.fixture( + params=list(range(5)), + ids=["DataFrame", "Series", "SeriesNaN", "DataFrameNaN", "float"], +) +def lhs(request): + nan_df1 = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + nan_df1[nan_df1 > 0.5] = np.nan + + opts = ( + DataFrame(np.random.default_rng(2).standard_normal((10, 5))), + Series(np.random.default_rng(2).standard_normal(5)), + Series([1, 2, np.nan, np.nan, 5]), + nan_df1, + np.random.default_rng(2).standard_normal(), + ) + return opts[request.param] + + +rhs = lhs +midhs = 
lhs + + +@pytest.fixture +def idx_func_dict(): + return { + "i": lambda n: Index(np.arange(n), dtype=np.int64), + "f": lambda n: Index(np.arange(n), dtype=np.float64), + "s": lambda n: Index([f"{i}_{chr(i)}" for i in range(97, 97 + n)]), + "dt": lambda n: date_range("2020-01-01", periods=n), + "td": lambda n: timedelta_range("1 day", periods=n), + "p": lambda n: period_range("2020-01-01", periods=n, freq="D"), + } + + +class TestEval: + @pytest.mark.parametrize( + "cmp1", + ["!=", "==", "<=", ">=", "<", ">"], + ids=["ne", "eq", "le", "ge", "lt", "gt"], + ) + @pytest.mark.parametrize("cmp2", [">", "<"], ids=["gt", "lt"]) + @pytest.mark.parametrize("binop", expr.BOOL_OPS_SYMS) + def test_complex_cmp_ops(self, cmp1, cmp2, binop, lhs, rhs, engine, parser): + if parser == "python" and binop in ["and", "or"]: + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)" + pd.eval(ex, engine=engine, parser=parser) + return + + lhs_new = _eval_single_bin(lhs, cmp1, rhs, engine) + rhs_new = _eval_single_bin(lhs, cmp2, rhs, engine) + expected = _eval_single_bin(lhs_new, binop, rhs_new, engine) + + ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)" + result = pd.eval(ex, engine=engine, parser=parser) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("cmp_op", expr.CMP_OPS_SYMS) + def test_simple_cmp_ops(self, cmp_op, lhs, rhs, engine, parser): + lhs = lhs < 0 + rhs = rhs < 0 + + if parser == "python" and cmp_op in ["in", "not in"]: + msg = "'(In|NotIn)' nodes are not implemented" + + with pytest.raises(NotImplementedError, match=msg): + ex = f"lhs {cmp_op} rhs" + pd.eval(ex, engine=engine, parser=parser) + return + + ex = f"lhs {cmp_op} rhs" + msg = "|".join( + [ + r"only list-like( or dict-like)? objects are allowed to be " + r"passed to (DataFrame\.)?isin\(\), you passed a " + r"(`|')bool(`|')", + "argument of type 'bool' is not iterable", + ] + ) + if cmp_op in ("in", "not in") and not is_list_like(rhs): + with pytest.raises(TypeError, match=msg): + pd.eval( + ex, + engine=engine, + parser=parser, + local_dict={"lhs": lhs, "rhs": rhs}, + ) + else: + expected = _eval_single_bin(lhs, cmp_op, rhs, engine) + result = pd.eval(ex, engine=engine, parser=parser) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("op", expr.CMP_OPS_SYMS) + def test_compound_invert_op(self, op, lhs, rhs, request, engine, parser): + if parser == "python" and op in ["in", "not in"]: + msg = "'(In|NotIn)' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + ex = f"~(lhs {op} rhs)" + pd.eval(ex, engine=engine, parser=parser) + return + + if ( + is_float(lhs) + and not is_float(rhs) + and op in ["in", "not in"] + and engine == "python" + and parser == "pandas" + ): + mark = pytest.mark.xfail( + reason="Looks like expected is negative, unclear whether " + "expected is incorrect or result is incorrect" + ) + request.applymarker(mark) + skip_these = ["in", "not in"] + ex = f"~(lhs {op} rhs)" + + msg = "|".join( + [ + r"only list-like( or dict-like)? 
objects are allowed to be " + r"passed to (DataFrame\.)?isin\(\), you passed a " + r"(`|')float(`|')", + "argument of type 'float' is not iterable", + ] + ) + if is_scalar(rhs) and op in skip_these: + with pytest.raises(TypeError, match=msg): + pd.eval( + ex, + engine=engine, + parser=parser, + local_dict={"lhs": lhs, "rhs": rhs}, + ) + else: + # compound + if is_scalar(lhs) and is_scalar(rhs): + lhs, rhs = (np.array([x]) for x in (lhs, rhs)) + expected = _eval_single_bin(lhs, op, rhs, engine) + if is_scalar(expected): + expected = not expected + else: + expected = ~expected + result = pd.eval(ex, engine=engine, parser=parser) + tm.assert_almost_equal(expected, result) + + @pytest.mark.parametrize("cmp1", ["<", ">"]) + @pytest.mark.parametrize("cmp2", ["<", ">"]) + def test_chained_cmp_op(self, cmp1, cmp2, lhs, midhs, rhs, engine, parser): + mid = midhs + if parser == "python": + ex1 = f"lhs {cmp1} mid {cmp2} rhs" + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex1, engine=engine, parser=parser) + return + + lhs_new = _eval_single_bin(lhs, cmp1, mid, engine) + rhs_new = _eval_single_bin(mid, cmp2, rhs, engine) + + if lhs_new is not None and rhs_new is not None: + ex1 = f"lhs {cmp1} mid {cmp2} rhs" + ex2 = f"lhs {cmp1} mid and mid {cmp2} rhs" + ex3 = f"(lhs {cmp1} mid) & (mid {cmp2} rhs)" + expected = _eval_single_bin(lhs_new, "&", rhs_new, engine) + + for ex in (ex1, ex2, ex3): + result = pd.eval(ex, engine=engine, parser=parser) + + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize( + "arith1", sorted(set(ARITH_OPS_SYMS).difference(SPECIAL_CASE_ARITH_OPS_SYMS)) + ) + def test_binary_arith_ops(self, arith1, lhs, rhs, engine, parser): + ex = f"lhs {arith1} rhs" + result = pd.eval(ex, engine=engine, parser=parser) + expected = _eval_single_bin(lhs, arith1, rhs, engine) + + tm.assert_almost_equal(result, expected) + ex = f"lhs {arith1} rhs {arith1} rhs" + result = pd.eval(ex, engine=engine, parser=parser) + nlhs = _eval_single_bin(lhs, arith1, rhs, engine) + try: + nlhs, ghs = nlhs.align(rhs) + except (ValueError, TypeError, AttributeError): + # ValueError: series frame or frame series align + # TypeError, AttributeError: series or frame with scalar align + return + else: + if engine == "numexpr": + import numexpr as ne + + # direct numpy comparison + expected = ne.evaluate(f"nlhs {arith1} ghs") + # Update assert statement due to unreliable numerical + # precision component (GH37328) + # TODO: update testing code so that assert_almost_equal statement + # can be replaced again by the assert_numpy_array_equal statement + tm.assert_almost_equal(result.values, expected) + else: + expected = eval(f"nlhs {arith1} ghs") + tm.assert_almost_equal(result, expected) + + # modulus, pow, and floor division require special casing + + def test_modulus(self, lhs, rhs, engine, parser): + ex = r"lhs % rhs" + result = pd.eval(ex, engine=engine, parser=parser) + expected = lhs % rhs + tm.assert_almost_equal(result, expected) + + if engine == "numexpr": + import numexpr as ne + + expected = ne.evaluate(r"expected % rhs") + if isinstance(result, (DataFrame, Series)): + tm.assert_almost_equal(result.values, expected) + else: + tm.assert_almost_equal(result, expected.item()) + else: + expected = _eval_single_bin(expected, "%", rhs, engine) + tm.assert_almost_equal(result, expected) + + def test_floor_division(self, lhs, rhs, engine, parser): + ex = "lhs // rhs" + + if engine == "python": + res = pd.eval(ex, engine=engine, parser=parser) 
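+            # the python engine supports floor division natively, so this
+            # should round-trip; the numexpr branch below raises instead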
+ expected = lhs // rhs + tm.assert_equal(res, expected) + else: + msg = ( + r"unsupported operand type\(s\) for //: 'VariableNode' and " + "'VariableNode'" + ) + with pytest.raises(TypeError, match=msg): + pd.eval( + ex, + local_dict={"lhs": lhs, "rhs": rhs}, + engine=engine, + parser=parser, + ) + + @td.skip_if_windows + def test_pow(self, lhs, rhs, engine, parser): + # odd failure on win32 platform, so skip + ex = "lhs ** rhs" + expected = _eval_single_bin(lhs, "**", rhs, engine) + result = pd.eval(ex, engine=engine, parser=parser) + + if ( + is_scalar(lhs) + and is_scalar(rhs) + and isinstance(expected, (complex, np.complexfloating)) + and np.isnan(result) + ): + msg = "(DataFrame.columns|numpy array) are different" + with pytest.raises(AssertionError, match=msg): + tm.assert_numpy_array_equal(result, expected) + else: + tm.assert_almost_equal(result, expected) + + ex = "(lhs ** rhs) ** rhs" + result = pd.eval(ex, engine=engine, parser=parser) + + middle = _eval_single_bin(lhs, "**", rhs, engine) + expected = _eval_single_bin(middle, "**", rhs, engine) + tm.assert_almost_equal(result, expected) + + def test_check_single_invert_op(self, lhs, engine, parser): + # simple + try: + elb = lhs.astype(bool) + except AttributeError: + elb = np.array([bool(lhs)]) + expected = ~elb + result = pd.eval("~elb", engine=engine, parser=parser) + tm.assert_almost_equal(expected, result) + + def test_frame_invert(self, engine, parser): + expr = "~lhs" + + # ~ ## + # frame + # float always raises + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2))) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert_dd'" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + msg = "ufunc 'invert' not supported for the input types" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + # int raises on numexpr + lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # bool always works + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5) + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # object raises + lhs = DataFrame( + {"b": ["a", 1, 2.0], "c": np.random.default_rng(2).standard_normal(3) > 0.5} + ) + if engine == "numexpr": + with pytest.raises(ValueError, match="unknown type object"): + pd.eval(expr, engine=engine, parser=parser) + else: + msg = "bad operand type for unary ~: 'str'" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + def test_series_invert(self, engine, parser): + # ~ #### + expr = "~lhs" + + # series + # float raises + lhs = Series(np.random.default_rng(2).standard_normal(5)) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert_dd'" + with pytest.raises(NotImplementedError, match=msg): + result = pd.eval(expr, engine=engine, parser=parser) + else: + msg = "ufunc 'invert' not supported for the input types" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + # int raises on numexpr + lhs = Series(np.random.default_rng(2).integers(5, size=5)) + if engine == "numexpr": + msg = 
"couldn't find matching opcode for 'invert" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # bool + lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5) + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # float + # int + # bool + + # object + lhs = Series(["a", 1, 2.0]) + if engine == "numexpr": + with pytest.raises(ValueError, match="unknown type object"): + pd.eval(expr, engine=engine, parser=parser) + else: + msg = "bad operand type for unary ~: 'str'" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + def test_frame_negate(self, engine, parser): + expr = "-lhs" + + # float + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2))) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # int + lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'neg_bb'" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + def test_series_negate(self, engine, parser): + expr = "-lhs" + + # float + lhs = Series(np.random.default_rng(2).standard_normal(5)) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # int + lhs = Series(np.random.default_rng(2).integers(5, size=5)) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'neg_bb'" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + @pytest.mark.parametrize( + "lhs", + [ + # Float + DataFrame(np.random.default_rng(2).standard_normal((5, 2))), + # Int + DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))), + # bool doesn't work with numexpr but works elsewhere + DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5), + ], + ) + def test_frame_pos(self, lhs, engine, parser): + expr = "+lhs" + expect = lhs + + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + @pytest.mark.parametrize( + "lhs", + [ + # Float + Series(np.random.default_rng(2).standard_normal(5)), + # Int + Series(np.random.default_rng(2).integers(5, size=5)), + # bool doesn't work with numexpr but works elsewhere + Series(np.random.default_rng(2).standard_normal(5) > 0.5), + ], + ) + def test_series_pos(self, lhs, engine, parser): + expr = "+lhs" + expect = lhs + + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + def test_scalar_unary(self, engine, 
parser): + msg = "bad operand type for unary ~: 'float'" + warn = None + if PY312 and not (engine == "numexpr" and parser == "pandas"): + warn = DeprecationWarning + with pytest.raises(TypeError, match=msg): + pd.eval("~1.0", engine=engine, parser=parser) + + assert pd.eval("-1.0", parser=parser, engine=engine) == -1.0 + assert pd.eval("+1.0", parser=parser, engine=engine) == +1.0 + assert pd.eval("~1", parser=parser, engine=engine) == ~1 + assert pd.eval("-1", parser=parser, engine=engine) == -1 + assert pd.eval("+1", parser=parser, engine=engine) == +1 + with tm.assert_produces_warning( + warn, match="Bitwise inversion", check_stacklevel=False + ): + assert pd.eval("~True", parser=parser, engine=engine) == ~True + with tm.assert_produces_warning( + warn, match="Bitwise inversion", check_stacklevel=False + ): + assert pd.eval("~False", parser=parser, engine=engine) == ~False + assert pd.eval("-True", parser=parser, engine=engine) == -True + assert pd.eval("-False", parser=parser, engine=engine) == -False + assert pd.eval("+True", parser=parser, engine=engine) == +True + assert pd.eval("+False", parser=parser, engine=engine) == +False + + def test_unary_in_array(self): + # GH 11235 + # TODO: 2022-01-29: result return list with numexpr 2.7.3 in CI + # but cannot reproduce locally + result = np.array( + pd.eval("[-True, True, +True, -False, False, +False, -37, 37, ~37, +37]"), + dtype=np.object_, + ) + expected = np.array( + [ + -True, + True, + +True, + -False, + False, + +False, + -37, + 37, + ~37, + +37, + ], + dtype=np.object_, + ) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype", [np.float32, np.float64]) + @pytest.mark.parametrize("expr", ["x < -0.1", "-5 > x"]) + def test_float_comparison_bin_op(self, dtype, expr): + # GH 16363 + df = DataFrame({"x": np.array([0], dtype=dtype)}) + res = df.eval(expr) + assert res.values == np.array([False]) + + def test_unary_in_function(self): + # GH 46471 + df = DataFrame({"x": [0, 1, np.nan]}) + + result = df.eval("x.fillna(-1)") + expected = df.x.fillna(-1) + # column name becomes None if using numexpr + # only check names when the engine is not numexpr + tm.assert_series_equal(result, expected, check_names=not USE_NUMEXPR) + + result = df.eval("x.shift(1, fill_value=-1)") + expected = df.x.shift(1, fill_value=-1) + tm.assert_series_equal(result, expected, check_names=not USE_NUMEXPR) + + @pytest.mark.parametrize( + "ex", + ( + "1 or 2", + "1 and 2", + "a and b", + "a or b", + "1 or 2 and (3 + 2) > 3", + "2 * x > 2 or 1 and 2", + "2 * df > 3 and 1 or a", + ), + ) + def test_disallow_scalar_bool_ops(self, ex, engine, parser): + x, a, b = np.random.default_rng(2).standard_normal(3), 1, 2 # noqa: F841 + df = DataFrame(np.random.default_rng(2).standard_normal((3, 2))) # noqa: F841 + + msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex, engine=engine, parser=parser) + + def test_identical(self, engine, parser): + # see gh-10546 + x = 1 + result = pd.eval("x", engine=engine, parser=parser) + assert result == 1 + assert is_scalar(result) + + x = 1.5 + result = pd.eval("x", engine=engine, parser=parser) + assert result == 1.5 + assert is_scalar(result) + + x = False + result = pd.eval("x", engine=engine, parser=parser) + assert not result + assert is_bool(result) + assert is_scalar(result) + + x = np.array([1]) + result = pd.eval("x", engine=engine, parser=parser) + tm.assert_numpy_array_equal(result, np.array([1])) + assert result.shape 
== (1,) + + x = np.array([1.5]) + result = pd.eval("x", engine=engine, parser=parser) + tm.assert_numpy_array_equal(result, np.array([1.5])) + assert result.shape == (1,) + + x = np.array([False]) # noqa: F841 + result = pd.eval("x", engine=engine, parser=parser) + tm.assert_numpy_array_equal(result, np.array([False])) + assert result.shape == (1,) + + def test_line_continuation(self, engine, parser): + # GH 11149 + exp = """1 + 2 * \ + 5 - 1 + 2 """ + result = pd.eval(exp, engine=engine, parser=parser) + assert result == 12 + + def test_float_truncation(self, engine, parser): + # GH 14241 + exp = "1000000000.006" + result = pd.eval(exp, engine=engine, parser=parser) + expected = np.float64(exp) + assert result == expected + + df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]}) + cutoff = 1000000000.0006 + result = df.query(f"A < {cutoff:.4f}") + assert result.empty + + cutoff = 1000000000.0010 + result = df.query(f"A > {cutoff:.4f}") + expected = df.loc[[1, 2], :] + tm.assert_frame_equal(expected, result) + + exact = 1000000000.0011 + result = df.query(f"A == {exact:.4f}") + expected = df.loc[[1], :] + tm.assert_frame_equal(expected, result) + + def test_disallow_python_keywords(self): + # GH 18221 + df = DataFrame([[0, 0, 0]], columns=["foo", "bar", "class"]) + msg = "Python keyword not valid identifier in numexpr query" + with pytest.raises(SyntaxError, match=msg): + df.query("class == 0") + + df = DataFrame() + df.index.name = "lambda" + with pytest.raises(SyntaxError, match=msg): + df.query("lambda == 0") + + def test_true_false_logic(self): + # GH 25823 + # This behavior is deprecated in Python 3.12 + with tm.maybe_produces_warning( + DeprecationWarning, PY312, check_stacklevel=False + ): + assert pd.eval("not True") == -2 + assert pd.eval("not False") == -1 + assert pd.eval("True and not True") == 0 + + def test_and_logic_string_match(self): + # GH 25823 + event = Series({"a": "hello"}) + assert pd.eval(f"{event.str.match('hello').a}") + assert pd.eval(f"{event.str.match('hello').a and event.str.match('hello').a}") + + +# ------------------------------------- +# gh-12388: Typecasting rules consistency with python + + +class TestTypeCasting: + @pytest.mark.parametrize("op", ["+", "-", "*", "**", "/"]) + # maybe someday... 
numexpr has too many upcasting rules now + # chain(*(np.core.sctypes[x] for x in ['uint', 'int', 'float'])) + @pytest.mark.parametrize("left_right", [("df", "3"), ("3", "df")]) + def test_binop_typecasting( + self, engine, parser, op, complex_or_float_dtype, left_right, request + ): + # GH#21374 + dtype = complex_or_float_dtype + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)), dtype=dtype) + left, right = left_right + s = f"{left} {op} {right}" + res = pd.eval(s, engine=engine, parser=parser) + if dtype == "complex64" and engine == "numexpr": + mark = pytest.mark.xfail( + reason="numexpr issue with complex that are upcast " + "to complex 128 " + "https://github.com/pydata/numexpr/issues/492" + ) + request.applymarker(mark) + assert df.values.dtype == dtype + assert res.values.dtype == dtype + tm.assert_frame_equal(res, eval(s), check_exact=False) + + +# ------------------------------------- +# Basic and complex alignment + + +def should_warn(*args): + not_mono = not any(map(operator.attrgetter("is_monotonic_increasing"), args)) + only_one_dt = reduce( + operator.xor, (issubclass(x.dtype.type, np.datetime64) for x in args) + ) + return not_mono and only_one_dt + + +class TestAlignment: + index_types = ["i", "s", "dt"] + lhs_index_types = index_types + ["s"] # 'p' + + def test_align_nested_unary_op(self, engine, parser): + s = "df * ~2" + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + res = pd.eval(s, engine=engine, parser=parser) + tm.assert_frame_equal(res, df * ~2) + + @pytest.mark.filterwarnings("always::RuntimeWarning") + @pytest.mark.parametrize("lr_idx_type", lhs_index_types) + @pytest.mark.parametrize("rr_idx_type", index_types) + @pytest.mark.parametrize("c_idx_type", index_types) + def test_basic_frame_alignment( + self, engine, parser, lr_idx_type, rr_idx_type, c_idx_type, idx_func_dict + ): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 10)), + index=idx_func_dict[lr_idx_type](10), + columns=idx_func_dict[c_idx_type](10), + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((20, 10)), + index=idx_func_dict[rr_idx_type](20), + columns=idx_func_dict[c_idx_type](10), + ) + # only warns if not monotonic and not sortable + if should_warn(df.index, df2.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df + df2", engine=engine, parser=parser) + else: + res = pd.eval("df + df2", engine=engine, parser=parser) + tm.assert_frame_equal(res, df + df2) + + @pytest.mark.parametrize("r_idx_type", lhs_index_types) + @pytest.mark.parametrize("c_idx_type", lhs_index_types) + def test_frame_comparison( + self, engine, parser, r_idx_type, c_idx_type, idx_func_dict + ): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 10)), + index=idx_func_dict[r_idx_type](10), + columns=idx_func_dict[c_idx_type](10), + ) + res = pd.eval("df < 2", engine=engine, parser=parser) + tm.assert_frame_equal(res, df < 2) + + df3 = DataFrame( + np.random.default_rng(2).standard_normal(df.shape), + index=df.index, + columns=df.columns, + ) + res = pd.eval("df < df3", engine=engine, parser=parser) + tm.assert_frame_equal(res, df < df3) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("r1", lhs_index_types) + @pytest.mark.parametrize("c1", index_types) + @pytest.mark.parametrize("r2", index_types) + @pytest.mark.parametrize("c2", index_types) + def test_medium_complex_frame_alignment( + self, engine, parser, r1, c1, r2, c2, idx_func_dict + ): + df = DataFrame( + 
np.random.default_rng(2).standard_normal((3, 2)), + index=idx_func_dict[r1](3), + columns=idx_func_dict[c1](2), + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), + index=idx_func_dict[r2](4), + columns=idx_func_dict[c2](2), + ) + df3 = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), + index=idx_func_dict[r2](5), + columns=idx_func_dict[c2](2), + ) + if should_warn(df.index, df2.index, df3.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df + df2 + df3", engine=engine, parser=parser) + else: + res = pd.eval("df + df2 + df3", engine=engine, parser=parser) + tm.assert_frame_equal(res, df + df2 + df3) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("index_name", ["index", "columns"]) + @pytest.mark.parametrize("c_idx_type", index_types) + @pytest.mark.parametrize("r_idx_type", lhs_index_types) + def test_basic_frame_series_alignment( + self, engine, parser, index_name, r_idx_type, c_idx_type, idx_func_dict + ): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 10)), + index=idx_func_dict[r_idx_type](10), + columns=idx_func_dict[c_idx_type](10), + ) + index = getattr(df, index_name) + s = Series(np.random.default_rng(2).standard_normal(5), index[:5]) + + if should_warn(df.index, s.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df + s", engine=engine, parser=parser) + else: + res = pd.eval("df + s", engine=engine, parser=parser) + + if r_idx_type == "dt" or c_idx_type == "dt": + expected = df.add(s) if engine == "numexpr" else df + s + else: + expected = df + s + tm.assert_frame_equal(res, expected) + + @pytest.mark.parametrize("index_name", ["index", "columns"]) + @pytest.mark.parametrize( + "r_idx_type, c_idx_type", + list(product(["i", "s"], ["i", "s"])) + [("dt", "dt")], + ) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_basic_series_frame_alignment( + self, request, engine, parser, index_name, r_idx_type, c_idx_type, idx_func_dict + ): + if ( + engine == "numexpr" + and parser in ("pandas", "python") + and index_name == "index" + and r_idx_type == "i" + and c_idx_type == "s" + ): + reason = ( + f"Flaky column ordering when engine={engine}, " + f"parser={parser}, index_name={index_name}, " + f"r_idx_type={r_idx_type}, c_idx_type={c_idx_type}" + ) + request.applymarker(pytest.mark.xfail(reason=reason, strict=False)) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 7)), + index=idx_func_dict[r_idx_type](10), + columns=idx_func_dict[c_idx_type](7), + ) + index = getattr(df, index_name) + s = Series(np.random.default_rng(2).standard_normal(5), index[:5]) + if should_warn(s.index, df.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("s + df", engine=engine, parser=parser) + else: + res = pd.eval("s + df", engine=engine, parser=parser) + + if r_idx_type == "dt" or c_idx_type == "dt": + expected = df.add(s) if engine == "numexpr" else s + df + else: + expected = s + df + tm.assert_frame_equal(res, expected) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("c_idx_type", index_types) + @pytest.mark.parametrize("r_idx_type", lhs_index_types) + @pytest.mark.parametrize("index_name", ["index", "columns"]) + @pytest.mark.parametrize("op", ["+", "*"]) + def test_series_frame_commutativity( + self, engine, parser, index_name, op, r_idx_type, c_idx_type, idx_func_dict + ): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 10)), + 
index=idx_func_dict[r_idx_type](10), + columns=idx_func_dict[c_idx_type](10), + ) + index = getattr(df, index_name) + s = Series(np.random.default_rng(2).standard_normal(5), index[:5]) + + lhs = f"s {op} df" + rhs = f"df {op} s" + if should_warn(df.index, s.index): + with tm.assert_produces_warning(RuntimeWarning): + a = pd.eval(lhs, engine=engine, parser=parser) + with tm.assert_produces_warning(RuntimeWarning): + b = pd.eval(rhs, engine=engine, parser=parser) + else: + a = pd.eval(lhs, engine=engine, parser=parser) + b = pd.eval(rhs, engine=engine, parser=parser) + + if r_idx_type != "dt" and c_idx_type != "dt": + if engine == "numexpr": + tm.assert_frame_equal(a, b) + + @pytest.mark.filterwarnings("always::RuntimeWarning") + @pytest.mark.parametrize("r1", lhs_index_types) + @pytest.mark.parametrize("c1", index_types) + @pytest.mark.parametrize("r2", index_types) + @pytest.mark.parametrize("c2", index_types) + def test_complex_series_frame_alignment( + self, engine, parser, r1, c1, r2, c2, idx_func_dict + ): + n = 3 + m1 = 5 + m2 = 2 * m1 + df = DataFrame( + np.random.default_rng(2).standard_normal((m1, n)), + index=idx_func_dict[r1](m1), + columns=idx_func_dict[c1](n), + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((m2, n)), + index=idx_func_dict[r2](m2), + columns=idx_func_dict[c2](n), + ) + index = df2.columns + ser = Series(np.random.default_rng(2).standard_normal(n), index[:n]) + + if r2 == "dt" or c2 == "dt": + if engine == "numexpr": + expected2 = df2.add(ser) + else: + expected2 = df2 + ser + else: + expected2 = df2 + ser + + if r1 == "dt" or c1 == "dt": + if engine == "numexpr": + expected = expected2.add(df) + else: + expected = expected2 + df + else: + expected = expected2 + df + + if should_warn(df2.index, ser.index, df.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df2 + ser + df", engine=engine, parser=parser) + else: + res = pd.eval("df2 + ser + df", engine=engine, parser=parser) + assert res.shape == expected.shape + tm.assert_frame_equal(res, expected) + + def test_performance_warning_for_poor_alignment(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((1000, 10))) + s = Series(np.random.default_rng(2).standard_normal(10000)) + if engine == "numexpr": + seen = PerformanceWarning + else: + seen = False + + with tm.assert_produces_warning(seen): + pd.eval("df + s", engine=engine, parser=parser) + + s = Series(np.random.default_rng(2).standard_normal(1000)) + with tm.assert_produces_warning(False): + pd.eval("df + s", engine=engine, parser=parser) + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 10000))) + s = Series(np.random.default_rng(2).standard_normal(10000)) + with tm.assert_produces_warning(False): + pd.eval("df + s", engine=engine, parser=parser) + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 10))) + s = Series(np.random.default_rng(2).standard_normal(10000)) + + is_python_engine = engine == "python" + + if not is_python_engine: + wrn = PerformanceWarning + else: + wrn = False + + with tm.assert_produces_warning(wrn) as w: + pd.eval("df + s", engine=engine, parser=parser) + + if not is_python_engine: + assert len(w) == 1 + msg = str(w[0].message) + logged = np.log10(s.size - df.shape[1]) + expected = ( + f"Alignment difference on axis 1 is larger " + f"than an order of magnitude on term 'df', " + f"by more than {logged:.4g}; performance may suffer." 
+            )
+            assert msg == expected
+
+
+# ------------------------------------
+# Slightly more complex ops
+
+
+class TestOperations:
+    def eval(self, *args, **kwargs):
+        kwargs["level"] = kwargs.pop("level", 0) + 1
+        return pd.eval(*args, **kwargs)
+
+    def test_simple_arith_ops(self, engine, parser):
+        exclude_arith = []
+        if parser == "python":
+            exclude_arith = ["in", "not in"]
+
+        arith_ops = [
+            op
+            for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS
+            if op not in exclude_arith
+        ]
+
+        ops = (op for op in arith_ops if op != "//")
+
+        for op in ops:
+            ex = f"1 {op} 1"
+            ex2 = f"x {op} 1"
+            ex3 = f"1 {op} (x + 1)"
+
+            if op in ("in", "not in"):
+                msg = "argument of type 'int' is not iterable"
+                with pytest.raises(TypeError, match=msg):
+                    pd.eval(ex, engine=engine, parser=parser)
+            else:
+                expec = _eval_single_bin(1, op, 1, engine)
+                x = self.eval(ex, engine=engine, parser=parser)
+                assert x == expec
+
+                expec = _eval_single_bin(x, op, 1, engine)
+                y = self.eval(ex2, local_dict={"x": x}, engine=engine, parser=parser)
+                assert y == expec
+
+                expec = _eval_single_bin(1, op, x + 1, engine)
+                y = self.eval(ex3, local_dict={"x": x}, engine=engine, parser=parser)
+                assert y == expec
+
+    @pytest.mark.parametrize("rhs", [True, False])
+    @pytest.mark.parametrize("lhs", [True, False])
+    @pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
+    def test_simple_bool_ops(self, rhs, lhs, op, engine, parser):
+        ex = f"{lhs} {op} {rhs}"
+
+        if parser == "python" and op in ["and", "or"]:
+            msg = "'BoolOp' nodes are not implemented"
+            with pytest.raises(NotImplementedError, match=msg):
+                self.eval(ex, engine=engine, parser=parser)
+            return
+
+        res = self.eval(ex, engine=engine, parser=parser)
+        exp = eval(ex)
+        assert res == exp
+
+    @pytest.mark.parametrize("rhs", [True, False])
+    @pytest.mark.parametrize("lhs", [True, False])
+    @pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
+    def test_bool_ops_with_constants(self, rhs, lhs, op, engine, parser):
+        ex = f"{lhs} {op} {rhs}"
+
+        if parser == "python" and op in ["and", "or"]:
+            msg = "'BoolOp' nodes are not implemented"
+            with pytest.raises(NotImplementedError, match=msg):
+                self.eval(ex, engine=engine, parser=parser)
+            return
+
+        res = self.eval(ex, engine=engine, parser=parser)
+        exp = eval(ex)
+        assert res == exp
+
+    def test_4d_ndarray_fails(self):
+        x = np.random.default_rng(2).standard_normal((3, 4, 5, 6))
+        y = Series(np.random.default_rng(2).standard_normal(10))
+        msg = "N-dimensional objects, where N > 2, are not supported with eval"
+        with pytest.raises(NotImplementedError, match=msg):
+            self.eval("x + y", local_dict={"x": x, "y": y})
+
+    def test_constant(self):
+        x = self.eval("1")
+        assert x == 1
+
+    def test_single_variable(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
+        df2 = self.eval("df", local_dict={"df": df})
+        tm.assert_frame_equal(df, df2)
+
+    def test_failing_subscript_with_name_error(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))  # noqa: F841
+        with pytest.raises(NameError, match="name 'x' is not defined"):
+            self.eval("df[x > 2] > 2")
+
+    def test_lhs_expression_subscript(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
+        result = self.eval("(df + 1)[df > 2]", local_dict={"df": df})
+        expected = (df + 1)[df > 2]
+        tm.assert_frame_equal(result, expected)
+
+    def test_attr_expression(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc")
+        )
+        expr1 = "df.a < df.b"
+        expec1 = df.a < df.b
+        expr2 = "df.a + df.b + df.c"
+        expec2 = df.a + df.b + df.c
+        expr3 = "df.a + df.b + df.c[df.b < 0]"
+        expec3 = df.a + df.b + df.c[df.b < 0]
+        exprs = expr1, expr2, expr3
+        expecs = expec1, expec2, expec3
+        for e, expec in zip(exprs, expecs):
+            tm.assert_series_equal(expec, self.eval(e, local_dict={"df": df}))
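+    # The assignment tests below exercise eval's "target = expression" form.
+    # For orientation, a minimal sketch using only the public pandas API
+    # (an illustration, not part of the original suite):
+    #
+    #   df = DataFrame({"a": [1], "b": [2]})
+    #   df.eval("c = a + b", inplace=True)   # mutates df in place, returns None
+    #   out = df.eval("d = a * b")           # inplace=False returns a new frame
+    #   assert "d" not in df.columns and out.loc[0, "d"] == 2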
+    def test_assignment_fails(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc")
+        )
+        df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
+        expr1 = "df = df2"
+        msg = "cannot assign without a target object"
+        with pytest.raises(ValueError, match=msg):
+            self.eval(expr1, local_dict={"df": df, "df2": df2})
+
+    def test_assignment_column_multiple_raise(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
+        )
+        # multiple assignees
+        with pytest.raises(SyntaxError, match="invalid syntax"):
+            df.eval("d c = a + b")
+
+    def test_assignment_column_invalid_assign(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
+        )
+        # invalid assignees
+        msg = "left hand side of an assignment must be a single name"
+        with pytest.raises(SyntaxError, match=msg):
+            df.eval("d,c = a + b")
+
+    def test_assignment_column_invalid_assign_function_call(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
+        )
+        msg = "cannot assign to function call"
+        with pytest.raises(SyntaxError, match=msg):
+            df.eval('Timestamp("20131001") = a + b')
+
+    def test_assignment_single_assign_existing(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
+        )
+        # single assignment - existing variable
+        expected = df.copy()
+        expected["a"] = expected["a"] + expected["b"]
+        df.eval("a = a + b", inplace=True)
+        tm.assert_frame_equal(df, expected)
+
+    def test_assignment_single_assign_new(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
+        )
+        # single assignment - new variable
+        expected = df.copy()
+        expected["c"] = expected["a"] + expected["b"]
+        df.eval("c = a + b", inplace=True)
+        tm.assert_frame_equal(df, expected)
+
+    def test_assignment_single_assign_local_overlap(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
+        )
+        df = df.copy()
+        a = 1  # noqa: F841
+        df.eval("a = 1 + b", inplace=True)
+
+        expected = df.copy()
+        expected["a"] = 1 + expected["b"]
+        tm.assert_frame_equal(df, expected)
+
+    def test_assignment_single_assign_name(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
+        )
+
+        a = 1  # noqa: F841
+        old_a = df.a.copy()
+        df.eval("a = a + b", inplace=True)
+        result = old_a + df.b
+        tm.assert_series_equal(result, df.a, check_names=False)
+        assert result.name is None
+
+    def test_assignment_multiple_raises(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
+        )
+        # multiple assignment
+        df.eval("c = a + b", inplace=True)
+        msg = "can only assign a single expression"
+        with pytest.raises(SyntaxError, match=msg):
+            df.eval("c = a = b")
+
+    def test_assignment_explicit(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
+        )
+        # explicit targets
+        self.eval("c = df.a + df.b", local_dict={"df": df}, target=df, inplace=True)
+        expected = df.copy()
+        expected["c"] = expected["a"] + expected["b"]
+        tm.assert_frame_equal(df, expected)
+
+    def test_column_in(self):
+        # GH 11235
+        df = DataFrame({"a": [11], "b": [-32]})
+        result = df.eval("a in [11, -32]")
+        expected = Series([True])
+        # TODO: 2022-01-29: Name check failed with numexpr 2.7.3 in CI
+        # but cannot
reproduce locally + tm.assert_series_equal(result, expected, check_names=False) + + @pytest.mark.xfail(reason="Unknown: Omitted test_ in name prior.") + def test_assignment_not_inplace(self): + # see gh-9297 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + + actual = df.eval("c = a + b", inplace=False) + assert actual is not None + + expected = df.copy() + expected["c"] = expected["a"] + expected["b"] + tm.assert_frame_equal(df, expected) + + def test_multi_line_expression(self, warn_copy_on_write): + # GH 11149 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + + expected["c"] = expected["a"] + expected["b"] + expected["d"] = expected["c"] + expected["b"] + answer = df.eval( + """ + c = a + b + d = c + b""", + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + expected["a"] = expected["a"] - 1 + expected["e"] = expected["a"] + 2 + answer = df.eval( + """ + a = a - 1 + e = a + 2""", + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + # multi-line not valid if not all assignments + msg = "Multi-line expressions are only valid if all expressions contain" + with pytest.raises(ValueError, match=msg): + df.eval( + """ + a = b + 2 + b - 2""", + inplace=False, + ) + + def test_multi_line_expression_not_inplace(self): + # GH 11149 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + + expected["c"] = expected["a"] + expected["b"] + expected["d"] = expected["c"] + expected["b"] + df = df.eval( + """ + c = a + b + d = c + b""", + inplace=False, + ) + tm.assert_frame_equal(expected, df) + + expected["a"] = expected["a"] - 1 + expected["e"] = expected["a"] + 2 + df = df.eval( + """ + a = a - 1 + e = a + 2""", + inplace=False, + ) + tm.assert_frame_equal(expected, df) + + def test_multi_line_expression_local_variable(self): + # GH 15342 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + + local_var = 7 + expected["c"] = expected["a"] * local_var + expected["d"] = expected["c"] + local_var + answer = df.eval( + """ + c = a * @local_var + d = c + @local_var + """, + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + def test_multi_line_expression_callable_local_variable(self): + # 26426 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + def local_func(a, b): + return b + + expected = df.copy() + expected["c"] = expected["a"] * local_func(1, 7) + expected["d"] = expected["c"] + local_func(1, 7) + answer = df.eval( + """ + c = a * @local_func(1, 7) + d = c + @local_func(1, 7) + """, + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + def test_multi_line_expression_callable_local_variable_with_kwargs(self): + # 26426 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + def local_func(a, b): + return b + + expected = df.copy() + expected["c"] = expected["a"] * local_func(b=7, a=1) + expected["d"] = expected["c"] + local_func(b=7, a=1) + answer = df.eval( + """ + c = a * @local_func(b=7, a=1) + d = c + @local_func(b=7, a=1) + """, + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + def test_assignment_in_query(self): + # GH 8664 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + msg = "cannot assign without a target object" + with pytest.raises(ValueError, match=msg): + df.query("a = 1") + tm.assert_frame_equal(df, df_orig) + + def test_query_inplace(self): + # see gh-11149 + df = DataFrame({"a": 
[1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + expected = expected[expected["a"] == 2] + df.query("a == 2", inplace=True) + tm.assert_frame_equal(expected, df) + + df = {} + expected = {"a": 3} + + self.eval("a = 1 + 2", target=df, inplace=True) + tm.assert_dict_equal(df, expected) + + @pytest.mark.parametrize("invalid_target", [1, "cat", [1, 2], np.array([]), (1, 3)]) + def test_cannot_item_assign(self, invalid_target): + msg = "Cannot assign expression output to target" + expression = "a = 1 + 2" + + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=invalid_target, inplace=True) + + if hasattr(invalid_target, "copy"): + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=invalid_target, inplace=False) + + @pytest.mark.parametrize("invalid_target", [1, "cat", (1, 3)]) + def test_cannot_copy_item(self, invalid_target): + msg = "Cannot return a copy of the target" + expression = "a = 1 + 2" + + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=invalid_target, inplace=False) + + @pytest.mark.parametrize("target", [1, "cat", [1, 2], np.array([]), (1, 3), {1: 2}]) + def test_inplace_no_assignment(self, target): + expression = "1 + 2" + + assert self.eval(expression, target=target, inplace=False) == 3 + + msg = "Cannot operate inplace if there is no assignment" + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=target, inplace=True) + + def test_basic_period_index_boolean_expression(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 2)), + columns=period_range("2020-01-01", freq="D", periods=2), + ) + e = df < 2 + r = self.eval("df < 2", local_dict={"df": df}) + x = df < 2 + + tm.assert_frame_equal(r, e) + tm.assert_frame_equal(x, e) + + def test_basic_period_index_subscript_expression(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 2)), + columns=period_range("2020-01-01", freq="D", periods=2), + ) + r = self.eval("df[df < 2 + 3]", local_dict={"df": df}) + e = df[df < 2 + 3] + tm.assert_frame_equal(r, e) + + def test_nested_period_index_subscript_expression(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 2)), + columns=period_range("2020-01-01", freq="D", periods=2), + ) + r = self.eval("df[df[df < 2] < 2] + df * 2", local_dict={"df": df}) + e = df[df[df < 2] < 2] + df * 2 + tm.assert_frame_equal(r, e) + + def test_date_boolean(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["dates1"] = date_range("1/1/2012", periods=5) + res = self.eval( + "df.dates1 < 20130101", + local_dict={"df": df}, + engine=engine, + parser=parser, + ) + expec = df.dates1 < "20130101" + tm.assert_series_equal(res, expec, check_names=False) + + def test_simple_in_ops(self, engine, parser): + if parser != "python": + res = pd.eval("1 in [1, 2]", engine=engine, parser=parser) + assert res + + res = pd.eval("2 in (1, 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("3 in (1, 2)", engine=engine, parser=parser) + assert not res + + res = pd.eval("3 not in (1, 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("[3] not in (1, 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("[3] in ([3], 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("[[3]] in [[[3]], 2]", engine=engine, parser=parser) + assert res + + res = pd.eval("(3,) in [(3,), 2]", engine=engine, parser=parser) + assert res + + res = pd.eval("(3,) not in [(3,), 2]", engine=engine, 
parser=parser) + assert not res + + res = pd.eval("[(3,)] in [[(3,)], 2]", engine=engine, parser=parser) + assert res + else: + msg = "'In' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval("1 in [1, 2]", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("2 in (1, 2)", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("3 in (1, 2)", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("[(3,)] in (1, 2, [(3,)])", engine=engine, parser=parser) + msg = "'NotIn' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval("3 not in (1, 2)", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("[3] not in (1, 2, [[3]])", engine=engine, parser=parser) + + def test_check_many_exprs(self, engine, parser): + a = 1 # noqa: F841 + expr = " * ".join("a" * 33) + expected = 1 + res = pd.eval(expr, engine=engine, parser=parser) + assert res == expected + + @pytest.mark.parametrize( + "expr", + [ + "df > 2 and df > 3", + "df > 2 or df > 3", + "not df > 2", + ], + ) + def test_fails_and_or_not(self, expr, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + if parser == "python": + msg = "'BoolOp' nodes are not implemented" + if "not" in expr: + msg = "'Not' nodes are not implemented" + + with pytest.raises(NotImplementedError, match=msg): + pd.eval( + expr, + local_dict={"df": df}, + parser=parser, + engine=engine, + ) + else: + # smoke-test, should not raise + pd.eval( + expr, + local_dict={"df": df}, + parser=parser, + engine=engine, + ) + + @pytest.mark.parametrize("char", ["|", "&"]) + def test_fails_ampersand_pipe(self, char, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) # noqa: F841 + ex = f"(df + 2)[df > 1] > 0 {char} (df > 0)" + if parser == "python": + msg = "cannot evaluate scalar only bool ops" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex, parser=parser, engine=engine) + else: + # smoke-test, should not raise + pd.eval(ex, parser=parser, engine=engine) + + +class TestMath: + def eval(self, *args, **kwargs): + kwargs["level"] = kwargs.pop("level", 0) + 1 + return pd.eval(*args, **kwargs) + + @pytest.mark.skipif( + not NUMEXPR_INSTALLED, reason="Unary ops only implemented for numexpr" + ) + @pytest.mark.parametrize("fn", _unary_math_ops) + def test_unary_functions(self, fn): + df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + a = df.a + + expr = f"{fn}(a)" + got = self.eval(expr) + with np.errstate(all="ignore"): + expect = getattr(np, fn)(a) + tm.assert_series_equal(got, expect, check_names=False) + + @pytest.mark.parametrize("fn", _binary_math_ops) + def test_binary_functions(self, fn): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + a = df.a + b = df.b + + expr = f"{fn}(a, b)" + got = self.eval(expr) + with np.errstate(all="ignore"): + expect = getattr(np, fn)(a, b) + tm.assert_almost_equal(got, expect, check_names=False) + + def test_df_use_case(self, engine, parser): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + df.eval( + "e = arctan2(sin(a), b)", + engine=engine, + parser=parser, + inplace=True, + ) + got = df.e + expect = np.arctan2(np.sin(df.a), df.b) + 
tm.assert_series_equal(got, expect, check_names=False) + + def test_df_arithmetic_subexpression(self, engine, parser): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + df.eval("e = sin(a + b)", engine=engine, parser=parser, inplace=True) + got = df.e + expect = np.sin(df.a + df.b) + tm.assert_series_equal(got, expect, check_names=False) + + @pytest.mark.parametrize( + "dtype, expect_dtype", + [ + (np.int32, np.float64), + (np.int64, np.float64), + (np.float32, np.float32), + (np.float64, np.float64), + pytest.param(np.complex128, np.complex128, marks=td.skip_if_windows), + ], + ) + def test_result_types(self, dtype, expect_dtype, engine, parser): + # xref https://github.com/pandas-dev/pandas/issues/12293 + # this fails on Windows, apparently a floating point precision issue + + # Did not test complex64 because DataFrame is converting it to + # complex128. Due to https://github.com/pandas-dev/pandas/issues/10952 + df = DataFrame( + {"a": np.random.default_rng(2).standard_normal(10).astype(dtype)} + ) + assert df.a.dtype == dtype + df.eval("b = sin(a)", engine=engine, parser=parser, inplace=True) + got = df.b + expect = np.sin(df.a) + assert expect.dtype == got.dtype + assert expect_dtype == got.dtype + tm.assert_series_equal(got, expect, check_names=False) + + def test_undefined_func(self, engine, parser): + df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + msg = '"mysin" is not a supported function' + + with pytest.raises(ValueError, match=msg): + df.eval("mysin(a)", engine=engine, parser=parser) + + def test_keyword_arg(self, engine, parser): + df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + msg = 'Function "sin" does not support keyword arguments' + + with pytest.raises(TypeError, match=msg): + df.eval("sin(x=a)", engine=engine, parser=parser) + + +_var_s = np.random.default_rng(2).standard_normal(10) + + +class TestScope: + def test_global_scope(self, engine, parser): + e = "_var_s * 2" + tm.assert_numpy_array_equal( + _var_s * 2, pd.eval(e, engine=engine, parser=parser) + ) + + def test_no_new_locals(self, engine, parser): + x = 1 + lcls = locals().copy() + pd.eval("x + 1", local_dict=lcls, engine=engine, parser=parser) + lcls2 = locals().copy() + lcls2.pop("lcls") + assert lcls == lcls2 + + def test_no_new_globals(self, engine, parser): + x = 1 # noqa: F841 + gbls = globals().copy() + pd.eval("x + 1", engine=engine, parser=parser) + gbls2 = globals().copy() + assert gbls == gbls2 + + def test_empty_locals(self, engine, parser): + # GH 47084 + x = 1 # noqa: F841 + msg = "name 'x' is not defined" + with pytest.raises(UndefinedVariableError, match=msg): + pd.eval("x + 1", engine=engine, parser=parser, local_dict={}) + + def test_empty_globals(self, engine, parser): + # GH 47084 + msg = "name '_var_s' is not defined" + e = "_var_s * 2" + with pytest.raises(UndefinedVariableError, match=msg): + pd.eval(e, engine=engine, parser=parser, global_dict={}) + + +@td.skip_if_no("numexpr") +def test_invalid_engine(): + msg = "Invalid engine 'asdf' passed" + with pytest.raises(KeyError, match=msg): + pd.eval("x + y", local_dict={"x": 1, "y": 2}, engine="asdf") + + +@td.skip_if_no("numexpr") +@pytest.mark.parametrize( + ("use_numexpr", "expected"), + ( + (True, "numexpr"), + (False, "python"), + ), +) +def test_numexpr_option_respected(use_numexpr, expected): + # GH 32556 + from pandas.core.computation.eval import _check_engine + + with 
pd.option_context("compute.use_numexpr", use_numexpr): + result = _check_engine(None) + assert result == expected + + +@td.skip_if_no("numexpr") +def test_numexpr_option_incompatible_op(): + # GH 32556 + with pd.option_context("compute.use_numexpr", False): + df = DataFrame( + {"A": [True, False, True, False, None, None], "B": [1, 2, 3, 4, 5, 6]} + ) + result = df.query("A.isnull()") + expected = DataFrame({"A": [None, None], "B": [5, 6]}, index=[4, 5]) + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no("numexpr") +def test_invalid_parser(): + msg = "Invalid parser 'asdf' passed" + with pytest.raises(KeyError, match=msg): + pd.eval("x + y", local_dict={"x": 1, "y": 2}, parser="asdf") + + +_parsers: dict[str, type[BaseExprVisitor]] = { + "python": PythonExprVisitor, + "pytables": pytables.PyTablesExprVisitor, + "pandas": PandasExprVisitor, +} + + +@pytest.mark.parametrize("engine", ENGINES) +@pytest.mark.parametrize("parser", _parsers) +def test_disallowed_nodes(engine, parser): + VisitorClass = _parsers[parser] + inst = VisitorClass("x + 1", engine, parser) + + for ops in VisitorClass.unsupported_nodes: + msg = "nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + getattr(inst, ops)() + + +def test_syntax_error_exprs(engine, parser): + e = "s +" + with pytest.raises(SyntaxError, match="invalid syntax"): + pd.eval(e, engine=engine, parser=parser) + + +def test_name_error_exprs(engine, parser): + e = "s + t" + msg = "name 's' is not defined" + with pytest.raises(NameError, match=msg): + pd.eval(e, engine=engine, parser=parser) + + +@pytest.mark.parametrize("express", ["a + @b", "@a + b", "@a + @b"]) +def test_invalid_local_variable_reference(engine, parser, express): + a, b = 1, 2 # noqa: F841 + + if parser != "pandas": + with pytest.raises(SyntaxError, match="The '@' prefix is only"): + pd.eval(express, engine=engine, parser=parser) + else: + with pytest.raises(SyntaxError, match="The '@' prefix is not"): + pd.eval(express, engine=engine, parser=parser) + + +def test_numexpr_builtin_raises(engine, parser): + sin, dotted_line = 1, 2 + if engine == "numexpr": + msg = "Variables in expression .+" + with pytest.raises(NumExprClobberingError, match=msg): + pd.eval("sin + dotted_line", engine=engine, parser=parser) + else: + res = pd.eval("sin + dotted_line", engine=engine, parser=parser) + assert res == sin + dotted_line + + +def test_bad_resolver_raises(engine, parser): + cannot_resolve = 42, 3.0 + with pytest.raises(TypeError, match="Resolver of type .+"): + pd.eval("1 + 2", resolvers=cannot_resolve, engine=engine, parser=parser) + + +def test_empty_string_raises(engine, parser): + # GH 13139 + with pytest.raises(ValueError, match="expr cannot be an empty string"): + pd.eval("", engine=engine, parser=parser) + + +def test_more_than_one_expression_raises(engine, parser): + with pytest.raises(SyntaxError, match="only a single expression is allowed"): + pd.eval("1 + 1; 2 + 2", engine=engine, parser=parser) + + +@pytest.mark.parametrize("cmp", ("and", "or")) +@pytest.mark.parametrize("lhs", (int, float)) +@pytest.mark.parametrize("rhs", (int, float)) +def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser): + gen = { + int: lambda: np.random.default_rng(2).integers(10), + float: np.random.default_rng(2).standard_normal, + } + + mid = gen[lhs]() # noqa: F841 + lhs = gen[lhs]() + rhs = gen[rhs]() + + ex1 = f"lhs {cmp} mid {cmp} rhs" + ex2 = f"lhs {cmp} mid and mid {cmp} rhs" + ex3 = f"(lhs {cmp} mid) & (mid {cmp} rhs)" + for ex in (ex1, ex2, ex3): + 
msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex, engine=engine, parser=parser) + + +@pytest.mark.parametrize( + "other", + [ + "'x'", + "...", + ], +) +def test_equals_various(other): + df = DataFrame({"A": ["a", "b", "c"]}, dtype=object) + result = df.eval(f"A == {other}") + expected = Series([False, False, False], name="A") + if USE_NUMEXPR: + # https://github.com/pandas-dev/pandas/issues/10239 + # lose name with numexpr engine. Remove when that's fixed. + expected.name = None + tm.assert_series_equal(result, expected) + + +def test_inf(engine, parser): + s = "inf + 1" + expected = np.inf + result = pd.eval(s, engine=engine, parser=parser) + assert result == expected + + +@pytest.mark.parametrize("column", ["Temp(°C)", "Capacitance(μF)"]) +def test_query_token(engine, column): + # See: https://github.com/pandas-dev/pandas/pull/42826 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=[column, "b"] + ) + expected = df[df[column] > 5] + query_string = f"`{column}` > 5" + result = df.query(query_string, engine=engine) + tm.assert_frame_equal(result, expected) + + +def test_negate_lt_eq_le(engine, parser): + df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"]) + expected = df[~(df.cat > 0)] + + result = df.query("~(cat > 0)", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + if parser == "python": + msg = "'Not' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query("not (cat > 0)", engine=engine, parser=parser) + else: + result = df.query("not (cat > 0)", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "column", + DEFAULT_GLOBALS.keys(), +) +def test_eval_no_support_column_name(request, column): + # GH 44603 + if column in ["True", "False", "inf", "Inf"]: + request.applymarker( + pytest.mark.xfail( + raises=KeyError, + reason=f"GH 47859 DataFrame eval not supported with {column}", + ) + ) + + df = DataFrame( + np.random.default_rng(2).integers(0, 100, size=(10, 2)), + columns=[column, "col1"], + ) + expected = df[df[column] > 6] + result = df.query(f"{column}>6") + + tm.assert_frame_equal(result, expected) + + +def test_set_inplace(using_copy_on_write, warn_copy_on_write): + # https://github.com/pandas-dev/pandas/issues/47449 + # Ensure we don't only update the DataFrame inplace, but also the actual + # column values, such that references to this column also get updated + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result_view = df[:] + ser = df["A"] + with tm.assert_cow_warning(warn_copy_on_write): + df.eval("A = B + C", inplace=True) + expected = DataFrame({"A": [11, 13, 15], "B": [4, 5, 6], "C": [7, 8, 9]}) + tm.assert_frame_equal(df, expected) + if not using_copy_on_write: + tm.assert_series_equal(ser, expected["A"]) + tm.assert_series_equal(result_view["A"], expected["A"]) + else: + expected = Series([1, 2, 3], name="A") + tm.assert_series_equal(ser, expected) + tm.assert_series_equal(result_view["A"], expected) + + +class TestValidate: + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, value): + msg = 'For argument "inplace" expected type bool, received type' + with pytest.raises(ValueError, match=msg): + pd.eval("2+2", inplace=value) diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/__pycache__/__init__.cpython-310.pyc 
b/emu3/lib/python3.10/site-packages/pandas/tests/series/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4bedc999382a349c0e05b726507cecc77df5c51 Binary files /dev/null and b/emu3/lib/python3.10/site-packages/pandas/tests/series/__pycache__/__init__.cpython-310.pyc differ
diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/__init__.py b/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py b/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py new file mode 100644 index
0000000000000000000000000000000000000000..2d50b0f36904a94e7e16aac426e1d098fe320c94 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_cat_accessor.py @@ -0,0 +1,258 @@
+import numpy as np
+import pytest
+
+from pandas import (
+    Categorical,
+    DataFrame,
+    Index,
+    Series,
+    Timestamp,
+    date_range,
+    period_range,
+    timedelta_range,
+)
+import pandas._testing as tm
+from pandas.core.arrays.categorical import CategoricalAccessor
+from pandas.core.indexes.accessors import Properties
+
+
+class TestCatAccessor:
+    @pytest.mark.parametrize(
+        "method",
+        [
+            lambda x: x.cat.set_categories([1, 2, 3]),
+            lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
+            lambda x: x.cat.rename_categories([1, 2, 3]),
+            lambda x: x.cat.remove_unused_categories(),
+            lambda x: x.cat.remove_categories([2]),
+            lambda x: x.cat.add_categories([4]),
+            lambda x: x.cat.as_ordered(),
+            lambda x: x.cat.as_unordered(),
+        ],
+    )
+    def test_getname_categorical_accessor(self, method):
+        # GH#17509
+        ser = Series([1, 2, 3], name="A").astype("category")
+        expected = "A"
+        result = method(ser).name
+        assert result == expected
+
+    def test_cat_accessor(self):
+        ser = Series(Categorical(["a", "b", np.nan, "a"]))
+        tm.assert_index_equal(ser.cat.categories, Index(["a", "b"]))
+        assert not ser.cat.ordered
+
+        exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
+
+        res = ser.cat.set_categories(["b", "a"])
+        tm.assert_categorical_equal(res.values, exp)
+
+        ser[:] = "a"
+        ser = ser.cat.remove_unused_categories()
+        tm.assert_index_equal(ser.cat.categories, Index(["a"]))
+
+    def test_cat_accessor_api(self):
+        # GH#9322
+
+        assert Series.cat is CategoricalAccessor
+        ser = Series(list("aabbcde")).astype("category")
+        assert isinstance(ser.cat, CategoricalAccessor)
+
+        invalid = Series([1])
+        with pytest.raises(AttributeError, match="only use .cat accessor"):
+            invalid.cat
+        assert not hasattr(invalid, "cat")
+
+    def test_cat_accessor_no_new_attributes(self):
+        # https://github.com/pandas-dev/pandas/issues/10673
+        cat = Series(list("aabbcde")).astype("category")
+        with pytest.raises(AttributeError, match="You cannot add any new attribute"):
+            cat.cat.xlabel = "a"
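+    # For orientation before the delegation tests below: the .cat accessor
+    # forwards to the underlying Categorical. A minimal sketch using only the
+    # public pandas API (illustration, not part of the original suite):
+    #
+    #   ser = Series(["a", "b", "b"], dtype="category")
+    #   ser.cat.categories   # Index(["a", "b"])
+    #   ser.cat.codes        # Series([0, 1, 1], dtype="int8")
+    #   ser.cat.ordered      # False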
Index(["c", "b", "a"]) + exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_) + ser = ser.cat.set_categories(["c", "b", "a"]) + tm.assert_index_equal(ser.cat.categories, exp_categories) + tm.assert_numpy_array_equal(ser.values.__array__(), exp_values) + tm.assert_numpy_array_equal(ser.__array__(), exp_values) + + # remove unused categories + ser = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"])) + exp_categories = Index(["a", "b"]) + exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_) + ser = ser.cat.remove_unused_categories() + tm.assert_index_equal(ser.cat.categories, exp_categories) + tm.assert_numpy_array_equal(ser.values.__array__(), exp_values) + tm.assert_numpy_array_equal(ser.__array__(), exp_values) + + # This method is likely to be confused, so test that it raises an error + # on wrong inputs: + msg = "'Series' object has no attribute 'set_categories'" + with pytest.raises(AttributeError, match=msg): + ser.set_categories([4, 3, 2, 1]) + + # right: ser.cat.set_categories([4,3,2,1]) + + # GH#18862 (let Series.cat.rename_categories take callables) + ser = Series(Categorical(["a", "b", "c", "a"], ordered=True)) + result = ser.cat.rename_categories(lambda x: x.upper()) + expected = Series( + Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True) + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "idx", + [ + date_range("1/1/2015", periods=5), + date_range("1/1/2015", periods=5, tz="MET"), + period_range("1/1/2015", freq="D", periods=5), + timedelta_range("1 days", "10 days"), + ], + ) + def test_dt_accessor_api_for_categorical(self, idx): + # https://github.com/pandas-dev/pandas/issues/10661 + + ser = Series(idx) + cat = ser.astype("category") + + # only testing field (like .day) + # and bool (is_month_start) + attr_names = type(ser._values)._datetimelike_ops + + assert isinstance(cat.dt, Properties) + + special_func_defs = [ + ("strftime", ("%Y-%m-%d",), {}), + ("round", ("D",), {}), + ("floor", ("D",), {}), + ("ceil", ("D",), {}), + ("asfreq", ("D",), {}), + ("as_unit", ("s"), {}), + ] + if idx.dtype == "M8[ns]": + # exclude dt64tz since that is already localized and would raise + tup = ("tz_localize", ("UTC",), {}) + special_func_defs.append(tup) + elif idx.dtype.kind == "M": + # exclude dt64 since that is not localized so would raise + tup = ("tz_convert", ("EST",), {}) + special_func_defs.append(tup) + + _special_func_names = [f[0] for f in special_func_defs] + + _ignore_names = ["components", "tz_localize", "tz_convert"] + + func_names = [ + fname + for fname in dir(ser.dt) + if not ( + fname.startswith("_") + or fname in attr_names + or fname in _special_func_names + or fname in _ignore_names + ) + ] + + func_defs = [(fname, (), {}) for fname in func_names] + func_defs.extend( + f_def for f_def in special_func_defs if f_def[0] in dir(ser.dt) + ) + + for func, args, kwargs in func_defs: + warn_cls = [] + if func == "to_period" and getattr(idx, "tz", None) is not None: + # dropping TZ + warn_cls.append(UserWarning) + if func == "to_pydatetime": + # deprecated to return Index[object] + warn_cls.append(FutureWarning) + if warn_cls: + warn_cls = tuple(warn_cls) + else: + warn_cls = None + with tm.assert_produces_warning(warn_cls): + res = getattr(cat.dt, func)(*args, **kwargs) + exp = getattr(ser.dt, func)(*args, **kwargs) + + tm.assert_equal(res, exp) + + for attr in attr_names: + res = getattr(cat.dt, attr) + exp = getattr(ser.dt, attr) + + tm.assert_equal(res, exp) + + def 
test_dt_accessor_api_for_categorical_invalid(self): + invalid = Series([1, 2, 3]).astype("category") + msg = "Can only use .dt accessor with datetimelike" + + with pytest.raises(AttributeError, match=msg): + invalid.dt + assert not hasattr(invalid, "str") + + def test_set_categories_setitem(self): + # GH#43334 + + df = DataFrame({"Survived": [1, 0, 1], "Sex": [0, 1, 1]}, dtype="category") + + df["Survived"] = df["Survived"].cat.rename_categories(["No", "Yes"]) + df["Sex"] = df["Sex"].cat.rename_categories(["female", "male"]) + + # values should not be coerced to NaN + assert list(df["Sex"]) == ["female", "male", "male"] + assert list(df["Survived"]) == ["Yes", "No", "Yes"] + + df["Sex"] = Categorical(df["Sex"], categories=["female", "male"], ordered=False) + df["Survived"] = Categorical( + df["Survived"], categories=["No", "Yes"], ordered=False + ) + + # values should not be coerced to NaN + assert list(df["Sex"]) == ["female", "male", "male"] + assert list(df["Survived"]) == ["Yes", "No", "Yes"] + + def test_categorical_of_booleans_is_boolean(self): + # https://github.com/pandas-dev/pandas/issues/46313 + df = DataFrame( + {"int_cat": [1, 2, 3], "bool_cat": [True, False, False]}, dtype="category" + ) + value = df["bool_cat"].cat.categories.dtype + expected = np.dtype(np.bool_) + assert value is expected diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py b/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..1c60567c1a5300dd4118ae729c3c784eabd4d747 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_list_accessor.py @@ -0,0 +1,129 @@ +import re + +import pytest + +from pandas import ( + ArrowDtype, + Series, +) +import pandas._testing as tm + +pa = pytest.importorskip("pyarrow") + +from pandas.compat import pa_version_under11p0 + + +@pytest.mark.parametrize( + "list_dtype", + ( + pa.list_(pa.int64()), + pa.list_(pa.int64(), list_size=3), + pa.large_list(pa.int64()), + ), +) +def test_list_getitem(list_dtype): + ser = Series( + [[1, 2, 3], [4, None, 5], None], + dtype=ArrowDtype(list_dtype), + ) + actual = ser.list[1] + expected = Series([2, None, None], dtype="int64[pyarrow]") + tm.assert_series_equal(actual, expected) + + +def test_list_getitem_slice(): + ser = Series( + [[1, 2, 3], [4, None, 5], None], + dtype=ArrowDtype(pa.list_(pa.int64())), + ) + if pa_version_under11p0: + with pytest.raises( + NotImplementedError, match="List slice not supported by pyarrow " + ): + ser.list[1:None:None] + else: + actual = ser.list[1:None:None] + expected = Series( + [[2, 3], [None, 5], None], dtype=ArrowDtype(pa.list_(pa.int64())) + ) + tm.assert_series_equal(actual, expected) + + +def test_list_len(): + ser = Series( + [[1, 2, 3], [4, None], None], + dtype=ArrowDtype(pa.list_(pa.int64())), + ) + actual = ser.list.len() + expected = Series([3, 2, None], dtype=ArrowDtype(pa.int32())) + tm.assert_series_equal(actual, expected) + + +def test_list_flatten(): + ser = Series( + [[1, 2, 3], [4, None], None], + dtype=ArrowDtype(pa.list_(pa.int64())), + ) + actual = ser.list.flatten() + expected = Series([1, 2, 3, 4, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(actual, expected) + + +def test_list_getitem_slice_invalid(): + ser = Series( + [[1, 2, 3], [4, None, 5], None], + dtype=ArrowDtype(pa.list_(pa.int64())), + ) + if pa_version_under11p0: + with pytest.raises( + NotImplementedError, match="List slice not 
supported by pyarrow " + ): + ser.list[1:None:0] + else: + with pytest.raises(pa.lib.ArrowInvalid, match=re.escape("`step` must be >= 1")): + ser.list[1:None:0] + + +def test_list_accessor_non_list_dtype(): + ser = Series( + [1, 2, 4], + dtype=ArrowDtype(pa.int64()), + ) + with pytest.raises( + AttributeError, + match=re.escape( + "Can only use the '.list' accessor with 'list[pyarrow]' dtype, " + "not int64[pyarrow]." + ), + ): + ser.list[1:None:0] + + +@pytest.mark.parametrize( + "list_dtype", + ( + pa.list_(pa.int64()), + pa.list_(pa.int64(), list_size=3), + pa.large_list(pa.int64()), + ), +) +def test_list_getitem_invalid_index(list_dtype): + ser = Series( + [[1, 2, 3], [4, None, 5], None], + dtype=ArrowDtype(list_dtype), + ) + with pytest.raises(pa.lib.ArrowInvalid, match="Index -1 is out of bounds"): + ser.list[-1] + with pytest.raises(pa.lib.ArrowInvalid, match="Index 5 is out of bounds"): + ser.list[5] + with pytest.raises(ValueError, match="key must be an int or slice, got str"): + ser.list["abc"] + + +def test_list_accessor_not_iterable(): + ser = Series( + [[1, 2, 3], [4, None], None], + dtype=ArrowDtype(pa.list_(pa.int64())), + ) + with pytest.raises(TypeError, match="'ListAccessor' object is not iterable"): + iter(ser.list) diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py b/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..118095b5dcdbc4f63ba511feb954673485f34427 --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_sparse_accessor.py @@ -0,0 +1,9 @@ +from pandas import Series + + +class TestSparseAccessor: + def test_sparse_accessor_updates_on_inplace(self): + ser = Series([1, 1, 2, 3], dtype="Sparse[int]") + return_value = ser.drop([0, 1], inplace=True) + assert return_value is None + assert ser.sparse.density == 1.0 diff --git a/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py b/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..09d965ef1f32268550bbf35a17f11529545054ed --- /dev/null +++ b/emu3/lib/python3.10/site-packages/pandas/tests/series/accessors/test_str_accessor.py @@ -0,0 +1,25 @@ +import pytest + +from pandas import Series +import pandas._testing as tm + + +class TestStrAccessor: + def test_str_attribute(self): + # GH#9068 + methods = ["strip", "rstrip", "lstrip"] + ser = Series([" jack", "jill ", " jesse ", "frank"]) + for method in methods: + expected = Series([getattr(str, method)(x) for x in ser.values]) + tm.assert_series_equal(getattr(Series.str, method)(ser.str), expected) + + # str accessor only valid with string values + ser = Series(range(5)) + with pytest.raises(AttributeError, match="only use .str accessor"): + ser.str.repeat(2) + + def test_str_accessor_updates_on_inplace(self): + ser = Series(list("abc")) + return_value = ser.drop([0], inplace=True) + assert return_value is None + assert len(ser.str.lower()) == 2
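+    # Note on the assertion above: drop([0], inplace=True) mutates the Series
+    # itself, and the .str accessor operates on the Series' current values at
+    # call time, so the lowered result reflects the dropped row (length 2).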