diff --git a/.gitattributes b/.gitattributes
index 96cf6f0730ae7034c55d36e0b4c5ed67550e37ba..3ba87a98b9e71d22a8939f429932a8899318454f 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1630,3 +1630,4 @@ evalkit_tf437/lib/python3.10/site-packages/orjson/orjson.cpython-310-x86_64-linu
 evalkit_internvl/lib/python3.10/site-packages/sympy/printing/tests/__pycache__/test_latex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 evalkit_internvl/lib/python3.10/site-packages/sympy/printing/__pycache__/latex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 evalkit_internvl/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_tf437/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1800 filter=lfs diff=lfs merge=lfs -text
diff --git a/evalkit_tf437/lib/python3.10/site-packages/__pycache__/decorator.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/decorator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f6b53ff73cca773813fcb4679eb72d7f793140b
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/decorator.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/__pycache__/ffmpy.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/ffmpy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c92ecaee5a02b6e30c9ff0db017e6cf5d4b2a660
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/ffmpy.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/__pycache__/isympy.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/isympy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb67646a58ae751ef716d2ff8a0271065779e2e3
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/isympy.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/__pycache__/pylab.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/pylab.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38d494a376413f63793341e1ad4cd156521e8b4a
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/pylab.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/__pycache__/six.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/six.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd6640b296ab33c4e32c7e855b74503f97572a4f
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/six.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/__pycache__/threadpoolctl.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/threadpoolctl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d905d51c37c1fcf336d194155bb0b5c91bbeec07
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/threadpoolctl.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/__pycache__/typing_extensions.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/typing_extensions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32b86a3c1f5114d1e78d92ffa3930d61ba473995
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/__pycache__/typing_extensions.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..193ccf1b0886e7e328a7441d6db3f195d5b74a00
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bbd1cdecb1b4b9a2f67130f12f0df3c65d8acfa9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80d8a9e6dfdd9f6dc630123f1c068dbcffd18673
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e62ef0d1ed50a3d79f5d446a89aa39a0c9a74ba9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8fa7a83a073dc6f5d2bbd3d2f14fa371bd367f3
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bff430f5ad802511a3ace40a15458da63bddaa1c
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ea631e63a9d0422dbd4717e7e706c84006452f2
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..624dfd3362207e83ce49676f15ed0ec2dffb49c3
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1f0bb7b68025ae4fe0c2c76c095eb36b4e64f2c
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py
@@ -0,0 +1,65 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet
+"""
+
+import os
+import sys
+from contextlib import contextmanager
+
+
+# Windows only
+if os.name == "nt":
+    import ctypes
+    import msvcrt  # noqa
+
+    class CursorInfo(ctypes.Structure):
+        # _fields is a specific attr expected by ctypes
+        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
+
+
+def hide_cursor():
+    if os.name == "nt":
+        ci = CursorInfo()
+        handle = ctypes.windll.kernel32.GetStdHandle(-11)
+        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+        ci.visible = False
+        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+    elif os.name == "posix":
+        sys.stdout.write("\033[?25l")
+        sys.stdout.flush()
+
+
+def show_cursor():
+    if os.name == "nt":
+        ci = CursorInfo()
+        handle = ctypes.windll.kernel32.GetStdHandle(-11)
+        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+        ci.visible = True
+        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+    elif os.name == "posix":
+        sys.stdout.write("\033[?25h")
+        sys.stdout.flush()
+
+
+@contextmanager
+def hide():
+    "Context manager to hide the terminal cursor"
+    try:
+        hide_cursor()
+        yield
+    finally:
+        show_cursor()
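
The hide() context manager added above is the piece the menu relies on to keep the terminal tidy: it pairs hide_cursor() with show_cursor() in a try/finally, so the cursor reappears even if the body raises. A minimal sketch of using it on its own, assuming accelerate is installed in this environment (the progress loop is illustrative):

    import time

    from accelerate.commands.menu import cursor

    # The cursor stays hidden for the duration of the block; the finally
    # clause inside hide() restores it even on KeyboardInterrupt.
    with cursor.hide():
        for step in range(3):
            print(f"working... {step + 1}/3")
            time.sleep(0.2)
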
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/input.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/input.py
new file mode 100644
index 0000000000000000000000000000000000000000..266f7e7dba33e045073f935fb3a16e4eef1ddf8a
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/input.py
@@ -0,0 +1,86 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This file contains utilities for handling input from the user and registering specific keys to specific functions,
+based on https://github.com/bchao1/bullet
+"""
+
+from typing import List
+
+from .keymap import KEYMAP, get_character
+
+
+def mark(key: str):
+    """
+    Mark the function with the key code so it can be handled in the register
+    """
+
+    def decorator(func):
+        handle = getattr(func, "handle_key", [])
+        handle += [key]
+        setattr(func, "handle_key", handle)
+        return func
+
+    return decorator
+
+
+def mark_multiple(*keys: List[str]):
+    """
+    Mark the function with the key codes so it can be handled in the register
+    """
+
+    def decorator(func):
+        handle = getattr(func, "handle_key", [])
+        handle += keys
+        setattr(func, "handle_key", handle)
+        return func
+
+    return decorator
+
+
+class KeyHandler(type):
+    """
+    Metaclass that adds the key handlers to the class
+    """
+
+    def __new__(cls, name, bases, attrs):
+        new_cls = super().__new__(cls, name, bases, attrs)
+        if not hasattr(new_cls, "key_handler"):
+            setattr(new_cls, "key_handler", {})
+        setattr(new_cls, "handle_input", KeyHandler.handle_input)
+
+        for value in attrs.values():
+            handled_keys = getattr(value, "handle_key", [])
+            for key in handled_keys:
+                new_cls.key_handler[key] = value
+        return new_cls
+
+    @staticmethod
+    def handle_input(cls):
+        "Finds and returns the selected character if it exists in the handler"
+        char = get_character()
+        if char != KEYMAP["undefined"]:
+            char = ord(char)
+        handler = cls.key_handler.get(char)
+        if handler:
+            cls.current_selection = char
+            return handler(cls)
+        else:
+            return None
+
+
+def register(cls):
+    """Adds KeyHandler metaclass to the class"""
+    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
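
The register/mark machinery above turns an ordinary class into a key dispatcher: KeyHandler rebuilds the class, scans every attribute for the handle_key list that @mark/@mark_multiple leave behind to fill key_handler, and injects handle_input, which reads one key via get_character and calls the matching method. A minimal sketch of that pattern; the EnterListener class is hypothetical, and Enter is convenient because get_character returns it as a single character:

    from accelerate.commands.menu import input as menu_input
    from accelerate.commands.menu.keymap import KEYMAP

    @menu_input.register
    class EnterListener:
        current_selection = None

        @menu_input.mark(KEYMAP["newline"])
        def on_enter(self):
            return "selected"

    # EnterListener().handle_input() blocks for one keypress (raw terminal
    # mode on POSIX) and returns "selected" on Enter, None for other keys.
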
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py
new file mode 100644
index 0000000000000000000000000000000000000000..b32ff4623b184d7613a773b6ed403462b9e2f341
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py
@@ -0,0 +1,134 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
+"""
+
+
+import os
+import string
+import sys
+
+
+ARROW_KEY_FLAG = 1 << 8
+
+KEYMAP = {
+    "tab": ord("\t"),
+    "newline": ord("\r"),
+    "esc": 27,
+    "up": 65 + ARROW_KEY_FLAG,
+    "down": 66 + ARROW_KEY_FLAG,
+    "right": 67 + ARROW_KEY_FLAG,
+    "left": 68 + ARROW_KEY_FLAG,
+    "mod_int": 91,
+    "undefined": sys.maxsize,
+    "interrupt": 3,
+    "insert": 50,
+    "delete": 51,
+    "pg_up": 53,
+    "pg_down": 54,
+}
+
+KEYMAP["arrow_begin"] = KEYMAP["up"]
+KEYMAP["arrow_end"] = KEYMAP["left"]
+
+if sys.platform == "win32":
+    WIN_CH_BUFFER = []
+    WIN_KEYMAP = {
+        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
+        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
+        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
+        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
+        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
+        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
+        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
+        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
+    }
+
+for i in range(10):
+    KEYMAP[str(i)] = ord(str(i))
+
+
+def get_raw_chars():
+    "Gets raw characters from inputs"
+    if os.name == "nt":
+        import msvcrt
+
+        encoding = "mbcs"
+        # Flush the keyboard buffer
+        while msvcrt.kbhit():
+            msvcrt.getch()
+        if len(WIN_CH_BUFFER) == 0:
+            # Read the keystroke
+            ch = msvcrt.getch()
+
+            # If it is a prefix char, get second part
+            if ch in (b"\x00", b"\xe0"):
+                ch2 = ch + msvcrt.getch()
+                # Translate actual Win chars to bullet char types
+                try:
+                    chx = chr(WIN_KEYMAP[ch2])
+                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
+                    WIN_CH_BUFFER.append(chx)
+                    if ord(chx) in (
+                        KEYMAP["insert"] - 1 << 9,
+                        KEYMAP["delete"] - 1 << 9,
+                        KEYMAP["pg_up"] - 1 << 9,
+                        KEYMAP["pg_down"] - 1 << 9,
+                    ):
+                        WIN_CH_BUFFER.append(chr(126))
+                    ch = chr(KEYMAP["esc"])
+                except KeyError:
+                    ch = ch2[1]
+            else:
+                ch = ch.decode(encoding)
+        else:
+            ch = WIN_CH_BUFFER.pop(0)
+    elif os.name == "posix":
+        import termios
+        import tty
+
+        fd = sys.stdin.fileno()
+        old_settings = termios.tcgetattr(fd)
+        try:
+            tty.setraw(fd)
+            ch = sys.stdin.read(1)
+        finally:
+            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+    return ch
+
+
+def get_character():
+    "Gets a character from the keyboard and returns the key code"
+    char = get_raw_chars()
+    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
+        return char
+
+    elif ord(char) == KEYMAP["esc"]:
+        combo = get_raw_chars()
+        if ord(combo) == KEYMAP["mod_int"]:
+            key = get_raw_chars()
+            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
+                return chr(ord(key) + ARROW_KEY_FLAG)
+            else:
+                return KEYMAP["undefined"]
+        else:
+            return get_raw_chars()
+
+    else:
+        if char in string.printable:
+            return char
+        else:
+            return KEYMAP["undefined"]
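
ARROW_KEY_FLAG (1 << 8) keeps arrow keys in the same integer space as ordinary characters: a VT100 arrow arrives as the three bytes ESC, '[', then 'A'..'D', and get_character folds the final byte plus the flag into one code, so KEYMAP["up"] is 65 + 256 = 321. A quick sanity check of that encoding, using only values defined in the module above (pure arithmetic, no terminal required):

    from accelerate.commands.menu.keymap import ARROW_KEY_FLAG, KEYMAP

    # 'A'..'D' are the final bytes of the VT100 up/down/right/left sequences.
    for name, final in [("up", "A"), ("down", "B"), ("right", "C"), ("left", "D")]:
        assert KEYMAP[name] == ord(final) + ARROW_KEY_FLAG
    # The arrow range used by get_character spans exactly up..left.
    assert KEYMAP["arrow_begin"] == KEYMAP["up"]
    assert KEYMAP["arrow_end"] == KEYMAP["left"]
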
diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0ed92bc0b5e5f09eb2d64f5bf3aa49f04996e4c
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py
@@ -0,0 +1,143 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Main driver for the selection menu, based on https://github.com/bchao1/bullet
+"""
+import builtins
+import sys
+
+from ...utils.imports import _is_package_available
+from . import cursor, input
+from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
+from .keymap import KEYMAP
+
+
+in_colab = False
+try:
+    in_colab = _is_package_available("google.colab")
+except ModuleNotFoundError:
+    pass
+
+
+@input.register
+class BulletMenu:
+    """
+    A CLI menu to select a choice from a list of choices using the keyboard.
+    """
+
+    def __init__(self, prompt: str = None, choices: list = []):
+        self.position = 0
+        self.choices = choices
+        self.prompt = prompt
+        if sys.platform == "win32":
+            self.arrow_char = "*"
+        else:
+            self.arrow_char = "➔ "
+
+    def write_choice(self, index, end: str = ""):
+        if sys.platform != "win32":
+            writeColor(self.choices[index], 32, end)
+        else:
+            forceWrite(self.choices[index], end)
+
+    def print_choice(self, index: int):
+        "Prints the choice at the given index"
+        if index == self.position:
+            forceWrite(f" {self.arrow_char} ")
+            self.write_choice(index)
+        else:
+            forceWrite(f"    {self.choices[index]}")
+        reset_cursor()
+
+    def move_direction(self, direction: Direction, num_spaces: int = 1):
+        "Should not be directly called, used to move a direction of either up or down"
+        old_position = self.position
+        if direction == Direction.DOWN:
+            if self.position + 1 >= len(self.choices):
+                return
+            self.position += num_spaces
+        else:
+            if self.position - 1 < 0:
+                return
+            self.position -= num_spaces
+        clear_line()
+        self.print_choice(old_position)
+        move_cursor(num_spaces, direction.name)
+        self.print_choice(self.position)
+
+    @input.mark(KEYMAP["up"])
+    def move_up(self):
+        self.move_direction(Direction.UP)
+
+    @input.mark(KEYMAP["down"])
+    def move_down(self):
+        self.move_direction(Direction.DOWN)
+
+    @input.mark(KEYMAP["newline"])
+    def select(self):
+        move_cursor(len(self.choices) - self.position, "DOWN")
+        return self.position
+
+    @input.mark(KEYMAP["interrupt"])
+    def interrupt(self):
+        move_cursor(len(self.choices) - self.position, "DOWN")
+        raise KeyboardInterrupt
+
+    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
+    def select_row(self):
+        index = int(chr(self.current_selection))
+        movement = index - self.position
+        if index == self.position:
+            return
+        if index < len(self.choices):
+            if self.position > index:
+                self.move_direction(Direction.UP, -movement)
+            elif self.position < index:
+                self.move_direction(Direction.DOWN, movement)
+            else:
+                return
+        else:
+            return
+
+    def run(self, default_choice: int = 0):
+        "Start the menu and return the selected choice"
+        if self.prompt:
+            linebreak()
+            forceWrite(self.prompt, "\n")
+            if in_colab:
+                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
+            else:
+                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
+        self.position = default_choice
+        for i in range(len(self.choices)):
+            self.print_choice(i)
+            forceWrite("\n")
+        move_cursor(len(self.choices) - self.position, "UP")
+        with cursor.hide():
+            while True:
+                if in_colab:
+                    try:
+                        choice = int(builtins.input())
+                    except ValueError:
+                        choice = default_choice
+                else:
+                    choice = self.handle_input()
+                if choice is not None:
+                    reset_cursor()
+                    for _ in range(len(self.choices) + 1):
+                        move_cursor(1, "UP")
+                        clear_line()
+                    self.write_choice(choice, "\n")
+                    return choice
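
BulletMenu ties the three modules together: @input.register installs the handlers declared with @input.mark, run() paints the choices once inside cursor.hide(), and move_direction repaints only the two rows that change. A minimal sketch of driving it, with an illustrative prompt and choice list (run() blocks on the keyboard and returns the index of the highlighted row once Enter is pressed):

    from accelerate.commands.menu.selection_menu import BulletMenu

    menu = BulletMenu(
        "Which compute environment are you using?",
        ["This machine", "AWS (Amazon SageMaker)"],
    )
    # Arrow keys or the row's digit move the highlight; Enter selects.
    index = menu.run(default_choice=0)
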
diff --git a/evalkit_tf437/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/RECORD b/evalkit_tf437/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..a97062195605de298aecc4aff8f3f5a5b93380fd
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/RECORD
@@ -0,0 +1,33 @@
+nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/__pycache__/__init__.cpython-310.pyc,,
+nvidia/curand/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/curand/__pycache__/__init__.cpython-310.pyc,,
+nvidia/curand/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/curand/include/__pycache__/__init__.cpython-310.pyc,,
+nvidia/curand/include/curand.h,sha256=y393HddG5_OdV-3cK2l2Q_fxzGbEc1cyCcYjkOE3oIk,43966
+nvidia/curand/include/curand_discrete.h,sha256=2qD3BkI622XEu0444wVP7HeYkKAx0Rjr2HDhqU4SA7E,3486
+nvidia/curand/include/curand_discrete2.h,sha256=ZrQTO5R9x83AMX88uq7M8M94DLSC5VEz0PAkfcwtQeg,10883
+nvidia/curand/include/curand_globals.h,sha256=bES1Kx0NrATXk1DReMMkqWrB062nOnaAp39y22wViXU,3717
+nvidia/curand/include/curand_kernel.h,sha256=SjfAeh13ybXIxiekcgczzua02kIAqETopJKRhYvCat8,53133
+nvidia/curand/include/curand_lognormal.h,sha256=-X-iNkJSzWpAYYjogm689EJTZfzore9sxU7ObddljLk,28142
+nvidia/curand/include/curand_mrg32k3a.h,sha256=ZVVREjGNsJQJ-3IzZZ_LKGtGteslicb8E0Aly49BKPs,170296
+nvidia/curand/include/curand_mtgp32.h,sha256=Qhrmx0pHWF-P2Uu5bKwYE9ymEWq3c7qBzCITVMaKMfI,7845
+nvidia/curand/include/curand_mtgp32_host.h,sha256=SXqzmSQkzTLSRJ4pojTg_TNCC3T-G89HdBK-boSDqr4,18274
+nvidia/curand/include/curand_mtgp32_kernel.h,sha256=ajZnXr5ZXnQExElf6LPpigrrKPTmMIZbRyTEnJ-BDhw,13731
+nvidia/curand/include/curand_mtgp32dc_p_11213.h,sha256=7_gGYUH47UugIAEt60vYH5nFa-QUwTpDwSEgLg9cZts,276889
+nvidia/curand/include/curand_normal.h,sha256=lnmYVk2fn0oEVWOytdKhXrHL36GLCjMnB8OnZeCaYcA,26953
+nvidia/curand/include/curand_normal_static.h,sha256=5K4iTC9AuSWCe1LVxuj_0y3BVjtp0bxO6hndv2rbmiw,4727
+nvidia/curand/include/curand_philox4x32_x.h,sha256=T21IP-Rdg3_tSVU9Je4dLKuwEqE4ovfwi7r1hOY92Dw,7166
+nvidia/curand/include/curand_poisson.h,sha256=KrhXOmO_D7aclnj8geIyHqdpSQwWHurS9V_pVtgzodM,25461
+nvidia/curand/include/curand_precalc.h,sha256=I6NZdgT42fMm9qSCtP-rlOAqt4Zsqgal0ajktcPmEak,1392393
+nvidia/curand/include/curand_uniform.h,sha256=gpmRgQu5r6ppgLTg60NXoDdVJS6wMUy6jC5bh8l04e8,17472
+nvidia/curand/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/curand/lib/__pycache__/__init__.cpython-310.pyc,,
+nvidia/curand/lib/libcurand.so.10,sha256=Qah4DXdgqpHMpyMtCF9VGDx-bPrsq8LzLnGSYIMQNfw,96681392
+nvidia_curand_cu12-10.3.2.106.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+nvidia_curand_cu12-10.3.2.106.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+nvidia_curand_cu12-10.3.2.106.dist-info/METADATA,sha256=TTSGosdrLNURQYusjHa9N5vwOD1zz1DnnZV0im74NT4,1507
+nvidia_curand_cu12-10.3.2.106.dist-info/RECORD,,
+nvidia_curand_cu12-10.3.2.106.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia_curand_cu12-10.3.2.106.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+nvidia_curand_cu12-10.3.2.106.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
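
Each RECORD row is a CSV triple of path, hash, and size: the hash field has the form sha256=<urlsafe-base64, unpadded digest>, and rows such as the __pycache__ and RECORD entries carry empty hash and size because those files are generated or self-referential. A rough sketch of reading such a file with the stdlib (the path matches the file added above and is assumed to exist locally):

    import csv

    record = "nvidia_curand_cu12-10.3.2.106.dist-info/RECORD"
    with open(record, newline="") as f:
        for path, digest, size in csv.reader(f):
            if digest:  # e.g. "sha256=Qah4DXdg..."
                algo, _, value = digest.partition("=")
                print(path, algo, size)
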
diff --git a/evalkit_tf437/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/WHEEL b/evalkit_tf437/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/nvidia_curand_cu12-10.3.2.106.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-manylinux1_x86_64
+
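
The WHEEL metadata above declares the payload purelib (Root-Is-Purelib: true) even though the bundled libcurand.so.10 is platform-specific; the py3-none-manylinux1_x86_64 tag is what still pins the wheel to x86-64 Linux. A small sketch of decomposing such a tag, assuming the packaging library is available in the environment:

    from packaging.tags import parse_tag

    # parse_tag expands a compressed tag string into concrete Tag objects.
    for tag in parse_tag("py3-none-manylinux1_x86_64"):
        print(tag.interpreter, tag.abi, tag.platform)  # py3 none manylinux1_x86_64
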
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/INSTALLER b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/LICENSE b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..733963a744c3d6710058de9f0ea316893c5492ca
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/LICENSE
@@ -0,0 +1,1250 @@
+BSD 3-Clause License
+
+Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team
+All rights reserved.
+
+Copyright (c) 2011-2023, Open source contributors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+Copyright (c) 2010-2019 Keith Goodman
+Copyright (c) 2019 Bottleneck Developers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  * Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+  * Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+Copyright 2017- Paul Ganssle
+Copyright 2017- dateutil contributors (see AUTHORS file)
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+The above license applies to all contributions after 2017-12-01, as well as
+all contributions that have been re-licensed (see AUTHORS file for the list of
+contributors who have re-licensed their code).
+--------------------------------------------------------------------------------
+dateutil - Extensions to the standard Python datetime module.
+
+Copyright (c) 2003-2011 - Gustavo Niemeyer
+Copyright (c) 2012-2014 - Tomi Pieviläinen
+Copyright (c) 2014-2016 - Yaron de Leeuw
+Copyright (c) 2015-     - Paul Ganssle
+Copyright (c) 2015-     - dateutil contributors (see AUTHORS file)
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  * Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+  * Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+  * Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The above BSD License Applies to all code, even that also covered by Apache 2.0.
+# MIT License
+
+Copyright (c) 2019 Hadley Wickham; RStudio; and Evan Miller
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+Based on http://opensource.org/licenses/MIT
+
+This is a template. Complete and ship as file LICENSE the following 2
+lines (only)
+
+YEAR:
+COPYRIGHT HOLDER:
+
+and specify as
+
+License: MIT + file LICENSE
+
+Copyright (c) <YEAR>, <COPYRIGHT HOLDER>
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+The MIT License
+
+Copyright (c) 2008- Attractive Chaos
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+musl as a whole is licensed under the following standard MIT license:
+
+----------------------------------------------------------------------
+Copyright © 2005-2020 Rich Felker, et al.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+----------------------------------------------------------------------
+
+Authors/contributors include:
+
+A. Wilcox
+Ada Worcester
+Alex Dowad
+Alex Suykov
+Alexander Monakov
+Andre McCurdy
+Andrew Kelley
+Anthony G. Basile
+Aric Belsito
+Arvid Picciani
+Bartosz Brachaczek
+Benjamin Peterson
+Bobby Bingham
+Boris Brezillon
+Brent Cook
+Chris Spiegel
+Clément Vasseur
+Daniel Micay
+Daniel Sabogal
+Daurnimator
+David Carlier
+David Edelsohn
+Denys Vlasenko
+Dmitry Ivanov
+Dmitry V. Levin
+Drew DeVault
+Emil Renner Berthing
+Fangrui Song
+Felix Fietkau
+Felix Janda
+Gianluca Anzolin
+Hauke Mehrtens
+He X
+Hiltjo Posthuma
+Isaac Dunham
+Jaydeep Patil
+Jens Gustedt
+Jeremy Huntwork
+Jo-Philipp Wich
+Joakim Sindholt
+John Spencer
+Julien Ramseier
+Justin Cormack
+Kaarle Ritvanen
+Khem Raj
+Kylie McClain
+Leah Neukirchen
+Luca Barbato
+Luka Perkov
+M Farkas-Dyck (Strake)
+Mahesh Bodapati
+Markus Wichmann
+Masanori Ogino
+Michael Clark
+Michael Forney
+Mikhail Kremnyov
+Natanael Copa
+Nicholas J. Kain
+orc
+Pascal Cuoq
+Patrick Oppenlander
+Petr Hosek
+Petr Skocik
+Pierre Carrier
+Reini Urban
+Rich Felker
+Richard Pennington
+Ryan Fairfax
+Samuel Holland
+Segev Finer
+Shiz
+sin
+Solar Designer
+Stefan Kristiansson
+Stefan O'Rear
+Szabolcs Nagy
+Timo Teräs
+Trutz Behn
+Valentin Ochs
+Will Dietz
+William Haddon
+William Pitcock
+
+Portions of this software are derived from third-party works licensed
+under terms compatible with the above MIT license:
+
+The TRE regular expression implementation (src/regex/reg* and
+src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
+under a 2-clause BSD license (license text in the source files). The
+included version has been heavily modified by Rich Felker in 2012, in
+the interests of size, simplicity, and namespace cleanliness.
+
+Much of the math library code (src/math/* and src/complex/*) is
+Copyright © 1993,2004 Sun Microsystems or
+Copyright © 2003-2011 David Schultz or
+Copyright © 2003-2009 Steven G. Kargl or
+Copyright © 2003-2009 Bruce D. Evans or
+Copyright © 2008 Stephen L. Moshier or
+Copyright © 2017-2018 Arm Limited
+and labelled as such in comments in the individual source files. All
+have been licensed under extremely permissive terms.
+
+The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
+The Android Open Source Project and is licensed under a two-clause BSD
+license. It was taken from Bionic libc, used on Android.
+
+The AArch64 memcpy and memset code (src/string/aarch64/*) are
+Copyright © 1999-2019, Arm Limited.
+
+The implementation of DES for crypt (src/crypt/crypt_des.c) is
+Copyright © 1994 David Burren. It is licensed under a BSD license.
+
+The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
+originally written by Solar Designer and placed into the public
+domain. The code also comes with a fallback permissive license for use
+in jurisdictions that may not recognize the public domain.
+
+The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
+Valentin Ochs and is licensed under an MIT-style license.
+
+The x86_64 port was written by Nicholas J. Kain and is licensed under
+the standard MIT terms.
+
+The mips and microblaze ports were originally written by Richard
+Pennington for use in the ellcc project. The original code was adapted
+by Rich Felker for build system and code conventions during upstream
+integration. It is licensed under the standard MIT terms.
+
+The mips64 port was contributed by Imagination Technologies and is
+licensed under the standard MIT terms.
+
+The powerpc port was also originally written by Richard Pennington,
+and later supplemented and integrated by John Spencer. It is licensed
+under the standard MIT terms.
+
+All other files which have no copyright comments are original works
+produced specifically for use as part of this library, written either
+by Rich Felker, the main author of the library, or by one or more
+contibutors listed above. Details on authorship of individual files
+can be found in the git version control history of the project. The
+omission of copyright and license comments in each file is in the
+interest of source tree size.
+
+In addition, permission is hereby granted for all public header files
+(include/* and arch/*/bits/*) and crt files intended to be linked into
+applications (crt/*, ldso/dlstart.c, and arch/*/crt_arch.h) to omit
+the copyright notice and permission notice otherwise required by the
+license, and to use these files without any requirement of
+attribution. These files include substantial contributions from:
+
+Bobby Bingham
+John Spencer
+Nicholas J. Kain
+Rich Felker
+Richard Pennington
+Stefan Kristiansson
+Szabolcs Nagy
+
+all of whom have explicitly granted such permission.
+
+This file previously contained text expressing a belief that most of
+the files covered by the above exception were sufficiently trivial not
+to be subject to copyright, resulting in confusion over whether it
+negated the permissions granted in the license. In the spirit of
+permissive licensing, and of not having licensing issues being an
+obstacle to adoption, that text has been removed.
+Copyright (c) 2005-2023, NumPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+       copyright notice, this list of conditions and the following
+       disclaimer in the documentation and/or other materials provided
+       with the distribution.
+
+    * Neither the name of the NumPy Developers nor the names of any
+       contributors may be used to endorse or promote products derived
+       from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+
+Copyright (c) Donald Stufft and individual contributors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    1. Redistributions of source code must retain the above copyright notice,
+       this list of conditions and the following disclaimer.
+
+    2. Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC. Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see https://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team. In October of the same
+year, the PythonLabs team moved to Digital Creations, which became
+Zope Corporation. In 2001, the Python Software Foundation (PSF, see
+https://www.python.org/psf/) was formed, a non-profit organization
+created specifically to own Python-related Intellectual Property.
+Zope Corporation was a sponsoring member of the PSF.
+
+All Python releases are Open Source (see https://opensource.org for
+the Open Source Definition). Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+    Release         Derived     Year        Owner       GPL-
+                    from                                compatible? (1)
+
+    0.9.0 thru 1.2              1991-1995   CWI         yes
+    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
+    1.6             1.5.2       2000        CNRI        no
+    2.0             1.6         2000        BeOpen.com  no
+    1.6.1           1.6         2001        CNRI        yes (2)
+    2.1             2.0+1.6.1   2001        PSF         no
+    2.0.1           2.0+1.6.1   2001        PSF         yes
+    2.1.1           2.1+2.0.1   2001        PSF         yes
+    2.1.2           2.1.1       2002        PSF         yes
+    2.1.3           2.1.2       2002        PSF         yes
+    2.2 and above   2.1.1       2001-now    PSF         yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+    the GPL. All Python licenses, unlike the GPL, let you distribute
+    a modified version without making your changes open source. The
+    GPL-compatible licenses make it possible to combine Python with
+    other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+    because its license has a choice of law clause. According to
+    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+    is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+Python software and documentation are licensed under the
+Python Software Foundation License Version 2.
+
+Starting with Python 3.8.6, examples, recipes, and other code in
+the documentation are dual licensed under the PSF License Version 2
+and the Zero-Clause BSD license.
+
+Some software incorporated into Python is under different licenses.
+The licenses are listed with code falling under that license.
+
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation;
+All Rights Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions. Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee. This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party. As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee. Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement. This Agreement together with
+Python 1.6.1 may be located on the internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013. This
+Agreement may also be obtained from a proxy server on the internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement.
Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION +---------------------------------------------------------------------- + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. +Copyright (c) 2014, Al Sweigart +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Copyright (c) 2017 Anthony Sottile + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +Copyright (c) 2015-2019 Jared Hobbs + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +Developed by ESN, an Electronic Arts Inc. studio. +Copyright (c) 2014, Electronic Arts Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of ESN, Electronic Arts Inc. nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS INC. BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + +Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) +https://github.com/client9/stringencoders + + Copyright 2005, 2006, 2007 + Nick Galbreath -- nickg [at] modp [dot] com + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + Neither the name of the modp.com nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + This is the standard "new" BSD license: + http://www.opensource.org/licenses/bsd-license.php + +https://github.com/client9/stringencoders/blob/cfd5c1507325ae497ea9bacdacba12c0ffd79d30/COPYING + +---- + +Numeric decoder derived from the TCL library +https://opensource.apple.com/source/tcl/tcl-14/tcl/license.terms + * Copyright (c) 1988-1993 The Regents of the University of California. + * Copyright (c) 1994 Sun Microsystems, Inc. + + This software is copyrighted by the Regents of the University of + California, Sun Microsystems, Inc., Scriptics Corporation, ActiveState + Corporation and other parties. The following terms apply to all files + associated with the software unless explicitly disclaimed in + individual files. + + The authors hereby grant permission to use, copy, modify, distribute, + and license this software and its documentation for any purpose, provided + that existing copyright notices are retained in all copies and that this + notice is included verbatim in any distributions. No written agreement, + license, or royalty fee is required for any of the authorized uses.
+ Modifications to this software may be copyrighted by their authors + and need not follow the licensing terms described here, provided that + the new terms are clearly indicated on the first page of each file where + they apply. + + IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY + FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY + DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE + IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE + NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR + MODIFICATIONS. + + GOVERNMENT USE: If you are acquiring this software on behalf of the + U.S. government, the Government shall have only "Restricted Rights" + in the software and related documentation as defined in the Federal + Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you + are acquiring the software on behalf of the Department of Defense, the + software shall be classified as "Commercial Computer Software" and the + Government shall have only "Restricted Rights" as defined in Clause + 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the + authors grant the U.S. Government and others acting in its behalf + permission to use and distribute the software in accordance with the + terms specified in this license. + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below).
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/METADATA b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..344345f4c22cf91766ee26bc3bf91d625e7330f7 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/METADATA @@ -0,0 +1,1573 @@ +Metadata-Version: 2.1 +Name: pandas +Version: 2.2.3 +Summary: Powerful data structures for data analysis, time series, and statistics +Home-page: https://pandas.pydata.org +Author-Email: The Pandas Development Team +License: BSD 3-Clause License + + Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team + All rights reserved. + + Copyright (c) 2011-2023, Open source contributors. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Copyright (c) 2010-2019 Keith Goodman + Copyright (c) 2019 Bottleneck Developers + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + Copyright 2017- Paul Ganssle + Copyright 2017- dateutil contributors (see AUTHORS file) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+ + The above license applies to all contributions after 2017-12-01, as well as + all contributions that have been re-licensed (see AUTHORS file for the list of + contributors who have re-licensed their code). + -------------------------------------------------------------------------------- + dateutil - Extensions to the standard Python datetime module. + + Copyright (c) 2003-2011 - Gustavo Niemeyer + Copyright (c) 2012-2014 - Tomi Pieviläinen + Copyright (c) 2014-2016 - Yaron de Leeuw + Copyright (c) 2015- - Paul Ganssle + Copyright (c) 2015- - dateutil contributors (see AUTHORS file) + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + The above BSD License applies to all code, even that also covered by Apache 2.0. + + # MIT License + + Copyright (c) 2019 Hadley Wickham; RStudio; and Evan Miller + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + Based on http://opensource.org/licenses/MIT + + This is a template.
Complete and ship as file LICENSE the following 2 + lines (only) + + YEAR: + COPYRIGHT HOLDER: + + and specify as + + License: MIT + file LICENSE + + Copyright (c) , + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + The MIT License + + Copyright (c) 2008- Attractive Chaos + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + musl as a whole is licensed under the following standard MIT license: + + ---------------------------------------------------------------------- + Copyright © 2005-2020 Rich Felker, et al. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + ---------------------------------------------------------------------- + + Authors/contributors include: + + A. Wilcox + Ada Worcester + Alex Dowad + Alex Suykov + Alexander Monakov + Andre McCurdy + Andrew Kelley + Anthony G. Basile + Aric Belsito + Arvid Picciani + Bartosz Brachaczek + Benjamin Peterson + Bobby Bingham + Boris Brezillon + Brent Cook + Chris Spiegel + Clément Vasseur + Daniel Micay + Daniel Sabogal + Daurnimator + David Carlier + David Edelsohn + Denys Vlasenko + Dmitry Ivanov + Dmitry V. Levin + Drew DeVault + Emil Renner Berthing + Fangrui Song + Felix Fietkau + Felix Janda + Gianluca Anzolin + Hauke Mehrtens + He X + Hiltjo Posthuma + Isaac Dunham + Jaydeep Patil + Jens Gustedt + Jeremy Huntwork + Jo-Philipp Wich + Joakim Sindholt + John Spencer + Julien Ramseier + Justin Cormack + Kaarle Ritvanen + Khem Raj + Kylie McClain + Leah Neukirchen + Luca Barbato + Luka Perkov + M Farkas-Dyck (Strake) + Mahesh Bodapati + Markus Wichmann + Masanori Ogino + Michael Clark + Michael Forney + Mikhail Kremnyov + Natanael Copa + Nicholas J. Kain + orc + Pascal Cuoq + Patrick Oppenlander + Petr Hosek + Petr Skocik + Pierre Carrier + Reini Urban + Rich Felker + Richard Pennington + Ryan Fairfax + Samuel Holland + Segev Finer + Shiz + sin + Solar Designer + Stefan Kristiansson + Stefan O'Rear + Szabolcs Nagy + Timo Teräs + Trutz Behn + Valentin Ochs + Will Dietz + William Haddon + William Pitcock + + Portions of this software are derived from third-party works licensed + under terms compatible with the above MIT license: + + The TRE regular expression implementation (src/regex/reg* and + src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed + under a 2-clause BSD license (license text in the source files). The + included version has been heavily modified by Rich Felker in 2012, in + the interests of size, simplicity, and namespace cleanliness. + + Much of the math library code (src/math/* and src/complex/*) is + Copyright © 1993,2004 Sun Microsystems or + Copyright © 2003-2011 David Schultz or + Copyright © 2003-2009 Steven G. Kargl or + Copyright © 2003-2009 Bruce D. Evans or + Copyright © 2008 Stephen L. Moshier or + Copyright © 2017-2018 Arm Limited + and labelled as such in comments in the individual source files. All + have been licensed under extremely permissive terms. + + The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008 + The Android Open Source Project and is licensed under a two-clause BSD + license. It was taken from Bionic libc, used on Android. + + The AArch64 memcpy and memset code (src/string/aarch64/*) are + Copyright © 1999-2019, Arm Limited. + + The implementation of DES for crypt (src/crypt/crypt_des.c) is + Copyright © 1994 David Burren. It is licensed under a BSD license. + + The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was + originally written by Solar Designer and placed into the public + domain. The code also comes with a fallback permissive license for use + in jurisdictions that may not recognize the public domain. + + The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 + Valentin Ochs and is licensed under an MIT-style license. + + The x86_64 port was written by Nicholas J. 
Kain and is licensed under + the standard MIT terms. + + The mips and microblaze ports were originally written by Richard + Pennington for use in the ellcc project. The original code was adapted + by Rich Felker for build system and code conventions during upstream + integration. It is licensed under the standard MIT terms. + + The mips64 port was contributed by Imagination Technologies and is + licensed under the standard MIT terms. + + The powerpc port was also originally written by Richard Pennington, + and later supplemented and integrated by John Spencer. It is licensed + under the standard MIT terms. + + All other files which have no copyright comments are original works + produced specifically for use as part of this library, written either + by Rich Felker, the main author of the library, or by one or more + contributors listed above. Details on authorship of individual files + can be found in the git version control history of the project. The + omission of copyright and license comments in each file is in the + interest of source tree size. + + In addition, permission is hereby granted for all public header files + (include/* and arch/*/bits/*) and crt files intended to be linked into + applications (crt/*, ldso/dlstart.c, and arch/*/crt_arch.h) to omit + the copyright notice and permission notice otherwise required by the + license, and to use these files without any requirement of + attribution. These files include substantial contributions from: + + Bobby Bingham + John Spencer + Nicholas J. Kain + Rich Felker + Richard Pennington + Stefan Kristiansson + Szabolcs Nagy + + all of whom have explicitly granted such permission. + + This file previously contained text expressing a belief that most of + the files covered by the above exception were sufficiently trivial not + to be subject to copyright, resulting in confusion over whether it + negated the permissions granted in the license. In the spirit of + permissive licensing, and of not having licensing issues being an + obstacle to adoption, that text has been removed. + + Copyright (c) 2005-2023, NumPy Developers. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + + Copyright (c) Donald Stufft and individual contributors. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + A. HISTORY OF THE SOFTWARE + ========================== + + Python was created in the early 1990s by Guido van Rossum at Stichting + Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands + as a successor of a language called ABC. Guido remains Python's + principal author, although it includes many contributions from others. + + In 1995, Guido continued his work on Python at the Corporation for + National Research Initiatives (CNRI, see https://www.cnri.reston.va.us) + in Reston, Virginia where he released several versions of the + software. + + In May 2000, Guido and the Python core development team moved to + BeOpen.com to form the BeOpen PythonLabs team. In October of the same + year, the PythonLabs team moved to Digital Creations, which became + Zope Corporation. In 2001, the Python Software Foundation (PSF, see + https://www.python.org/psf/) was formed, a non-profit organization + created specifically to own Python-related Intellectual Property. + Zope Corporation was a sponsoring member of the PSF. + + All Python releases are Open Source (see https://opensource.org for + the Open Source Definition). Historically, most, but not all, Python + releases have also been GPL-compatible; the table below summarizes + the various releases. + + Release Derived Year Owner GPL- + from compatible? (1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2 and above 2.1.1 2001-now PSF yes + + Footnotes: + + (1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + + (2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause.
According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + + Thanks to the many outside volunteers who have worked under Guido's + direction to make these releases possible. + + + B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON + =============================================================== + + Python software and documentation are licensed under the + Python Software Foundation License Version 2. + + Starting with Python 3.8.6, examples, recipes, and other code in + the documentation are dual licensed under the PSF License Version 2 + and the Zero-Clause BSD license. + + Some software incorporated into Python is under different licenses. + The licenses are listed with code falling under that license. + + + PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 + -------------------------------------------- + + 1. This LICENSE AGREEMENT is between the Python Software Foundation + ("PSF"), and the Individual or Organization ("Licensee") accessing and + otherwise using this software ("Python") in source or binary form and + its associated documentation. + + 2. Subject to the terms and conditions of this License Agreement, PSF hereby + grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, + analyze, test, perform and/or display publicly, prepare derivative works, + distribute, and otherwise use Python alone or in any derivative version, + provided, however, that PSF's License Agreement and PSF's notice of copyright, + i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, + 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; + All Rights Reserved" are retained in Python alone or in any derivative version + prepared by Licensee. + + 3. In the event Licensee prepares a derivative work that is based on + or incorporates Python or any part thereof, and wants to make + the derivative work available to others as provided herein, then + Licensee hereby agrees to include in any such work a brief summary of + the changes made to Python. + + 4. PSF is making Python available to Licensee on an "AS IS" + basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR + IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND + DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS + FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT + INFRINGE ANY THIRD PARTY RIGHTS. + + 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON + FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS + A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, + OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + + 6. This License Agreement will automatically terminate upon a material + breach of its terms and conditions. + + 7. Nothing in this License Agreement shall be deemed to create any + relationship of agency, partnership, or joint venture between PSF and + Licensee. This License Agreement does not grant permission to use PSF + trademarks or trade name in a trademark sense to endorse or promote + products or services of Licensee, or any third party. + + 8. By copying, installing or otherwise using Python, Licensee + agrees to be bound by the terms and conditions of this License + Agreement. + + + BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 + ------------------------------------------- + + BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + + 1. 
This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an + office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the + Individual or Organization ("Licensee") accessing and otherwise using + this software in source or binary form and its associated + documentation ("the Software"). + + 2. Subject to the terms and conditions of this BeOpen Python License + Agreement, BeOpen hereby grants Licensee a non-exclusive, + royalty-free, world-wide license to reproduce, analyze, test, perform + and/or display publicly, prepare derivative works, distribute, and + otherwise use the Software alone or in any derivative version, + provided, however, that the BeOpen Python License is retained in the + Software, alone or in any derivative version prepared by Licensee. + + 3. BeOpen is making the Software available to Licensee on an "AS IS" + basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR + IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND + DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS + FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT + INFRINGE ANY THIRD PARTY RIGHTS. + + 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE + SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS + AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY + DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + + 5. This License Agreement will automatically terminate upon a material + breach of its terms and conditions. + + 6. This License Agreement shall be governed by and interpreted in all + respects by the law of the State of California, excluding conflict of + law provisions. Nothing in this License Agreement shall be deemed to + create any relationship of agency, partnership, or joint venture + between BeOpen and Licensee. This License Agreement does not grant + permission to use BeOpen trademarks or trade names in a trademark + sense to endorse or promote products or services of Licensee, or any + third party. As an exception, the "BeOpen Python" logos available at + http://www.pythonlabs.com/logos.html may be used according to the + permissions granted on that web page. + + 7. By copying, installing or otherwise using the software, Licensee + agrees to be bound by the terms and conditions of this License + Agreement. + + + CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 + --------------------------------------- + + 1. This LICENSE AGREEMENT is between the Corporation for National + Research Initiatives, having an office at 1895 Preston White Drive, + Reston, VA 20191 ("CNRI"), and the Individual or Organization + ("Licensee") accessing and otherwise using Python 1.6.1 software in + source or binary form and its associated documentation. + + 2. Subject to the terms and conditions of this License Agreement, CNRI + hereby grants Licensee a nonexclusive, royalty-free, world-wide + license to reproduce, analyze, test, perform and/or display publicly, + prepare derivative works, distribute, and otherwise use Python 1.6.1 + alone or in any derivative version, provided, however, that CNRI's + License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) + 1995-2001 Corporation for National Research Initiatives; All Rights + Reserved" are retained in Python 1.6.1 alone or in any derivative + version prepared by Licensee. 
Alternately, in lieu of CNRI's License + Agreement, Licensee may substitute the following text (omitting the + quotes): "Python 1.6.1 is made available subject to the terms and + conditions in CNRI's License Agreement. This Agreement together with + Python 1.6.1 may be located on the internet using the following + unique, persistent identifier (known as a handle): 1895.22/1013. This + Agreement may also be obtained from a proxy server on the internet + using the following URL: http://hdl.handle.net/1895.22/1013". + + 3. In the event Licensee prepares a derivative work that is based on + or incorporates Python 1.6.1 or any part thereof, and wants to make + the derivative work available to others as provided herein, then + Licensee hereby agrees to include in any such work a brief summary of + the changes made to Python 1.6.1. + + 4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" + basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR + IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND + DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS + FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT + INFRINGE ANY THIRD PARTY RIGHTS. + + 5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON + 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS + A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, + OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + + 6. This License Agreement will automatically terminate upon a material + breach of its terms and conditions. + + 7. This License Agreement shall be governed by the federal + intellectual property law of the United States, including without + limitation the federal copyright law, and, to the extent such + U.S. federal law does not apply, by the law of the Commonwealth of + Virginia, excluding Virginia's conflict of law provisions. + Notwithstanding the foregoing, with regard to derivative works based + on Python 1.6.1 that incorporate non-separable material that was + previously distributed under the GNU General Public License (GPL), the + law of the Commonwealth of Virginia shall govern this License + Agreement only as to issues arising under or with respect to + Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this + License Agreement shall be deemed to create any relationship of + agency, partnership, or joint venture between CNRI and Licensee. This + License Agreement does not grant permission to use CNRI trademarks or + trade name in a trademark sense to endorse or promote products or + services of Licensee, or any third party. + + 8. By clicking on the "ACCEPT" button where indicated, or by copying, + installing or otherwise using Python 1.6.1, Licensee agrees to be + bound by the terms and conditions of this License Agreement. + + ACCEPT + + + CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 + -------------------------------------------------- + + Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, + The Netherlands. All rights reserved. 
+ + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose and without fee is hereby granted, + provided that the above copyright notice appear in all copies and that + both that copyright notice and this permission notice appear in + supporting documentation, and that the name of Stichting Mathematisch + Centrum or CWI not be used in advertising or publicity pertaining to + distribution of the software without specific, written prior + permission. + + STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO + THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND + FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE + FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION + ---------------------------------------------------------------------- + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH + REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, + INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR + OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + PERFORMANCE OF THIS SOFTWARE. + Copyright (c) 2014, Al Sweigart + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + Copyright (c) 2017 Anthony Sottile + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + Copyright (c) 2015-2019 Jared Hobbs + + Permission is hereby granted, free of charge, to any person obtaining a copy of + this software and associated documentation files (the "Software"), to deal in + the Software without restriction, including without limitation the rights to + use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is furnished to do + so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + Developed by ESN, an Electronic Arts Inc. studio. + Copyright (c) 2014, Electronic Arts Inc. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of ESN, Electronic Arts Inc. nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission.
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS INC. BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---- + + Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) + https://github.com/client9/stringencoders + + Copyright 2005, 2006, 2007 + Nick Galbreath -- nickg [at] modp [dot] com + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + Neither the name of the modp.com nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + This is the standard "new" BSD license: + http://www.opensource.org/licenses/bsd-license.php + + https://github.com/client9/stringencoders/blob/cfd5c1507325ae497ea9bacdacba12c0ffd79d30/COPYING + + ---- + + Numeric decoder derived from from TCL library + https://opensource.apple.com/source/tcl/tcl-14/tcl/license.terms + * Copyright (c) 1988-1993 The Regents of the University of California. + * Copyright (c) 1994 Sun Microsystems, Inc. + + This software is copyrighted by the Regents of the University of + California, Sun Microsystems, Inc., Scriptics Corporation, ActiveState + Corporation and other parties. The following terms apply to all files + associated with the software unless explicitly disclaimed in + individual files. + + The authors hereby grant permission to use, copy, modify, distribute, + and license this software and its documentation for any purpose, provided + that existing copyright notices are retained in all copies and that this + notice is included verbatim in any distributions. 
No written agreement, + license, or royalty fee is required for any of the authorized uses. + Modifications to this software may be copyrighted by their authors + and need not follow the licensing terms described here, provided that + the new terms are clearly indicated on the first page of each file where + they apply. + + IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY + FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY + DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE + IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE + NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR + MODIFICATIONS. + + GOVERNMENT USE: If you are acquiring this software on behalf of the + U.S. government, the Government shall have only "Restricted Rights" + in the software and related documentation as defined in the Federal + Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you + are acquiring the software on behalf of the Department of Defense, the + software shall be classified as "Commercial Computer Software" and the + Government shall have only "Restricted Rights" as defined in Clause + 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the + authors grant the U.S. Government and others acting in its behalf + permission to use and distribute the software in accordance with the + terms specified in this license. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, and + distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by the copyright + owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all other entities + that control, are controlled by, or are under common control with that entity. + For the purposes of this definition, "control" means (i) the power, direct or + indirect, to cause the direction or management of such entity, whether by + contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity exercising + permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, including + but not limited to software source code, documentation source, and configuration + files. + + "Object" form shall mean any form resulting from mechanical transformation or + translation of a Source form, including but not limited to compiled object code, + generated documentation, and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or Object form, made + available under the License, as indicated by a copyright notice that is included + in or attached to the work (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object form, that + is based on (or derived from) the Work and for which the editorial revisions, + annotations, elaborations, or other modifications represent, as a whole, an + original work of authorship. For the purposes of this License, Derivative Works + shall not include works that remain separable from, or merely link (or bind by + name) to the interfaces of, the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including the original version + of the Work and any modifications or additions to that Work or Derivative Works + thereof, that is intentionally submitted to Licensor for inclusion in the Work + by the copyright owner or by an individual or Legal Entity authorized to submit + on behalf of the copyright owner. For the purposes of this definition, + "submitted" means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, and + issue tracking systems that are managed by, or on behalf of, the Licensor for + the purpose of discussing and improving the Work, but excluding communication + that is conspicuously marked or otherwise designated in writing by the copyright + owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity on behalf + of whom a Contribution has been received by Licensor and subsequently + incorporated within the Work. + + 2. Grant of Copyright License. + + Subject to the terms and conditions of this License, each Contributor hereby + grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, + irrevocable copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the Work and such + Derivative Works in Source or Object form. + + 3. Grant of Patent License. + + Subject to the terms and conditions of this License, each Contributor hereby + grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to make, have + made, use, offer to sell, sell, import, and otherwise transfer the Work, where + such license applies only to those patent claims licensable by such Contributor + that are necessarily infringed by their Contribution(s) alone or by combination + of their Contribution(s) with the Work to which such Contribution(s) was + submitted. If You institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work or a + Contribution incorporated within the Work constitutes direct or contributory + patent infringement, then any patent licenses granted to You under this License + for that Work shall terminate as of the date such litigation is filed. + + 4. Redistribution. 
+ + You may reproduce and distribute copies of the Work or Derivative Works thereof + in any medium, with or without modifications, and in Source or Object form, + provided that You meet the following conditions: + + You must give any other recipients of the Work or Derivative Works a copy of + this License; and + You must cause any modified files to carry prominent notices stating that You + changed the files; and + You must retain, in the Source form of any Derivative Works that You distribute, + all copyright, patent, trademark, and attribution notices from the Source form + of the Work, excluding those notices that do not pertain to any part of the + Derivative Works; and + If the Work includes a "NOTICE" text file as part of its distribution, then any + Derivative Works that You distribute must include a readable copy of the + attribution notices contained within such NOTICE file, excluding those notices + that do not pertain to any part of the Derivative Works, in at least one of the + following places: within a NOTICE text file distributed as part of the + Derivative Works; within the Source form or documentation, if provided along + with the Derivative Works; or, within a display generated by the Derivative + Works, if and wherever such third-party notices normally appear. The contents of + the NOTICE file are for informational purposes only and do not modify the + License. You may add Your own attribution notices within Derivative Works that + You distribute, alongside or as an addendum to the NOTICE text from the Work, + provided that such additional attribution notices cannot be construed as + modifying the License. + You may add Your own copyright statement to Your modifications and may provide + additional or different license terms and conditions for use, reproduction, or + distribution of Your modifications, or for any such Derivative Works as a whole, + provided Your use, reproduction, and distribution of the Work otherwise complies + with the conditions stated in this License. + + 5. Submission of Contributions. + + Unless You explicitly state otherwise, any Contribution intentionally submitted + for inclusion in the Work by You to the Licensor shall be under the terms and + conditions of this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify the terms of + any separate license agreement you may have executed with Licensor regarding + such Contributions. + + 6. Trademarks. + + This License does not grant permission to use the trade names, trademarks, + service marks, or product names of the Licensor, except as required for + reasonable and customary use in describing the origin of the Work and + reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. + + Unless required by applicable law or agreed to in writing, Licensor provides the + Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, + including, without limitation, any warranties or conditions of TITLE, + NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are + solely responsible for determining the appropriateness of using or + redistributing the Work and assume any risks associated with Your exercise of + permissions under this License. + + 8. Limitation of Liability. 
+ + In no event and under no legal theory, whether in tort (including negligence), + contract, or otherwise, unless required by applicable law (such as deliberate + and grossly negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, incidental, + or consequential damages of any character arising as a result of this License or + out of the use or inability to use the Work (including but not limited to + damages for loss of goodwill, work stoppage, computer failure or malfunction, or + any and all other commercial damages or losses), even if such Contributor has + been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. + + While redistributing the Work or Derivative Works thereof, You may choose to + offer, and charge a fee for, acceptance of support, warranty, indemnity, or + other liability obligations and/or rights consistent with this License. However, + in accepting such obligations, You may act only on Your own behalf and on Your + sole responsibility, not on behalf of any other Contributor, and only if You + agree to indemnify, defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason of your + accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work + + To apply the Apache License to your work, attach the following boilerplate + notice, with the fields enclosed by brackets "[]" replaced with your own + identifying information. (Don't include the brackets!) The text should be + enclosed in the appropriate comment syntax for the file format. We also + recommend that a file or class name and description of purpose be included on + the same "printed page" as the copyright notice for easier identification within + third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Scientific/Engineering +Project-URL: Homepage, https://pandas.pydata.org +Project-URL: Documentation, https://pandas.pydata.org/docs/ +Project-URL: Repository, https://github.com/pandas-dev/pandas +Requires-Python: >=3.9 +Requires-Dist: numpy>=1.22.4; python_version < "3.11" +Requires-Dist: numpy>=1.23.2; python_version == "3.11" +Requires-Dist: numpy>=1.26.0; python_version >= "3.12" +Requires-Dist: python-dateutil>=2.8.2 +Requires-Dist: pytz>=2020.1 +Requires-Dist: tzdata>=2022.7 +Requires-Dist: hypothesis>=6.46.1; extra == "test" +Requires-Dist: pytest>=7.3.2; extra == "test" +Requires-Dist: pytest-xdist>=2.2.0; extra == "test" +Requires-Dist: pyarrow>=10.0.1; extra == "pyarrow" +Requires-Dist: bottleneck>=1.3.6; extra == "performance" +Requires-Dist: numba>=0.56.4; extra == "performance" +Requires-Dist: numexpr>=2.8.4; extra == "performance" +Requires-Dist: scipy>=1.10.0; extra == "computation" +Requires-Dist: xarray>=2022.12.0; extra == "computation" +Requires-Dist: fsspec>=2022.11.0; extra == "fss" +Requires-Dist: s3fs>=2022.11.0; extra == "aws" +Requires-Dist: gcsfs>=2022.11.0; extra == "gcp" +Requires-Dist: pandas-gbq>=0.19.0; extra == "gcp" +Requires-Dist: odfpy>=1.4.1; extra == "excel" +Requires-Dist: openpyxl>=3.1.0; extra == "excel" +Requires-Dist: python-calamine>=0.1.7; extra == "excel" +Requires-Dist: pyxlsb>=1.0.10; extra == "excel" +Requires-Dist: xlrd>=2.0.1; extra == "excel" +Requires-Dist: xlsxwriter>=3.0.5; extra == "excel" +Requires-Dist: pyarrow>=10.0.1; extra == "parquet" +Requires-Dist: pyarrow>=10.0.1; extra == "feather" +Requires-Dist: tables>=3.8.0; extra == "hdf5" +Requires-Dist: pyreadstat>=1.2.0; extra == "spss" +Requires-Dist: SQLAlchemy>=2.0.0; extra == "postgresql" +Requires-Dist: psycopg2>=2.9.6; extra == "postgresql" +Requires-Dist: adbc-driver-postgresql>=0.8.0; extra == "postgresql" +Requires-Dist: SQLAlchemy>=2.0.0; extra == "mysql" +Requires-Dist: pymysql>=1.0.2; extra == "mysql" +Requires-Dist: SQLAlchemy>=2.0.0; extra == "sql-other" +Requires-Dist: adbc-driver-postgresql>=0.8.0; extra == "sql-other" +Requires-Dist: adbc-driver-sqlite>=0.8.0; extra == "sql-other" +Requires-Dist: beautifulsoup4>=4.11.2; extra == "html" +Requires-Dist: html5lib>=1.1; extra == "html" +Requires-Dist: lxml>=4.9.2; extra == "html" +Requires-Dist: lxml>=4.9.2; extra == "xml" +Requires-Dist: matplotlib>=3.6.3; extra == "plot" +Requires-Dist: jinja2>=3.1.2; extra == "output-formatting" +Requires-Dist: tabulate>=0.9.0; extra == "output-formatting" +Requires-Dist: PyQt5>=5.15.9; extra == "clipboard" +Requires-Dist: qtpy>=2.3.0; extra == "clipboard" +Requires-Dist: zstandard>=0.19.0; extra == "compression" +Requires-Dist: dataframe-api-compat>=0.1.7; extra == "consortium-standard" +Requires-Dist: adbc-driver-postgresql>=0.8.0; extra == "all" +Requires-Dist: adbc-driver-sqlite>=0.8.0; extra == "all" +Requires-Dist: 
beautifulsoup4>=4.11.2; extra == "all" +Requires-Dist: bottleneck>=1.3.6; extra == "all" +Requires-Dist: dataframe-api-compat>=0.1.7; extra == "all" +Requires-Dist: fastparquet>=2022.12.0; extra == "all" +Requires-Dist: fsspec>=2022.11.0; extra == "all" +Requires-Dist: gcsfs>=2022.11.0; extra == "all" +Requires-Dist: html5lib>=1.1; extra == "all" +Requires-Dist: hypothesis>=6.46.1; extra == "all" +Requires-Dist: jinja2>=3.1.2; extra == "all" +Requires-Dist: lxml>=4.9.2; extra == "all" +Requires-Dist: matplotlib>=3.6.3; extra == "all" +Requires-Dist: numba>=0.56.4; extra == "all" +Requires-Dist: numexpr>=2.8.4; extra == "all" +Requires-Dist: odfpy>=1.4.1; extra == "all" +Requires-Dist: openpyxl>=3.1.0; extra == "all" +Requires-Dist: pandas-gbq>=0.19.0; extra == "all" +Requires-Dist: psycopg2>=2.9.6; extra == "all" +Requires-Dist: pyarrow>=10.0.1; extra == "all" +Requires-Dist: pymysql>=1.0.2; extra == "all" +Requires-Dist: PyQt5>=5.15.9; extra == "all" +Requires-Dist: pyreadstat>=1.2.0; extra == "all" +Requires-Dist: pytest>=7.3.2; extra == "all" +Requires-Dist: pytest-xdist>=2.2.0; extra == "all" +Requires-Dist: python-calamine>=0.1.7; extra == "all" +Requires-Dist: pyxlsb>=1.0.10; extra == "all" +Requires-Dist: qtpy>=2.3.0; extra == "all" +Requires-Dist: scipy>=1.10.0; extra == "all" +Requires-Dist: s3fs>=2022.11.0; extra == "all" +Requires-Dist: SQLAlchemy>=2.0.0; extra == "all" +Requires-Dist: tables>=3.8.0; extra == "all" +Requires-Dist: tabulate>=0.9.0; extra == "all" +Requires-Dist: xarray>=2022.12.0; extra == "all" +Requires-Dist: xlrd>=2.0.1; extra == "all" +Requires-Dist: xlsxwriter>=3.0.5; extra == "all" +Requires-Dist: zstandard>=0.19.0; extra == "all" +Provides-Extra: test +Provides-Extra: pyarrow +Provides-Extra: performance +Provides-Extra: computation +Provides-Extra: fss +Provides-Extra: aws +Provides-Extra: gcp +Provides-Extra: excel +Provides-Extra: parquet +Provides-Extra: feather +Provides-Extra: hdf5 +Provides-Extra: spss +Provides-Extra: postgresql +Provides-Extra: mysql +Provides-Extra: sql-other +Provides-Extra: html +Provides-Extra: xml +Provides-Extra: plot +Provides-Extra: output-formatting +Provides-Extra: clipboard +Provides-Extra: compression +Provides-Extra: consortium-standard +Provides-Extra: all +Description-Content-Type: text/markdown + +
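The header block above is the wheel's `pandas-2.2.3.dist-info/METADATA` file: each `Provides-Extra` name is an optional-dependency group that can be requested at install time (for example `pip install "pandas[performance]"`), and the `Requires-Dist` lines carry the matching version pins. The same headers can be read back at runtime with the standard library; a minimal sketch, assuming pandas is installed in the current environment:

```python
# Illustrative sketch (not part of pandas itself): inspect the
# METADATA headers shown above via importlib.metadata.
from importlib.metadata import metadata

md = metadata("pandas")  # assumes pandas is installed in this environment

print(md["Version"])                         # e.g. 2.2.3
print(md["Requires-Python"])                 # >=3.9
print(sorted(md.get_all("Provides-Extra")))  # 'all', 'excel', 'performance', ...
```

Each extra then maps to an install command such as `pip install "pandas[excel]"`, which pulls in the pinned optional dependencies listed above.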
+
+
+ +----------------- + +# pandas: powerful Python data analysis toolkit + +| | | +| --- | --- | +| Testing | [![CI - Test](https://github.com/pandas-dev/pandas/actions/workflows/unit-tests.yml/badge.svg)](https://github.com/pandas-dev/pandas/actions/workflows/unit-tests.yml) [![Coverage](https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=main)](https://codecov.io/gh/pandas-dev/pandas) | +| Package | [![PyPI Latest Release](https://img.shields.io/pypi/v/pandas.svg)](https://pypi.org/project/pandas/) [![PyPI Downloads](https://img.shields.io/pypi/dm/pandas.svg?label=PyPI%20downloads)](https://pypi.org/project/pandas/) [![Conda Latest Release](https://anaconda.org/conda-forge/pandas/badges/version.svg)](https://anaconda.org/conda-forge/pandas) [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/pandas.svg?label=Conda%20downloads)](https://anaconda.org/conda-forge/pandas) | +| Meta | [![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3509134.svg)](https://doi.org/10.5281/zenodo.3509134) [![License - BSD 3-Clause](https://img.shields.io/pypi/l/pandas.svg)](https://github.com/pandas-dev/pandas/blob/main/LICENSE) [![Slack](https://img.shields.io/badge/join_Slack-information-brightgreen.svg?logo=slack)](https://pandas.pydata.org/docs/dev/development/community.html?highlight=slack#community-slack) | + + +## What is it? + +**pandas** is a Python package that provides fast, flexible, and expressive data +structures designed to make working with "relational" or "labeled" data both +easy and intuitive. It aims to be the fundamental high-level building block for +doing practical, **real world** data analysis in Python. Additionally, it has +the broader goal of becoming **the most powerful and flexible open source data +analysis / manipulation tool available in any language**. It is already well on +its way towards this goal. + +## Table of Contents + +- [Main Features](#main-features) +- [Where to get it](#where-to-get-it) +- [Dependencies](#dependencies) +- [Installation from sources](#installation-from-sources) +- [License](#license) +- [Documentation](#documentation) +- [Background](#background) +- [Getting Help](#getting-help) +- [Discussion and Development](#discussion-and-development) +- [Contributing to pandas](#contributing-to-pandas) + +## Main Features +Here are just a few of the things that pandas does well: + + - Easy handling of [**missing data**][missing-data] (represented as + `NaN`, `NA`, or `NaT`) in floating point as well as non-floating point data + - Size mutability: columns can be [**inserted and + deleted**][insertion-deletion] from DataFrame and higher dimensional + objects + - Automatic and explicit [**data alignment**][alignment]: objects can + be explicitly aligned to a set of labels, or the user can simply + ignore the labels and let `Series`, `DataFrame`, etc. 
automatically + align the data for you in computations + - Powerful, flexible [**group by**][groupby] functionality to perform + split-apply-combine operations on data sets, for both aggregating + and transforming data + - Make it [**easy to convert**][conversion] ragged, + differently-indexed data in other Python and NumPy data structures + into DataFrame objects + - Intelligent label-based [**slicing**][slicing], [**fancy + indexing**][fancy-indexing], and [**subsetting**][subsetting] of + large data sets + - Intuitive [**merging**][merging] and [**joining**][joining] data + sets + - Flexible [**reshaping**][reshape] and [**pivoting**][pivot-table] of + data sets + - [**Hierarchical**][mi] labeling of axes (possible to have multiple + labels per tick) + - Robust IO tools for loading data from [**flat files**][flat-files] + (CSV and delimited), [**Excel files**][excel], [**databases**][db], + and saving/loading data from the ultrafast [**HDF5 format**][hdfstore] + - [**Time series**][timeseries]-specific functionality: date range + generation and frequency conversion, moving window statistics, + date shifting and lagging + + + [missing-data]: https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html + [insertion-deletion]: https://pandas.pydata.org/pandas-docs/stable/user_guide/dsintro.html#column-selection-addition-deletion + [alignment]: https://pandas.pydata.org/pandas-docs/stable/user_guide/dsintro.html?highlight=alignment#intro-to-data-structures + [groupby]: https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#group-by-split-apply-combine + [conversion]: https://pandas.pydata.org/pandas-docs/stable/user_guide/dsintro.html#dataframe + [slicing]: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#slicing-ranges + [fancy-indexing]: https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#advanced + [subsetting]: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#boolean-indexing + [merging]: https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#database-style-dataframe-or-named-series-joining-merging + [joining]: https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#joining-on-index + [reshape]: https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html + [pivot-table]: https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html + [mi]: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#hierarchical-indexing-multiindex + [flat-files]: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#csv-text-files + [excel]: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#excel-files + [db]: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#sql-queries + [hdfstore]: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#hdf5-pytables + [timeseries]: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#time-series-date-functionality + +## Where to get it +The source code is currently hosted on GitHub at: +https://github.com/pandas-dev/pandas + +Binary installers for the latest released version are available at the [Python +Package Index (PyPI)](https://pypi.org/project/pandas) and on [Conda](https://docs.conda.io/en/latest/). + +```sh +# conda +conda install -c conda-forge pandas +``` + +```sh +# or PyPI +pip install pandas +``` + +The list of changes to pandas between each release can be found +[here](https://pandas.pydata.org/pandas-docs/stable/whatsnew/index.html). 
For full +details, see the commit logs at https://github.com/pandas-dev/pandas. + +## Dependencies +- [NumPy - Adds support for large, multi-dimensional arrays, matrices and high-level mathematical functions to operate on these arrays](https://www.numpy.org) +- [python-dateutil - Provides powerful extensions to the standard datetime module](https://dateutil.readthedocs.io/en/stable/index.html) +- [pytz - Brings the Olson tz database into Python which allows accurate and cross platform timezone calculations](https://github.com/stub42/pytz) + +See the [full installation instructions](https://pandas.pydata.org/pandas-docs/stable/install.html#dependencies) for minimum supported versions of required, recommended and optional dependencies. + +## Installation from sources +To install pandas from source, you need [Cython](https://cython.org/) in addition to the normal +dependencies above. Cython can be installed from PyPI: + +```sh +pip install cython +``` + +In the `pandas` directory (same one where you found this file after +cloning the git repo), execute: + +```sh +pip install . +``` + +or for installing in [development mode](https://pip.pypa.io/en/latest/cli/pip_install/#install-editable): + +```sh +python -m pip install -ve . --no-build-isolation --config-settings=editable-verbose=true +``` + +See the full instructions for [installing from source](https://pandas.pydata.org/docs/dev/development/contributing_environment.html). + +## License +[BSD 3](LICENSE) + +## Documentation +The official documentation is hosted on [PyData.org](https://pandas.pydata.org/pandas-docs/stable/). + +## Background +Work on ``pandas`` started at [AQR](https://www.aqr.com/) (a quantitative hedge fund) in 2008 and +has been under active development since then. + +## Getting Help + +For usage questions, the best place to go is [StackOverflow](https://stackoverflow.com/questions/tagged/pandas). +General questions and discussions can also take place on the [pydata mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata). + +## Discussion and Development +Most development discussions take place on GitHub in this repo, via the [GitHub issue tracker](https://github.com/pandas-dev/pandas/issues). + +The [pandas-dev mailing list](https://mail.python.org/mailman/listinfo/pandas-dev) can also be used for specialized discussions or design issues, and a [Slack channel](https://pandas.pydata.org/docs/dev/development/community.html?highlight=slack#community-slack) is available for quick development-related questions. + +There are also frequent [community meetings](https://pandas.pydata.org/docs/dev/development/community.html#community-meeting) for project maintainers, open to the community, as well as monthly [new contributor meetings](https://pandas.pydata.org/docs/dev/development/community.html#new-contributor-meeting) to help support new contributors. + +Additional information on the communication channels can be found on the [contributor community](https://pandas.pydata.org/docs/development/community.html) page. + +## Contributing to pandas + +[![Open Source Helpers](https://www.codetriage.com/pandas-dev/pandas/badges/users.svg)](https://www.codetriage.com/pandas-dev/pandas) + +All contributions, bug reports, bug fixes, documentation improvements, enhancements, and ideas are welcome. + +A detailed overview on how to contribute can be found in the **[contributing guide](https://pandas.pydata.org/docs/dev/development/contributing.html)**.
+ +If you are simply looking to start working with the pandas codebase, navigate to the [GitHub "issues" tab](https://github.com/pandas-dev/pandas/issues) and start looking through interesting issues. There are a number of issues listed under [Docs](https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open) and [good first issue](https://github.com/pandas-dev/pandas/issues?labels=good+first+issue&sort=updated&state=open) where you could start out. + +You can also triage issues, which may include reproducing bug reports or asking for vital information such as version numbers or reproduction instructions. If you would like to start triaging issues, one easy way to get started is to [subscribe to pandas on CodeTriage](https://www.codetriage.com/pandas-dev/pandas). + +Or perhaps, while using pandas, you have had an idea of your own, or you have been looking for something in the documentation and thought "this can be improved": you can do something about it! + +Feel free to ask questions on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Slack](https://pandas.pydata.org/docs/dev/development/community.html?highlight=slack#community-slack). + +As contributors and maintainers of this project, you are expected to abide by pandas' code of conduct. More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/.github/blob/master/CODE_OF_CONDUCT.md)
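To make the Main Features list above concrete, here is a minimal, self-contained sketch (illustrative only, not taken from the pandas documentation; it assumes a pandas 2.x installation) touching missing data, label alignment, group by, and time series:

```python
import numpy as np
import pandas as pd

# Missing data: NaN propagates through operations and is skipped by reductions.
s = pd.Series([1.0, np.nan, 3.0])
print(s.mean())  # 2.0 -- the NaN is ignored

# Automatic alignment: arithmetic matches on labels, not positions.
a = pd.Series([1, 2], index=["x", "y"])
b = pd.Series([10, 20], index=["y", "z"])
print(a + b)  # "x" and "z" become NaN; "y" is 2 + 10 = 12

# Group by: split-apply-combine in one expression.
df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
print(df.groupby("key")["val"].sum())  # a -> 3, b -> 3

# Time series: date-range generation and frequency conversion.
ts = pd.Series(range(4), index=pd.date_range("2024-01-01", periods=4, freq="h"))
print(ts.resample("2h").sum())
```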
+ +[Go to Top](#table-of-contents) diff --git a/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/RECORD b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ba636c5a55ec2a7b1da8f0f0b46c97a38b16d562 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/RECORD @@ -0,0 +1,2922 @@ +pandas-2.2.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pandas-2.2.3.dist-info/LICENSE,sha256=HeougO0cvQIz-EzuRIaylonxM7q6zPTSleUcQwUfMhY,62399 +pandas-2.2.3.dist-info/METADATA,sha256=8FQjE5gG0NddUnq6QmiSYAhNx6La0fTzKxZ3D5fR9w8,89901 +pandas-2.2.3.dist-info/RECORD,, +pandas-2.2.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas-2.2.3.dist-info/WHEEL,sha256=sZM_NeUMz2G4fDenMf11eikcCxcLaQWiYRmjwQBavQs,137 +pandas-2.2.3.dist-info/entry_points.txt,sha256=OVLKNEPs-Q7IWypWBL6fxv56_zt4sRnEI7zawo6y_0w,69 +pandas/__init__.py,sha256=EIvoyjrhoqXHZe5vh-iGfYfC-1qJEH5sLTpqzJZhK3s,8658 +pandas/__pycache__/__init__.cpython-310.pyc,, +pandas/__pycache__/_typing.cpython-310.pyc,, +pandas/__pycache__/_version.cpython-310.pyc,, +pandas/__pycache__/_version_meson.cpython-310.pyc,, +pandas/__pycache__/conftest.cpython-310.pyc,, +pandas/__pycache__/testing.cpython-310.pyc,, +pandas/_config/__init__.py,sha256=hdg_O-v73cCSrIj6uLoz1NRxgYtOILOs8mOPKdDoEUk,1437 +pandas/_config/__pycache__/__init__.cpython-310.pyc,, +pandas/_config/__pycache__/config.cpython-310.pyc,, +pandas/_config/__pycache__/dates.cpython-310.pyc,, +pandas/_config/__pycache__/display.cpython-310.pyc,, +pandas/_config/__pycache__/localization.cpython-310.pyc,, +pandas/_config/config.py,sha256=YwadTnEN93OFAxyzW289d_v4dhWLzxpMHGZrl3xt_XY,25454 +pandas/_config/dates.py,sha256=HgZFPT02hugJO7uhSTjwebcKOd34JkcYY2gSPtOydmg,668 +pandas/_config/display.py,sha256=xv_TetWUhFlVpog23QzyhMYsScops_OOsWIAGnmKdJ8,1804 +pandas/_config/localization.py,sha256=79Q2KU1aHxX6Q8Wn8EGOEUAyv3XIjQ4YaTaEzeFbtwM,5190 +pandas/_libs/__init__.py,sha256=6i-pdZncVhiCRL_ChKyrTLNhn14aDbsYw243-PfAnJQ,673 +pandas/_libs/__pycache__/__init__.cpython-310.pyc,, +pandas/_libs/algos.cpython-310-x86_64-linux-gnu.so,sha256=5ChEnphXkK-PqiIn1sLJicVMwJMRD0diANaPNrSlJN8,2194056 +pandas/_libs/algos.pyi,sha256=KEF48zZLn3TSUCmd8thdo4DzYvJ5zaCK60hYX6nzyZI,15182 +pandas/_libs/arrays.cpython-310-x86_64-linux-gnu.so,sha256=19RTaFJVH9oC19Gy0vSq9VmHfbBqakDo1fpzvA4dBIw,133184 +pandas/_libs/arrays.pyi,sha256=PfpeOMplxyN2vbfFCdkkSKGCg21SFRydvqBdeJhBVqQ,1105 +pandas/_libs/byteswap.cpython-310-x86_64-linux-gnu.so,sha256=n9SBOq6SSgiX02DKIJIe1iAXoTB9DD-aYQDUdg8sYLQ,61696 +pandas/_libs/byteswap.pyi,sha256=SxL2I1rKqe73WZgkO511PWJx20P160V4hrws1TG0JTk,423 +pandas/_libs/groupby.cpython-310-x86_64-linux-gnu.so,sha256=QSMPvfVYV1_tj2Uw28I1Ew03kphzZiBOLEF7BOsNpVI,2601544 +pandas/_libs/groupby.pyi,sha256=Q-jrhgZskMvXOhpHP6EhPhutdW4zAoNI2TQ7iE_68qc,7251 +pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so,sha256=qqWA8oGr1JJxA-tlC1hihH8AnTbKwmII8GXq_sKdL6Q,221160 +pandas/_libs/hashing.pyi,sha256=cdNwppEilaMnVN77ABt3TBadjUawMtMFgSQb1PCqwQk,181 +pandas/_libs/hashtable.cpython-310-x86_64-linux-gnu.so,sha256=1Mwq0KKO8m5OIwRUhXEDENz4TnP-5X2MI_tffUfihVo,2162216 +pandas/_libs/hashtable.pyi,sha256=jBv8QuQii-ikWklP76_DPCYfms88fpj6pPOaCOK6s0M,7424 +pandas/_libs/index.cpython-310-x86_64-linux-gnu.so,sha256=t8TruXuQ_tMPPDpi0bQVfdmaE6rWg2Po9DYvMLqhujw,988040 +pandas/_libs/index.pyi,sha256=w3sVStmL_0qAt8C_OyGXSyzugtcnELzL7ZJuCiTHXzY,3695 
+pandas/_libs/indexing.cpython-310-x86_64-linux-gnu.so,sha256=JnXOoO7yEHAeVFzAruuIvZW7dUXpGFHeyZYvYMwt-lY,66560 +pandas/_libs/indexing.pyi,sha256=hlJwakbofPRdt1Lm31bfQ3CvHW-nMxm0nrInSWAey58,427 +pandas/_libs/internals.cpython-310-x86_64-linux-gnu.so,sha256=ZwPMFJO20d3V0Bes2XuCKjBQp7WTkkIRG42yDj_GHP8,415592 +pandas/_libs/internals.pyi,sha256=1zfOoULlHvpbbRHvPlcrV_kbY7WI3qEXYExbENEDdtE,2761 +pandas/_libs/interval.cpython-310-x86_64-linux-gnu.so,sha256=VdiQZckY9djq4JzCW3shR5wojhtcextNDvg78Ir41SE,1528808 +pandas/_libs/interval.pyi,sha256=cotxOfoqp7DX7XgIeKrGd31mfAeNerW1WD-yBrLfTlE,5378 +pandas/_libs/join.cpython-310-x86_64-linux-gnu.so,sha256=hWvtf3EuPUrlaRQevEYvpEiHNOFovULUMIa0uTQq_tk,1401736 +pandas/_libs/join.pyi,sha256=O61ZOIYi-I3gZJjDpLYIWWEe3iG0pogEQIB0ZxJ_E3Y,2780 +pandas/_libs/json.cpython-310-x86_64-linux-gnu.so,sha256=UhO3hHMuqrvKwaD99KM_tbm6jpFubA5Re3vqe5yxdFc,64272 +pandas/_libs/json.pyi,sha256=kbqlmh7HTk4cc2hIDWdXZSFqOfh0cqGpBwcys3m32XM,496 +pandas/_libs/lib.cpython-310-x86_64-linux-gnu.so,sha256=UkyS7izLVYP4yorayRrgr6P0PbDqBFarCuzpzKy4cnM,933480 +pandas/_libs/lib.pyi,sha256=CjVLZ1Jm6BKylmgNXq4YZxMNS1_ITw_paZznoH27S4g,7103 +pandas/_libs/missing.cpython-310-x86_64-linux-gnu.so,sha256=_hEM5rOxAJXBxML3dFwCIjrsb81KpsvGeC-D33LndiA,211400 +pandas/_libs/missing.pyi,sha256=iIftmSeHBcfgz7d9JWW_FQcyyAkuBzRiSnZw690OhDw,524 +pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so,sha256=TIkCjwyqDfclyz6JUU7ffopPSr2qzjNti99AQNspGSg,270472 +pandas/_libs/ops.pyi,sha256=99NSmMUkneVNWOojl8Dsb8GmbUa5y_uhKUtfIgwgwek,1318 +pandas/_libs/ops_dispatch.cpython-310-x86_64-linux-gnu.so,sha256=jXjDYZRusf92Y0-jy3_EkXzAreyRptQaiD729GAovMY,61664 +pandas/_libs/ops_dispatch.pyi,sha256=Yxq3SUJ-qoMZ8ErL7wfHfCsTTcETOuu0FuoCOyhmGl0,124 +pandas/_libs/pandas_datetime.cpython-310-x86_64-linux-gnu.so,sha256=ZSzI-EVHN_S1ZHJ2b4O2lb8yo5_7Ct-bDWnCCo4FBa4,39264 +pandas/_libs/pandas_parser.cpython-310-x86_64-linux-gnu.so,sha256=8eCpdz4eZnDYGgcedPM9no_H7bp1dOQHk3Z-rqbw4Nc,43424 +pandas/_libs/parsers.cpython-310-x86_64-linux-gnu.so,sha256=yeGkiZtj46mmQxiCiXJz0Xl6nC0rr_J0Bsi4IR7OrzQ,594760 +pandas/_libs/parsers.pyi,sha256=raoGhPLoRKLQAthm9JQT5rTjLR1PGFDS179aqtQdgnY,2378 +pandas/_libs/properties.cpython-310-x86_64-linux-gnu.so,sha256=0TRNUu2lgw10xJywm6cntx0So5-EttSLCnabGIjAreY,91904 +pandas/_libs/properties.pyi,sha256=HF93vy5OSNtQKz5NL_zwTnOj6tzBtW9Cog-5Zk2bnAA,717 +pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so,sha256=N54JaMQDB6HdLuk1k7DviIsfzf_J57Te-bbUG8_4DOE,309608 +pandas/_libs/reshape.pyi,sha256=xaU-NNnRhXVT9AVrksVXrbKfAC7Ny9p-Vwp6srRoGns,419 +pandas/_libs/sas.cpython-310-x86_64-linux-gnu.so,sha256=EbMxFkElLMNZSguq0CnU1d-LuvqVH-Ef84cdOkSI-SQ,267112 +pandas/_libs/sas.pyi,sha256=qkrJiuBd7GQbw3DQyhH9M6cMfNSkovArOXRdhJ8PFDA,224 +pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so,sha256=2dS4HR2i0oSx9hNww-h7VN4Q8kmj3rsU9617vywQBSM,988968 +pandas/_libs/sparse.pyi,sha256=Yyi7QHpAt7K6l2iEhxgufRqbvSRfYpBHeC_hJaSK8Ho,1485 +pandas/_libs/testing.cpython-310-x86_64-linux-gnu.so,sha256=9ffE7MhcHZsEUuxLAxdXxemqCok9RvR8rq1qMhFf-3M,127456 +pandas/_libs/testing.pyi,sha256=_fpEWiBmlWGR_3QUj1RU42WCTtW2Ug-EXHpM-kP6vB0,243 +pandas/_libs/tslib.cpython-310-x86_64-linux-gnu.so,sha256=8o7CEulKddYGbjg75CQ-ExfeEznmGtRFuKefCu0hYTY,340264 +pandas/_libs/tslib.pyi,sha256=aWJDfzlbmlF6sAst1BTMKMcWt3me50-sqCS5YwWt0HI,969 +pandas/_libs/tslibs/__init__.py,sha256=dowITNV3Gxq8wB3XdqiyRCtEDn83_GkLcGJiQnzM1mA,2125 +pandas/_libs/tslibs/__pycache__/__init__.cpython-310.pyc,, 
+pandas/_libs/tslibs/base.cpython-310-x86_64-linux-gnu.so,sha256=F5xYTFuqLDSyFdXaReKT1U-N2-t2TwUcy05hYta9v0M,62272 +pandas/_libs/tslibs/ccalendar.cpython-310-x86_64-linux-gnu.so,sha256=t2BW5QnY-ZFEoKzc5WJkKd98nFg_wzC1Y57HEcKUgfI,102752 +pandas/_libs/tslibs/ccalendar.pyi,sha256=dizWWmYtxWa5Lc4Hv69iRaJoazRhegJaDGWYgWtJu-U,502 +pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so,sha256=TeZnxebcdALSphxwCT62k9sAHDXqTtz4DZLfrDgtPkA,308168 +pandas/_libs/tslibs/conversion.pyi,sha256=sHO9CBRrDh0wovkr736kI5G6gaW1WY9tSOOAkBi63MA,300 +pandas/_libs/tslibs/dtypes.cpython-310-x86_64-linux-gnu.so,sha256=p7_QW33doRFmjMZSZLQz9kvKVmJujwd5VPU8u-RCMxk,202624 +pandas/_libs/tslibs/dtypes.pyi,sha256=ZNUPcAyhkkh7kIGLDIDTfUmwefbtdxxvn668YN-AAeE,1988 +pandas/_libs/tslibs/fields.cpython-310-x86_64-linux-gnu.so,sha256=7IV0lFVQ93SlOGlcyz2sJa3jXEGewZHRldvkkZtz4t8,345064 +pandas/_libs/tslibs/fields.pyi,sha256=LOke0XZ9XJnzX2MC9nL3u-JpbmddBfpy0UQ_d-_NvN8,1860 +pandas/_libs/tslibs/nattype.cpython-310-x86_64-linux-gnu.so,sha256=xmaa0848Pvvag9aQl-lj6F-GF9kaOI2jmxK-qUAEi1g,237184 +pandas/_libs/tslibs/nattype.pyi,sha256=R3qw7RgZFLG7IgKTssmJdjm-lP3V18GEz810nzVHsTs,4116 +pandas/_libs/tslibs/np_datetime.cpython-310-x86_64-linux-gnu.so,sha256=KasxtU1Gnzg3M4hraq6Z_br-t1HJZTl7uN6MA1p-DLM,152192 +pandas/_libs/tslibs/np_datetime.pyi,sha256=Y6l1KVdyKTMiYfzOjXNwV946GjoFAHaCEEhLDsHRCxI,831 +pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so,sha256=I7wwosuY451XdjTZ9Ec8qTvITU5WPcigagUOQVUDM_Y,1175424 +pandas/_libs/tslibs/offsets.pyi,sha256=QkYq2CgQ4aZ-92e_8wSpuxaACBIKjk2eI4-M-6wSeZU,8345 +pandas/_libs/tslibs/parsing.cpython-310-x86_64-linux-gnu.so,sha256=zf8zYjDtm7LZtFKOWEMWVAO4xwVl5MfZgOaHRgE9j0o,457032 +pandas/_libs/tslibs/parsing.pyi,sha256=cbS8tHb95ygwDU-9gNaFs83FpbVj8aoQfw7gwJGEE6o,914 +pandas/_libs/tslibs/period.cpython-310-x86_64-linux-gnu.so,sha256=sI05pPttPdTSDVBDogKfa08cOA75yLimAyLJGa9Qxoc,532232 +pandas/_libs/tslibs/period.pyi,sha256=Bf0lYd6dh9O61Gq_TReVI4NcRf-5aINkdYJNDaq5az8,3908 +pandas/_libs/tslibs/strptime.cpython-310-x86_64-linux-gnu.so,sha256=Zg-kfnYuvUzfj4-Rcn69oY9m5pyT9YQg8qv8o4wcFvQ,410440 +pandas/_libs/tslibs/strptime.pyi,sha256=dizASoJenvjCydaWDo72_FQmiNOjLmnCZbUZgCm8EnI,349 +pandas/_libs/tslibs/timedeltas.cpython-310-x86_64-linux-gnu.so,sha256=ToEcM4hM6Y1KfFctytza2NvgnRJFSLBbxHdCrXMSqT4,652008 +pandas/_libs/tslibs/timedeltas.pyi,sha256=6MW61MbVDqOH4JUQoR32z8qYUWRfPECV3fcQSrOkI_M,5009 +pandas/_libs/tslibs/timestamps.cpython-310-x86_64-linux-gnu.so,sha256=_pG6DknsOBjslvDdAu75y5V3RXPi_xNrR9ulZeNcAQU,660840 +pandas/_libs/tslibs/timestamps.pyi,sha256=zCu9cAbFf_IVDb1sf5j_Ww5LYSFzGVwMhpZZUP370kw,7831 +pandas/_libs/tslibs/timezones.cpython-310-x86_64-linux-gnu.so,sha256=D7xZwuvR19e1wLicaAiUU5RVG-uSqxRNCYA2PtwiUd4,294952 +pandas/_libs/tslibs/timezones.pyi,sha256=MZ9kC5E1J3XlVqyBwFuVd7NsqL8STztzT8W8NK-_2r0,600 +pandas/_libs/tslibs/tzconversion.cpython-310-x86_64-linux-gnu.so,sha256=e1h4AszIpJ1cC7cMH8lsRZFdaicFeH3oM9tE-5hg25A,340840 +pandas/_libs/tslibs/tzconversion.pyi,sha256=MW4HtIKZpf7ZcOUQ4U6FL24BiJpASXI-mN0DOADtl10,560 +pandas/_libs/tslibs/vectorized.cpython-310-x86_64-linux-gnu.so,sha256=vU0CZIpCdzxtYGyLvQXL8F9wiUVfH-voC-AggUC7zhM,249768 +pandas/_libs/tslibs/vectorized.pyi,sha256=Dv5ryF4HiPZcHWMyxyfP4D_tONdqLm2Mn4MpVi5RKCc,1239 +pandas/_libs/window/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/_libs/window/__pycache__/__init__.cpython-310.pyc,, +pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so,sha256=GwueBkAya2k2_TCN49v1qkUrj4tnUpghaTXei-_UO54,406552 
+pandas/_libs/window/aggregations.pyi,sha256=vVjfgqY4cBPmjFadcrDc6heCiFbJ5Lz65bCadbHJbwU,4063 +pandas/_libs/window/indexers.cpython-310-x86_64-linux-gnu.so,sha256=UaUJLgvImnCLgMl1bOBzdZed-0TYG2evMDUgfZulgRk,217032 +pandas/_libs/window/indexers.pyi,sha256=53aBxew7jBcAc9sbSoOlvpQHhiLDSWPXFcVbCeJDbQA,319 +pandas/_libs/writers.cpython-310-x86_64-linux-gnu.so,sha256=yZHCNbd-_4O3UHt8JOBd9AHIO8TH2FWKf9uBm_xs6ms,258952 +pandas/_libs/writers.pyi,sha256=RvwFCzrsU4RkKm7Mc3wo12RqdGdo-PuANkMo3Z9hLiU,516 +pandas/_testing/__init__.py,sha256=9-kM4agqo_qFFPSVACpl1zoM5D06biVAQUoDUK92Y0A,17554 +pandas/_testing/__pycache__/__init__.cpython-310.pyc,, +pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc,, +pandas/_testing/__pycache__/_io.cpython-310.pyc,, +pandas/_testing/__pycache__/_warnings.cpython-310.pyc,, +pandas/_testing/__pycache__/asserters.cpython-310.pyc,, +pandas/_testing/__pycache__/compat.cpython-310.pyc,, +pandas/_testing/__pycache__/contexts.cpython-310.pyc,, +pandas/_testing/_hypothesis.py,sha256=WS4ysEJwmMor02cwMw15kBtAR0SLvUUpTfYEpc0c6iI,2426 +pandas/_testing/_io.py,sha256=OwfQ9L0XZgD_Yfi5mF8_BLgPx8pgGZbTzq46uTa7jDo,4448 +pandas/_testing/_warnings.py,sha256=x7YMaPkmSaimJquGT3vAt9pKn0r_Hj5lE1uV0eCoDiU,8357 +pandas/_testing/asserters.py,sha256=nCygrO7daDLBmBtPpJyp87exFgT65accJYgQcjJhNB8,47201 +pandas/_testing/compat.py,sha256=0o_biVI-wLh7kcw9FHvbwYyzNvM0PI06QRD2ZhiD2Fs,658 +pandas/_testing/contexts.py,sha256=TmKKWG1VF-lZTz_6DUuUAbcW7ZqcQ59nVNUxNoLzz3g,6551 +pandas/_typing.py,sha256=gVSimiU46Dduc2Ez8ZaOczv8c-UHTH4FZeg6LL6mnGk,14037 +pandas/_version.py,sha256=yFQDRxMdgDLf9OmPX4d0cyT6tsYJEWLV4QgS5yuNgkE,23612 +pandas/_version_meson.py,sha256=LCx3vAY16LA5zd6bZeSfh2kVnOxvilsYwcRrvSgWEQk,79 +pandas/api/__init__.py,sha256=QnoYVW828TM17uq-3ELeethZm8XN2Y0DkEaTc3sLr3Q,219 +pandas/api/__pycache__/__init__.cpython-310.pyc,, +pandas/api/extensions/__init__.py,sha256=O7tmzpvIT0uv9H5K-yMTKcwZpml9cEaB5CLVMiUkRCk,685 +pandas/api/extensions/__pycache__/__init__.cpython-310.pyc,, +pandas/api/indexers/__init__.py,sha256=kNbZv9nja9iLVmGZU2D6w2dqB2ndsbqTfcsZsGz_Yo0,357 +pandas/api/indexers/__pycache__/__init__.cpython-310.pyc,, +pandas/api/interchange/__init__.py,sha256=J2hQIYAvL7gyh8hG9r3XYPX69lK7nJS3IIHZl4FESjw,230 +pandas/api/interchange/__pycache__/__init__.cpython-310.pyc,, +pandas/api/types/__init__.py,sha256=bOU3TUuskT12Dpp-SsCYtCWdHvBDp3MWf3Etq4ZMdT8,447 +pandas/api/types/__pycache__/__init__.cpython-310.pyc,, +pandas/api/typing/__init__.py,sha256=IC4_ZmjsX4804Nnu-lQDccQr0zt5mzIZEaB3Bzdva8Y,1244 +pandas/api/typing/__pycache__/__init__.cpython-310.pyc,, +pandas/arrays/__init__.py,sha256=gMhtojH1KdRwxMmM_Ulblxk4L09o7WLUsXLp6qdUS-I,1227 +pandas/arrays/__pycache__/__init__.cpython-310.pyc,, +pandas/compat/__init__.py,sha256=kDGb_S-Lep8ZdS_-pGPINcrvoSPbj3x0hVVR1Z8pgy8,4280 +pandas/compat/__pycache__/__init__.cpython-310.pyc,, +pandas/compat/__pycache__/_constants.cpython-310.pyc,, +pandas/compat/__pycache__/_optional.cpython-310.pyc,, +pandas/compat/__pycache__/compressors.cpython-310.pyc,, +pandas/compat/__pycache__/pickle_compat.cpython-310.pyc,, +pandas/compat/__pycache__/pyarrow.cpython-310.pyc,, +pandas/compat/_constants.py,sha256=3_ryOkmiJTO-iTQAla_ApEJfp3V_lClbnepSM3Gi9S4,536 +pandas/compat/_optional.py,sha256=96Zlc2gqUYneSzSlraVRGfh2BsTWp4cOUcG2gHjw2E0,5089 +pandas/compat/compressors.py,sha256=GdDWdKzWqkImjdwzuVBwW2JvI7aMzpPV8QyhxWgJo0g,1975 +pandas/compat/numpy/__init__.py,sha256=UO-06Rj2g2Mk9rptXZG_fLtA3BhSPMVF4JhTLdSt5AM,1366 +pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc,, 
+pandas/compat/numpy/__pycache__/function.cpython-310.pyc,, +pandas/compat/numpy/function.py,sha256=Qvflr9h4rYCw9o8I3RggkhdRqxvav1yioq_JeEUh2T4,13291 +pandas/compat/pickle_compat.py,sha256=MTp_LYeueJWVJBWKzWUyiwcwu9MvjEtBzEC0SozvWs8,7723 +pandas/compat/pyarrow.py,sha256=WzcKBzCsZapuUPMIUL7IVMiXm1L7yzA4QHv3CF5AuxI,1006 +pandas/conftest.py,sha256=dYirfwk99jRP_W1R20BvUz92mrqoWevlW3Zdfu9fQ5A,48625 +pandas/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/__pycache__/__init__.cpython-310.pyc,, +pandas/core/__pycache__/accessor.cpython-310.pyc,, +pandas/core/__pycache__/algorithms.cpython-310.pyc,, +pandas/core/__pycache__/api.cpython-310.pyc,, +pandas/core/__pycache__/apply.cpython-310.pyc,, +pandas/core/__pycache__/arraylike.cpython-310.pyc,, +pandas/core/__pycache__/base.cpython-310.pyc,, +pandas/core/__pycache__/common.cpython-310.pyc,, +pandas/core/__pycache__/config_init.cpython-310.pyc,, +pandas/core/__pycache__/construction.cpython-310.pyc,, +pandas/core/__pycache__/flags.cpython-310.pyc,, +pandas/core/__pycache__/frame.cpython-310.pyc,, +pandas/core/__pycache__/generic.cpython-310.pyc,, +pandas/core/__pycache__/indexing.cpython-310.pyc,, +pandas/core/__pycache__/missing.cpython-310.pyc,, +pandas/core/__pycache__/nanops.cpython-310.pyc,, +pandas/core/__pycache__/resample.cpython-310.pyc,, +pandas/core/__pycache__/roperator.cpython-310.pyc,, +pandas/core/__pycache__/sample.cpython-310.pyc,, +pandas/core/__pycache__/series.cpython-310.pyc,, +pandas/core/__pycache__/shared_docs.cpython-310.pyc,, +pandas/core/__pycache__/sorting.cpython-310.pyc,, +pandas/core/_numba/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/_numba/__pycache__/__init__.cpython-310.pyc,, +pandas/core/_numba/__pycache__/executor.cpython-310.pyc,, +pandas/core/_numba/__pycache__/extensions.cpython-310.pyc,, +pandas/core/_numba/executor.py,sha256=vsH8jIzWRHho1Au4euWT2opfg5YLG4SBD7xlpvvXGUs,7530 +pandas/core/_numba/extensions.py,sha256=xRSojNahM3OPU28Bns1S4MXALqHKCmfK2SGleZhkm68,18374 +pandas/core/_numba/kernels/__init__.py,sha256=Z1t4IUC2MO0a5KbA0LurWfRZL4wNksHVBDLprGtPLlo,520 +pandas/core/_numba/kernels/__pycache__/__init__.cpython-310.pyc,, +pandas/core/_numba/kernels/__pycache__/mean_.cpython-310.pyc,, +pandas/core/_numba/kernels/__pycache__/min_max_.cpython-310.pyc,, +pandas/core/_numba/kernels/__pycache__/shared.cpython-310.pyc,, +pandas/core/_numba/kernels/__pycache__/sum_.cpython-310.pyc,, +pandas/core/_numba/kernels/__pycache__/var_.cpython-310.pyc,, +pandas/core/_numba/kernels/mean_.py,sha256=BesqY1gwFXPIeuXAQtDvvDBZuegsszFVTnl4lxguXEA,5646 +pandas/core/_numba/kernels/min_max_.py,sha256=tJ7OSKhne7jXpy4XSBpQS0tkP_0LggkH6iqWlxQ-FeE,3284 +pandas/core/_numba/kernels/shared.py,sha256=JUBa96LX4NmXhgXNyo859IwMXEl29EyhmRdMoQo1n78,611 +pandas/core/_numba/kernels/sum_.py,sha256=FeKOQl22qO6kN4hAmwmA3wXihrph5S03ucSt65GBquU,6488 +pandas/core/_numba/kernels/var_.py,sha256=5BaLdr7HKzdUvKvyifvL9qM36W16SAqk3Ny11OtpW9o,6973 +pandas/core/accessor.py,sha256=u57BIkm61_SNRzSdQjL210Jtil7BWFUB0HPNl9wCKdo,10044 +pandas/core/algorithms.py,sha256=8ENQpDWiM0ESh8EZEnXzw_XtbUUiFojvFi7MOZE16dY,55181 +pandas/core/api.py,sha256=9tm275sTpOKtdUvsFCXYQHmBdeJczGNBV1QGv3TQOOc,2911 +pandas/core/apply.py,sha256=TexZlMDp-LmxlCReQuHB4g_jCM-CHKD-NR9Fy_d5NUc,67345 +pandas/core/array_algos/__init__.py,sha256=8YLlO6TysEPxltfbNKdG9MlVXeDLfTIGNo2nUR-Zwl0,408 +pandas/core/array_algos/__pycache__/__init__.cpython-310.pyc,, 
+pandas/core/array_algos/__pycache__/datetimelike_accumulations.cpython-310.pyc,, +pandas/core/array_algos/__pycache__/masked_accumulations.cpython-310.pyc,, +pandas/core/array_algos/__pycache__/masked_reductions.cpython-310.pyc,, +pandas/core/array_algos/__pycache__/putmask.cpython-310.pyc,, +pandas/core/array_algos/__pycache__/quantile.cpython-310.pyc,, +pandas/core/array_algos/__pycache__/replace.cpython-310.pyc,, +pandas/core/array_algos/__pycache__/take.cpython-310.pyc,, +pandas/core/array_algos/__pycache__/transforms.cpython-310.pyc,, +pandas/core/array_algos/datetimelike_accumulations.py,sha256=BCy87HXqI2WO0_cCGK-redvi2STJzCxswYYs06YdxB4,1686 +pandas/core/array_algos/masked_accumulations.py,sha256=PL-ZAMai7H1PIXLKE2f9LSL2Ow6WZqkusSQkFfIE8d4,2618 +pandas/core/array_algos/masked_reductions.py,sha256=iUFmp_Fu3-BXM0EBiFfiPERteITlIFFI7IEpHXVkvoY,4855 +pandas/core/array_algos/putmask.py,sha256=g02wtMt5MTIuT4IS6ukE1Eh8KWb3Hi932hc47dszqJ4,4593 +pandas/core/array_algos/quantile.py,sha256=zdzcwgoVRP3eBSM4NJHwocBJC3PINYN1jB02mJubFow,6548 +pandas/core/array_algos/replace.py,sha256=p8CdDslj7WwVNYjpLsT_36e8dmrxfeWzh5ECHe4uxCQ,3918 +pandas/core/array_algos/take.py,sha256=n_pjn9mU7QQJ77SFXogEc5ofoMqRgNbkimwXFunz79M,20815 +pandas/core/array_algos/transforms.py,sha256=TPpSPX5CiePVGTFUwnimpcC5YeBOtjAPK20wQvG92QI,1104 +pandas/core/arraylike.py,sha256=BD2ZQP4zGPd4rJas9lS5C-_qp3XXDL2udU8tzD9bQIQ,17655 +pandas/core/arrays/__init__.py,sha256=dE6WRTblcq40JKhXJQDsOwvhFPJstj_8cegiLthH0ks,1314 +pandas/core/arrays/__pycache__/__init__.cpython-310.pyc,, +pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc,, +pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc,, +pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc,, +pandas/core/arrays/__pycache__/_utils.cpython-310.pyc,, +pandas/core/arrays/__pycache__/base.cpython-310.pyc,, +pandas/core/arrays/__pycache__/boolean.cpython-310.pyc,, +pandas/core/arrays/__pycache__/categorical.cpython-310.pyc,, +pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc,, +pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc,, +pandas/core/arrays/__pycache__/floating.cpython-310.pyc,, +pandas/core/arrays/__pycache__/integer.cpython-310.pyc,, +pandas/core/arrays/__pycache__/interval.cpython-310.pyc,, +pandas/core/arrays/__pycache__/masked.cpython-310.pyc,, +pandas/core/arrays/__pycache__/numeric.cpython-310.pyc,, +pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc,, +pandas/core/arrays/__pycache__/period.cpython-310.pyc,, +pandas/core/arrays/__pycache__/string_.cpython-310.pyc,, +pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc,, +pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc,, +pandas/core/arrays/_arrow_string_mixins.py,sha256=EaRHU4W7E3cOVkXhdp7wT2UGFD_FI9HYIzf26BBtleE,2608 +pandas/core/arrays/_mixins.py,sha256=NxUqWabMVxhv85tKqBu8JAAptApEq_avk6wbnN46xtI,17396 +pandas/core/arrays/_ranges.py,sha256=Ig3E_ROJ5mbOtK639SJ0UqcI229BrtsAfa_avbqrO8g,6996 +pandas/core/arrays/_utils.py,sha256=RmwOy6xNhgZ61qmk_PFnQ5sW-RVrkOhsl4AvQyqOuAY,1901 +pandas/core/arrays/arrow/__init__.py,sha256=-EKwaHww-yrbm7Z5d3AN_KETWmXYgZ2dW6KHaE2iiLI,221 +pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc,, +pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc,, +pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc,, +pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc,, +pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc,, 
+pandas/core/arrays/arrow/_arrow_utils.py,sha256=KjsV7ts963RSyNEGLGQliypzHJ_hs3mTslWPMXZpGpE,2151 +pandas/core/arrays/arrow/accessors.py,sha256=XxV7NzS1PHca7-Feesus0W8K3HwUHa-aSIIawdCTE8g,13863 +pandas/core/arrays/arrow/array.py,sha256=foZ2kgb9oYx_tx2PhreM_c3THxTy_f6ZdpKPCejU93E,102796 +pandas/core/arrays/arrow/extension_types.py,sha256=NJLTuf_8U8u-Fjt_qfWm7zhUtPQdvjH1JV8fY3oRv-Y,5459 +pandas/core/arrays/base.py,sha256=vEQaNCkTKkgjmuFxlzScuLpMJFLCbscDGrRA0f7WMFk,85047 +pandas/core/arrays/boolean.py,sha256=ln7GjlHHTtByAhQKX9XuymhifZTCNSpk1j7I-fQKObo,12440 +pandas/core/arrays/categorical.py,sha256=cmLBaSSjzKaANAbMi64Euk-Lxvu6n-PZ0zLH-vxrS2o,98997 +pandas/core/arrays/datetimelike.py,sha256=0t3rDfaBmvj8qSyVfxc1DDPdnhAU7m4HRaJsmvTTYUw,89288 +pandas/core/arrays/datetimes.py,sha256=DqOHEGkUk18oIQdDj8CiBbE_XGWWTQeE7uqMxX7S2VU,92345 +pandas/core/arrays/floating.py,sha256=pvZ72VDstzgslAM5-36KEyJ0z5PBVwTNogcJAxhhMP8,4286 +pandas/core/arrays/integer.py,sha256=FWsrgzs_DB3eG8VX1kfzUTMcKOHfa-ACFQh_xVpZPJU,6470 +pandas/core/arrays/interval.py,sha256=ro_WPcUzlxWKjocGLgEseEcljbBInjzrl8yPOCt87G0,63176 +pandas/core/arrays/masked.py,sha256=gVJpxoiEaY5kca8umAkRt6Az2L0GK3ffwjB63ELG62M,55634 +pandas/core/arrays/numeric.py,sha256=lVpSpsG_66z2QMHghCRoYef6dVJJ_QZAf9vkpLMJokI,9165 +pandas/core/arrays/numpy_.py,sha256=6A5ErMvdNooTI91M1qkV6RWL1B9bN1JJgjueq2QRvdU,17493 +pandas/core/arrays/period.py,sha256=M0cLFbFBn_yD99y_kQ8YY4NWMaH1nVdywBf56CrpiJk,40756 +pandas/core/arrays/sparse/__init__.py,sha256=iwvVqa2GG9TjYrd1rxCBjdLeGQBoRqUO2fZnILElbZg,356 +pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc,, +pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc,, +pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc,, +pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc,, +pandas/core/arrays/sparse/accessor.py,sha256=lZa3hwvXJKLMkXhqiWU8eev8qthvYQ1HgtW875qQe7g,12503 +pandas/core/arrays/sparse/array.py,sha256=vZng8r0PWtKVb74jc0NlEipWyuevqueRWtOhAt8x6xA,63884 +pandas/core/arrays/sparse/scipy_sparse.py,sha256=rVaj3PtVRrMPlzkoVFSkIopWV0xg0GJnpt1YljWT_zg,6462 +pandas/core/arrays/string_.py,sha256=LhTi6O0RG-XesI-VCunUjOmu-WBvFb6MaMv49JX2gW8,21814 +pandas/core/arrays/string_arrow.py,sha256=7wLrp0wOUJuPmwB9_9BLWOm4BAYiQHahSy0umtg7YIY,24913 +pandas/core/arrays/timedeltas.py,sha256=eTi8b16Jumac8WIx8LLf_9ZeFzA4u1nipHMUoc5-lyM,37830 +pandas/core/base.py,sha256=tvXmsVrlfhc-Br3bae5HKiuQlmKPWL9tRVbi7MhhZpw,40940 +pandas/core/common.py,sha256=WwkpCOI8b9j5rxkhL_Dh5l-7EdkHFfSjIIx-QBsefa0,17449 +pandas/core/computation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/computation/__pycache__/__init__.cpython-310.pyc,, +pandas/core/computation/__pycache__/align.cpython-310.pyc,, +pandas/core/computation/__pycache__/api.cpython-310.pyc,, +pandas/core/computation/__pycache__/check.cpython-310.pyc,, +pandas/core/computation/__pycache__/common.cpython-310.pyc,, +pandas/core/computation/__pycache__/engines.cpython-310.pyc,, +pandas/core/computation/__pycache__/eval.cpython-310.pyc,, +pandas/core/computation/__pycache__/expr.cpython-310.pyc,, +pandas/core/computation/__pycache__/expressions.cpython-310.pyc,, +pandas/core/computation/__pycache__/ops.cpython-310.pyc,, +pandas/core/computation/__pycache__/parsing.cpython-310.pyc,, +pandas/core/computation/__pycache__/pytables.cpython-310.pyc,, +pandas/core/computation/__pycache__/scope.cpython-310.pyc,, +pandas/core/computation/align.py,sha256=IBp-G1qbFMICrgm8DYOF-Kt18iCcY_P3peeIGsDkNv4,6161 
+pandas/core/computation/api.py,sha256=CQ2AF0hwydcgTHycMCFiyZIAU57RcZT-TVid17SIsV4,65 +pandas/core/computation/check.py,sha256=Vb1YqLq381-nUp8Vjkg6ycJOxP3dV2aO9XjyM1uhe2Q,226 +pandas/core/computation/common.py,sha256=-2EHScxo2jfEQ1oqnnlQ_2eOvtAIn8O2krBaveSwmjs,1442 +pandas/core/computation/engines.py,sha256=g9eiyVCUtNmJGbexh7KvTreAKKhs5mQaWx4Z5UeOZ5s,3314 +pandas/core/computation/eval.py,sha256=21MaqANbDE4xoBk1Ts_iAj_l7Nn3iERNFX0dHvbeN4Y,14047 +pandas/core/computation/expr.py,sha256=utZSSpAHz692aNvfUpMsqgUyOEfC53B6Gv8ybtppDnE,25064 +pandas/core/computation/expressions.py,sha256=K0vu_v8JBVjJn6eQqNocC4ciNKsIYnEZrq8xwvhik2M,7503 +pandas/core/computation/ops.py,sha256=x5Qe3PfjHF5v-FHBerUr39iNXk_T0hLvw0Wchm0RiAQ,14829 +pandas/core/computation/parsing.py,sha256=VhYh3en2onhyJkzTelz32-U4Vc3XadyjTwOVctsqlEI,6399 +pandas/core/computation/pytables.py,sha256=7-L2GZ43aWNKG6hz-j8RhL8BIEGAEvpYi6rX6Zsvm_4,20745 +pandas/core/computation/scope.py,sha256=eyMdfx-gcgJaVIRY2NBgQDt2nW5KSdUZ3M9VRPYUJtU,10203 +pandas/core/config_init.py,sha256=1WBaE0u_9DDurUtAmLL9vy9rdS4hK5SudW9vhmD8taA,26460 +pandas/core/construction.py,sha256=QbsRsoxwBh0EQFoF8Wr5PgU96s7Lt4LW0f7BFtEwyQk,26339 +pandas/core/dtypes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/api.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/astype.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/base.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/cast.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/common.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/concat.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/generic.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/inference.cpython-310.pyc,, +pandas/core/dtypes/__pycache__/missing.cpython-310.pyc,, +pandas/core/dtypes/api.py,sha256=5mtML1OspdDbsWShw1fsDq93pg2pmuUGSBrvQWQcCgg,1819 +pandas/core/dtypes/astype.py,sha256=awzOpnfZ0dCYhzw_J4fekT7u0F0VwgsIapuiAbBkxxg,9207 +pandas/core/dtypes/base.py,sha256=EeL8zNbMtrvObdEJtqjkG_vIsoQE8zDZiIR2dHzHKPI,17042 +pandas/core/dtypes/cast.py,sha256=Uny9eaZD1jGRfWmh6p6_65HSevmNm16vP0BvTzvH5hQ,62164 +pandas/core/dtypes/common.py,sha256=0KQTfjHC6fhcpXg69-aMF54SuRIQFeiHzU_1VBVyGzg,47319 +pandas/core/dtypes/concat.py,sha256=Q_QujfB0C-CIWbcTlktmB02RgxCf7xQsOgEkOV67VPo,12579 +pandas/core/dtypes/dtypes.py,sha256=w3P6AphBpTFUBqdZLpJ36ziH23YdMGMH0r75KsTObYs,76035 +pandas/core/dtypes/generic.py,sha256=avKoJBzIQ0pJiFg9mmQ1D5ltkZsYxu8uPa46Hat70Ro,4122 +pandas/core/dtypes/inference.py,sha256=OqA9itS2osQBP-mp8jJK9RJZJps4VPsTIvQFCX4EbGM,9012 +pandas/core/dtypes/missing.py,sha256=BPzbmr7O7ihmjLKE9A31ck54ANjAtrp8-dVT20MR5fQ,23632 +pandas/core/flags.py,sha256=NxbTcYlNEaO8MKCpbEc22PEpInFn7f7za7EAO6-mxEE,3763 +pandas/core/frame.py,sha256=m4ExWWBrYq8Dv-dqcInaKC0tYJHzCM8vTZDdBmGgEQU,447104 +pandas/core/generic.py,sha256=UEIXZg--GqdP-YsxvqCTvZBDzWHDWronIHNGTzjkbyI,474370 +pandas/core/groupby/__init__.py,sha256=KamY9WI5B4cMap_3wZ5ycMdXM_rOxGSL7RtoKKPfjAo,301 +pandas/core/groupby/__pycache__/__init__.cpython-310.pyc,, +pandas/core/groupby/__pycache__/base.cpython-310.pyc,, +pandas/core/groupby/__pycache__/categorical.cpython-310.pyc,, +pandas/core/groupby/__pycache__/generic.cpython-310.pyc,, +pandas/core/groupby/__pycache__/groupby.cpython-310.pyc,, +pandas/core/groupby/__pycache__/grouper.cpython-310.pyc,, +pandas/core/groupby/__pycache__/indexing.cpython-310.pyc,, +pandas/core/groupby/__pycache__/numba_.cpython-310.pyc,, 
+pandas/core/groupby/__pycache__/ops.cpython-310.pyc,, +pandas/core/groupby/base.py,sha256=OrqG2_h_Bp8Z_MeLrAGWGROG-MtSloGqeaJ79qYbJm0,2740 +pandas/core/groupby/categorical.py,sha256=iCsl3d_unK4zAh_lR3eDIBVOhwsv9Bj9X1wbnaR90pw,3047 +pandas/core/groupby/generic.py,sha256=LCsrCIjuhcEz-yw3gyk5nYKNiMF1h8en6nQO1hhTywE,96885 +pandas/core/groupby/groupby.py,sha256=yjNsgJmutrWxZj-6oveCtkDFgJg6S4rcXHsJ8rsjVTU,195730 +pandas/core/groupby/grouper.py,sha256=utxyUS7M-sTYaiWek9uRaIaAHHm0jaTbIqDX-GjEHYE,38672 +pandas/core/groupby/indexing.py,sha256=QY4GZ4wDd-1K-we0EfdiFvmdAZ_VxVgPrYB0kBZf6wU,9510 +pandas/core/groupby/numba_.py,sha256=XjfPfYGbYJgkIKYFiq7Gjnr5wwZ8mKrkeHKTW42HZMg,4894 +pandas/core/groupby/ops.py,sha256=qZPzps8n5_67_FcGpByM9G4PFqr7f4PWcwf52Os16uI,38234 +pandas/core/indexers/__init__.py,sha256=M4CyNLiQoQ5ohoAMH5HES9Rh2lpryAM1toL-b1TJXj0,736 +pandas/core/indexers/__pycache__/__init__.cpython-310.pyc,, +pandas/core/indexers/__pycache__/objects.cpython-310.pyc,, +pandas/core/indexers/__pycache__/utils.cpython-310.pyc,, +pandas/core/indexers/objects.py,sha256=PR063DVlu8_-ti7GsLRb0e7o4oAz2xpMil0nMee18z0,14737 +pandas/core/indexers/utils.py,sha256=TgVCAX9r4MZw3QPH6aE-d55gRZcKN9H9X-MTZ4u-LiY,16069 +pandas/core/indexes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/indexes/__pycache__/__init__.cpython-310.pyc,, +pandas/core/indexes/__pycache__/accessors.cpython-310.pyc,, +pandas/core/indexes/__pycache__/api.cpython-310.pyc,, +pandas/core/indexes/__pycache__/base.cpython-310.pyc,, +pandas/core/indexes/__pycache__/category.cpython-310.pyc,, +pandas/core/indexes/__pycache__/datetimelike.cpython-310.pyc,, +pandas/core/indexes/__pycache__/datetimes.cpython-310.pyc,, +pandas/core/indexes/__pycache__/extension.cpython-310.pyc,, +pandas/core/indexes/__pycache__/frozen.cpython-310.pyc,, +pandas/core/indexes/__pycache__/interval.cpython-310.pyc,, +pandas/core/indexes/__pycache__/multi.cpython-310.pyc,, +pandas/core/indexes/__pycache__/period.cpython-310.pyc,, +pandas/core/indexes/__pycache__/range.cpython-310.pyc,, +pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc,, +pandas/core/indexes/accessors.py,sha256=MdP8zNlSQeeU7psOXwGUdQ1-8XKzYCl5mKMIcpMCiN8,19152 +pandas/core/indexes/api.py,sha256=tDBBn84I19nvPFQKj0GAZhb0zioLJqTUJjSVqyc4Fn4,10426 +pandas/core/indexes/base.py,sha256=LVD4AAYIKU0HTYvxSpzcSJ9L3TK6W4HyM7zQDETD_yQ,264290 +pandas/core/indexes/category.py,sha256=_6LpQtBGFsgB4KSZhxEQT4QX57W3172MbvLIAzxboPA,16128 +pandas/core/indexes/datetimelike.py,sha256=JH8_o2NJNQj1A0N0YFcC3m5nQGStacI5bv1G-dzYKVA,28377 +pandas/core/indexes/datetimes.py,sha256=b0B5j5HGthjGLy4FLsMQtjPPjleZEH8agIWfLVne9v0,38327 +pandas/core/indexes/extension.py,sha256=Wy4XfMrJdc4HxuApZw4D-Xr3RyBlGCOKbI27L16tHEE,5188 +pandas/core/indexes/frozen.py,sha256=QuFW2zV8wqY7PD5PHbUMJQc3a-c5Eyfkjblp4umOylM,3482 +pandas/core/indexes/interval.py,sha256=79ddOFRsEoj7glRYNcq-L7rPB5y3jFxuOFnCD5lQW-o,38190 +pandas/core/indexes/multi.py,sha256=JskZSKvKotqbeS-2UE5hTB5eyJ5tuuotoZ3u47v33UY,143195 +pandas/core/indexes/period.py,sha256=ohh7J43CgV1ijxn9ozNhO5Vwu0k1-3yURIWTWeNPRgg,18978 +pandas/core/indexes/range.py,sha256=qt5IS2batjnOHe90UK5jES7pZhglppW_-1wieLlZysA,39511 +pandas/core/indexes/timedeltas.py,sha256=9a5m2wLQUA2v2O6JibpDSssNvNzV8Af6dAJETEpD4qM,10960 +pandas/core/indexing.py,sha256=TRrbtBeUrELiuFiCpAVuP0yIsfrxVLvBbT9bPvlCAmY,97236 +pandas/core/interchange/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/interchange/__pycache__/__init__.cpython-310.pyc,, 
+pandas/core/interchange/__pycache__/buffer.cpython-310.pyc,, +pandas/core/interchange/__pycache__/column.cpython-310.pyc,, +pandas/core/interchange/__pycache__/dataframe.cpython-310.pyc,, +pandas/core/interchange/__pycache__/dataframe_protocol.cpython-310.pyc,, +pandas/core/interchange/__pycache__/from_dataframe.cpython-310.pyc,, +pandas/core/interchange/__pycache__/utils.cpython-310.pyc,, +pandas/core/interchange/buffer.py,sha256=KujVQ1qeXMjgRdvwea37FqO9f2ULmLa6Rtr_mTQ11XU,3453 +pandas/core/interchange/column.py,sha256=tlHYyU6RP9ESD693d4WpDUNP0hq7MaTZnm6tLJXSq98,17547 +pandas/core/interchange/dataframe.py,sha256=M1mWjS70pYLFJau534NtgslcpY_8NiY4dRmRgT73TVo,3879 +pandas/core/interchange/dataframe_protocol.py,sha256=L9Wy8vB5oTsuYJQ9NBY4RIEAWXBclnTOH3I_txkIbZk,16177 +pandas/core/interchange/from_dataframe.py,sha256=VOTMZlybK4-omYdw-roufRYmQd9qJ-ryldcim0fyw_w,17043 +pandas/core/interchange/utils.py,sha256=mYIOfiwjnZd-I2j-SNRWRLFcSIlBtA9MLFCdzgynSFM,4837 +pandas/core/internals/__init__.py,sha256=LE8M58WRu_cvQZns2dxUMeBVjqNfwRWw6vtWKiBrr2I,2615 +pandas/core/internals/__pycache__/__init__.cpython-310.pyc,, +pandas/core/internals/__pycache__/api.cpython-310.pyc,, +pandas/core/internals/__pycache__/array_manager.cpython-310.pyc,, +pandas/core/internals/__pycache__/base.cpython-310.pyc,, +pandas/core/internals/__pycache__/blocks.cpython-310.pyc,, +pandas/core/internals/__pycache__/concat.cpython-310.pyc,, +pandas/core/internals/__pycache__/construction.cpython-310.pyc,, +pandas/core/internals/__pycache__/managers.cpython-310.pyc,, +pandas/core/internals/__pycache__/ops.cpython-310.pyc,, +pandas/core/internals/api.py,sha256=s78Hb4dHuBAufRH9vTd1KO6o0bs-9CoBOsRF6GP03lE,4695 +pandas/core/internals/array_manager.py,sha256=q_QKlETGKdb1r8aFKVfV4ZrMoVO1wFNAC2JNHCZ6rGE,43927 +pandas/core/internals/base.py,sha256=pO6sju5EIq7u23J7CGPZNTEotbL4KYKzRgyIEmBhqpg,11161 +pandas/core/internals/blocks.py,sha256=W7IYinBFN8KW5Rt0o7BJvmcgpgStQeLUq3PxpVEsrY8,98485 +pandas/core/internals/concat.py,sha256=Q_MnHIKSMBvIvA6DpMNkcsQSv8aU9DivUn1mlA_9zEs,19151 +pandas/core/internals/construction.py,sha256=IsWPruU6jDjeUAQqxsFJUIFr0MHUXNQNatP-AJYv2IA,33987 +pandas/core/internals/managers.py,sha256=toDgoWhpnOJiwytqyR_X5AmJkmqetYvBq6KbMR9T6-U,81576 +pandas/core/internals/ops.py,sha256=Rh2-gWjeSwXnjkiacohSNM5iNvqQqBiAqgblwP6rD9o,5145 +pandas/core/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/methods/__pycache__/__init__.cpython-310.pyc,, +pandas/core/methods/__pycache__/describe.cpython-310.pyc,, +pandas/core/methods/__pycache__/selectn.cpython-310.pyc,, +pandas/core/methods/__pycache__/to_dict.cpython-310.pyc,, +pandas/core/methods/describe.py,sha256=IeCkAFDUdVNxoPPqP1R1HzDlKFQHvlg46AgIxntD5Cs,11961 +pandas/core/methods/selectn.py,sha256=oomBEebumUfbJ5OLi9vw7saH31vbiy3lK-i63VKWBOw,7696 +pandas/core/methods/to_dict.py,sha256=sep0EfimrQ5UNJu-KwC1uYzx1BvbrackOe2-qxl2F5Y,8649 +pandas/core/missing.py,sha256=x_XOmge6_k9uIij2tyJZBEFKpAju1xUS9knQhe5kleU,35270 +pandas/core/nanops.py,sha256=kJpYqWg4E-D89HOXcufquZH0_rPFRbgbmZAULygpDnU,50984 +pandas/core/ops/__init__.py,sha256=CQ7tQB-QPUxD6ZnbS2SzFVjjvCD7-ciglexkdbbn7y8,1620 +pandas/core/ops/__pycache__/__init__.cpython-310.pyc,, +pandas/core/ops/__pycache__/array_ops.cpython-310.pyc,, +pandas/core/ops/__pycache__/common.cpython-310.pyc,, +pandas/core/ops/__pycache__/dispatch.cpython-310.pyc,, +pandas/core/ops/__pycache__/docstrings.cpython-310.pyc,, +pandas/core/ops/__pycache__/invalid.cpython-310.pyc,, 
+pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc,, +pandas/core/ops/__pycache__/missing.cpython-310.pyc,, +pandas/core/ops/array_ops.py,sha256=wNV7RL-HZoB_I61YlF5nskpH-4RxA2n3P_gj31i18FM,19079 +pandas/core/ops/common.py,sha256=jVf_L_oN6bKcUOuH6FgaKOx18se9C3Hl2JPd0Uoj4t4,3500 +pandas/core/ops/dispatch.py,sha256=5XFIr7HV1Dicohgm0ZJu-6argn2Qd0OwES2bBxQwCj0,635 +pandas/core/ops/docstrings.py,sha256=WlGWcWjNsldPW73krxbgRwQvkacmKqRqJsN4VVz-FXU,18448 +pandas/core/ops/invalid.py,sha256=5-gRzdBfk2F8qIZ_vzUlnI-vo1HsAh2F5BYJUEN--m0,1433 +pandas/core/ops/mask_ops.py,sha256=0sm9L1LB_USp8DxNBuCdoB8cJ_MzzvSAb_u3QQmQrKI,5409 +pandas/core/ops/missing.py,sha256=0WlqN_us0LU5RAdoitM-Ko_4xghJ_HBRkteLQ53fU14,5140 +pandas/core/resample.py,sha256=lckInCT6z43EuK8cf0Qono3W_LeJ2UqAoxMTWMxnvtQ,95578 +pandas/core/reshape/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/reshape/__pycache__/__init__.cpython-310.pyc,, +pandas/core/reshape/__pycache__/api.cpython-310.pyc,, +pandas/core/reshape/__pycache__/concat.cpython-310.pyc,, +pandas/core/reshape/__pycache__/encoding.cpython-310.pyc,, +pandas/core/reshape/__pycache__/melt.cpython-310.pyc,, +pandas/core/reshape/__pycache__/merge.cpython-310.pyc,, +pandas/core/reshape/__pycache__/pivot.cpython-310.pyc,, +pandas/core/reshape/__pycache__/reshape.cpython-310.pyc,, +pandas/core/reshape/__pycache__/tile.cpython-310.pyc,, +pandas/core/reshape/__pycache__/util.cpython-310.pyc,, +pandas/core/reshape/api.py,sha256=Qk5y-D5-OdRYKkCgc-ktcxKGNGSCPteISEsByXFWI9M,680 +pandas/core/reshape/concat.py,sha256=qwXsAlI9pnLld1pj9uqHf2zinXd-fj8GE3kZ-XNVacU,28253 +pandas/core/reshape/encoding.py,sha256=BN4hXm16hkz6bFQ56BgvoRb0YfdK-4CjWb4FcYRFBfk,18970 +pandas/core/reshape/melt.py,sha256=Zj6PSyI3Dbi_aQPhYyFTz_cWi9m8kIubwItq57JNCFQ,17400 +pandas/core/reshape/merge.py,sha256=K31zarc63I1cEl74TSPPITrLdpcVVAlpOslP2dTLYUo,99585 +pandas/core/reshape/pivot.py,sha256=ylkSVYQcoMmuxqvEoyEP6YHzeVtGL9y6ueAEfN6_RzY,28917 +pandas/core/reshape/reshape.py,sha256=_slnrYBb1ZFgqP1501D5JNF5LmWzD2PQGDtrzwk-eP0,34661 +pandas/core/reshape/tile.py,sha256=bDzSjjPydhiCce0DOJab1327a613mhs98PimwfIddjQ,21947 +pandas/core/reshape/util.py,sha256=zrShSZARSsWULoXI5tdWqwgZSLQ-u_3xNPS5cpB4QbY,2014 +pandas/core/roperator.py,sha256=ljko3iHhBm5ZvEVqrGEbwGV4z0cXd4TE1uSzf-LZlQ8,1114 +pandas/core/sample.py,sha256=QEPzbFmeMRMxAIqfkRrJLnIjUZgSupbP8YUEezW-Pcw,4626 +pandas/core/series.py,sha256=C6VAX8KkmsW-0T4ydIMqw7LoruFa5Yy14hcrvmTR8NA,213072 +pandas/core/shared_docs.py,sha256=Fdd7Xi1TQ_esZXq32Gu-ZPiShIHE2VROSSRtzet509s,30103 +pandas/core/sorting.py,sha256=kxr4Phz8HHAsEbyx9J5SCYZ4xENhoZnFmMEAUI-NpIU,22976 +pandas/core/sparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/sparse/__pycache__/__init__.cpython-310.pyc,, +pandas/core/sparse/__pycache__/api.cpython-310.pyc,, +pandas/core/sparse/api.py,sha256=y0onCpBKCj_5Iaybw5e-gxk8zAa9d1p5Zu58RLzPT1k,143 +pandas/core/strings/__init__.py,sha256=KYCMtwb7XWzZXsIZGijtjw9ofs2DIqE9psfKoxRsHuw,1087 +pandas/core/strings/__pycache__/__init__.cpython-310.pyc,, +pandas/core/strings/__pycache__/accessor.cpython-310.pyc,, +pandas/core/strings/__pycache__/base.cpython-310.pyc,, +pandas/core/strings/__pycache__/object_array.cpython-310.pyc,, +pandas/core/strings/accessor.py,sha256=_IAdGo_ZBjznkpvqStUZ2xyYyCuCoYtze9Trxb8Gd5I,112575 +pandas/core/strings/base.py,sha256=AdPlNkPgT218Mffx6Blt4aJF1GGxSYII3mem6EjWntY,5528 +pandas/core/strings/object_array.py,sha256=mCAo6lx6V1_UaoxcGWOgiAV0N-381rKcyAkqUHJ9kic,15438 
+pandas/core/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/tools/__pycache__/__init__.cpython-310.pyc,, +pandas/core/tools/__pycache__/datetimes.cpython-310.pyc,, +pandas/core/tools/__pycache__/numeric.cpython-310.pyc,, +pandas/core/tools/__pycache__/timedeltas.cpython-310.pyc,, +pandas/core/tools/__pycache__/times.cpython-310.pyc,, +pandas/core/tools/datetimes.py,sha256=3KYS9voe_xTCyaVnZnqqEqt1lv1YVM9BzcvuV27sI_c,43404 +pandas/core/tools/numeric.py,sha256=f8HKUnKTNIPvlrFa4bbLy6pMH3ULSgce04qRzK5qV_Y,11025 +pandas/core/tools/timedeltas.py,sha256=kyDgKp9yRpw-gzucChvvekVQKy1sHu8J5qQwbwWaukg,8858 +pandas/core/tools/times.py,sha256=_-z5faRW4NA04LKN-eUgvklqOjRIncQyndFdSzwzDXI,5373 +pandas/core/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/util/__pycache__/__init__.cpython-310.pyc,, +pandas/core/util/__pycache__/hashing.cpython-310.pyc,, +pandas/core/util/__pycache__/numba_.cpython-310.pyc,, +pandas/core/util/hashing.py,sha256=LlYoJfn80z0zj0xNt5P3PYRVFJafXI3bRnSYV361Avs,9657 +pandas/core/util/numba_.py,sha256=U-2_obqjB_DwLc7Bu6swTCdPdNU62Z9l0QpxYM5Edng,2582 +pandas/core/window/__init__.py,sha256=DewB8XXkLGEDgtQqICYPmnkZZ3Y4tN6zPoTYvpNuJGE,450 +pandas/core/window/__pycache__/__init__.cpython-310.pyc,, +pandas/core/window/__pycache__/common.cpython-310.pyc,, +pandas/core/window/__pycache__/doc.cpython-310.pyc,, +pandas/core/window/__pycache__/ewm.cpython-310.pyc,, +pandas/core/window/__pycache__/expanding.cpython-310.pyc,, +pandas/core/window/__pycache__/numba_.cpython-310.pyc,, +pandas/core/window/__pycache__/online.cpython-310.pyc,, +pandas/core/window/__pycache__/rolling.cpython-310.pyc,, +pandas/core/window/common.py,sha256=LZBddjEy7C_nb-9gmsk2wQr-FsF1WBMsGKd8ptmMdug,6714 +pandas/core/window/doc.py,sha256=iCAs_hJ_pwstet2FHwSilVSXoTaKRuuMHwyZ9l2dz_c,4158 +pandas/core/window/ewm.py,sha256=nniOOhhrrx88wUd1iG2C2vyhT6mfd1N4UbDt4pY1F78,35190 +pandas/core/window/expanding.py,sha256=MnepmpreeY11OX9nQHj5TxgYdnOPJIRC-Cr3MyDnC38,27845 +pandas/core/window/numba_.py,sha256=7x9RvcIvPab0C5uXT4U9cP1VNaI7Yym0CevTsMIu27U,10606 +pandas/core/window/online.py,sha256=NKHkFpehR5QDT5VrCESEqjZ9a_Fq0JkchzmXFtzLRds,3735 +pandas/core/window/rolling.py,sha256=jj5NmCV28NgsWXMaBVqV-j8-JPwZOCu3heLi9AAbTMU,95504 +pandas/errors/__init__.py,sha256=DotJJfd-bS7FSQbnLC6SKWCfz_GqGYS6Gy6Fc9AJZg0,27164 +pandas/errors/__pycache__/__init__.cpython-310.pyc,, +pandas/io/__init__.py,sha256=4YJcSmLT6iTWceVgxGNSyRJq91wxhrgsNr47uc4Rw-I,293 +pandas/io/__pycache__/__init__.cpython-310.pyc,, +pandas/io/__pycache__/_util.cpython-310.pyc,, +pandas/io/__pycache__/api.cpython-310.pyc,, +pandas/io/__pycache__/clipboards.cpython-310.pyc,, +pandas/io/__pycache__/common.cpython-310.pyc,, +pandas/io/__pycache__/feather_format.cpython-310.pyc,, +pandas/io/__pycache__/gbq.cpython-310.pyc,, +pandas/io/__pycache__/html.cpython-310.pyc,, +pandas/io/__pycache__/orc.cpython-310.pyc,, +pandas/io/__pycache__/parquet.cpython-310.pyc,, +pandas/io/__pycache__/pickle.cpython-310.pyc,, +pandas/io/__pycache__/pytables.cpython-310.pyc,, +pandas/io/__pycache__/spss.cpython-310.pyc,, +pandas/io/__pycache__/sql.cpython-310.pyc,, +pandas/io/__pycache__/stata.cpython-310.pyc,, +pandas/io/__pycache__/xml.cpython-310.pyc,, +pandas/io/_util.py,sha256=0_dKFBocN0FV3XTzhOlDP55ToeHCre22RIKe6d6tRZs,961 +pandas/io/api.py,sha256=w7Ux3U8PI-SeP13hD3PMjWMf3YbOGog6zCDqj0nfnpI,1264 +pandas/io/clipboard/__init__.py,sha256=3aFzdoqbabE8XM-FYjdYIctTps_sTAJDJMrhEbDv_UU,24235 
+pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc,, +pandas/io/clipboards.py,sha256=t88NnxP8TOpmM1V438o6jgvlEMzlRLaqWBxUQiH_EQ8,6320 +pandas/io/common.py,sha256=hsjBpZc8i9O_aKMpCms0tuQ2jAqbkVzLXnUKI01TVcU,40615 +pandas/io/excel/__init__.py,sha256=w62gHQ9nF3XgBOmjhM8eHmV-YXF7gflz1lFqxFq7io8,486 +pandas/io/excel/__pycache__/__init__.cpython-310.pyc,, +pandas/io/excel/__pycache__/_base.cpython-310.pyc,, +pandas/io/excel/__pycache__/_calamine.cpython-310.pyc,, +pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc,, +pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc,, +pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc,, +pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc,, +pandas/io/excel/__pycache__/_util.cpython-310.pyc,, +pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc,, +pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc,, +pandas/io/excel/_base.py,sha256=tEBB5m3LcL8ZHv62Kv7G4Ul9MElr2X8JrkXvadypzC4,59073 +pandas/io/excel/_calamine.py,sha256=7O8I8yg-dpaK6OqdZflV14ggDbNDJrinhgAPxXgh9ro,3474 +pandas/io/excel/_odfreader.py,sha256=vMVZ-lNJpMB0vQ8cewanVpjj3-sFzUAS-I-w28nOmoY,8262 +pandas/io/excel/_odswriter.py,sha256=o7dP9MQYRyDO88kFeJMiyW5SmCxusykb8vew4QHMjsg,11210 +pandas/io/excel/_openpyxl.py,sha256=CshETVibZ0_rwbNq0y7sPkzSgnXpwI7FUtvAj8efU6Q,19861 +pandas/io/excel/_pyxlsb.py,sha256=74huu-7ISIsfvguwDID84B3KIooHtU53XOP3PFkX6ts,4358 +pandas/io/excel/_util.py,sha256=1fwMlNjLSd_qlCGLGBcXDPLnZ_SOpAZTIaUgYUVr0_0,8105 +pandas/io/excel/_xlrd.py,sha256=tddoGt7ugmyTTryMeqSvU6FE9vgajsMYfrSLQytMEV0,4556 +pandas/io/excel/_xlsxwriter.py,sha256=b0o_2MRgeTNG0loBRybT-xDoa65CjUeVC2wmuTUoR0M,9191 +pandas/io/feather_format.py,sha256=BjbwRVYhEvbp8w05sYd838VfgS4dNktb8T7m2PicjCM,4443 +pandas/io/formats/__init__.py,sha256=MGhPbyRcirFXg_uAGxyQ_q8Bky6ZUpBZ0nHXQa5LYd8,238 +pandas/io/formats/__pycache__/__init__.cpython-310.pyc,, +pandas/io/formats/__pycache__/_color_data.cpython-310.pyc,, +pandas/io/formats/__pycache__/console.cpython-310.pyc,, +pandas/io/formats/__pycache__/css.cpython-310.pyc,, +pandas/io/formats/__pycache__/csvs.cpython-310.pyc,, +pandas/io/formats/__pycache__/excel.cpython-310.pyc,, +pandas/io/formats/__pycache__/format.cpython-310.pyc,, +pandas/io/formats/__pycache__/html.cpython-310.pyc,, +pandas/io/formats/__pycache__/info.cpython-310.pyc,, +pandas/io/formats/__pycache__/printing.cpython-310.pyc,, +pandas/io/formats/__pycache__/string.cpython-310.pyc,, +pandas/io/formats/__pycache__/style.cpython-310.pyc,, +pandas/io/formats/__pycache__/style_render.cpython-310.pyc,, +pandas/io/formats/__pycache__/xml.cpython-310.pyc,, +pandas/io/formats/_color_data.py,sha256=fZ_QluvMFUNKUE4-T32x7Pn0nulQgxmsEMHB9URcBOY,4332 +pandas/io/formats/console.py,sha256=dcoFM-rirR8qdc1bvgJySPhZvk23S6Nkz3-2Lc30pMk,2748 +pandas/io/formats/css.py,sha256=gCSjRV6QatAMY-La26wnrQmyF78G4BruMfpWrDIKIkk,12793 +pandas/io/formats/csvs.py,sha256=JAI3kO6xKSMjsLxlYk4EijBuktOHRwU9U91a92OvYnQ,10526 +pandas/io/formats/excel.py,sha256=vW5_Pii4i_wv_VNVR0wn-7IFwdgf2tzROor4eThVO68,32994 +pandas/io/formats/format.py,sha256=FPeKW4UASjOLB-N73HfVZWVviqUbDPoBoVLCQxhJJjE,66127 +pandas/io/formats/html.py,sha256=AiROfWxTRrMT75LZsrBMJTIs3ky9n1x3nUnXzKpZILM,24165 +pandas/io/formats/info.py,sha256=heCm4flQPvNMNW6zecz_XUrfV5O-_zWdpam_dk3V2Tc,32621 +pandas/io/formats/printing.py,sha256=Hrs0vaaacrfswH7FuPCM9FnVg5kKL5vGYl8-ZxAQC4Q,17950 +pandas/io/formats/string.py,sha256=f6UNLnvUV-iO-7k7zXqWBOs7hOoU7_fWQzogyeY8c7I,6707 +pandas/io/formats/style.py,sha256=7bM9ookFZdRMVjamIUzCcyfiYAWM0ksTd9m3UnvyDA0,155854 
+pandas/io/formats/style_render.py,sha256=TgyXK40A4dp8geKIeGWMwNm_v597jWQmJZH-H-TSSdQ,90899 +pandas/io/formats/templates/html.tpl,sha256=KA-w_npfnHM_1c5trtJtkd3OD9j8hqtoQAY4GCC5UgI,412 +pandas/io/formats/templates/html_style.tpl,sha256=_gCqktLyUGAo5TzL3I-UCp1Njj8KyeLCWunHz4nYHsE,694 +pandas/io/formats/templates/html_table.tpl,sha256=MJxwJFwOa4KNli-ix7vYAGjRzw59FLAmYKHMy9nC32k,1811 +pandas/io/formats/templates/latex.tpl,sha256=m-YMxqKVJ52kLd61CA9V2MiC_Dtwwa-apvU8YtH8TYU,127 +pandas/io/formats/templates/latex_longtable.tpl,sha256=opn-JNfuMX81g1UOWYFJLKdQSUwoSP_UAKbK4kYRph4,2877 +pandas/io/formats/templates/latex_table.tpl,sha256=YNvnvjtwYXrWFVXndQZdJqKFIXYTUj8f1YOUdMmxXmQ,2221 +pandas/io/formats/templates/string.tpl,sha256=Opr87f1tY8yp_G7GOY8ouFllR_7vffN_ok7Ndf98joE,344 +pandas/io/formats/xml.py,sha256=dLBpVLGltVRiOxYCIVLb4okLXwhPneRp7whi2VbV1gk,16029 +pandas/io/gbq.py,sha256=nkdYZ0w5ZetYdWpIIKALLh5_3nNhFE1hvVV9rJ5yyhk,9372 +pandas/io/html.py,sha256=E4rdZT6DVcMRSeDaceBsMpWrc-A9aAEvF5sbW4DstIg,39546 +pandas/io/json/__init__.py,sha256=ArWTQnIKhxDVaMI1j0Whgpk0ci6dP0mpUiGwMRqEdtY,270 +pandas/io/json/__pycache__/__init__.cpython-310.pyc,, +pandas/io/json/__pycache__/_json.cpython-310.pyc,, +pandas/io/json/__pycache__/_normalize.cpython-310.pyc,, +pandas/io/json/__pycache__/_table_schema.cpython-310.pyc,, +pandas/io/json/_json.py,sha256=KxErOL4x5IkSnFsgivvNBs6ZWuSxAWX29cKguk2OEQs,48572 +pandas/io/json/_normalize.py,sha256=rbyrEKwuxotrABiv6Jmb9JN6k6rCXd99ONrEZv2IbXI,17212 +pandas/io/json/_table_schema.py,sha256=Ld6OMQsdCutRvmGHPayKOTf08BNTnhuFwcQGRnlCq_w,11594 +pandas/io/orc.py,sha256=c6HnmrCBhfe6dzGu4LxfKwxDOraHckh-WYf9UNN_xno,8385 +pandas/io/parquet.py,sha256=npe1SKcJbfLM6vSPVDQDeoEsPIIfJzlAKstzNGmoOVQ,23835 +pandas/io/parsers/__init__.py,sha256=7BLx4kn9y5ipgfZUWZ4y_MLEUNgX6MQ5DyDwshhJxVM,204 +pandas/io/parsers/__pycache__/__init__.cpython-310.pyc,, +pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc,, +pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc,, +pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc,, +pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc,, +pandas/io/parsers/__pycache__/readers.cpython-310.pyc,, +pandas/io/parsers/arrow_parser_wrapper.py,sha256=XE5SuEdcu2M-wlEgAC8gAZaZDvA_O31_vJRFJqMAbWg,11409 +pandas/io/parsers/base_parser.py,sha256=HRzZBK2fm9dmi7OcBiljVtQdd6c-cWsCZcHLFRiblo0,49443 +pandas/io/parsers/c_parser_wrapper.py,sha256=yXK-ZrUOxZcXdZ9rtINgRl7l426tdoch8GyZIS_nCMI,14199 +pandas/io/parsers/python_parser.py,sha256=9fnAQ5iFQwBETy-6ptu66-3Ppu8tn81CGSRyYxhgE2I,48456 +pandas/io/parsers/readers.py,sha256=yP4xBAdreacpmmKamh7w6O4CTl0NQ5z0UVSuA7LSs0c,87157 +pandas/io/pickle.py,sha256=t4OulGy7CQL60LXTC8kebegWM7QaJOmudlynAgWxo4w,6582 +pandas/io/pytables.py,sha256=o8JItkD0B5Uewjks5IPtyv5JtHzqD94yUO6xVQ90kX8,177239 +pandas/io/sas/__init__.py,sha256=AIAudC9f784kcEzuho8GiXU63vj2ThRitKznl7Imkq4,69 +pandas/io/sas/__pycache__/__init__.cpython-310.pyc,, +pandas/io/sas/__pycache__/sas7bdat.cpython-310.pyc,, +pandas/io/sas/__pycache__/sas_constants.cpython-310.pyc,, +pandas/io/sas/__pycache__/sas_xport.cpython-310.pyc,, +pandas/io/sas/__pycache__/sasreader.cpython-310.pyc,, +pandas/io/sas/sas7bdat.py,sha256=IGdDWp_EivJPJYmfv3jbJNtt6BZtbupRskKPvQ16KIo,27534 +pandas/io/sas/sas_constants.py,sha256=CM1wSNzXn6nkjLMSTeBhBJlL6d0hU-1YdNwEO8HE-9U,8719 +pandas/io/sas/sas_xport.py,sha256=_N7sGHw4Z80u-emCxS4lv6UFs6N01eKj5CZkTzq7XiM,15134 +pandas/io/sas/sasreader.py,sha256=S7bRlsXahhpoTkKdsHoWY9TLo_jgzNJJdsb6gxpcfuY,4885 
+pandas/io/spss.py,sha256=p4vW9rJEFLPBqEIHMR5fCmo2U-JBTvgnDNd74Y7DFuI,2182 +pandas/io/sql.py,sha256=AezlzGw76UejHVblu1x9tGpex6bmSg2_QuuvBVnSf0g,101704 +pandas/io/stata.py,sha256=dgPveWarql9uZ6oSOO02Dfnf7ZmiFye2LUG0-9IrEiY,135790 +pandas/io/xml.py,sha256=ZKHsFACIJhlNJqU8nNBpG-OjHZ2uE_wzh94OOBuj8iI,38656 +pandas/plotting/__init__.py,sha256=W_2wP9v02mNCK4lV5ekG1iJHYSF8dD1NbByJiNq3g8I,2826 +pandas/plotting/__pycache__/__init__.cpython-310.pyc,, +pandas/plotting/__pycache__/_core.cpython-310.pyc,, +pandas/plotting/__pycache__/_misc.cpython-310.pyc,, +pandas/plotting/_core.py,sha256=BLIzDrRcaDDYBpXj8nfw3aIXabos6YlwPjondYmh6II,66558 +pandas/plotting/_matplotlib/__init__.py,sha256=jGq_ouunQTV3zzX_crl9kCVX2ztk1p62McqD2WVRnAk,2044 +pandas/plotting/_matplotlib/__pycache__/__init__.cpython-310.pyc,, +pandas/plotting/_matplotlib/__pycache__/boxplot.cpython-310.pyc,, +pandas/plotting/_matplotlib/__pycache__/converter.cpython-310.pyc,, +pandas/plotting/_matplotlib/__pycache__/core.cpython-310.pyc,, +pandas/plotting/_matplotlib/__pycache__/groupby.cpython-310.pyc,, +pandas/plotting/_matplotlib/__pycache__/hist.cpython-310.pyc,, +pandas/plotting/_matplotlib/__pycache__/misc.cpython-310.pyc,, +pandas/plotting/_matplotlib/__pycache__/style.cpython-310.pyc,, +pandas/plotting/_matplotlib/__pycache__/timeseries.cpython-310.pyc,, +pandas/plotting/_matplotlib/__pycache__/tools.cpython-310.pyc,, +pandas/plotting/_matplotlib/boxplot.py,sha256=xzXBEoBmC1U9VGlYCvqXEfjAabAFgI69nUbOzHm9zmc,18261 +pandas/plotting/_matplotlib/converter.py,sha256=EcdgaqQPOqYIO2noB-6J2xkODsBwATamuwA315SCVog,37033 +pandas/plotting/_matplotlib/core.py,sha256=20oTgXZwzTQDfqBY6g_HT9CsGd1RkuNtnu0YE-rtO5U,71826 +pandas/plotting/_matplotlib/groupby.py,sha256=vg8RYC3SxN2Khc-34GDV3UpCVSPnawt4zwYqIuzb5HE,4343 +pandas/plotting/_matplotlib/hist.py,sha256=uljuycUD16A6u3GdktvZwXdU3qMKPfFLFMgYmBX4zQU,16816 +pandas/plotting/_matplotlib/misc.py,sha256=tzbAVRDGc1Ep6BR3QbYAEKEHgkX2vwMBX9k9uwN-j8c,13358 +pandas/plotting/_matplotlib/style.py,sha256=mKDcq4cBmYF9zDrBv3st3fNFvSn-91rYEH5cLXaYiw0,8368 +pandas/plotting/_matplotlib/timeseries.py,sha256=Mw3zTUVL8NR1bUCxWrait8kPCB9DHBkm8skT_RdEQ3k,11531 +pandas/plotting/_matplotlib/tools.py,sha256=7YrV3B-bXVm6AI-QekcC4CSKRLB9ZM7fakEOzm5gm1k,15389 +pandas/plotting/_misc.py,sha256=sbOaqkE9lA5HbikzcFBcXe9tdqHMVAxxMH3V9QfYr-c,20929 +pandas/pyproject.toml,sha256=zVYCcwPAWYkyN0xtApuGIQUfWpd-c-VJ55WzQNx2LkQ,24456 +pandas/testing.py,sha256=3XTHuY440lezW7rxw4LW9gfxzDEa7s0l16cdnkRYwwM,313 +pandas/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/__pycache__/test_aggregation.cpython-310.pyc,, +pandas/tests/__pycache__/test_algos.cpython-310.pyc,, +pandas/tests/__pycache__/test_common.cpython-310.pyc,, +pandas/tests/__pycache__/test_downstream.cpython-310.pyc,, +pandas/tests/__pycache__/test_errors.cpython-310.pyc,, +pandas/tests/__pycache__/test_expressions.cpython-310.pyc,, +pandas/tests/__pycache__/test_flags.cpython-310.pyc,, +pandas/tests/__pycache__/test_multilevel.cpython-310.pyc,, +pandas/tests/__pycache__/test_nanops.cpython-310.pyc,, +pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc,, +pandas/tests/__pycache__/test_register_accessor.cpython-310.pyc,, +pandas/tests/__pycache__/test_sorting.cpython-310.pyc,, +pandas/tests/__pycache__/test_take.cpython-310.pyc,, +pandas/tests/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/api/__pycache__/__init__.cpython-310.pyc,, 
+pandas/tests/api/__pycache__/test_api.cpython-310.pyc,, +pandas/tests/api/__pycache__/test_types.cpython-310.pyc,, +pandas/tests/api/test_api.py,sha256=ZQI3_TgIuolTfuKy-a4eds0io74Q4kvy8fG6NZDoj-M,9394 +pandas/tests/api/test_types.py,sha256=ZR8n_efaY7HWGY6XnRZKNIiRWmaszpNU8p22kvAbyEQ,1711 +pandas/tests/apply/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/apply/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/apply/__pycache__/common.cpython-310.pyc,, +pandas/tests/apply/__pycache__/test_frame_apply.cpython-310.pyc,, +pandas/tests/apply/__pycache__/test_frame_apply_relabeling.cpython-310.pyc,, +pandas/tests/apply/__pycache__/test_frame_transform.cpython-310.pyc,, +pandas/tests/apply/__pycache__/test_invalid_arg.cpython-310.pyc,, +pandas/tests/apply/__pycache__/test_numba.cpython-310.pyc,, +pandas/tests/apply/__pycache__/test_series_apply.cpython-310.pyc,, +pandas/tests/apply/__pycache__/test_series_apply_relabeling.cpython-310.pyc,, +pandas/tests/apply/__pycache__/test_series_transform.cpython-310.pyc,, +pandas/tests/apply/__pycache__/test_str.cpython-310.pyc,, +pandas/tests/apply/common.py,sha256=A8TqjvKR4h4WaLtovGR9hDULpWs4rV-1Jx_Q4Zz5Dew,298 +pandas/tests/apply/test_frame_apply.py,sha256=MNA70UiPF9BisXVGpvQTt1SjZTMj7-J5p_43BaZ-4Ao,54256 +pandas/tests/apply/test_frame_apply_relabeling.py,sha256=jHfewakLcFvc1nartXtElv7HM5eGUIelIcm-McXX2KQ,3772 +pandas/tests/apply/test_frame_transform.py,sha256=bbAcYmXxlfEo8-zPQdxlp26s9LPlRbpVKpQu9yEVkCI,8020 +pandas/tests/apply/test_invalid_arg.py,sha256=4X6SQ_1Y21KMxhpn7CFx5l1Gdky70ERJwRClcGxYJwA,10983 +pandas/tests/apply/test_numba.py,sha256=XUiNthXaQTEB1mJSD_wkNEE_h0Blk1lMkVt9DBwBHCs,3810 +pandas/tests/apply/test_series_apply.py,sha256=Mak1zJWdYx6mX-0-OzHImTzYSGl9UVPSsPKVUAdMNoI,22485 +pandas/tests/apply/test_series_apply_relabeling.py,sha256=_HkoIybNJQFEpIaafHvD1Q0nx_U9J2aL8ualcwhp5Fs,1510 +pandas/tests/apply/test_series_transform.py,sha256=rrJO-C5HagNKJo542h32eB5TOWVDxirJv1u5PXJkh_I,2404 +pandas/tests/apply/test_str.py,sha256=k34l2s3s5p2NUzwUFOtW6sePl9ureo6Q8EaY5PEqy1w,11043 +pandas/tests/arithmetic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arithmetic/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/common.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/test_array_ops.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/test_categorical.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/test_datetime64.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/test_interval.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/test_numeric.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/test_object.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/test_period.cpython-310.pyc,, +pandas/tests/arithmetic/__pycache__/test_timedelta64.cpython-310.pyc,, +pandas/tests/arithmetic/common.py,sha256=C_s1Zc2_0U_oBciQNt5xJp-8FaLmkscEdmnX2Nq16UY,4362 +pandas/tests/arithmetic/conftest.py,sha256=uUtu5-T5FBdFQAo21vRLQSHPiNEjWkc69UwH6llpnsM,3473 +pandas/tests/arithmetic/test_array_ops.py,sha256=4lmZRZAlbJEnphzzwfcvsO4kEv1LG9l3uCmaF_8kcAA,1064 +pandas/tests/arithmetic/test_categorical.py,sha256=lK5fXv4cRIu69ocvOHfKL5bjeK0jDdW3psvrrssjDoA,742 +pandas/tests/arithmetic/test_datetime64.py,sha256=f97V90PrRZrFZ_IrBxfEtgDXvYI_JGqMsIl__9b0y9E,90255 +pandas/tests/arithmetic/test_interval.py,sha256=2TG1Lh4VZXaxwjs5y5RjXzIukOfoVetyLfPlOo5h4vQ,10951 
+pandas/tests/arithmetic/test_numeric.py,sha256=569JY7Pjl453iXP_txrlktVyUyH1CR_3677due2sfwU,55511 +pandas/tests/arithmetic/test_object.py,sha256=PJ-_UpBqHXs1_29Q60xlhK4J0m2yX-HIvW_Auknsr98,13525 +pandas/tests/arithmetic/test_period.py,sha256=uxdkrPIpMM7BWUKmwloViCEE1JtOsxkXKCdfxLQ6E1A,59617 +pandas/tests/arithmetic/test_timedelta64.py,sha256=OH0dD4KNrVEf8FlC75MezthgEDohA8dyk3uxwouF8LM,78911 +pandas/tests/arrays/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/__pycache__/masked_shared.cpython-310.pyc,, +pandas/tests/arrays/__pycache__/test_array.cpython-310.pyc,, +pandas/tests/arrays/__pycache__/test_datetimelike.cpython-310.pyc,, +pandas/tests/arrays/__pycache__/test_datetimes.cpython-310.pyc,, +pandas/tests/arrays/__pycache__/test_ndarray_backed.cpython-310.pyc,, +pandas/tests/arrays/__pycache__/test_period.cpython-310.pyc,, +pandas/tests/arrays/__pycache__/test_timedeltas.cpython-310.pyc,, +pandas/tests/arrays/boolean/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/boolean/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_comparison.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_construction.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_function.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_logical.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_ops.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_reduction.cpython-310.pyc,, +pandas/tests/arrays/boolean/__pycache__/test_repr.cpython-310.pyc,, +pandas/tests/arrays/boolean/test_arithmetic.py,sha256=C6y1v1C7kz73WCfl3MS-KleylT4aYdVUDdtP0RBxrg8,4241 +pandas/tests/arrays/boolean/test_astype.py,sha256=0AEVw8lNNjHomdqgpQ7ZYCauUb23QHvxY3NPDe7vIQw,1614 +pandas/tests/arrays/boolean/test_comparison.py,sha256=QIX85ffCwMvtzXtLkWePFQkso_mVtIffWpbgy4ykEz0,1976 +pandas/tests/arrays/boolean/test_construction.py,sha256=1KGaMjJ3FTmoisMbEnKUuxAkylVyzTsfuRXZV5UXlIk,12332 +pandas/tests/arrays/boolean/test_function.py,sha256=eAVsu1XUeokLh7Ko0-bDNUQqmVrGAyOvv9vJdWCQj0M,4061 +pandas/tests/arrays/boolean/test_indexing.py,sha256=BorrK8_ZJbN5HWcIX9fCP-BbTCaJsgAGUiza5IwhYr4,361 +pandas/tests/arrays/boolean/test_logical.py,sha256=7kJTl0KbLA7n8dOV0PZtiZ7gPm65Ggc3p0tHOF5i0d0,9335 +pandas/tests/arrays/boolean/test_ops.py,sha256=iM_FRYMtvvdEpMtLUSuBd_Ww5nHr284v2fRxHaydvIM,975 +pandas/tests/arrays/boolean/test_reduction.py,sha256=eBdonU5n9zsbC86AscHCLxF68XqiqhWWyBJV-7YCOdA,2183 +pandas/tests/arrays/boolean/test_repr.py,sha256=RRljPIDi6jDNhUdbjKMc75Mst-wm92l-H6b5Y-lCCJA,437 +pandas/tests/arrays/categorical/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/categorical/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_algos.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_analytics.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_api.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_dtypes.cpython-310.pyc,, 
+pandas/tests/arrays/categorical/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_map.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_missing.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_operators.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_replace.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_repr.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_sorting.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_subclass.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_take.cpython-310.pyc,, +pandas/tests/arrays/categorical/__pycache__/test_warnings.cpython-310.pyc,, +pandas/tests/arrays/categorical/test_algos.py,sha256=SLguZHlE5eyi14kRoMUGpIohPJM7jQqboKlnTvidpg0,2710 +pandas/tests/arrays/categorical/test_analytics.py,sha256=Bl7A_lPouoS7uK8EnybqvtMXp6WatI7U89OQwMecWVY,13213 +pandas/tests/arrays/categorical/test_api.py,sha256=Ivy3G6MW43fLMYwWn9QdE9wXRxLrpF8IFoUpB-TplCc,19879 +pandas/tests/arrays/categorical/test_astype.py,sha256=EJc8J2mrxN2Epg_6ufPxf3qLlqIsV66dyDbvjJoJDJg,5546 +pandas/tests/arrays/categorical/test_constructors.py,sha256=NFmmMYKBtBHxrM3d4nBxG0Zck1-n-uEQsbADePuAbl0,30705 +pandas/tests/arrays/categorical/test_dtypes.py,sha256=h1ZhuPvbHp9aFA4doAkmQ96zQW4A5UX6y6Yv2G5QTb8,5523 +pandas/tests/arrays/categorical/test_indexing.py,sha256=u43KuLMFtxe5ZAs0dphmGqpHsygyxtmTHxdGEfoDVQg,12972 +pandas/tests/arrays/categorical/test_map.py,sha256=TO6GY6B2n2dhkcNRQinbvID9eBfwtVnWsT1yexQg00U,5152 +pandas/tests/arrays/categorical/test_missing.py,sha256=5KdSj982_KUkfB8Cg-l7Jcir5I8n7Gz6SbnHnIqmu8A,7814 +pandas/tests/arrays/categorical/test_operators.py,sha256=NDc6FKDGOrGIdvSDpJ9Mq9O-aE0xw-LoI6L-rcrW0cI,15968 +pandas/tests/arrays/categorical/test_replace.py,sha256=I3jiQGmNSQ2i1WTLgVjIKcH-D919sf9EWTOm-hh_emE,4102 +pandas/tests/arrays/categorical/test_repr.py,sha256=4ft4OCt7r3qZDFd5CPrsyYSq7ZVrxcrRwpxQs91Mm-A,27107 +pandas/tests/arrays/categorical/test_sorting.py,sha256=gEhLklhDxhqf8UDOB17TMKhrabxS5n0evPg9DWSMd5s,5052 +pandas/tests/arrays/categorical/test_subclass.py,sha256=Y4nURd4hFM0Q3aVET1OO-z11pZzzZ0HFfl2s-9OWemw,903 +pandas/tests/arrays/categorical/test_take.py,sha256=O4g_LYDeK0NzHDId5cBBEp1ns_a762NsYHn088ocYzg,3501 +pandas/tests/arrays/categorical/test_warnings.py,sha256=XqvGeAb9lrXP1VdwKSOvbDuytqDuJ5VSDsLKQAa5gIk,682 +pandas/tests/arrays/datetimes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/datetimes/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/datetimes/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/arrays/datetimes/__pycache__/test_cumulative.cpython-310.pyc,, +pandas/tests/arrays/datetimes/__pycache__/test_reductions.cpython-310.pyc,, +pandas/tests/arrays/datetimes/test_constructors.py,sha256=xZsxdsUxxbk7UCawlCS3_aAkhsuexX0-uf3XQMlvSA8,11050 +pandas/tests/arrays/datetimes/test_cumulative.py,sha256=X_SHtt9n_WzA_C2wPlRJHRS8LUmjNNmr2-XL6AszJd0,1307 +pandas/tests/arrays/datetimes/test_reductions.py,sha256=Cg1qwq8wASnMeOdZ5_wowrILL6e1ZT_j8m-rIOkwrkg,5787 +pandas/tests/arrays/floating/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/floating/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/floating/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/arrays/floating/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/arrays/floating/__pycache__/test_astype.cpython-310.pyc,, 
+pandas/tests/arrays/floating/__pycache__/test_comparison.cpython-310.pyc,, +pandas/tests/arrays/floating/__pycache__/test_concat.cpython-310.pyc,, +pandas/tests/arrays/floating/__pycache__/test_construction.cpython-310.pyc,, +pandas/tests/arrays/floating/__pycache__/test_contains.cpython-310.pyc,, +pandas/tests/arrays/floating/__pycache__/test_function.cpython-310.pyc,, +pandas/tests/arrays/floating/__pycache__/test_repr.cpython-310.pyc,, +pandas/tests/arrays/floating/__pycache__/test_to_numpy.cpython-310.pyc,, +pandas/tests/arrays/floating/conftest.py,sha256=PkAOd0oDvePBtXL-N0MnmEGCmDMP3_Dw-YwpxgNfl-k,1161 +pandas/tests/arrays/floating/test_arithmetic.py,sha256=z2y4ca3ntG7WZPntefFBO5IbwLSc98uNUHJeoKzi_Dc,8353 +pandas/tests/arrays/floating/test_astype.py,sha256=pvgAFQ0bTRyuoBpgmiyQza_zPOXBC7RYdGJc7F6tP4c,4047 +pandas/tests/arrays/floating/test_comparison.py,sha256=C-rwNTv5FtUvo3oWB8XNquCOa_XQHf6R9JRYX6JVAG0,2071 +pandas/tests/arrays/floating/test_concat.py,sha256=-RO-pwRRY93FQnOjBLs1fMVf7uBCoEGRkGWPAdX8ltU,573 +pandas/tests/arrays/floating/test_construction.py,sha256=weDvGh2hSfHmVnQ-6Kc5QmAUaGTF9mvEI3qtZSEHHAk,6455 +pandas/tests/arrays/floating/test_contains.py,sha256=oTsN_kyhRi7hHdKRzi9PzwSu2gHiE3EP4FkuR31BZFM,204 +pandas/tests/arrays/floating/test_function.py,sha256=YiXRdFHEU2iAGXwd68kDyfsjBZ8ztoC8fikZU6AnbRE,6403 +pandas/tests/arrays/floating/test_repr.py,sha256=N_BX7NbU8Pljiz2bouWMzrP22xh_6w_8pHePEB2ycVw,1157 +pandas/tests/arrays/floating/test_to_numpy.py,sha256=d0k_2WXrkIu4JOGkIQlzijmgsm7X-XW2XmobaN_3Q_s,4954 +pandas/tests/arrays/integer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/integer/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/test_comparison.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/test_concat.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/test_construction.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/test_dtypes.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/test_function.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/test_reduction.cpython-310.pyc,, +pandas/tests/arrays/integer/__pycache__/test_repr.cpython-310.pyc,, +pandas/tests/arrays/integer/conftest.py,sha256=TejO1KxvoPETsN-ZdefGePhwJ-szaoYanP9AQXHgY18,1555 +pandas/tests/arrays/integer/test_arithmetic.py,sha256=JAlNBmbw3cR-XhUVHqyRk61Xth__oFFq_h6H-88LRaI,12391 +pandas/tests/arrays/integer/test_comparison.py,sha256=jUr8dmk_6FQsTNjDkYsazWnioHis4cLi94noy4txG54,1212 +pandas/tests/arrays/integer/test_concat.py,sha256=TmHNsCxxvp-KDLD5SaTmeEuWJDzUS51Eg04uSWet9Pg,2351 +pandas/tests/arrays/integer/test_construction.py,sha256=jnzOs0w8i4X55JOrtXc0ylMaiBo8mhRl6uwrnEWr_0o,7768 +pandas/tests/arrays/integer/test_dtypes.py,sha256=5pq5zqlv9oEGyk8v0KOUF_BCwnsogS08uEnCV1cF0yk,8756 +pandas/tests/arrays/integer/test_function.py,sha256=hCqZIrrISPtn_7mlX92wpQNItAF1o-q-g56W93wnyhI,6627 +pandas/tests/arrays/integer/test_indexing.py,sha256=rgwcafGbwJztl_N4CalvAnW6FKfKVNzJcE-RjcXMpR8,498 +pandas/tests/arrays/integer/test_reduction.py,sha256=vOyzjEWQTpsGXLOa2H8ehkahUiYBBcKvEI__6YQtzdo,4215 +pandas/tests/arrays/integer/test_repr.py,sha256=fLTZusgFHPXO4orpygmHIOG6JQLzYcdbTJHRvvsN0sM,1652 +pandas/tests/arrays/interval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pandas/tests/arrays/interval/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/interval/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/arrays/interval/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/arrays/interval/__pycache__/test_interval.cpython-310.pyc,, +pandas/tests/arrays/interval/__pycache__/test_interval_pyarrow.cpython-310.pyc,, +pandas/tests/arrays/interval/__pycache__/test_overlaps.cpython-310.pyc,, +pandas/tests/arrays/interval/test_astype.py,sha256=8rb7rssqvIoSztzCfFb5pY4oIH_GjDStKrXkC6bnUZk,776 +pandas/tests/arrays/interval/test_formats.py,sha256=AARSRfiyQa0Fu6jCBdhx83yJOXdCWtfs0q0Yd8mMxwg,317 +pandas/tests/arrays/interval/test_interval.py,sha256=cfZXy6J5AtUqwd5HY4m9lxTyu0m0xsZbD9FlcBebuio,8082 +pandas/tests/arrays/interval/test_interval_pyarrow.py,sha256=PkPTrpsrTLL_3Vd17ENP0I3NFE71XpSQi38HG09hXxo,5202 +pandas/tests/arrays/interval/test_overlaps.py,sha256=4QNJBVY5Fb150Rf3lS5a6p_ScHy8U-sAuWTWetbCmVc,3279 +pandas/tests/arrays/masked/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/masked/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/masked/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/arrays/masked/__pycache__/test_arrow_compat.cpython-310.pyc,, +pandas/tests/arrays/masked/__pycache__/test_function.cpython-310.pyc,, +pandas/tests/arrays/masked/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/arrays/masked/test_arithmetic.py,sha256=wchNK8BesRBPSclagK_egl_EG9J4KPCquzL9iRZOK20,8175 +pandas/tests/arrays/masked/test_arrow_compat.py,sha256=0uJbBERGPJs4G_BweVktYjW2Z82LD48zdx7rHGQybfM,7193 +pandas/tests/arrays/masked/test_function.py,sha256=qkFCkI5KNijaX2SurVoilnhtBFbismLBS4SyEybNXZ8,1954 +pandas/tests/arrays/masked/test_indexing.py,sha256=xjr8EECp7WStcIeEY8YNhmkZ90Q2o-l3izolkLpG2W0,1916 +pandas/tests/arrays/masked_shared.py,sha256=ANp_CU9Hcly9-NBxknm7g-uWxljstTmriq3S8f5kPsM,5194 +pandas/tests/arrays/numpy_/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/numpy_/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/numpy_/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/arrays/numpy_/__pycache__/test_numpy.cpython-310.pyc,, +pandas/tests/arrays/numpy_/test_indexing.py,sha256=-0lB-Mw-gzM4Mpe-SRCj-w4C6QxLfp3BH65U_DVULNY,1452 +pandas/tests/arrays/numpy_/test_numpy.py,sha256=N4s8S8Kp8YwUZgtza6wUB5RnI_5WYaXMAFQxvEMOXKo,8764 +pandas/tests/arrays/period/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/period/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/period/__pycache__/test_arrow_compat.cpython-310.pyc,, +pandas/tests/arrays/period/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/arrays/period/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/arrays/period/__pycache__/test_reductions.cpython-310.pyc,, +pandas/tests/arrays/period/test_arrow_compat.py,sha256=YuEM6oIOfRhdFaTFs5X0um9nLqygEkuxIZGl9V-qQcg,3709 +pandas/tests/arrays/period/test_astype.py,sha256=lKLDDqZSdU7s6PyHbrywkaCJnMJ4TKSphRqmno7BcbU,2344 +pandas/tests/arrays/period/test_constructors.py,sha256=C6J0nmKRSK5nyEja7-gZgf5tCZpPA0aZ9lux-z6gHxA,5089 +pandas/tests/arrays/period/test_reductions.py,sha256=gYiheQK3Z0Bwdo-0UaHIyfXGpmL1_UvoMP9FVIpztlM,1050 +pandas/tests/arrays/sparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/sparse/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_accessor.cpython-310.pyc,, 
+pandas/tests/arrays/sparse/__pycache__/test_arithmetics.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_array.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_combine_concat.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_dtype.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_libsparse.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_reductions.cpython-310.pyc,, +pandas/tests/arrays/sparse/__pycache__/test_unary.cpython-310.pyc,, +pandas/tests/arrays/sparse/test_accessor.py,sha256=EReITkC1ib-_36L6gS5UfjWai_Brp8Iaf4w7WObJZjM,9025 +pandas/tests/arrays/sparse/test_arithmetics.py,sha256=TC2Af6gA4OkRIxDTWy_5jmHNIrgsqWGmOVF707wOn8M,20152 +pandas/tests/arrays/sparse/test_array.py,sha256=HbW0y7KLlWPz3QI6gtE44ZRZF5vS8ZwjM3IjOQfNNSQ,16794 +pandas/tests/arrays/sparse/test_astype.py,sha256=JwcFBWzfg2KOv9_6GsP0oV4WWDmFugT8dHrXDWCLZwM,4763 +pandas/tests/arrays/sparse/test_combine_concat.py,sha256=3NMQXaRQc7Bxn5HhSHffcUE24GZi_VYflnFLnixOgbs,2651 +pandas/tests/arrays/sparse/test_constructors.py,sha256=N5GJ8SrwVZ4hNGaM_QlALl283EM13nSVbtO8uBRSAwY,10835 +pandas/tests/arrays/sparse/test_dtype.py,sha256=xcZIrh0SPqvPzMt9EbMF04ADSu5Xueemvl81llkjq64,6122 +pandas/tests/arrays/sparse/test_indexing.py,sha256=8INC1paA06XrCp8L63FSllr0OK48pgiKda5sOgrUhf8,10425 +pandas/tests/arrays/sparse/test_libsparse.py,sha256=_hfr36t-jm-QOhI9Gwbd6sQZI5aVWMMixHY-OYOqKuM,19293 +pandas/tests/arrays/sparse/test_reductions.py,sha256=D7R_jhlFtmH8l-tERmhtP1K3KbcAyPuyIy_Y_gVcN6Q,9721 +pandas/tests/arrays/sparse/test_unary.py,sha256=GtqeMdylKdtu-0HPxmTDVjo32riOcEtqPhjI_XK5LkM,2864 +pandas/tests/arrays/string_/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/string_/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/string_/__pycache__/test_string.cpython-310.pyc,, +pandas/tests/arrays/string_/__pycache__/test_string_arrow.cpython-310.pyc,, +pandas/tests/arrays/string_/test_string.py,sha256=2Jb7cC3pDkGA0nhEwsIgW0tNJ4meEy8htFwX3zO9Flc,23988 +pandas/tests/arrays/string_/test_string_arrow.py,sha256=JbP4dLrozNXsjwzqiiTXzbrPo7CVGYFl9nsmAEPSkFg,9140 +pandas/tests/arrays/test_array.py,sha256=xFDZAd6Lls_wI782aoOOFJGPA-XEiyE5JvftU03RRkg,15663 +pandas/tests/arrays/test_datetimelike.py,sha256=SpCxw8hR_Ocbm5tNrUcGPTTIduEGdSJPxRkyEtKZLkI,45423 +pandas/tests/arrays/test_datetimes.py,sha256=FoODE0J_-8KIBbNS5ROkEWVgNnF3PwaToqJ38YtiAYU,29112 +pandas/tests/arrays/test_ndarray_backed.py,sha256=6unFuF9S6hG5FDJDjiqbKg3rL8ItzJQHwY9vMdju4-0,2331 +pandas/tests/arrays/test_period.py,sha256=S_7TMRLEmVamhGKlVO50qJIj3OFDWRzY_oxEcXzp3zs,5572 +pandas/tests/arrays/test_timedeltas.py,sha256=VdMdnCrOL5_oUa4RxL-gaVre6Qp3iu__qNMaUb7kqfE,10673 +pandas/tests/arrays/timedeltas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/timedeltas/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/arrays/timedeltas/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/arrays/timedeltas/__pycache__/test_cumulative.cpython-310.pyc,, +pandas/tests/arrays/timedeltas/__pycache__/test_reductions.cpython-310.pyc,, +pandas/tests/arrays/timedeltas/test_constructors.py,sha256=gwBy_iuOc-EEMusjK2bITGQhCyeeI9OzI9uI8xOact0,4248 
+pandas/tests/arrays/timedeltas/test_cumulative.py,sha256=cRR6I-lIsefG95vEZb8TuXdvmw7pdPFedpBneLVKBG8,692 +pandas/tests/arrays/timedeltas/test_reductions.py,sha256=cw6I3Bxi0R2_DD2y1WD-AHTYR_ufAtN9ztCtDGypQnM,6520 +pandas/tests/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/base/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/base/__pycache__/common.cpython-310.pyc,, +pandas/tests/base/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/base/__pycache__/test_conversion.cpython-310.pyc,, +pandas/tests/base/__pycache__/test_fillna.cpython-310.pyc,, +pandas/tests/base/__pycache__/test_misc.cpython-310.pyc,, +pandas/tests/base/__pycache__/test_transpose.cpython-310.pyc,, +pandas/tests/base/__pycache__/test_unique.cpython-310.pyc,, +pandas/tests/base/__pycache__/test_value_counts.cpython-310.pyc,, +pandas/tests/base/common.py,sha256=-cLXvhzuQi0XMfU-NdqTQAiruN0MU9A9HE2goo7ZzJQ,266 +pandas/tests/base/test_constructors.py,sha256=mFPWRfNgWYQyYqhYKErJ-obd1hVWfm50aMFH5wgHkU0,5309 +pandas/tests/base/test_conversion.py,sha256=fm58V7TCp45uXmJAQRyF7yz3e6ydG6JVCQ2oanNb7xY,17685 +pandas/tests/base/test_fillna.py,sha256=q9LZhUp2HXaVQw4wSxK0VU4Z9z62WI12r9ivsZu0gOg,1522 +pandas/tests/base/test_misc.py,sha256=FwzkBajbi3JLRuzaapLTrRI803DqKgME68WWo1jhhjc,6040 +pandas/tests/base/test_transpose.py,sha256=138_O_JwwdCmfmyjp47PSVa-4Sr7SOuLprr0PzRm6BQ,1694 +pandas/tests/base/test_unique.py,sha256=tMDzvNfhUYXRl2IOYvlHo0cuFbLrE-oR1bPJc0hFAio,4370 +pandas/tests/base/test_value_counts.py,sha256=e-OG-UOKyEIxOim4TXa4rGbaOzeb8l6XemkruFfDyw4,11778 +pandas/tests/computation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/computation/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc,, +pandas/tests/computation/__pycache__/test_eval.cpython-310.pyc,, +pandas/tests/computation/test_compat.py,sha256=dHstyvdaXybrwm1WQndV9aQBwOsOvCIVZb5pxLXsYfM,872 +pandas/tests/computation/test_eval.py,sha256=tAJDNLwur6BJUEqUzOce3Gr2UcrpJWFHE7dRDi4Hfb4,71739 +pandas/tests/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/config/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/config/__pycache__/test_config.cpython-310.pyc,, +pandas/tests/config/__pycache__/test_localization.cpython-310.pyc,, +pandas/tests/config/test_config.py,sha256=T3PKV_lWTp_4ZU566fpWt_N9_tr3BfsxHlJ_vqnQiiQ,15858 +pandas/tests/config/test_localization.py,sha256=xC7SJfih_Kus5WGpSWZdwyAQR3ttgpsxxlNesbwrYfM,4479 +pandas/tests/construction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/construction/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/construction/__pycache__/test_extract_array.cpython-310.pyc,, +pandas/tests/construction/test_extract_array.py,sha256=L3fEjATPsAy3a6zrdQJaXXaQ7FvR2LOeiPJMjGNkwKQ,637 +pandas/tests/copy_view/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/copy_view/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_array.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_chained_assignment_deprecation.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_clip.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_core_functionalities.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_functions.cpython-310.pyc,, 
+pandas/tests/copy_view/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_internals.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_interp_fillna.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_methods.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_replace.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_setitem.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/test_util.cpython-310.pyc,, +pandas/tests/copy_view/__pycache__/util.cpython-310.pyc,, +pandas/tests/copy_view/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/copy_view/index/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/copy_view/index/__pycache__/test_datetimeindex.cpython-310.pyc,, +pandas/tests/copy_view/index/__pycache__/test_index.cpython-310.pyc,, +pandas/tests/copy_view/index/__pycache__/test_periodindex.cpython-310.pyc,, +pandas/tests/copy_view/index/__pycache__/test_timedeltaindex.cpython-310.pyc,, +pandas/tests/copy_view/index/test_datetimeindex.py,sha256=Sl224XCNK_lx-N6k9heXS_g2_bwmqCJJyKDv7pE_HQw,1980 +pandas/tests/copy_view/index/test_index.py,sha256=B849E4vf72tsWv11NfixJU6vjX0gpMlyvHRKSBk0V1Q,5363 +pandas/tests/copy_view/index/test_periodindex.py,sha256=qSR4PUuAHEPq1o8NUeif_MSrN43rvSeWQtsmTK6I1a4,653 +pandas/tests/copy_view/index/test_timedeltaindex.py,sha256=L1fGDsy2dmZqf_y3bXVo9mUMr1Jsli9BdScChOEQkns,661 +pandas/tests/copy_view/test_array.py,sha256=t4Tk1_-bwXOpuE80MqCkVsEsb753CPq6A87ZCI3WJBo,5840 +pandas/tests/copy_view/test_astype.py,sha256=SHB7qM1GIjguoiqzO9tPosiPVG7cftVbIzlau9lgXW0,8935 +pandas/tests/copy_view/test_chained_assignment_deprecation.py,sha256=BJqJ30DdsTUeoUZZm2kZKFOwUoz9Rkmg5AH3R6nk0F4,5750 +pandas/tests/copy_view/test_clip.py,sha256=ahKf7EUwJeYahLnPVhUuNanG4Va53Ez5kULzCdzeX60,3077 +pandas/tests/copy_view/test_constructors.py,sha256=JMWj_yBB7tNSTkUxdbhtzTVyJ03jho9imeKMXZaTb38,13950 +pandas/tests/copy_view/test_core_functionalities.py,sha256=M-ExonPcx6W-8z_TLTaP16DJtelSVeQHZKO1aWObSuA,3506 +pandas/tests/copy_view/test_functions.py,sha256=FZP92GSOEUNCVogDxngdGS2eodNwhw7w7Xs6jQgZGyg,15505 +pandas/tests/copy_view/test_indexing.py,sha256=4OUGrcgMHlai3p7tQt0sXopNYTrGdEFSUaVf6S7ZzyI,42980 +pandas/tests/copy_view/test_internals.py,sha256=mBEJH08zBch3LBtSzU7wXqBKc01uH2GTzZgUx3otcC8,5020 +pandas/tests/copy_view/test_interp_fillna.py,sha256=ztjjLWcR07fHYSaaTrrSD6S5s6rrOvUt_2S1BE3tRlQ,15235 +pandas/tests/copy_view/test_methods.py,sha256=ka2yDAm6yXDQC5rpLyxLHYq80XIQGhXUt4RUURvYjSk,71109 +pandas/tests/copy_view/test_replace.py,sha256=5KVB1Xc1qTBOCrhTL0my-36NoDUDRozzZ5bV0oyJLVk,17120 +pandas/tests/copy_view/test_setitem.py,sha256=ewuJiYuD9VI2wuFZiDjGYVP7gnlP4H9uVFnjjelW55U,4822 +pandas/tests/copy_view/test_util.py,sha256=ClWLprMJhf6okUNu9AX6Ar9IXZgKkY0nNuDzHRO70Hk,385 +pandas/tests/copy_view/util.py,sha256=oNtCgxmTmkiM1DiUxjnzTeAxCj_7jjeewtby-3gdoo0,899 +pandas/tests/dtypes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/dtypes/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/dtypes/__pycache__/test_common.cpython-310.pyc,, +pandas/tests/dtypes/__pycache__/test_concat.cpython-310.pyc,, +pandas/tests/dtypes/__pycache__/test_dtypes.cpython-310.pyc,, +pandas/tests/dtypes/__pycache__/test_generic.cpython-310.pyc,, +pandas/tests/dtypes/__pycache__/test_inference.cpython-310.pyc,, +pandas/tests/dtypes/__pycache__/test_missing.cpython-310.pyc,, +pandas/tests/dtypes/cast/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pandas/tests/dtypes/cast/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_can_hold_element.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_construct_from_scalar.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_construct_ndarray.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_construct_object_arr.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_dict_compat.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_downcast.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_find_common_type.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_infer_datetimelike.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_infer_dtype.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_maybe_box_native.cpython-310.pyc,, +pandas/tests/dtypes/cast/__pycache__/test_promote.cpython-310.pyc,, +pandas/tests/dtypes/cast/test_can_hold_element.py,sha256=2zASUgxB7l8ttG2fKjCpIjtt_TQ7j4NJ2L9xFzcyUPU,2408 +pandas/tests/dtypes/cast/test_construct_from_scalar.py,sha256=INdOiQ7MowXLr6ZReCiq0JykUeFvRWocxk3f-ilk9v0,1780 +pandas/tests/dtypes/cast/test_construct_ndarray.py,sha256=Z4tTuoWxUoXiMVq8sJx2PPGIyRoz1dzzRIC1w8npDKQ,1303 +pandas/tests/dtypes/cast/test_construct_object_arr.py,sha256=eOmUu4q0ihGTbYpCleoCnYtvwh1TBCEZQQjLeJaUMNA,717 +pandas/tests/dtypes/cast/test_dict_compat.py,sha256=qyn7kP5b14MywtqOUL5C-NOvjf2qK4PsXGpCvqmo-4E,476 +pandas/tests/dtypes/cast/test_downcast.py,sha256=CzuywDTWQ3xTi__4Nd36qgcx6mDs2tpYUsVztduVC9s,2778 +pandas/tests/dtypes/cast/test_find_common_type.py,sha256=c__GbgnRawwgqWut8g5Q928en8-_O3oTZEQVbqQ8MrE,5226 +pandas/tests/dtypes/cast/test_infer_datetimelike.py,sha256=6vor_eqEbMKcBLEkfayXzVzwwf5BZcCvQhFZuqhvyKU,603 +pandas/tests/dtypes/cast/test_infer_dtype.py,sha256=WCLts2TG3Zs4V69O2f_HYmuXEkSHPUXVTIuGpVvICuY,6001 +pandas/tests/dtypes/cast/test_maybe_box_native.py,sha256=uEkoLnSVi4kR8-c5FMhpEba7luZum3PeRIrxIdeGeM4,996 +pandas/tests/dtypes/cast/test_promote.py,sha256=B4dgs3EWIm8qKuoQMn6FNaGGf_qAm_EAm4l2X3cHDMM,20755 +pandas/tests/dtypes/test_common.py,sha256=8XCSOz1J9y9K9Dxe3c55YOy-ONRlvRc3CWF0EVaxEa8,26390 +pandas/tests/dtypes/test_concat.py,sha256=vlsumyKcJ7b8EdJKONU5txCA34zMaoKDvA0KmcuP8XU,1799 +pandas/tests/dtypes/test_dtypes.py,sha256=7GaJl1ZXzioL7mll-RdNie3eVoki91FogtyBYQdFRkQ,43847 +pandas/tests/dtypes/test_generic.py,sha256=TzUIinbvMdsyxH_y2VYQ2XCYLQXh005qij9LWWF9bDc,4842 +pandas/tests/dtypes/test_inference.py,sha256=uNEZEE9cgR2T3ZTe0pBld9rnbupyx5XilbQj_PmiB20,70781 +pandas/tests/dtypes/test_missing.py,sha256=_FPqIAM5yZbYSlcndWuaItNVkgs3ylKEPb-o63QRzEE,30750 +pandas/tests/extension/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/extension/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/extension/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_arrow.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_categorical.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_common.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_datetime.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_extension.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_interval.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_masked.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_numpy.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_period.cpython-310.pyc,, +pandas/tests/extension/__pycache__/test_sparse.cpython-310.pyc,, 
+pandas/tests/extension/__pycache__/test_string.cpython-310.pyc,, +pandas/tests/extension/array_with_attr/__init__.py,sha256=bXkwWSW6GRX8Xw221iMyaQOQVaWmyuRP3tGhvjXtiV8,149 +pandas/tests/extension/array_with_attr/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/extension/array_with_attr/__pycache__/array.cpython-310.pyc,, +pandas/tests/extension/array_with_attr/__pycache__/test_array_with_attr.cpython-310.pyc,, +pandas/tests/extension/array_with_attr/array.py,sha256=Vo6gYBpAJHAztlq8m3gH-9GqKUkxSOHg2fk6cApHgFE,2496 +pandas/tests/extension/array_with_attr/test_array_with_attr.py,sha256=TuuBA1lCxjVOgWsWM9jhgc-PyGuXzajO3UWWKZEquZA,1373 +pandas/tests/extension/base/__init__.py,sha256=5OjQDaQnbihqkwRdCBAV-eF-QRE8p3V4frJ764P5-jQ,4353 +pandas/tests/extension/base/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/accumulate.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/base.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/casting.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/constructors.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/getitem.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/groupby.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/index.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/io.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/methods.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/missing.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/printing.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/reshaping.cpython-310.pyc,, +pandas/tests/extension/base/__pycache__/setitem.cpython-310.pyc,, +pandas/tests/extension/base/accumulate.py,sha256=66bubZOQfkzzpcca1jz2WVky4mxi4uGyq8TsQpV288k,1411 +pandas/tests/extension/base/base.py,sha256=aSfTPvuvzzQUxEIrGUASWuwcVv6Uw5bvkFXvqjhRV1M,35 +pandas/tests/extension/base/casting.py,sha256=KWGZGeC1Kh2mDXUto7Xap6lkSja8661Qi1g58HgFpSM,3077 +pandas/tests/extension/base/constructors.py,sha256=Y2Pny2SrEj7jsCEUN6KRKi_9G2HA7RIfVs5GVf9Nz5w,5609 +pandas/tests/extension/base/dim2.py,sha256=8Ni4nnBW5wxH3e6f0kX1yTDjecmd12sAZdkBt-1tTss,11992 +pandas/tests/extension/base/dtype.py,sha256=4v3RO3H-2xDIPujcTYdjb0AzWpctqALOXUHLHyHBLDg,4006 +pandas/tests/extension/base/getitem.py,sha256=leq9dxp_KexAv7mhexLCWXcIMKNBPOVfhFv6Nuc5PkQ,15673 +pandas/tests/extension/base/groupby.py,sha256=5A_X0G3x1MD13QXpX-v0nYABeU9TRINcvOwVhd3JBpQ,6465 +pandas/tests/extension/base/index.py,sha256=fD5Jugbt_39nZ1eVjPNdAgoDRuNXTcnZB9lA4w687vM,517 +pandas/tests/extension/base/interface.py,sha256=rdJUhxcnMwnHUoGzhj0_89ik5JETiTz0kjDmepTU5lU,4699 +pandas/tests/extension/base/io.py,sha256=SNvCa6LXo-4V92Bm6A1RZPXwfDdu3hTWLje8_D3Xwo8,1475 +pandas/tests/extension/base/methods.py,sha256=xQvGXCoxo-_1A-fonAgDbB7GyFeg22RAxbjxTeL2lnM,26723 +pandas/tests/extension/base/missing.py,sha256=D4by9EHLsc32icNeDutH7JdoGyHE8pD0XPM2o7FiGQU,6606 +pandas/tests/extension/base/ops.py,sha256=EmsLXfCMbJf4RAru_ewAhc_Epd-ZAROKygwjBf7EzYg,11058 +pandas/tests/extension/base/printing.py,sha256=pVwGn1id_vO_b9nrz3M9Q_Qh9vqDqC0eZHom0_oGr-A,1109 +pandas/tests/extension/base/reduce.py,sha256=IaF6nI-fMTYzG4fNVUoPei_lf9vCHHIf0NnKCssnYlk,5968 
+pandas/tests/extension/base/reshaping.py,sha256=Hf8czQWubrTjZrkYTL3FdOh6h97pCQaN5fK49GbRyRA,13931 +pandas/tests/extension/base/setitem.py,sha256=VcSUUuSqnLftzeeaIlBJIeoo841vVenX_FL5JceS91g,15075 +pandas/tests/extension/conftest.py,sha256=nvR8zq82gsIqh5rbOWj7_sOYLgL8J3M0loXw_L-OGag,5061 +pandas/tests/extension/date/__init__.py,sha256=-pIaBe_vmgnM_ok6T_-t-wVHetXtNw30SOMWVWNDqLI,118 +pandas/tests/extension/date/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/extension/date/__pycache__/array.cpython-310.pyc,, +pandas/tests/extension/date/array.py,sha256=da7NoKcUFxS78IIEAsY6kXzL-mOCrV0yyhFWQUN6p8k,5971 +pandas/tests/extension/decimal/__init__.py,sha256=wgvjyfS3v3AHfh3sEfb5C8rSuOyo2satof8ESijM7bw,191 +pandas/tests/extension/decimal/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/extension/decimal/__pycache__/array.cpython-310.pyc,, +pandas/tests/extension/decimal/__pycache__/test_decimal.cpython-310.pyc,, +pandas/tests/extension/decimal/array.py,sha256=8YbmByqfIzEXW9i3-Ct6VM6M0QkmEEB9CQp79udfmYw,9694 +pandas/tests/extension/decimal/test_decimal.py,sha256=82ggMNpjkSYDu4Tk3vmS0zTwn8AZ3VqCi-MkQTP2paA,19459 +pandas/tests/extension/json/__init__.py,sha256=JvjCnVMfzIUSoHKL-umrkT9H5T8J3Alt8-QoKXMSB4I,146 +pandas/tests/extension/json/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/extension/json/__pycache__/array.cpython-310.pyc,, +pandas/tests/extension/json/__pycache__/test_json.cpython-310.pyc,, +pandas/tests/extension/json/array.py,sha256=Lt-hgallWZaJiaDjBbkE7ztKDM9S8FFj23GzxrxxIkY,8335 +pandas/tests/extension/json/test_json.py,sha256=usY52SN9Yd8lUugiCxI1B7DB06l2Lc8mr9tbxu9iOgI,17951 +pandas/tests/extension/list/__init__.py,sha256=FlpTrgdAMl_5puN2zDjvdmosw8aTvaCD-Hi2GtIK-k0,146 +pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/extension/list/__pycache__/array.cpython-310.pyc,, +pandas/tests/extension/list/__pycache__/test_list.cpython-310.pyc,, +pandas/tests/extension/list/array.py,sha256=ngSHFQPRfmOkDOo54sX-l5JjQvr7ZTE9OzS9aPicc3o,4001 +pandas/tests/extension/list/test_list.py,sha256=VFPo5wGu-UvtAOFx3hoxILmRdI9kTOxCIIJM4fqgRBk,671 +pandas/tests/extension/test_arrow.py,sha256=Fil3KeJKWxmy9_Yb7Tkwl5uiK-3EwaSnptfpwpfCfm8,116199 +pandas/tests/extension/test_categorical.py,sha256=DhFKjDxrDfg4q6LXIgdIGVnv7VIK3IxUHsQ0rK_nQfE,6828 +pandas/tests/extension/test_common.py,sha256=4LO2slr0E0zODDK_Es4g9bPBH1U77nI8x9O1Mdddn1U,2975 +pandas/tests/extension/test_datetime.py,sha256=eBTSFWcQp2M1TgYzr01F-KQrdCJLHPrcPMGvuCsIj1s,4614 +pandas/tests/extension/test_extension.py,sha256=eyLZa4imT1Qdd7PCbDX9l0EtDu39T80eCrSre2wmTuE,559 +pandas/tests/extension/test_interval.py,sha256=TFLuAsCeXdkWLPfyYY2v4IdhvI7plwcaatL8LJl9kGI,2711 +pandas/tests/extension/test_masked.py,sha256=jrBlSzzwlXMAYj3fYXzDhiOKwUW7WBzyHLp-ce4VDf8,14338 +pandas/tests/extension/test_numpy.py,sha256=eFM6D2CiLgrsmwN5KQm_kYrzIdG7lmFXUuUiNoFrelE,15586 +pandas/tests/extension/test_period.py,sha256=e3RIO2xBPhF-PxPZtPM8VkVhkjYdUNtch9vcoRpHuEE,3528 +pandas/tests/extension/test_sparse.py,sha256=HIUEftSLmtr-LV7xrkP99vKwNj2zyXv4z1Ij_LWJd7Q,18011 +pandas/tests/extension/test_string.py,sha256=v3DaptVQ4lBckrCg_nLfWlJNBRHVaDjc8fE1bIlN4rU,8165 +pandas/tests/frame/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/frame/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/frame/__pycache__/common.cpython-310.pyc,, +pandas/tests/frame/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_alter_axes.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_api.cpython-310.pyc,, 
+pandas/tests/frame/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_arrow_interface.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_block_internals.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_cumulative.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_iteration.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_logical_ops.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_nonunique_indexes.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_npfuncs.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_query_eval.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_reductions.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_repr.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_stack_unstack.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_subclass.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_ufunc.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_unary.cpython-310.pyc,, +pandas/tests/frame/__pycache__/test_validate.cpython-310.pyc,, +pandas/tests/frame/common.py,sha256=BmnEMlREF7G0B5zdaJRsdzqIRdh8diiTisBbCVI6Fp0,1873 +pandas/tests/frame/conftest.py,sha256=q2Oh2Ej-YIJvDdhsPCNDGvtTr5XWPRKZ2sylqmv5dys,2644 +pandas/tests/frame/constructors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/frame/constructors/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/frame/constructors/__pycache__/test_from_dict.cpython-310.pyc,, +pandas/tests/frame/constructors/__pycache__/test_from_records.cpython-310.pyc,, +pandas/tests/frame/constructors/test_from_dict.py,sha256=CTTFXUB5bamlx91XWQnmmG3DIlY8v6Qnc1ycZvjClT8,8152 +pandas/tests/frame/constructors/test_from_records.py,sha256=O6NwCZK5wa9w9a8Om6LHA2kWSLfTerakkjrgYXGJIao,18601 +pandas/tests/frame/indexing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/frame/indexing/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_coercion.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_delitem.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_get.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_get_value.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_getitem.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_insert.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_mask.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_set_value.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_setitem.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_take.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_where.cpython-310.pyc,, +pandas/tests/frame/indexing/__pycache__/test_xs.cpython-310.pyc,, +pandas/tests/frame/indexing/test_coercion.py,sha256=rHCkOLIlUkukh-P0XzPMtD4B8Lha3i1hqdvvZwCIAm8,5991 +pandas/tests/frame/indexing/test_delitem.py,sha256=-YERBfZbhTZ3eKzjmWln8AjoQEO7Yvae6elau4njhM0,1832 +pandas/tests/frame/indexing/test_get.py,sha256=N00_igU25_HjYuvAqDQKqBpqbz6HjB97o9Exvbo9BzM,662 +pandas/tests/frame/indexing/test_get_value.py,sha256=A-GbCHlbDfVPGB10dNGnGg4DtrKrlRbRspYfuDTUmPM,679 +pandas/tests/frame/indexing/test_getitem.py,sha256=9xogr1RzStjgP4HvWm_tm9VWUol660FgSmBwN-wC5Tw,15002 +pandas/tests/frame/indexing/test_indexing.py,sha256=XIcq7eJfuJgAsI1ZgZ-Eojw32fSBzhrQqQsb4aMycEk,70208 
+pandas/tests/frame/indexing/test_insert.py,sha256=0XsNprKi0XQ9od6dOImwzQwh8YMdgdE0BZFGFHGPEYg,4074 +pandas/tests/frame/indexing/test_mask.py,sha256=1Bql-TBfyBDmlXkECYXk-ZH_y4SPSOZYjCR2Ex7Km1k,4862 +pandas/tests/frame/indexing/test_set_value.py,sha256=q0Bzs0u_q5G6VzFvU5mRSxohG5FTh4sw7sRrRdhY0YM,2622 +pandas/tests/frame/indexing/test_setitem.py,sha256=z5mPGNnxv5nNlpPJiDeXbtdURAQeXIeddsKzPd-3OWE,51434 +pandas/tests/frame/indexing/test_take.py,sha256=SMBM5BO7ybxTq8gTAX1Qg1UW8vcNiRrHTQwrt1f-Rig,3230 +pandas/tests/frame/indexing/test_where.py,sha256=Y3oOgYjYJxUAHn_PiW2eWHrz1tWNVuNXtJcf3RJE2PY,38125 +pandas/tests/frame/indexing/test_xs.py,sha256=86w_A-gePZXZETqs9UYKfEZrNKXyvmd0DwScTbHH9Dg,15980 +pandas/tests/frame/methods/__init__.py,sha256=M6dCS5d750Fzf9GX7xyNka-SZ2wJFCL66y5j-moHhwo,229 +pandas/tests/frame/methods/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_add_prefix_suffix.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_align.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_asfreq.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_asof.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_assign.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_at_time.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_between_time.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_clip.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_combine.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_combine_first.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_compare.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_convert_dtypes.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_copy.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_count.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_cov_corr.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_describe.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_diff.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_dot.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_drop.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_drop_duplicates.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_droplevel.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_dropna.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_dtypes.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_duplicated.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_equals.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_explode.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_fillna.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_filter.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_first_and_last.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_first_valid_index.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_get_numeric_data.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_head_tail.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_infer_objects.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_info.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_interpolate.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_is_homogeneous_dtype.cpython-310.pyc,, 
+pandas/tests/frame/methods/__pycache__/test_isetitem.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_isin.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_iterrows.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_map.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_matmul.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_nlargest.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_pct_change.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_pipe.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_pop.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_quantile.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_rank.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_reindex.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_reindex_like.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_rename.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_rename_axis.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_reorder_levels.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_replace.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_reset_index.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_round.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_sample.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_select_dtypes.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_set_axis.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_set_index.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_shift.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_size.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_sort_index.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_sort_values.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_swapaxes.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_swaplevel.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_to_csv.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_to_dict.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_to_dict_of_blocks.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_to_numpy.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_to_period.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_to_records.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_to_timestamp.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_transpose.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_truncate.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_tz_convert.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_tz_localize.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_update.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_value_counts.cpython-310.pyc,, +pandas/tests/frame/methods/__pycache__/test_values.cpython-310.pyc,, +pandas/tests/frame/methods/test_add_prefix_suffix.py,sha256=iPfzSPx0CArx79na7xcI9ZcPTAwq73IdOCcREVO7k4E,1910 +pandas/tests/frame/methods/test_align.py,sha256=FwQrqdCesXbgkQ8bfYPlf3LfK-Sdvud9pHEC2tCnwQ0,17941 +pandas/tests/frame/methods/test_asfreq.py,sha256=MCJkjukZtOVCauc4FZDbor1h99AvG4eMNfQZW8L1h5c,9341 
+pandas/tests/frame/methods/test_asof.py,sha256=bkK2i5xcGvz2oy1MVbf_C1oVixMy_1qYqYcuOg-K2Bk,6732 +pandas/tests/frame/methods/test_assign.py,sha256=xFGREzLhP1wj3MowBimeYbMWBNiII0280DiOXI6WDB0,2982 +pandas/tests/frame/methods/test_astype.py,sha256=lIFj0WqQvEZVESqYOfP8flVquMoVEYp1ubJGYaCZJgQ,32102 +pandas/tests/frame/methods/test_at_time.py,sha256=JrQYFlNIIyW1xDvgmGE7zRfjXnmKMELh9Stiw0btGbM,4708 +pandas/tests/frame/methods/test_between_time.py,sha256=rD-k1a4LVOa-nMlLXOaZO7iTa3hL_C9tghqt8DWW0Qs,8083 +pandas/tests/frame/methods/test_clip.py,sha256=6h1zwE0SKP-uknyuE5Pi5X9vTS4L5ZBts_iSbs6cSL8,7554 +pandas/tests/frame/methods/test_combine.py,sha256=wNaQqokqHsJmrZ9NQIao58ZT0hSkkTH14I7_Oq8tADs,1359 +pandas/tests/frame/methods/test_combine_first.py,sha256=K0YQAGhGyaK_j5tmP9IbQx8zO56ID9GhbTaT9v-3T1M,19726 +pandas/tests/frame/methods/test_compare.py,sha256=j7Z_-yBVts4-xl1fVsJtOBAXYbLao2hwzI2x3aniFz0,9615 +pandas/tests/frame/methods/test_convert_dtypes.py,sha256=sLJ-7LM95vWBzEUFKuWfpj9j5TmX5339pRXZW3dFXCw,7958 +pandas/tests/frame/methods/test_copy.py,sha256=QeDoh44tS__y9LK7LwUBAc-SD5RS-phPA4eYWPl5yIg,1873 +pandas/tests/frame/methods/test_count.py,sha256=avzIu1dZ3pls4SM6g173M7Q4i8zMUzeAVI2EeIzWC0c,1083 +pandas/tests/frame/methods/test_cov_corr.py,sha256=ydpNMfWcjDf6zNVvLGFc8eOHWs_scU6rvMobn3EPm2U,17873 +pandas/tests/frame/methods/test_describe.py,sha256=DAY04ar1XixwEscl6taSddki4Y_rYnQnV8zF61-z1ZY,14500 +pandas/tests/frame/methods/test_diff.py,sha256=Dyz4lYFWrLVm5fN_B0Z1xZ_l8gyGFQhzwhmRKMuA6io,10099 +pandas/tests/frame/methods/test_dot.py,sha256=tfZD1HWlbO78DEgdjpBctgjWHtzjC3K9essVl_5XBMA,4623 +pandas/tests/frame/methods/test_drop.py,sha256=41RTmD-suQbCnZjpFcG56VlIx1ZP-ReC-j5YIhpJ3WA,20362 +pandas/tests/frame/methods/test_drop_duplicates.py,sha256=GSJ7VundpGtt6KBhl2mld6CwNc9La_pGRwXuNNiRE9Y,14503 +pandas/tests/frame/methods/test_droplevel.py,sha256=L1gAMjYYPB6eYmSppXfbwPVKa3HCNofqPVUZ3gxLldA,1253 +pandas/tests/frame/methods/test_dropna.py,sha256=9l8GBOLpvmEowzFaq0kRxN3815gJCuNamX4S5dn5Mmw,10315 +pandas/tests/frame/methods/test_dtypes.py,sha256=YrKxnM9gY4UlcsXjLBLJRTHE_8CthS71mIMZH3ubPpg,5093 +pandas/tests/frame/methods/test_duplicated.py,sha256=1DQFuK4KjfSpsl8W0jXne8PPUsL1nFe3lI_9VYBd33I,3305 +pandas/tests/frame/methods/test_equals.py,sha256=AFmbc9SmfgpQV0PD9hCXuktRCRkNvDF5S1Z7z31E2xE,2996 +pandas/tests/frame/methods/test_explode.py,sha256=ZK-uow3VD8csy96x6hkDItUOh4U2kkYSwrdH83tkjAM,8824 +pandas/tests/frame/methods/test_fillna.py,sha256=GAPSWMAQ8MXdOVwj3ionMLvG8W_N9FolE6cSnU39uSM,34156 +pandas/tests/frame/methods/test_filter.py,sha256=oT63-WLaQv3isFsWJFtqZwxiw2J-7xZwyOOxpn-kTNo,5422 +pandas/tests/frame/methods/test_first_and_last.py,sha256=hKvLBnx3YtQLilE_9PlL9804dAI6E7Hk2gHDgXqbcsU,5349 +pandas/tests/frame/methods/test_first_valid_index.py,sha256=DRoZKic0mpCom31NeygnBftZlxc6wsCT4-DN2KV5wWI,2574 +pandas/tests/frame/methods/test_get_numeric_data.py,sha256=jXqHisuyym78GRZTo0c2uN1U4YPcMkXUJ9eDRZE8BPA,3313 +pandas/tests/frame/methods/test_head_tail.py,sha256=quuFkpS5IgonJDSb9_Po4eO3Wi5wlcNKq723EMYL6Ns,1935 +pandas/tests/frame/methods/test_infer_objects.py,sha256=LNOf2VJsV17FDT9ogEDba6la414yUmm5z_7B97nLN24,1241 +pandas/tests/frame/methods/test_info.py,sha256=gvIGMmte029dnuvDjFxuVs20VblEBuQof2-xjzSe6EI,16867 +pandas/tests/frame/methods/test_interpolate.py,sha256=8A7qxsIgVgdC9-P_WClkvIWbRObBd4aw2Hf78elyx4c,20120 +pandas/tests/frame/methods/test_is_homogeneous_dtype.py,sha256=8Ndf_2Z07SAqrN0ookvH0PDAmECGVJkUieeqSaz2aRQ,1455 
+pandas/tests/frame/methods/test_isetitem.py,sha256=VoxA-yXow_CRikJ1tlni1PsAAOT1D2X8PtTZyJOGQXU,1428 +pandas/tests/frame/methods/test_isin.py,sha256=P2TVUsL_p366aSxwWcq27VlT9zFstOXlsJSTFlw2n20,7599 +pandas/tests/frame/methods/test_iterrows.py,sha256=hfFRA20tRYmXJAoJZLGI04J131Z7QaaEbINm3FwfVbQ,338 +pandas/tests/frame/methods/test_join.py,sha256=oGHrJh9Gb6k8Cgg1iHNVoJuamkIHqnzs5EoU_XdY9hM,17523 +pandas/tests/frame/methods/test_map.py,sha256=UIY-wd0ozerUNyILMavuJ47qdWwp8dREjeKeeR8zvc8,5994 +pandas/tests/frame/methods/test_matmul.py,sha256=i1BG41S9da2R0nATvc3kZXsiwl5t6MHDFIb0IJ4lAbQ,3137 +pandas/tests/frame/methods/test_nlargest.py,sha256=6G_UUSJT858jxia3p92pf4jivcg6yhj4xiXRZ7EUeW0,8195 +pandas/tests/frame/methods/test_pct_change.py,sha256=s0Ho617mHdRHBEV-9cRAz3_Z_Q5BzTd_cd6MuobTlbo,6530 +pandas/tests/frame/methods/test_pipe.py,sha256=ts5ghk8g6PYXKpdsBdovBXxPGO2qq75FEVzBgjAVfRw,1023 +pandas/tests/frame/methods/test_pop.py,sha256=e0CBRelgiASCGdB1NFRMSr04BbaggjyHAZYvmUUh1sM,2223 +pandas/tests/frame/methods/test_quantile.py,sha256=HK6wwPSW-yLLkxbj8Cn0C1nPho8WBsjPhcvGtiDCaPM,36280 +pandas/tests/frame/methods/test_rank.py,sha256=SnZTqSgarPjHAFSnyLCSZevOBMyXNb13QAAI-qz0Z1c,17566 +pandas/tests/frame/methods/test_reindex.py,sha256=tmNvHk4dcGnrZ81EA5UGtPq6LdSa0Y64yQ5MzIZoKP8,48343 +pandas/tests/frame/methods/test_reindex_like.py,sha256=2qgqaHDSEKYO1hwE9MaPTFJhl4m7rejHyuOcrmvqaBg,1187 +pandas/tests/frame/methods/test_rename.py,sha256=P-SIwbh-n6QdPqFns4ebPtGFwdXd7vmeWt5_dwo0Kq4,15354 +pandas/tests/frame/methods/test_rename_axis.py,sha256=90QFtDi0p-8bxEdFfLs75EtJQtJEOTmCdXoiS7h9F-Y,4091 +pandas/tests/frame/methods/test_reorder_levels.py,sha256=VJVEdltyRoz89mQR1Xp0A9yKlTeEFIpsPaKWQujT-C8,2729 +pandas/tests/frame/methods/test_replace.py,sha256=IW0My1nADFCpbkjMFLWfXT36Nxbcs9m7FSiksSHM4jc,64846 +pandas/tests/frame/methods/test_reset_index.py,sha256=yo9nZBpcOblU-3bgfmTg3-CQT7p-p3mciEroMVDfmDE,27931 +pandas/tests/frame/methods/test_round.py,sha256=dcPlBxHqpKJ6JTBJskvw2CE3IYfa-Xt020jfSslwLjs,7978 +pandas/tests/frame/methods/test_sample.py,sha256=vPDSUU6oBD5X2C5rKUhIHk6o2xftm0zzMTwvuipelRM,13431 +pandas/tests/frame/methods/test_select_dtypes.py,sha256=SsvEwmjNFFwfOqxMlA-Z72qHJDtxNtvFWbtV-sbIODg,16638 +pandas/tests/frame/methods/test_set_axis.py,sha256=xiyZyjgDIO0B5HWGLeV_fVDyXj3YMDBfLyEDh5rQvcw,4608 +pandas/tests/frame/methods/test_set_index.py,sha256=h2a7zL_ZgN6zoRNcAV3QrgfqI59PR5jsiPPGC-V8F_U,26598 +pandas/tests/frame/methods/test_shift.py,sha256=unBlSwoV0OwFfysSr8ZKrqrrfoH7FRbPlGp18XW84OQ,27731 +pandas/tests/frame/methods/test_size.py,sha256=zFzVSvOpjHkA9_tEB2mPnfq9PJIBuBa4lCi6BvXbBDE,571 +pandas/tests/frame/methods/test_sort_index.py,sha256=BbCjfh_Zke1R7M9fPoRASORNfXS2KZ0IgWOF6jNnor0,34826 +pandas/tests/frame/methods/test_sort_values.py,sha256=NTmGhvm_flc6gzdtOeAOXsO3ai6K3peyH476Sj-qfLA,32982 +pandas/tests/frame/methods/test_swapaxes.py,sha256=-IuPIvjEz7X8-qxnWy1no5hG2WklPn6qERkmQQ-gAv0,1466 +pandas/tests/frame/methods/test_swaplevel.py,sha256=Y8npUpIQM0lSdIwY7auGcLJaF21JOb-KlVU3cvSLsOg,1277 +pandas/tests/frame/methods/test_to_csv.py,sha256=xkx76kpxWG7ZK6hcTEb0etllFg5_uSy0dLo1O6kfugI,51721 +pandas/tests/frame/methods/test_to_dict.py,sha256=BEKNs7rUFnd_cZZ7wQz0AmKJ7U-7KsEI6V3eApb1chw,18640 +pandas/tests/frame/methods/test_to_dict_of_blocks.py,sha256=dFL2fLKCQl-GXp2ephKiYgwjuQI_SEvsrG13RUIo1gE,2524 +pandas/tests/frame/methods/test_to_numpy.py,sha256=47-d29xA6qzZYnd08lBaKK3yj9aBZ9TKkoqgguGl1oQ,1795 +pandas/tests/frame/methods/test_to_period.py,sha256=Xiebi3IA_vUKrFNftLBkhF4N0gMbpI76ZCQpqhgO4iU,2863 
+pandas/tests/frame/methods/test_to_records.py,sha256=35K3btxiApCcRVPG429FZAqqXIKRHKx4bVc8Sg3DCmE,18553 +pandas/tests/frame/methods/test_to_timestamp.py,sha256=1j6yjp4_WlxcDXSBKOk-IfrEbWtC4HvbIIHeM2x25ys,5866 +pandas/tests/frame/methods/test_transpose.py,sha256=JNhwvci37DlDMYHBaJz4Km998vw8NGfl7f4UYwwnsmM,6830 +pandas/tests/frame/methods/test_truncate.py,sha256=ZTnK8yZYqEhG3pe8KVwmJf4K890RMu8a60A4nC_qznM,5216 +pandas/tests/frame/methods/test_tz_convert.py,sha256=vsJm9M19ciCPqG0t5d_BlxuCmDphDkgb75SuYPtOhmE,4707 +pandas/tests/frame/methods/test_tz_localize.py,sha256=rMvd0K3W7N24qn7Q_tTkvbz7dOemIv3w89hthc6c5Y0,2084 +pandas/tests/frame/methods/test_update.py,sha256=npFHtPQmLMdhHa5xHbEL_zxXBuL4YK23CAnfIhTGn1k,6904 +pandas/tests/frame/methods/test_value_counts.py,sha256=YpYs0AZ8YgJE75W84O1KMfhd5oqpiuIJvLjz_YIz2KE,5556 +pandas/tests/frame/methods/test_values.py,sha256=ASljAwM9CEBMX6bA3FqWoSv4sOcRjuz8ZTfLSjo_F6Y,9406 +pandas/tests/frame/test_alter_axes.py,sha256=yHyCho1zs84UETsGGtw-gf3eTIyPj9zYUUA7wHTdRVk,873 +pandas/tests/frame/test_api.py,sha256=tn9xTbXzsDYRjqK3QJmBh64vNA8eV0JoGV2YAJrxTnU,12439 +pandas/tests/frame/test_arithmetic.py,sha256=xS3sOPjFEzALlHpwe-TMARDdV0xkpwBQ8NkqKMdhA9I,73152 +pandas/tests/frame/test_arrow_interface.py,sha256=KpAkREuJwWnlDBC45RvqogU_o1NSG0k44oMQAbgWCNw,1273 +pandas/tests/frame/test_block_internals.py,sha256=eG32ki-zsd8rMs7mI6Lc7j_2gM4Ga5aNqh2M1rTCZNA,16432 +pandas/tests/frame/test_constructors.py,sha256=_IWa5cMtmTMZcqH8Cs5M36Et2npqCskwgvQbx1oXbb8,123649 +pandas/tests/frame/test_cumulative.py,sha256=Ku20LYWW1hrycH8gslF8oNwXMv88RmaJC7x0a5GPbYw,2389 +pandas/tests/frame/test_iteration.py,sha256=BuyW6QePxoNZl-Cgxp5WLah_e-kSK2hsN8Gud_g0aoc,5077 +pandas/tests/frame/test_logical_ops.py,sha256=pUkVXQdIekK-OcmaqprYMp7cwND3t84Y2U25aMtYUq0,7352 +pandas/tests/frame/test_nonunique_indexes.py,sha256=wtBZpClv_46EwBSk59H1iXay2SR6Wv7m4ajh0tjisJg,11937 +pandas/tests/frame/test_npfuncs.py,sha256=DRLl7MSP7e5vRrVs3FgOooI4pZNmECurbVqkAAqvlUI,2751 +pandas/tests/frame/test_query_eval.py,sha256=Pe0xN0v1H4YPz55WIZs5tyV6zJwRkrLak_x1xXH6dBQ,54906 +pandas/tests/frame/test_reductions.py,sha256=j2qm6fsJfT8z0B1aeto9QbbY4LRSA36158tVk-033QY,77136 +pandas/tests/frame/test_repr.py,sha256=ycc0HNsBrKxPA88FIQturuMotBtqJco-ppO1NZE4JaI,16942 +pandas/tests/frame/test_stack_unstack.py,sha256=oRnTFLWq9JneBPR8vOElUmVgAVE120r71F1Sf5fhKhI,97375 +pandas/tests/frame/test_subclass.py,sha256=XqNKwBK-Zj06S4ATYGd59nKPzrzu8jmk_VbpStvB7ts,27880 +pandas/tests/frame/test_ufunc.py,sha256=YcUXnFE2n7lO5XN9aUvOJfeJyGqIDui0VhH-H1gUf1I,10554 +pandas/tests/frame/test_unary.py,sha256=fkB8LKCsctsyM9WS0g4JDiTD56gJy2l-cK7NcIQ2FHc,6603 +pandas/tests/frame/test_validate.py,sha256=hSQAfdZOKBe2MnbTBgWULmtA459zctixj7Qjy6bRg20,1094 +pandas/tests/generic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/generic/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc,, +pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc,, +pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc,, +pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc,, +pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc,, +pandas/tests/generic/__pycache__/test_series.cpython-310.pyc,, +pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc,, +pandas/tests/generic/test_duplicate_labels.py,sha256=-t-hhIiI3E1Byv1-jjvXDRAS8_tJzZaOIf-EsK6hrXg,14506 +pandas/tests/generic/test_finalize.py,sha256=HWv668IFuaSNElG3g1J5DL-wMHpU5T_iQYTOkaJA80U,28852 
+pandas/tests/generic/test_frame.py,sha256=h6r5f3L-_V4JV5pP0AoFyvjtJP1ng7DJplN6Rrx4gzI,7332 +pandas/tests/generic/test_generic.py,sha256=MUhx9EVhCuo-fTOYRH2nzhQH8ip9-5QaNMjEPWx-NI4,17447 +pandas/tests/generic/test_label_or_level_utils.py,sha256=PhsVWjYjOHPZRqX4mwUc7jlOH3tnd7p9pkMFh87CtKU,10244 +pandas/tests/generic/test_series.py,sha256=oyFxVdh9G2GCBiTQktXNuafAw0wrbXs6Af8UnwUUiow,5677 +pandas/tests/generic/test_to_xarray.py,sha256=qGohtFgMtA8FS5y9AtnJQsd_-4Mg_Oach84Z8qDbHeg,4265 +pandas/tests/groupby/__init__.py,sha256=O41hwVGLyFtIhv-zbe2JBZiqD3heGA7LOk10RuxfcKc,659 +pandas/tests/groupby/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_all_methods.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_api.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_apply.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_apply_mutate.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_bin_groupby.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_categorical.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_counting.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_cumulative.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_filters.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_groupby.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_groupby_dropna.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_groupby_subclass.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_grouping.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_index_as_string.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_libgroupby.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_missing.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_numba.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_numeric_only.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_pipe.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_raises.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_reductions.cpython-310.pyc,, +pandas/tests/groupby/__pycache__/test_timegrouper.cpython-310.pyc,, +pandas/tests/groupby/aggregate/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/groupby/aggregate/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/groupby/aggregate/__pycache__/test_aggregate.cpython-310.pyc,, +pandas/tests/groupby/aggregate/__pycache__/test_cython.cpython-310.pyc,, +pandas/tests/groupby/aggregate/__pycache__/test_numba.cpython-310.pyc,, +pandas/tests/groupby/aggregate/__pycache__/test_other.cpython-310.pyc,, +pandas/tests/groupby/aggregate/test_aggregate.py,sha256=4S6PEKvRgk0ULSozn37cOsh6ohnEHZ3yaSGEp0Dmh9k,55554 +pandas/tests/groupby/aggregate/test_cython.py,sha256=XWKVeZTdLnpbaKlU128KkVmtIHntdpu_auCaHyyapXg,12800 +pandas/tests/groupby/aggregate/test_numba.py,sha256=Ba1zZzFC2-cjXE4OMOAStDvh_CeHy3hZwUhwDLDGkcY,13039 +pandas/tests/groupby/aggregate/test_other.py,sha256=LAuSm_tjHQlp_0oekNc14NCbj2gei4RR9jowjb_u65o,20669 +pandas/tests/groupby/conftest.py,sha256=uxnebcMXbaC_tH4Pg2wRZvXlWMZ_WnNIUeX8ftK7gWo,4785 +pandas/tests/groupby/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/groupby/methods/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_corrwith.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_describe.cpython-310.pyc,, 
+pandas/tests/groupby/methods/__pycache__/test_groupby_shift_diff.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_is_monotonic.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_nlargest_nsmallest.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_nth.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_quantile.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_rank.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_sample.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_size.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_skew.cpython-310.pyc,, +pandas/tests/groupby/methods/__pycache__/test_value_counts.cpython-310.pyc,, +pandas/tests/groupby/methods/test_corrwith.py,sha256=nseP6eDkLjiNIOSxm2EDFTkemTqNFUNqvvNJpMiNZVY,615 +pandas/tests/groupby/methods/test_describe.py,sha256=KFu1CeWWqpy3NWNh9IbzsirR36OKB9Q27yEUGZaM7og,9672 +pandas/tests/groupby/methods/test_groupby_shift_diff.py,sha256=4XMAhqV0JrGeXQn1_07ec9Nu25Dy1LOcDfojo4qEhNI,7925 +pandas/tests/groupby/methods/test_is_monotonic.py,sha256=OpnlOamR5gX1S7MVtZFGxnbt1Fem_wWH1Irc5aqkdq4,2566 +pandas/tests/groupby/methods/test_nlargest_nsmallest.py,sha256=MFS6cWChs3aBw3vb-n234pOV8_YYet2jOdDNN0lrMkg,3401 +pandas/tests/groupby/methods/test_nth.py,sha256=k2Pe1sTNCELszUfRU3SVf-54WHRjigSp0D51Cq2pmRA,28189 +pandas/tests/groupby/methods/test_quantile.py,sha256=deK9SMCVErwfQUDF_bc9DTH3fhxRBxuCxC4OHc3G6q0,16354 +pandas/tests/groupby/methods/test_rank.py,sha256=NE_ciV_TwLbTGoq1OFUFX5yadyiYoP3m5ppVOoD5264,24263 +pandas/tests/groupby/methods/test_sample.py,sha256=n_dLYblQo9MWnpngMRIIGLZFGEGOeAfEqsL9c9gLCKg,5155 +pandas/tests/groupby/methods/test_size.py,sha256=PQ2op8vrqyDhNYwQyM2x19v2jJzrTvUH0GCSv0xE_eU,4250 +pandas/tests/groupby/methods/test_skew.py,sha256=_FTlnXtE_fic6ZZ322S583IXUY5hEQggi-3Xbuboahw,841 +pandas/tests/groupby/methods/test_value_counts.py,sha256=8awMEsjBh7R_8s-w5roAq-mZrhl_9NrWQOU8lV8CYPs,39874 +pandas/tests/groupby/test_all_methods.py,sha256=eQsLKoyDyGZNPecbxC1HRzdIwW_DBEp0x_r3gD620pw,3077 +pandas/tests/groupby/test_api.py,sha256=IpMVl4g9F2317jWVTSiHoAsZKaOQWFx0Oi_jLWfv_DQ,8481 +pandas/tests/groupby/test_apply.py,sha256=z0nCK9dbF8ww3RoA3MhwZX5_BE-WF8AAF8672e2YnVs,54859 +pandas/tests/groupby/test_apply_mutate.py,sha256=b5rtOE-IwkLsEp5VTcyPDtKfCTB9MYw95a0U8ThKLE0,5047 +pandas/tests/groupby/test_bin_groupby.py,sha256=nZGe01NsuZmS88cMqq8fGFbKl-umvmWjXd8BGmR3jTo,1769 +pandas/tests/groupby/test_categorical.py,sha256=73Njrb3YH6fGMnL-9x1_rlT4_jGROdM5sVIQeCRaU-A,74271 +pandas/tests/groupby/test_counting.py,sha256=59N0fV7J8XRijMaIU0Cu5-odZmaoS73cGvzCuuOKrBA,13623 +pandas/tests/groupby/test_cumulative.py,sha256=c6C7ZNo0O5DH9SowsAXp4j_SF-wskjrUlNtfDJomjxQ,10588 +pandas/tests/groupby/test_filters.py,sha256=uFvXjXF2fpQJSwZUhGOUfguyJk7xoXYyL0ShN2KfXx8,21870 +pandas/tests/groupby/test_groupby.py,sha256=btnUSXmk3LKZJ5WBvaWe8ylA2FyaLbc1na2X6S3DTtU,108274 +pandas/tests/groupby/test_groupby_dropna.py,sha256=8OcPba3g6S_FwrxLBsrF8QGFO3Y9VH-bAPeu15h50eQ,23530 +pandas/tests/groupby/test_groupby_subclass.py,sha256=b13F2oZyPfzGngygHRHtBK_vlPfzdik-DiLaWWDcKZ8,4568 +pandas/tests/groupby/test_grouping.py,sha256=0KurYwG--QzwakI0Da37ZfP82qEpMYp22y-wWrvIll8,45862 +pandas/tests/groupby/test_index_as_string.py,sha256=bwAMXa4aSzVDUY1t3HmzK4y-jO5jIwbbRu85Jmb8-U0,2274 +pandas/tests/groupby/test_indexing.py,sha256=Ln_43WnuxtAVrWoaUHWh1IqUSY0i42nY9VnEnw86oXg,9521 +pandas/tests/groupby/test_libgroupby.py,sha256=xiFJcUw_cwTUpQh6E9L47EZm8HopmDrKuYSTI0gHnDs,10457 
+pandas/tests/groupby/test_missing.py,sha256=u6mv6_D1ydhkK3jLXqfvidDlOXYdUsN44ySzFksaIlU,5358 +pandas/tests/groupby/test_numba.py,sha256=B2ygkBddeTyLE7a6okHM_CbFwsOaqMceHh4h6fmmQNg,3260 +pandas/tests/groupby/test_numeric_only.py,sha256=gmxCGXKDLN_AZr4dQ2lA4zKrU84uDtwfFrdAUdFzDNA,18573 +pandas/tests/groupby/test_pipe.py,sha256=BpMDqw-ZGT-tHUJN7k6XoWz2H46sBqSxmouppbWMHsU,2098 +pandas/tests/groupby/test_raises.py,sha256=lzNGHyOBhvWL71QarVtajs8_ZSjIvSBJIwrk4-YbgdY,22214 +pandas/tests/groupby/test_reductions.py,sha256=vcpcNtIckgBbcv2gCCA6b3pP3WoUquLqnutMpNwAOMA,36833 +pandas/tests/groupby/test_timegrouper.py,sha256=5EdFromkRWltGo9xlkfRRb2eSwIpNrV834F3P_bqnNI,34779 +pandas/tests/groupby/transform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/groupby/transform/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/groupby/transform/__pycache__/test_numba.cpython-310.pyc,, +pandas/tests/groupby/transform/__pycache__/test_transform.cpython-310.pyc,, +pandas/tests/groupby/transform/test_numba.py,sha256=6GJOeWL6kOIJQQaBCAD9ajv_-m6NmCrpxB9wwoCSr0A,9684 +pandas/tests/groupby/transform/test_transform.py,sha256=0rG5_Lma8MEIs_l_GS8g0eDhALsk5_wokg9IuzRvSRs,57218 +pandas/tests/indexes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_any_index.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_base.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_common.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_datetimelike.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_engines.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_frozen.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_index_new.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_numpy_compat.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_old_base.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/__pycache__/test_subclass.cpython-310.pyc,, +pandas/tests/indexes/base_class/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/base_class/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/base_class/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/indexes/base_class/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/indexes/base_class/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/base_class/__pycache__/test_pickle.cpython-310.pyc,, +pandas/tests/indexes/base_class/__pycache__/test_reshape.cpython-310.pyc,, +pandas/tests/indexes/base_class/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/base_class/__pycache__/test_where.cpython-310.pyc,, +pandas/tests/indexes/base_class/test_constructors.py,sha256=c4hEi_fFI9WNCKw-HhXTtb6fX9bV7RmL4IoTxag5GH4,2763 +pandas/tests/indexes/base_class/test_formats.py,sha256=TfviPEyXl7e0N6iiySBiaPBiMaNc8hDpiY7iEpBXXcE,6329 +pandas/tests/indexes/base_class/test_indexing.py,sha256=1zbBHv-nJCIfXRicDPXPtyLBL3Iy-LvH5bkamnoFGrI,3687 +pandas/tests/indexes/base_class/test_pickle.py,sha256=ANKn2SirZRA2AHaZoCDHCB1AjLEuUTgXU2mXI6n3Tvw,309 +pandas/tests/indexes/base_class/test_reshape.py,sha256=F5i0CHj5vPBP1Xvg71l7bTt9u2krFVqltowHc_2l9FA,3168 +pandas/tests/indexes/base_class/test_setops.py,sha256=X84dGTmkrEJ2oSQfr-WfozQA3moGUpnmbhkTYzJWH7k,9076 
+pandas/tests/indexes/base_class/test_where.py,sha256=uq7oB-lk7rsgYQer8qeUsqD5aSECtRPSEUfKzn91BiE,341 +pandas/tests/indexes/categorical/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/categorical/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_append.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_category.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_equals.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_fillna.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_map.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_reindex.cpython-310.pyc,, +pandas/tests/indexes/categorical/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/categorical/test_append.py,sha256=LjLMq8GkNrsIVNfTrujLv_TlKo79oA_XbpNUFs-pqVQ,2191 +pandas/tests/indexes/categorical/test_astype.py,sha256=mQjQ9hbRf940DjzvC9OD6t8BzwphBXJdrROyEul1tzU,2860 +pandas/tests/indexes/categorical/test_category.py,sha256=-jO0jW9IJDvFWnl41MkcGuiWgkeuedUdRDIgVyoyB38,14683 +pandas/tests/indexes/categorical/test_constructors.py,sha256=g3hEVtOS576z11miVwakwud3cLXkFI2ErImUaFW9N6U,5536 +pandas/tests/indexes/categorical/test_equals.py,sha256=AIrr-W5WeqDj5KbELqjHm3-hqqx3q8YxBrv1z2oco94,3569 +pandas/tests/indexes/categorical/test_fillna.py,sha256=sH68aWCabI2qy5dbgxQCXeTfvn1NQgDfM1OT4ojFmaU,1850 +pandas/tests/indexes/categorical/test_formats.py,sha256=Rw-qSZ8zLRJkATk1UhPNAuVJMbbHBpuoALCXUDPR5PM,6297 +pandas/tests/indexes/categorical/test_indexing.py,sha256=zBvryPgX3VF5P4HqUQ1h1FD2warHLfSvb0nBq6rxjrc,14978 +pandas/tests/indexes/categorical/test_map.py,sha256=VHsSFGWEBmgQLvvquC6-y3QDq3lwzSpqPWZHTLiGdzw,4664 +pandas/tests/indexes/categorical/test_reindex.py,sha256=vPCV9O582vxJpubqCm33UHcaOKMZNg8OMzDF3lQQDiM,2938 +pandas/tests/indexes/categorical/test_setops.py,sha256=YiBoQN3Dor2p32HCUColWIZBH620H1aPa4easA5FMgc,462 +pandas/tests/indexes/conftest.py,sha256=aP9iTl0n1HpZWIP_02i__XxFnSMJF8iCM5Ein2MRK80,987 +pandas/tests/indexes/datetimelike_/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/datetimelike_/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/datetimelike_/__pycache__/test_drop_duplicates.cpython-310.pyc,, +pandas/tests/indexes/datetimelike_/__pycache__/test_equals.cpython-310.pyc,, +pandas/tests/indexes/datetimelike_/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/datetimelike_/__pycache__/test_is_monotonic.cpython-310.pyc,, +pandas/tests/indexes/datetimelike_/__pycache__/test_nat.cpython-310.pyc,, +pandas/tests/indexes/datetimelike_/__pycache__/test_sort_values.cpython-310.pyc,, +pandas/tests/indexes/datetimelike_/__pycache__/test_value_counts.cpython-310.pyc,, +pandas/tests/indexes/datetimelike_/test_drop_duplicates.py,sha256=UEmTzsZerSOIE6mPfaw4kQd7UFEo02H-EW5GOPpDTKU,2600 +pandas/tests/indexes/datetimelike_/test_equals.py,sha256=7Jnk1MjPYvI-I_YMRNRF29-g5CLaFmU3ZqQ6aO9KqIE,6348 +pandas/tests/indexes/datetimelike_/test_indexing.py,sha256=QoTXbCiqjK4tBDHUbq1TKPp0NroYkeheFjRq-VxlsP0,1310 
+pandas/tests/indexes/datetimelike_/test_is_monotonic.py,sha256=_5PXF7mVilu1S4EJv7F-XMYIoz40kBkdSs4RJ8jTVdI,1522 +pandas/tests/indexes/datetimelike_/test_nat.py,sha256=6-Yr-n4JskfsjbaEPFgaRPKX4S7R-LhQOEQSC7cBybw,1335 +pandas/tests/indexes/datetimelike_/test_sort_values.py,sha256=iIhZOW7CEwVD3KuJUFEOM2z18KORCx04W09bwsdKSNs,11463 +pandas/tests/indexes/datetimelike_/test_value_counts.py,sha256=o090A9QuhmahJjH0WgKBIxXdBVxPkAc8vikXqZLuoD4,3150 +pandas/tests/indexes/datetimes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/datetimes/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_date_range.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_datetime.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_freq_attr.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_iter.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_npfuncs.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_ops.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_partial_slicing.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_pickle.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_reindex.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_scalar_compat.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/datetimes/__pycache__/test_timezones.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/datetimes/methods/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_asof.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_delete.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_factorize.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_fillna.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_insert.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_isocalendar.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_map.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_normalize.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_repeat.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_resolution.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_round.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_shift.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_snap.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_to_frame.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_to_julian_date.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_to_period.cpython-310.pyc,, 
+pandas/tests/indexes/datetimes/methods/__pycache__/test_to_pydatetime.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_to_series.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_tz_convert.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_tz_localize.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/__pycache__/test_unique.cpython-310.pyc,, +pandas/tests/indexes/datetimes/methods/test_asof.py,sha256=gd-nBXLe-Dc5Voc_Ksgmq9mOU6S_I5ZZqlXcapgKzfE,738 +pandas/tests/indexes/datetimes/methods/test_astype.py,sha256=S04yQ6BdlxSXpR5DFRCsWlDTO4IUEDNSHIuMP1Jk6Zw,12201 +pandas/tests/indexes/datetimes/methods/test_delete.py,sha256=JaaHDwYuTarkta3Qd2fbteZd9k0oOzJsWCPEHUHHG4k,4441 +pandas/tests/indexes/datetimes/methods/test_factorize.py,sha256=Mif09gcfRfIO2uhCqNN9OC_NXggKizbuwaz6ScGzMUE,4468 +pandas/tests/indexes/datetimes/methods/test_fillna.py,sha256=eESnVTQ8J3iBL24bWKt7TmHxC5FJiLZMpKjw1V376qY,2004 +pandas/tests/indexes/datetimes/methods/test_insert.py,sha256=StmxdK3meNNEDO_CGzVIqltbXxwfX0pQxsngnPQfdtA,9343 +pandas/tests/indexes/datetimes/methods/test_isocalendar.py,sha256=JEABIm6LNySCbSUq6HLS-_qTGK3HgVcScSXLpDsrJ8o,908 +pandas/tests/indexes/datetimes/methods/test_map.py,sha256=1JR2lb_zk_8aIgRqnuWHfeXRPZBsFtdT4tRXeTDNqsQ,1358 +pandas/tests/indexes/datetimes/methods/test_normalize.py,sha256=rztamd3kwUZMcVQjeR1JcaIKr7pT0ACFcU4-FFynZkA,3041 +pandas/tests/indexes/datetimes/methods/test_repeat.py,sha256=GN-wTWws2sjodNibctZOi_NDX85y36Lr2BBmAs3LLMM,2740 +pandas/tests/indexes/datetimes/methods/test_resolution.py,sha256=RzkIL8IX63X1fgwr8o4_xuKvdOtPHdodPbsS75u9BRM,785 +pandas/tests/indexes/datetimes/methods/test_round.py,sha256=Ic1FFoRHdPv4TF1dSnOWVzVX90GowbXumbuNgTFPYlM,7822 +pandas/tests/indexes/datetimes/methods/test_shift.py,sha256=NhyUs0PMDuzSM573tqUamx3THf03WUNKz0nSOzDta5M,5933 +pandas/tests/indexes/datetimes/methods/test_snap.py,sha256=smwfWvN33B6UgLagKaBQkllTuGAm7Wiaq87M9nxu8g8,1305 +pandas/tests/indexes/datetimes/methods/test_to_frame.py,sha256=C6glyGdxSs-hMDQSt9jkftmRlTGPMCGdIQlfChR9iGk,998 +pandas/tests/indexes/datetimes/methods/test_to_julian_date.py,sha256=u6JLYazILIdltbe1uZE3iBAqE_ixXwx0oqwS6T-Mpng,1608 +pandas/tests/indexes/datetimes/methods/test_to_period.py,sha256=IIzHPLsk8BR43Ib5-8-EVxLQc_rkTcGBSk1M4-9OhYw,7986 +pandas/tests/indexes/datetimes/methods/test_to_pydatetime.py,sha256=sM22b33Cxwrpc5nShAp5QH2KQPOlEpi5d8G6fM3vVI8,1345 +pandas/tests/indexes/datetimes/methods/test_to_series.py,sha256=8ZW3AxMkHj3IV1wVgM797SH_rRLKQ9zld1UVkhk1C8Q,493 +pandas/tests/indexes/datetimes/methods/test_tz_convert.py,sha256=-Tuxq1egpSCBnBB7E_rAj1FudFgTm2DDYQ_wPMKgzwQ,11295 +pandas/tests/indexes/datetimes/methods/test_tz_localize.py,sha256=Q7A54lsovDxBDEqU7XNBJql3PoNLF7NVeXwvMFgrVI0,14830 +pandas/tests/indexes/datetimes/methods/test_unique.py,sha256=qZorAPI_oWcz5WdBEr0nQuT_mrApTgShqg3JVlzpVKU,2096 +pandas/tests/indexes/datetimes/test_arithmetic.py,sha256=l2q_n3zBT98OvI4gV7XZOZMCvo54xgM9frByNKCsbyU,1796 +pandas/tests/indexes/datetimes/test_constructors.py,sha256=zzICypvVbu8_PCfL3jiDGjSJWSflWjJbpqS5iNkd1kA,43922 +pandas/tests/indexes/datetimes/test_date_range.py,sha256=2CECH8fOYUP7LxyqlehEHVme2oSN4ZvEl3hjH8t-TDY,61363 +pandas/tests/indexes/datetimes/test_datetime.py,sha256=Q_dwJTXtSuVYTlMmnGhiNGCRrqHONu9wu2N5wgZw4pY,7305 +pandas/tests/indexes/datetimes/test_formats.py,sha256=rN90ZOq3e83t7X6uyd-cR1czM4A01nr3z_GIJJ0sy0k,12738 +pandas/tests/indexes/datetimes/test_freq_attr.py,sha256=oX_cweTcpKd27ywN976KCYpg0oFe77MeDWqnRJQwVRo,1732 
+pandas/tests/indexes/datetimes/test_indexing.py,sha256=MncSVI_l914qEW2CUg_livQrJ6AcOxvzmaiNOdzlOoA,25241 +pandas/tests/indexes/datetimes/test_iter.py,sha256=7r3wuHLeCBHfX8kaHNK-4Ecr6ZqR89Dhzkisx2C7jOI,2590 +pandas/tests/indexes/datetimes/test_join.py,sha256=LUV-a1_kCQ4BCr8R-iBWU7VmlOhYK4OZYIgDd-9E0cg,4742 +pandas/tests/indexes/datetimes/test_npfuncs.py,sha256=YJihZytss-MVNprp4p5pAL_emeC5pb3hBwtaS3yMCcU,384 +pandas/tests/indexes/datetimes/test_ops.py,sha256=h9MI1sM5I_T4a7kEPdZs2QuXTdlcnvKQJdI5jh6j4h4,1340 +pandas/tests/indexes/datetimes/test_partial_slicing.py,sha256=OlC1IDbJ2y_qjp-HCFERReBOHb07DnlPZ3lMlhwMSLA,16495 +pandas/tests/indexes/datetimes/test_pickle.py,sha256=cpuQl8fsaqJhP4qroLU0LUQjqFQ0uaX3sHql2UYOSg4,1358 +pandas/tests/indexes/datetimes/test_reindex.py,sha256=s1pt3OlK_JdWcaHsxlsvSh34mqFsR4wrONAwFBo5yVw,2145 +pandas/tests/indexes/datetimes/test_scalar_compat.py,sha256=pJz6r8-pnr5nl_KkUaCkTu2A3SGzJbH_0dpTFRjUUz8,11156 +pandas/tests/indexes/datetimes/test_setops.py,sha256=HThtofPALvrCNqwnFk-tqdvCIe_ij2f-VOObJfZQ93w,23574 +pandas/tests/indexes/datetimes/test_timezones.py,sha256=LfELNHXgQN5-7zwBW5OweUZm6y8Ogtm-ir7l-RQAJpQ,8046 +pandas/tests/indexes/interval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/interval/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_equals.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_interval.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_interval_range.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_interval_tree.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_pickle.cpython-310.pyc,, +pandas/tests/indexes/interval/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/interval/test_astype.py,sha256=7h7n8euKiXPnRU2d-4FYTAf-6iqPDR703dU7Oq10qwM,8809 +pandas/tests/indexes/interval/test_constructors.py,sha256=THCXDlRG7AncX5wzRlp9w1RNrYA0bTpWmzErMVfT0-w,19853 +pandas/tests/indexes/interval/test_equals.py,sha256=a7GA_whLbOiS4WxUdtDrqKOUhsfqq3TL0nkhqPccuss,1226 +pandas/tests/indexes/interval/test_formats.py,sha256=sUwFbFSiq-BBvB73vB1gZ4G5BTqQHWRRsmZzdx372DI,3921 +pandas/tests/indexes/interval/test_indexing.py,sha256=ig3f396aAkl3Lh1VX-MWOrDCn5t8bOop7xjOWjuCF7U,25320 +pandas/tests/indexes/interval/test_interval.py,sha256=L4Zo4GWIMVzHpOQ3Q09-GH_0Ixtge5ATR6kIgMYYjoc,34741 +pandas/tests/indexes/interval/test_interval_range.py,sha256=z_ZiNlL_7esHwH4Kd77k2gPm5Ev0Zy_NgACSkKoy4vA,13758 +pandas/tests/indexes/interval/test_interval_tree.py,sha256=yHyolu5v8YRazksfOBRgWd3O3eFVtzPc6NePpcV0ceU,7560 +pandas/tests/indexes/interval/test_join.py,sha256=HQJQLS9-RT7de6nBHsw50lBo4arBmXEVZhVMt4iuHyg,1148 +pandas/tests/indexes/interval/test_pickle.py,sha256=Jsmm_p3_qQpfJ9OqCpD3uLMzBkpsxufj1w6iUorYqmk,435 +pandas/tests/indexes/interval/test_setops.py,sha256=nwBz1MHuHiM7JQc74w2doEpgTSwg3NYfGwGbQFXWKw8,8346 +pandas/tests/indexes/multi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/multi/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/conftest.cpython-310.pyc,, 
+pandas/tests/indexes/multi/__pycache__/test_analytics.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_compat.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_conversion.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_copy.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_drop.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_duplicates.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_equivalence.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_get_level_values.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_get_set.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_integrity.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_isin.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_lexsort.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_missing.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_monotonic.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_names.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_partial_indexing.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_pickle.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_reindex.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_reshape.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_sorting.cpython-310.pyc,, +pandas/tests/indexes/multi/__pycache__/test_take.cpython-310.pyc,, +pandas/tests/indexes/multi/conftest.py,sha256=42mdJqtqvX3PlBSdch1Y6jRBvhe0IzZxOoLt-BGX03Q,698 +pandas/tests/indexes/multi/test_analytics.py,sha256=FeKERG9vHP-fAeGhlrzKO3IfAFpOOQnxQD7fRu2ycLY,6710 +pandas/tests/indexes/multi/test_astype.py,sha256=YmTnPF6qXwvYY82wZfQ8XFwVwOYYsIls3LSrdADDW-4,924 +pandas/tests/indexes/multi/test_compat.py,sha256=q53DVV5fYOKRVEQBl_2ws6WXrNsrGr5w4FXvXLUBeuQ,3918 +pandas/tests/indexes/multi/test_constructors.py,sha256=LP51k4lUfQgpfu7tjeIvvxaFgv-x_6VapDDx9I-y00I,26775 +pandas/tests/indexes/multi/test_conversion.py,sha256=8okPvlaOQgJzneUiy3MTwHU4Z9_th4cadqAxPiV-nLc,4957 +pandas/tests/indexes/multi/test_copy.py,sha256=9Xperk7a4yBTQKo8fgk3gCa2SwJr30mH2JYYMYWguWY,2405 +pandas/tests/indexes/multi/test_drop.py,sha256=Mv5FB-riRSuwwvVFJ60GwxRGbuFkU_LU5DPW8KY8NTk,6089 +pandas/tests/indexes/multi/test_duplicates.py,sha256=7_FP6fYuzDdffF2Wvgl8VKW4Auzq0xJ5ZVfp5Evnm3A,11559 +pandas/tests/indexes/multi/test_equivalence.py,sha256=LKBMAg82PbzkuMMy18u6Iktjzuavo1PIY-IxtPGBpZE,8530 +pandas/tests/indexes/multi/test_formats.py,sha256=Ra7L6T0N4zh6rZUg3gFP6bGC902uhBKV4kyLku7HCuI,9538 +pandas/tests/indexes/multi/test_get_level_values.py,sha256=WFSDmHIAXZ1RvDl-mK2HtXmWRO6IwSX5F0J7j5z0cm8,3971 +pandas/tests/indexes/multi/test_get_set.py,sha256=S3n29xb_Em0uKOsd6MPc_HR2bCQ54DHSdGi1bj1RSAE,12801 +pandas/tests/indexes/multi/test_indexing.py,sha256=lbx9kPQFf5EFfdCZ-yg1nGSqmJOYcpuHCBMC6vs_ZvA,36399 +pandas/tests/indexes/multi/test_integrity.py,sha256=VzyV3RrhWkQxwWzzLeLT6Lmc-njl4FnpoAIshI1BFW8,9031 +pandas/tests/indexes/multi/test_isin.py,sha256=OtlwJ9zZDvwgZOgbeY_oidWPOUmii_JBCCBpHnLw8us,3426 
+pandas/tests/indexes/multi/test_join.py,sha256=aRp18UCIgoSXazdYdirOwGV0k8Gj4o5eNRJL56p56Bc,8440 +pandas/tests/indexes/multi/test_lexsort.py,sha256=KbwMnYF6GTIdefQ7eACQusNNuehbtiuqzBMqsOSfDU0,1358 +pandas/tests/indexes/multi/test_missing.py,sha256=hHjKWxl5vkG5k9B9fxglrYB4eQldKamkMbACAu6OvUY,3348 +pandas/tests/indexes/multi/test_monotonic.py,sha256=5xlESrQOEcFWdr0iB3OipJtA6-RzriU3Yq2OQGgP0M4,7007 +pandas/tests/indexes/multi/test_names.py,sha256=zx_8kapVXzDS_SsylRzTFia2OrNJeEq3kmNHUA4RVPM,6601 +pandas/tests/indexes/multi/test_partial_indexing.py,sha256=sVNIk9_NxMDsHuRQzPCernPmchTF5INAUFkzQV7t8T0,4765 +pandas/tests/indexes/multi/test_pickle.py,sha256=ZJVZo0DcXDtV6BAUuPAKbwMV8aGfazJLU7Lw6lRmBcw,259 +pandas/tests/indexes/multi/test_reindex.py,sha256=ww8fSIx426wfqBTogkJrKS533CjKorf-B4bhyKdEnD4,5856 +pandas/tests/indexes/multi/test_reshape.py,sha256=yRcnTGS0M5749jUZGEZA8_UxSZ-CeOeCsWYBbTS0nTY,6711 +pandas/tests/indexes/multi/test_setops.py,sha256=74Ob19TAIflChAm-jfGmi5KTC8fnQkHvprHBxLYSELM,25466 +pandas/tests/indexes/multi/test_sorting.py,sha256=69C8BENuzyUvnQXEbjVvADmBAr5G6wzM-ELHOMLV2Do,10745 +pandas/tests/indexes/multi/test_take.py,sha256=4MaxPM4ZJQPXJKiqgwEwhZ71TyH4KQfIs5LgS40vvLM,2487 +pandas/tests/indexes/numeric/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/numeric/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/numeric/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/indexes/numeric/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/numeric/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/indexes/numeric/__pycache__/test_numeric.cpython-310.pyc,, +pandas/tests/indexes/numeric/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/numeric/test_astype.py,sha256=P19W9zZl8tN0EK-PaEi2gIFHLwCbruTMEUm7_ALGH9Q,3618 +pandas/tests/indexes/numeric/test_indexing.py,sha256=nDzkrokWvcmHkeHWjE8umPfxX4lR6AnQorAV7ppElCI,22761 +pandas/tests/indexes/numeric/test_join.py,sha256=OuSnYPH-jIM4UZRUKQ9NFxxd8Ot1HEP7KA3_ZpPX3Ks,15039 +pandas/tests/indexes/numeric/test_numeric.py,sha256=mEAFY8sSQdkVA0rJCTZb8cqjVAsTvL6mXzQSEXyxEgc,18586 +pandas/tests/indexes/numeric/test_setops.py,sha256=nO-3m7tb_ytjXx0Z8SqBkPSAnPVDz_PL3r2fzWtE7fg,5874 +pandas/tests/indexes/object/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/object/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/object/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/indexes/object/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/object/test_astype.py,sha256=id6izR4uYcs_9q9ej3-_07n7uvIh8eC_qb9ZFVYjT0s,1060 +pandas/tests/indexes/object/test_indexing.py,sha256=pyc-tiHJlAMkIj67-wN3k079C_gtiNQVJWpxklRJ5l4,9410 +pandas/tests/indexes/period/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/period/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_freq_attr.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_monotonic.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_partial_slicing.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_period.cpython-310.pyc,, 
+pandas/tests/indexes/period/__pycache__/test_period_range.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_pickle.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_resolution.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_scalar_compat.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_searchsorted.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/period/__pycache__/test_tools.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/period/methods/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__pycache__/test_asfreq.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__pycache__/test_factorize.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__pycache__/test_fillna.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__pycache__/test_insert.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__pycache__/test_is_full.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__pycache__/test_repeat.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__pycache__/test_shift.cpython-310.pyc,, +pandas/tests/indexes/period/methods/__pycache__/test_to_timestamp.cpython-310.pyc,, +pandas/tests/indexes/period/methods/test_asfreq.py,sha256=PAqk5Zktd2OvLYwNoUGeXOh39HIIz9-5FqXnzrH6rtA,7080 +pandas/tests/indexes/period/methods/test_astype.py,sha256=k_xiGDPZOip3iw26LcB2E7UiRGHBZ39EOrsJxQoci6k,5469 +pandas/tests/indexes/period/methods/test_factorize.py,sha256=FXQh6VmGkuGkB2IAT4Y-2V5UaD2LCUNjQZ6amfBao80,1425 +pandas/tests/indexes/period/methods/test_fillna.py,sha256=jAYnaWGMuUaG993yxLwr1eT6J1ut43CcBaKds4Ce3-0,1125 +pandas/tests/indexes/period/methods/test_insert.py,sha256=JT9lBhbF90m2zRgIwarhPqPtVbrvkLiihZxO-4WHvTU,482 +pandas/tests/indexes/period/methods/test_is_full.py,sha256=RqIErBofIn5Ewh-MomVePHOn0hViZbe4laMC2vh8nPs,570 +pandas/tests/indexes/period/methods/test_repeat.py,sha256=1Nwn-ePYBEXWY4N9pFdHaqcZoKhWuinKdFJ-EjZtFlY,772 +pandas/tests/indexes/period/methods/test_shift.py,sha256=P7XDpMkLEYarH06RLBglFJKoGPkax4oLdiuI676KLek,4405 +pandas/tests/indexes/period/methods/test_to_timestamp.py,sha256=DCFf_Dt5cNsuSWJnYQAGfJrx1y2Z0GQiSTh0ajQJhjA,4888 +pandas/tests/indexes/period/test_constructors.py,sha256=LkRK-O65VdhX3EDQJHDdeGVQHfA6BQHT_PCi97M4xIs,27175 +pandas/tests/indexes/period/test_formats.py,sha256=DFLAMAPFzX2DI1iAAEjVY_nM9TuoYmCje3m7Q17A0EU,13259 +pandas/tests/indexes/period/test_freq_attr.py,sha256=KL1xaip5r7nY-3oLW16bmogfkYljsGJEJGKxn6w72Fo,646 +pandas/tests/indexes/period/test_indexing.py,sha256=jms77VvgkIgm0bSCHX-IMOtYuR0w2jd5uW3UoC2fm_4,27893 +pandas/tests/indexes/period/test_join.py,sha256=mwVL-OKx7tKTvMeSLNTh8jv6ViU6-NXcWr5O4hCmkOc,1835 +pandas/tests/indexes/period/test_monotonic.py,sha256=9Sb4WOykj99hn3MQOfm_MqYRxO5kADZt6OuakhSukp4,1258 +pandas/tests/indexes/period/test_partial_slicing.py,sha256=gXvS-qB0jPHYLKvjaP2rBW4p2UAm-ahM6KCCpT-u7ns,7433 +pandas/tests/indexes/period/test_period.py,sha256=91AawBQiPn_J3b6aG4sEzU24VaNJBTMn8shm_qkcE2g,7861 +pandas/tests/indexes/period/test_period_range.py,sha256=PB_VIuobx3NgnGOSmYZ0fyk79Zpoop22oYDP-TW-36Y,8979 +pandas/tests/indexes/period/test_pickle.py,sha256=l9A79u5PTcoa70g26wFPLTGnbvYpe76hPk1Iv334gb0,692 +pandas/tests/indexes/period/test_resolution.py,sha256=0TmnJeZCOaTWneeWA66DlxKgaUZJTfP0jKgLAY1jiyg,571 
+pandas/tests/indexes/period/test_scalar_compat.py,sha256=CJuW0w6SdwDPtlk2Dl14g0ewuCcsIKPwtnmIMBSYEuc,1350 +pandas/tests/indexes/period/test_searchsorted.py,sha256=_u7DlvBnFx0_c8u3FIKYVOUcjlvN7p0gojLl9fZDkMQ,2604 +pandas/tests/indexes/period/test_setops.py,sha256=BcwDXv1-fnqOJLtzNqY2rEOye97Smyk2iXMnZx_IQE8,12547 +pandas/tests/indexes/period/test_tools.py,sha256=DFoxBsCYRWqodmNaDNPnQrZTTXiaSvwNZkwrybe7cl0,1361 +pandas/tests/indexes/ranges/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/ranges/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/ranges/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/indexes/ranges/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/ranges/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/indexes/ranges/__pycache__/test_range.cpython-310.pyc,, +pandas/tests/indexes/ranges/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/ranges/test_constructors.py,sha256=ceX79fbjGyc5VNkmz29Q1N7WGXLj40BvTuz5PfNAw4I,5328 +pandas/tests/indexes/ranges/test_indexing.py,sha256=WCJFjnEzFIqQUv_i2cy-wHRQ4Txfi8uq4UBp20s4LRw,5171 +pandas/tests/indexes/ranges/test_join.py,sha256=lniHRyuEJWY7UGc0TpJ20xzUftn6BpYJbZQPo2I0dxE,6268 +pandas/tests/indexes/ranges/test_range.py,sha256=AaoOQ_PufgrgnOmS7ARYRydbdU1jsb6-DKu2oX52LuI,20937 +pandas/tests/indexes/ranges/test_setops.py,sha256=yuiXAKlZJ5c3LkjPzFltAKFQmhVqaBleiJ7nzXs4_eA,17534 +pandas/tests/indexes/test_any_index.py,sha256=QgHuIfkF_E3BFaNveFThmGAbrMpyR_UL-KQ0FhPFTyY,5131 +pandas/tests/indexes/test_base.py,sha256=Te-rph8bo9g9lXscCEsVesaT6SYu2OB_8jIBOVsSN5Q,60624 +pandas/tests/indexes/test_common.py,sha256=sFM-TarVMvFsE8VGYMxcKVikmviNNPJSDxwGGijBvPY,17899 +pandas/tests/indexes/test_datetimelike.py,sha256=6ue74lBTp8Es6PZoE1e_5Fo6k3j7Hq_HkpLnBjAYspE,5598 +pandas/tests/indexes/test_engines.py,sha256=rq3JzDXNc2mZS5ZC2mQLpTeydheOX9OLoq1FLR53wbI,6699 +pandas/tests/indexes/test_frozen.py,sha256=ocwmaa3rzwC7UrU2Ng6o9xxQgxc8lDnrlAhlGNvQE0E,3125 +pandas/tests/indexes/test_index_new.py,sha256=6tO12VIGCoGKN3uk1SlPdPXn5vQaOJ9tECa3oVyWC8c,14923 +pandas/tests/indexes/test_indexing.py,sha256=jwcq_dujP7z8tfnLqQ-G2NoJ0CxrDIa33jWwRLKk-8w,11309 +pandas/tests/indexes/test_numpy_compat.py,sha256=fnrc8fNrV7v3BRTY7Huu9cyrBw2aNUrv5i4UUEublFE,5776 +pandas/tests/indexes/test_old_base.py,sha256=NnfN4Wb-Ua9i1WlibiNBrsaI6-YCLi760URJjJJJD0Q,39926 +pandas/tests/indexes/test_setops.py,sha256=0q7sa-WferJk9rjM9Lz-J4bWTac3O8WK_yYd9OV2O_U,32938 +pandas/tests/indexes/test_subclass.py,sha256=lmZHuQ8OSlwP3xcR8Xy2Mfvjxp2ry2zUL4DO2P4hbnk,1058 +pandas/tests/indexes/timedeltas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/timedeltas/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_delete.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_freq_attr.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_ops.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_pickle.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_scalar_compat.cpython-310.pyc,, 
+pandas/tests/indexes/timedeltas/__pycache__/test_searchsorted.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_setops.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_timedelta.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/__pycache__/test_timedelta_range.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/timedeltas/methods/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/methods/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/methods/__pycache__/test_factorize.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/methods/__pycache__/test_fillna.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/methods/__pycache__/test_insert.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/methods/__pycache__/test_repeat.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/methods/__pycache__/test_shift.cpython-310.pyc,, +pandas/tests/indexes/timedeltas/methods/test_astype.py,sha256=gnbDreTvP4IrdYSzScM0jlpj9SJdzvTRt2sOL54hA8E,6129 +pandas/tests/indexes/timedeltas/methods/test_factorize.py,sha256=aqhhwRKZvfGxa3v09X5vZ7uBup8n5OjaUadfJpV6FoI,1292 +pandas/tests/indexes/timedeltas/methods/test_fillna.py,sha256=F7fBoEG-mnu16ypWYmK5wbIovQJKL0h86C1MzGkhPoE,597 +pandas/tests/indexes/timedeltas/methods/test_insert.py,sha256=fDYCuOIefgjNBJ7zhAUYniNVl5SltSs275XaNoL0S-s,4713 +pandas/tests/indexes/timedeltas/methods/test_repeat.py,sha256=vPcNBkY4H2RxsykW1bjTg-FSlTlQ2H1yLb-ZsYffsEg,926 +pandas/tests/indexes/timedeltas/methods/test_shift.py,sha256=MzVVupnLHEvuwlVCn6mR7LQ9pLeNiWM2lWwNlIwoo98,2756 +pandas/tests/indexes/timedeltas/test_arithmetic.py,sha256=YocDQIovXnrpXEzz3Ac-3l2PdGaDf2_sF8UPcLVF1Z8,1561 +pandas/tests/indexes/timedeltas/test_constructors.py,sha256=atU_oy_1oyUtMWRg47A94j3S4nPJbDRRgUhDCW6TO6M,10600 +pandas/tests/indexes/timedeltas/test_delete.py,sha256=-5uYhDUCD55zv5I3Z8aVFEBzdChSWtbPNSP05nqUEiA,2398 +pandas/tests/indexes/timedeltas/test_formats.py,sha256=4yUVmL5NEabGi9AXPA5isM3c4F3Rgslk4zqcfS-ua3s,3807 +pandas/tests/indexes/timedeltas/test_freq_attr.py,sha256=gYGl9w9UdtcfN26KUx1QyY4mjh6A0m4Csk3gsCIcdos,2176 +pandas/tests/indexes/timedeltas/test_indexing.py,sha256=9C-U4bwBd7D1GnaKgi51Jlgod7KhONIlgrA9t7jSQ80,12160 +pandas/tests/indexes/timedeltas/test_join.py,sha256=7JUirtgNGJMRL1-k2gekrvondwYuIVvuI2548v4nfIo,1396 +pandas/tests/indexes/timedeltas/test_ops.py,sha256=nfGyNJvNy7_jmWebKjevLKhyAMNvI5jytkZTNlpEC-g,393 +pandas/tests/indexes/timedeltas/test_pickle.py,sha256=QesBThE22Ba17eUdG21lWNqPRvBhyupLnPsXueLazHw,302 +pandas/tests/indexes/timedeltas/test_scalar_compat.py,sha256=hldSSTxREuBBuLAhvLTjX7FUmJ9DzcJxmMqzaClnErg,4573 +pandas/tests/indexes/timedeltas/test_searchsorted.py,sha256=kCE0PkuPk1CxkZHODe3aZ54V-Hc1AiHkyNNVjN5REIM,967 +pandas/tests/indexes/timedeltas/test_setops.py,sha256=Y6OwY82XC1hDgME55I_9q_UzGZdKhAhI1sxXS8bzr1w,9503 +pandas/tests/indexes/timedeltas/test_timedelta.py,sha256=UxobS6Dhfoqy4bnoAuMlLO8acpNrCDGsYWl4vGbDO8Q,1934 +pandas/tests/indexes/timedeltas/test_timedelta_range.py,sha256=tZqv_j045dPD3K2sbqdhdvEb-qE7szf9S7DJNX5Ri3o,6220 +pandas/tests/indexing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexing/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/common.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_at.cpython-310.pyc,, 
+pandas/tests/indexing/__pycache__/test_categorical.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_chaining_and_caching.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_check_indexer.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_coercion.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_datetime.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_floats.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_iat.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_iloc.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_indexers.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_loc.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_na_indexing.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_partial.cpython-310.pyc,, +pandas/tests/indexing/__pycache__/test_scalar.cpython-310.pyc,, +pandas/tests/indexing/common.py,sha256=LtCDO4TeMhLWAiTGiJET3YP8RO6T3OQqmdpJ8JH391g,1021 +pandas/tests/indexing/conftest.py,sha256=9C84qvdnHzbM5C0KIVw3ueQhHzuUMoAlw07dVJqCAmQ,2677 +pandas/tests/indexing/interval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexing/interval/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexing/interval/__pycache__/test_interval.cpython-310.pyc,, +pandas/tests/indexing/interval/__pycache__/test_interval_new.cpython-310.pyc,, +pandas/tests/indexing/interval/test_interval.py,sha256=pB8gTluRFlmZZVCcRDtjXUygjSJegI3YRYI3XIPgsy0,7482 +pandas/tests/indexing/interval/test_interval_new.py,sha256=IkPyCHTHvwyHf25ljz4o6Q0CnHVpnLD2jVUF3TbtLS4,7976 +pandas/tests/indexing/multiindex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexing/multiindex/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_chaining_and_caching.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_datetime.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_getitem.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_iloc.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_indexing_slow.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_loc.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_multiindex.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_partial.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_setitem.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_slice.cpython-310.pyc,, +pandas/tests/indexing/multiindex/__pycache__/test_sorted.cpython-310.pyc,, +pandas/tests/indexing/multiindex/test_chaining_and_caching.py,sha256=hPcMvvPamIHI8AeSL7xvqs3eOT-5ONMjLy2XK2Mgt4Q,2922 +pandas/tests/indexing/multiindex/test_datetime.py,sha256=tl1yr3h50R0t7uvwTcfsRW-jt1n9vsqf4BWp4dNTdd8,1234 +pandas/tests/indexing/multiindex/test_getitem.py,sha256=wNftnfXLfiyjduEYeq8MSfE8K1OKaZG0WpmKWBqWk6o,13230 +pandas/tests/indexing/multiindex/test_iloc.py,sha256=G2CUPRhd5pRImZpH0uOVIPid7fzB4OuJZjH8arQMrE0,4918 +pandas/tests/indexing/multiindex/test_indexing_slow.py,sha256=nMfW1LQn7YlJauNceeR-uo_yPxRG2E8hcbgqTBMxaH4,3335 +pandas/tests/indexing/multiindex/test_loc.py,sha256=aVvEHILvJS7cYhNKEka_QiJlEcPim76s29FQlNbFYRw,32795 +pandas/tests/indexing/multiindex/test_multiindex.py,sha256=bIihrEIUXO1s8wAnKof9ATiwqAvwuLIWzE_oZlMxlOs,8065 
+pandas/tests/indexing/multiindex/test_partial.py,sha256=05MXMJmAevJ31bqHIVikEL14x6s7IUASxLaw62w44mQ,8858 +pandas/tests/indexing/multiindex/test_setitem.py,sha256=cn0FPeh4oKRpI0o01tFx24VOoNQr90GCiKIMo8cBaE0,19840 +pandas/tests/indexing/multiindex/test_slice.py,sha256=7JcyCAq91OpruPy1awmdQmblxPzQF4UrnUN2XHrahbY,27104 +pandas/tests/indexing/multiindex/test_sorted.py,sha256=xCdmS_0DBN2yoTVcSB-x6Ecwcw93p6erw3bTiU6_J3s,5192 +pandas/tests/indexing/test_at.py,sha256=Vnv3lP2MkIjLvaj5LTsPvZN_GTsaGDl7c4dzdHbEZBI,8194 +pandas/tests/indexing/test_categorical.py,sha256=JPn8mSo7FSTuFaHzpiELgVBwTsqmjISLnGoxloy6SjU,19699 +pandas/tests/indexing/test_chaining_and_caching.py,sha256=-T0e9bh8ktgrHrB8CXd-MjcvLnckuiSSyBC8Cr6q-uE,23479 +pandas/tests/indexing/test_check_indexer.py,sha256=tfr2a1h6uokN2MJDE7TKiZ0iRaHvfSWPPC-86RqaaDU,3159 +pandas/tests/indexing/test_coercion.py,sha256=RxeenIaFXLT9bPDEnV7zfpcPv5UC5QMVjp-hb0Igv9g,32629 +pandas/tests/indexing/test_datetime.py,sha256=Gj5Fo4ywd4md3H-zbk11bSbNEmktbnlHORVRzBfN0oE,5703 +pandas/tests/indexing/test_floats.py,sha256=KG_T_POIEc5nnVL7Zi8zSwamhahbfjUxBYrC3ilRlEI,20603 +pandas/tests/indexing/test_iat.py,sha256=cQrMr1MYQv5LZS5E34NumdqqeK8hvcN6duLRTaeZ6Go,1492 +pandas/tests/indexing/test_iloc.py,sha256=Y6LdDIOVnWoLlcqVI8eLoRwS4TgpNNRipr0Q-90FbM0,51335 +pandas/tests/indexing/test_indexers.py,sha256=agN_MCo403fOvqapKi_WYQli9AkDFAk4TDB5XpbJ8js,1661 +pandas/tests/indexing/test_indexing.py,sha256=a3ChWUgSad-7yXrvRU4wr6lY5Oqt24308Z_kMkqFxxg,40042 +pandas/tests/indexing/test_loc.py,sha256=mV2746VJwKa8-Rwn4iK-nvDl_CuMcHoDPOxP3wfJ5V8,119354 +pandas/tests/indexing/test_na_indexing.py,sha256=Ek_7A7ctm_WB-32NePbODbQ5LDMZBAmCvDgPKbIUOcg,2322 +pandas/tests/indexing/test_partial.py,sha256=f32wptlfPdvAdRSgt2N5USZQdtNt-GM31QoQpJSpXeA,25256 +pandas/tests/indexing/test_scalar.py,sha256=BuLsr0F1OA4IeA816BzuLFiSNGppPoALpieV2_8Nfg8,9643 +pandas/tests/interchange/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/interchange/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/interchange/__pycache__/test_impl.cpython-310.pyc,, +pandas/tests/interchange/__pycache__/test_spec_conformance.cpython-310.pyc,, +pandas/tests/interchange/__pycache__/test_utils.cpython-310.pyc,, +pandas/tests/interchange/test_impl.py,sha256=Exl81IoyGGPYt1Nz2ipJQ79cDXqlVZnJuFhCuIfKt5Q,19878 +pandas/tests/interchange/test_spec_conformance.py,sha256=JnE2kQOLr4EjUCH6Nzc1fCEXhbZ52WzKbioW6f6EVxo,5593 +pandas/tests/interchange/test_utils.py,sha256=15liIDJirQDoP7TxxQkmZJ9gCAVNCd2BwShW_GlwL2A,2965 +pandas/tests/internals/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/internals/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/internals/__pycache__/test_api.cpython-310.pyc,, +pandas/tests/internals/__pycache__/test_internals.cpython-310.pyc,, +pandas/tests/internals/__pycache__/test_managers.cpython-310.pyc,, +pandas/tests/internals/test_api.py,sha256=7s-n3jyp-e0ikVxkIqxf3xRtxk3aBV4h5FsnMIcStMY,2166 +pandas/tests/internals/test_internals.py,sha256=jeWXqpUIEnygO0BwnHdQZNsolBusoxvRSNndgaCnuUE,49657 +pandas/tests/internals/test_managers.py,sha256=uIuBmkOCjbFuGGNOodZ7ITijw4CfsG4aOUqRLCEfg-s,3556 +pandas/tests/io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/io/__pycache__/generate_legacy_storage_files.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_clipboard.cpython-310.pyc,, 
+pandas/tests/io/__pycache__/test_common.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_compression.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_feather.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_gbq.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_gcs.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_html.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_http_headers.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_orc.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_parquet.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_pickle.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_s3.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_spss.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_sql.cpython-310.pyc,, +pandas/tests/io/__pycache__/test_stata.cpython-310.pyc,, +pandas/tests/io/conftest.py,sha256=F72gAcQcyFdyv07CQkjbTT8dOkXSVHtwDQaIHFTB9xY,6406 +pandas/tests/io/excel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc,, +pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc,, +pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc,, +pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc,, +pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc,, +pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc,, +pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc,, +pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc,, +pandas/tests/io/excel/test_odf.py,sha256=DoE6DfjKkIKGJtRUG8uvBNNGBOvoqVZnL8Jr_I1vOLQ,1999 +pandas/tests/io/excel/test_odswriter.py,sha256=2SmPARRnXiOAstiUaEFaVfGu2kVQ5vVHGODlozrlUFI,3268 +pandas/tests/io/excel/test_openpyxl.py,sha256=wnADQLARvjB4BMYgd2fMs5jsvYm8DQvqFngJVnhSH1Q,15227 +pandas/tests/io/excel/test_readers.py,sha256=Qn8L41hKdO_2xkpTNBi2eqpUx0OAV3BfqeUKoSOn0aM,63198 +pandas/tests/io/excel/test_style.py,sha256=mQ7roFc4ZfBfrjc4Das0lNnYXIcV1cO1AOuXVRw1Dqw,11284 +pandas/tests/io/excel/test_writers.py,sha256=udzFSri-07QXgV0v-xHJ3Cx8wKvJJaoCByyAwwIg6gM,54899 +pandas/tests/io/excel/test_xlrd.py,sha256=e5QrByVFVm6rEZbdSifYBBCY-czTzWZZ5y7OyfrPksw,1977 +pandas/tests/io/excel/test_xlsxwriter.py,sha256=DUmibvRcUD6O2OcD_YcMymQPvMgkckIH92NjYsamyOE,2773 +pandas/tests/io/formats/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc,, +pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc,, +pandas/tests/io/formats/style/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc,, 
+pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_highlight.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_to_latex.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc,, +pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc,, +pandas/tests/io/formats/style/test_bar.py,sha256=czy40UZacoi9uyzM-w-AC5lMu2z2cwKwyE9Ml0i6x_k,12014 +pandas/tests/io/formats/style/test_exceptions.py,sha256=qm62Nu_E61TOrGXzxMSYm5Ciqm7qKhCFaTDP0QJmjJo,1002 +pandas/tests/io/formats/style/test_format.py,sha256=9siaXSHvCrA-YEuRI0-zun0gwQf2fVZwSPMIrb7CLTE,21154 +pandas/tests/io/formats/style/test_highlight.py,sha256=p2vRhU8aefAfmqLptxNO4XYbrVsccERvFQRd1OowC10,7003 +pandas/tests/io/formats/style/test_html.py,sha256=FvW0Zh6U8CkOKo0Plvz8W-udOgsczg9qawyVq-xzKqc,32702 +pandas/tests/io/formats/style/test_matplotlib.py,sha256=KPTvs_DbJlT5u7xQiQW3Ct-0jmpFHuah_lfQgZkiuQw,11649 +pandas/tests/io/formats/style/test_non_unique.py,sha256=JG_rE5A5Zk5exlfivZHnOI3Upzm8dJjmKKHkwEje4LQ,4366 +pandas/tests/io/formats/style/test_style.py,sha256=x7r8-nhnYdifw_PjopT0a4t99MTGzlOBv-g38HOHxik,58095 +pandas/tests/io/formats/style/test_to_latex.py,sha256=EbsBCluJ-2eVLSxXHgLo6Uus6VsnrbzqO9sYaRuewgs,33008 +pandas/tests/io/formats/style/test_to_string.py,sha256=w1GvLm3FtKQd9t2nwN3vF55X5f0GQKGCGXpYFZxITpA,1910 +pandas/tests/io/formats/style/test_tooltip.py,sha256=GMqwXrXi9Ppp0khfZHEwgeRqahwju5U2iIhZan3ndZE,2899 +pandas/tests/io/formats/test_console.py,sha256=jAk1wudhPiLBhhtydTNRlZ43961LqFu3uYt6cVA_jV0,2435 +pandas/tests/io/formats/test_css.py,sha256=YFHK3UFe2jcnz6AhmOFb7ZU1jd5Y_LYxIx5PBrJXNLQ,8669 +pandas/tests/io/formats/test_eng_formatting.py,sha256=QqFZJMUBVnU5SpZB63tCOHX3CqZbjgesOZc6nxbhp4c,8454 +pandas/tests/io/formats/test_format.py,sha256=10Nmscrr_GplWPa9t7nAluixTS73AqJfCNbiX4Kf5HI,83181 +pandas/tests/io/formats/test_ipython_compat.py,sha256=pRAOUIZ3Vsb2LVYywzk30d834GzqLH9N8kjTGlf2MXc,3055 +pandas/tests/io/formats/test_printing.py,sha256=hLBoT3FE7J2VjxCJIAS_N24g6pMoQcyQphGTnwt0Ehc,4499 +pandas/tests/io/formats/test_to_csv.py,sha256=mThYTrnKefL4fWiqsLmJP9nsJcKx9ejdPNXndW6ADzo,27541 +pandas/tests/io/formats/test_to_excel.py,sha256=ecNeSrVd2mSPsdIqm3lM911b4mPwLIVkoz3MnJFZE3g,15320 +pandas/tests/io/formats/test_to_html.py,sha256=elbKQSMvV8p3qWEFVFA_nneSjdXl432QYDlha1cGVGw,38699 +pandas/tests/io/formats/test_to_latex.py,sha256=ka8kOxa7dLP3wQf7b4dGHLNP9lc6TI1MCepsLSfYoTQ,41660 +pandas/tests/io/formats/test_to_markdown.py,sha256=2DUY7KrRVUu_OU6q4biW8rNFEINN6fPSkqs8VzY8rlE,2757 +pandas/tests/io/formats/test_to_string.py,sha256=aCcTOFjwdLQbEZ3JLEvlUySigpY-M4Gp8pV8ue-S0Ig,39371 +pandas/tests/io/generate_legacy_storage_files.py,sha256=c-J8fZLOyR7FRP8ijI6WcJrqequzwHJBZPs_1xC3bHI,9853 +pandas/tests/io/json/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc,, 
+pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc,, +pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc,, +pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc,, +pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc,, +pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc,, +pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc,, +pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc,, +pandas/tests/io/json/conftest.py,sha256=Zp83o90PvZ56MbhNRr1NZEPTpho7jRHcLYiEA9R_BZw,205 +pandas/tests/io/json/test_compression.py,sha256=PNaQlGwVdCL8K6ujRinmALn9O28tNZbxgelGcK-6MSo,4506 +pandas/tests/io/json/test_deprecated_kwargs.py,sha256=DKuEh2V2IkJOu-BnurWvax8Mq5EcQHtG-K-zncGZRpo,690 +pandas/tests/io/json/test_json_table_schema.py,sha256=lWCSq6HZNqPpjffejfkqc9JKjhRPUUVuLPWyWTyXDG4,30676 +pandas/tests/io/json/test_json_table_schema_ext_dtype.py,sha256=mTwJ_IpOBewvrLU98eLo-_yibYtOqD64LKLI_WIr5n0,9500 +pandas/tests/io/json/test_normalize.py,sha256=eOQoJQBGjAqFcswdNBipHoGMGBgLiwLFNIzTuZ5XSkI,30816 +pandas/tests/io/json/test_pandas.py,sha256=JlBn9DVzXvHbDKPYfyF6dt8njaNOeQ6mkR3vdJFUM_I,78323 +pandas/tests/io/json/test_readlines.py,sha256=NaIeCB9w7iM_Ptamx4IoLMRwIG9eUQxsTJpU2cBB5y0,18819 +pandas/tests/io/json/test_ujson.py,sha256=UYh87hxO7ySZ60Q8ycDjbEqzcbBD51mV9qIlMCDA_Fc,36424 +pandas/tests/io/parser/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/parser/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_c_parser_only.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_comment.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_compression.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_concatenate_chunks.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_converters.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_encoding.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_header.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_mangle_dupes.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_multi_thread.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_na_values.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_parse_dates.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_python_parser_only.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_quoting.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_read_fwf.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_unsupported.cpython-310.pyc,, +pandas/tests/io/parser/__pycache__/test_upcast.cpython-310.pyc,, +pandas/tests/io/parser/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/parser/common/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_chunksize.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_common_basic.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_data_list.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_decimal.cpython-310.pyc,, 
+pandas/tests/io/parser/common/__pycache__/test_file_buffer_url.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_float.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_index.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_inf.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_ints.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_iterator.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_read_errors.cpython-310.pyc,, +pandas/tests/io/parser/common/__pycache__/test_verbose.cpython-310.pyc,, +pandas/tests/io/parser/common/test_chunksize.py,sha256=IEeKcV5GYLee7U0ACIyDJtR-Q176X9zfRg7YqrdeWx8,11103 +pandas/tests/io/parser/common/test_common_basic.py,sha256=7RdM9Bh71Qmpf2bkUPu2MiHDwdUb2NJIwmxK3g1QEBc,30872 +pandas/tests/io/parser/common/test_data_list.py,sha256=XTWzTbtaLRGFdrjfRTJH3TTedD8Y0uCWRzji1qnrdk4,2228 +pandas/tests/io/parser/common/test_decimal.py,sha256=6WZy1C7G2vNpSo165GZAoRFGiy9OMgKygAIEYNalQ-Y,1932 +pandas/tests/io/parser/common/test_file_buffer_url.py,sha256=Gr7jx2idDJrMKF6tdwe-hxd9ewWxRokSAlFYjAeRgfM,14007 +pandas/tests/io/parser/common/test_float.py,sha256=5XM0Cndv31L4_7ER2MOB-Bnk9_GELTpakFp1-dNRjyM,2582 +pandas/tests/io/parser/common/test_index.py,sha256=kNF9uReFUMb4YaK9Cz10zUWnUXxT3OpZIhiy1fZTu_4,8234 +pandas/tests/io/parser/common/test_inf.py,sha256=yXUF6DrDhiPKEfEXJLnb71bZnycbo4CKXkl14Vyv3QY,2114 +pandas/tests/io/parser/common/test_ints.py,sha256=K49T03jXs77ktsxIFFQqBisPI3z042A8GATZcn1Tq44,7243 +pandas/tests/io/parser/common/test_iterator.py,sha256=FljWxY67UNOCedqg_as_nY4GtkU4HDwqwgpLkxU00Aw,3702 +pandas/tests/io/parser/common/test_read_errors.py,sha256=Aas1e5CM0ohMBXNQ2tSZao7jZbWTk9LA85FglJ8CRLE,9592 +pandas/tests/io/parser/common/test_verbose.py,sha256=kil5N51khhQifV9az-x2ijMr3wGtddKrU5oAbr0b1hs,2339 +pandas/tests/io/parser/conftest.py,sha256=PW00EmO-nd14_zUV7Uf8EO5tezaI-_zFcn2jP-Msxow,8725 +pandas/tests/io/parser/dtypes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc,, +pandas/tests/io/parser/dtypes/__pycache__/test_dtypes_basic.cpython-310.pyc,, +pandas/tests/io/parser/dtypes/__pycache__/test_empty.cpython-310.pyc,, +pandas/tests/io/parser/dtypes/test_categorical.py,sha256=H8HO6IYwkJryJV87hKep0rtyx4XmXAHh1ICuprkmYjM,9836 +pandas/tests/io/parser/dtypes/test_dtypes_basic.py,sha256=9IoehDR7qPsZOIAHDcsX4ekahkeU8MEkpeYMqb_fwAg,18502 +pandas/tests/io/parser/dtypes/test_empty.py,sha256=bFuG8P_48stM0rEB8J0pF-sRl3kezS-9wB3fycgCjFo,5258 +pandas/tests/io/parser/test_c_parser_only.py,sha256=qSNbMmaYlQG4ddezez8HkcFtSEtH8BCnL7UM1FwANbU,20534 +pandas/tests/io/parser/test_comment.py,sha256=QO0E262p5tnOpm9oxqTO1rwl0KU-mKMP_jydlahyFMM,7560 +pandas/tests/io/parser/test_compression.py,sha256=hW1GxllxvM8sUQhmTVibkkqdj0JcAAR9b7nKCxuXblk,6403 +pandas/tests/io/parser/test_concatenate_chunks.py,sha256=RD1MUklgLBtBNvJu5J92cVZbrO3n38UzdQvh4BAvAqI,1128 +pandas/tests/io/parser/test_converters.py,sha256=iA5iv_5YSfwloTccNfdgE-9QO-Zm9Z_taDspYeRvAF4,7453 +pandas/tests/io/parser/test_dialect.py,sha256=tgsdnhEkYBtjIKd-9BKAyQ8ATTSivnzIkiWiuLi513M,5844 +pandas/tests/io/parser/test_encoding.py,sha256=Og-q60V-nd-8xl5VBWDPtYqxGeemrs8rYCoCCWKdjmc,10782 +pandas/tests/io/parser/test_header.py,sha256=zvSu-S51vJaIGPOdZgdC2IeHd2Y_1FTId-QGJc_7BWU,21029 +pandas/tests/io/parser/test_index_col.py,sha256=deEpoBpT2KvbrcUgpnSmylzdpdAY5uhPtPRKzhJyUcE,11501 
+pandas/tests/io/parser/test_mangle_dupes.py,sha256=Xwci86pIvocxp6Gc0hT2bk0QahFZwTQdhKewCrC-W38,5390 +pandas/tests/io/parser/test_multi_thread.py,sha256=x40FWVAiCprn9T83Tu7cVaiUcGIcSSOgp7lauIUsdjo,4315 +pandas/tests/io/parser/test_na_values.py,sha256=P4mcmVpprWhd0TsFdABCJnNPQrkrLLFwIrpKaHe8bJo,22138 +pandas/tests/io/parser/test_network.py,sha256=8bNvzZHJ6r_m1WEJ7qt6fZtUbxLkxWP_aGqGnrtk_Po,12319 +pandas/tests/io/parser/test_parse_dates.py,sha256=lnel1CZGmZqmRdTG5ltGwVKl9GChQU07v8uyG25ci1k,69737 +pandas/tests/io/parser/test_python_parser_only.py,sha256=wkXRjsAKI6pZnvhetAyLanvn_0pjIxQBALYDHazXrf8,15897 +pandas/tests/io/parser/test_quoting.py,sha256=7g4XLvgjtkRf9qgl7eksjwJ-N42e4dq-nCEPWP9hS9g,6244 +pandas/tests/io/parser/test_read_fwf.py,sha256=DM-YTi6mkb6dup72srdA4GTIx6nzDiBoQqIGfwa1BQg,30107 +pandas/tests/io/parser/test_skiprows.py,sha256=D0dm01x-53YqSXXvj1jczRV5SWEDNkNP87tquehyn9w,9457 +pandas/tests/io/parser/test_textreader.py,sha256=R_yeB-k6g45i6ZTQ-PdF8DIJYdodhH059OGrRdM8IOM,10672 +pandas/tests/io/parser/test_unsupported.py,sha256=149HYApTOEJP9xEXuXuncyS2zq_lpF_AyBfu_SIjjes,7986 +pandas/tests/io/parser/test_upcast.py,sha256=XEjHUvgExlKwxTCSjSfWMxjwge0HeW9q2BMIQGuxfTk,3141 +pandas/tests/io/parser/usecols/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/parser/usecols/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/parser/usecols/__pycache__/test_parse_dates.cpython-310.pyc,, +pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc,, +pandas/tests/io/parser/usecols/__pycache__/test_usecols_basic.cpython-310.pyc,, +pandas/tests/io/parser/usecols/test_parse_dates.py,sha256=7PYxerT3Eok6kVV6dfU2e-qlBpde-gfCGMg1NEht8cM,5469 +pandas/tests/io/parser/usecols/test_strings.py,sha256=-ZUBWSpxMgoxqRfGAa0mgb5motUoKveF06V9LUH-nQg,2588 +pandas/tests/io/parser/usecols/test_usecols_basic.py,sha256=BKr0EIu8g1aLiF6a_g61zF2NHPVY8Cl6CRcNnHLQ_4o,17646 +pandas/tests/io/pytables/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc,, +pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc,, +pandas/tests/io/pytables/common.py,sha256=m3IH26TCzLDpS8ctvzJKLA8x414ur5jlX3sdT4sB4m8,1264 
+pandas/tests/io/pytables/conftest.py,sha256=vQgspEHypJUvbAU3P0I5BDBW2vRK4CgmcNqY5ZXksns,136 +pandas/tests/io/pytables/test_append.py,sha256=BlbvStEsoiOGbA85pP9hw8ufZXrqixjTjGaD6msIJuE,36668 +pandas/tests/io/pytables/test_categorical.py,sha256=l_Xyc15J7E4f3_jA9Lr_sz1AhW_jyIBm9xWuRb3X154,6994 +pandas/tests/io/pytables/test_compat.py,sha256=qsaDgIDMQOOMA_ZYv7r9r9sBUUbA9Fe2jb2j8XAeY_s,2547 +pandas/tests/io/pytables/test_complex.py,sha256=CUEEEU3zJh6pmj-gws7ahyhsHJTxO0W9MKraXeFg89A,5948 +pandas/tests/io/pytables/test_errors.py,sha256=Eqf2Jad_QDt2W3SCgUf6KpS_yH5HncsmLK2K-dFpggs,8372 +pandas/tests/io/pytables/test_file_handling.py,sha256=31di39gTGo5oNr7JRGGDdRBTsOOUPMX4LZoGPpTwyZk,14592 +pandas/tests/io/pytables/test_keys.py,sha256=trCYnTHa2LhD2xnqVJ6iv1BEahHpG4FDMNbjw_MG07w,2671 +pandas/tests/io/pytables/test_put.py,sha256=OHoalcEnIuqiGN0URxnagPXjguU6-sRAjeEucN2bboA,12335 +pandas/tests/io/pytables/test_pytables_missing.py,sha256=mK_l-tuF_TeoK4gZqRncm-FCe2PUgk2AS3q6q0M1YIU,345 +pandas/tests/io/pytables/test_read.py,sha256=coGLYjldztQ7XDYywtSWM6oQnG71J2q5Jg_yohuhHSg,13242 +pandas/tests/io/pytables/test_retain_attributes.py,sha256=WY5rbnlT_NqERl4OSJ9C2iWLtFpZZCW57iNiF-UbZDM,2970 +pandas/tests/io/pytables/test_round_trip.py,sha256=LTTDrvuzkO5INLmxunr4aHtQLhec_CN0kzB0fHm6Yv0,18753 +pandas/tests/io/pytables/test_select.py,sha256=ogfh1U88Zm1e0huKtgNGG16VBwgprZxaorDU09BF_ZA,37100 +pandas/tests/io/pytables/test_store.py,sha256=fL4f1vm40WPdrJ1YXlMd4hzc-DkyQksRMdKEdGc27Dw,37378 +pandas/tests/io/pytables/test_subclass.py,sha256=fgiunpfa4hECpAXsZrq4nB1a1z5txJxEj9MqyOBI3fQ,1369 +pandas/tests/io/pytables/test_time_series.py,sha256=hduw-GMBvahyZHh6JVrLKrxvU3NR0vl0cWTWamlgZw4,2481 +pandas/tests/io/pytables/test_timezones.py,sha256=3wUurqaoR-UdgndFKyPxmluEzl4euTPBFDcL6nV2IqM,11804 +pandas/tests/io/sas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/sas/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/sas/__pycache__/test_byteswap.cpython-310.pyc,, +pandas/tests/io/sas/__pycache__/test_sas.cpython-310.pyc,, +pandas/tests/io/sas/__pycache__/test_sas7bdat.cpython-310.pyc,, +pandas/tests/io/sas/__pycache__/test_xport.cpython-310.pyc,, +pandas/tests/io/sas/test_byteswap.py,sha256=fIqzF9LZs3TLm7JI4tEk4JxkynmWqZ5TydCmc12sGQs,1987 +pandas/tests/io/sas/test_sas.py,sha256=M9OeR39l3-DGJSBr84IVmnYMpMs_3xVfCgSSR8u7m-k,1057 +pandas/tests/io/sas/test_sas7bdat.py,sha256=DynUdplEj6lJUo7R6rmq5CAS1i3Jdp24AMhGBcDtaqc,14807 +pandas/tests/io/sas/test_xport.py,sha256=-gNRR9_2QZS2dQ7Zu756Omg5Bpaz-2I5nCovqEqJVwU,5728 +pandas/tests/io/test_clipboard.py,sha256=ePzhUM4NAeDeLs5HtICUJsGjJiuq1C5GZjN6fkmRND4,13302 +pandas/tests/io/test_common.py,sha256=ZCxsN-pvRYpdcw0-Dcy5wLL0MFM3fesBwQvhXcXCjnM,23778 +pandas/tests/io/test_compression.py,sha256=OGRUhXoSSY1uAUP2VUb45eRleOCuHTct3wRXVSdvpR8,12343 +pandas/tests/io/test_feather.py,sha256=uQRnNCEaENrmSWimfpgO88LAMS0p1bKy4Q4v0LrOBfY,9222 +pandas/tests/io/test_fsspec.py,sha256=fbJmi7UQIG-RDb0dw8zil2dj43OzyG0tyNww5Fiw6a4,10418 +pandas/tests/io/test_gbq.py,sha256=9tA62qL0uGbSKMZdxMwNjANpxaNB4buEdKfqAQej0HQ,401 +pandas/tests/io/test_gcs.py,sha256=gBXSRHyfJnkdVD5bI64KzG-NSv0dWDpKAOY3_rm82nM,7362 +pandas/tests/io/test_html.py,sha256=mY8-tmi5bqBngXwhfTNS2x_1fwncLP3io0TwHwT-n5k,56862 +pandas/tests/io/test_http_headers.py,sha256=_p5LsnX0QXVk7RGMU7TG-NqBWpPGVA-W79eeugdpXoU,4753 +pandas/tests/io/test_orc.py,sha256=rYZCqSiNAPDQIK-2RczRh--E96NkLGV82tp42XR5n-A,13663 +pandas/tests/io/test_parquet.py,sha256=nvzpnsoTzULj1m0pzR53xRKdrIkl_gqNkt_7FqSfHRg,50215 
+pandas/tests/io/test_pickle.py,sha256=eY7TI1oVlRXu9fAIJpiE0ig7H6bRC-L2YEOJNqIjh14,20755 +pandas/tests/io/test_s3.py,sha256=vLi6EkvAGMKudRcbxcosxHV7z_q6GbknZuYdEisHjy4,1181 +pandas/tests/io/test_spss.py,sha256=qp310khtYqh_uMjB_Y28MAJVuBUJEbSXwBtPCt2VoWg,6326 +pandas/tests/io/test_sql.py,sha256=wpkLb5iYSdRZCWnoYCHX2eFPB5Dayau2c3RH23kJDX4,145081 +pandas/tests/io/test_stata.py,sha256=ZMqmUmrVz1Jh5PF6-HSqHQth1T7z6tZiFYoFE4jrHJU,92292 +pandas/tests/io/xml/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc,, +pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc,, +pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc,, +pandas/tests/io/xml/conftest.py,sha256=ex3IgyE-7MBC_y5T2gJphlfUex7nqRG5VfX62mTbe5E,850 +pandas/tests/io/xml/test_to_xml.py,sha256=IxG7rT8KV0BghiUMvVMyd5GkbDR9xqWSmSDqT3CUAKM,35612 +pandas/tests/io/xml/test_xml.py,sha256=vuVe3eJJ1_fBoJNgDoLdTYCiqXhMmSH5qHGsd3T7I2A,61257 +pandas/tests/io/xml/test_xml_dtypes.py,sha256=z8unMuhwvcrDUQ-7j4PBKBzr55QXNprA7qALGW7vYw0,13266 +pandas/tests/libs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/libs/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/libs/__pycache__/test_hashtable.cpython-310.pyc,, +pandas/tests/libs/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/libs/__pycache__/test_lib.cpython-310.pyc,, +pandas/tests/libs/__pycache__/test_libalgos.cpython-310.pyc,, +pandas/tests/libs/test_hashtable.py,sha256=4rXFphd6C9bf5AVIqOohTwsJ7mA14SZmq3hcWtC7m-w,26091 +pandas/tests/libs/test_join.py,sha256=z5JeLRMmF_vu4wwOpi3cG6k-p6lkhjAKPad6ShMqS30,10811 +pandas/tests/libs/test_lib.py,sha256=iiYT79WGEiF-nHJuz7k-AoKwxd9x0BjcGry4j5SCFrc,10592 +pandas/tests/libs/test_libalgos.py,sha256=saDyCbchGU690HmrfZUJ6q1iCLNeW4x50Y-A2o1fgrg,5322 +pandas/tests/plotting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/common.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_boxplot_method.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_hist_method.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_misc.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_series.cpython-310.pyc,, +pandas/tests/plotting/__pycache__/test_style.cpython-310.pyc,, +pandas/tests/plotting/common.py,sha256=6oADaI21vWLSPgHVqckoLiPFWsrGXw71fel7HHxJyZc,16871 +pandas/tests/plotting/conftest.py,sha256=WGxjahxQkw-Gk4DlnLW0rDsei0dmuoCuZusNMepwty0,1531 +pandas/tests/plotting/frame/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc,, +pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc,, +pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc,, +pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc,, 
+pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc,, +pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc,, +pandas/tests/plotting/frame/test_frame.py,sha256=q1Yqth7KkCdlm3UBASDvzVfm-fQMT9JPDp1INECJM4A,98409 +pandas/tests/plotting/frame/test_frame_color.py,sha256=gBkX_6DMH-joE-4GjwZpIYgWHJkrWPPDJ8R9gKuHqH8,28488 +pandas/tests/plotting/frame/test_frame_groupby.py,sha256=JNd4J9E4BEtcU5ed47_SZK5p77P6vthENn_shRPbAJQ,2547 +pandas/tests/plotting/frame/test_frame_legend.py,sha256=10NvOjyNdV703r-9mLhYXIxeyZJFq_-24N9XNkNReJw,10443 +pandas/tests/plotting/frame/test_frame_subplots.py,sha256=kRVFvweJSAwzh9gNIzoifuy6_U2d9mZ-K7zXR_K5otw,28986 +pandas/tests/plotting/frame/test_hist_box_by.py,sha256=8jqVQfLrE5AKvn7iKMX7L5Gbe7e4rv6Ic8MnNp7NALI,10969 +pandas/tests/plotting/test_backend.py,sha256=rE7SNyeJiSUOWwkvxndq3qtpUEOYkUetCwdO_ey-eWM,3382 +pandas/tests/plotting/test_boxplot_method.py,sha256=DZ7MuTRTuNzQfzbMpRerX8oMhgLwTokFNK6o_YdP6Ag,29319 +pandas/tests/plotting/test_common.py,sha256=if9WnxryRdUhub-3yjdTEKO2PME-Yhf5YIG8e2nvAXU,1869 +pandas/tests/plotting/test_converter.py,sha256=pC3IZ6pfKITbmzTZBwoPwG1abGtPT6Sp1YLMuKLDKG8,13251 +pandas/tests/plotting/test_datetimelike.py,sha256=Jvsqdvr_SKrdzgRYwoTlNJeS_NWMSTD183sQF-lQMAs,66544 +pandas/tests/plotting/test_groupby.py,sha256=mcM2bOmfvJteLz9H0qMawxN3Yef-Nj2zCa_MUUBWF_c,5735 +pandas/tests/plotting/test_hist_method.py,sha256=2Rkk6DlGz9I4rXDjs6qBrZiRvUNWiBDCIKk44m0mrxw,34972 +pandas/tests/plotting/test_misc.py,sha256=_IoHRNT_OSGTyFfIu5giv5BnaUFWENQH36VKN8q32tI,25201 +pandas/tests/plotting/test_series.py,sha256=73VoBpLMLjKHwIaZKM50rGpOSx1kBsCxlxxNSsPwh8k,35318 +pandas/tests/plotting/test_style.py,sha256=3YMcq45IgmIomuihBowBT-lyJfpJR_Q8fbMOEQXUkao,5172 +pandas/tests/reductions/__init__.py,sha256=vflo8yMcocx2X1Rdw9vt8NpiZ4ZFq9xZRC3PW6Gp-Cs,125 +pandas/tests/reductions/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/reductions/__pycache__/test_reductions.cpython-310.pyc,, +pandas/tests/reductions/__pycache__/test_stat_reductions.cpython-310.pyc,, +pandas/tests/reductions/test_reductions.py,sha256=KCjnnzvnla6IysKlbvUO8bXSZ3hxxqfskc9CweDd1iM,57446 +pandas/tests/reductions/test_stat_reductions.py,sha256=Q-sfitViCm3-oQQVHWDwjKKia1ZuUX6079cGmv3i3oU,9722 +pandas/tests/resample/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/resample/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/resample/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/resample/__pycache__/test_base.cpython-310.pyc,, +pandas/tests/resample/__pycache__/test_datetime_index.cpython-310.pyc,, +pandas/tests/resample/__pycache__/test_period_index.cpython-310.pyc,, +pandas/tests/resample/__pycache__/test_resample_api.cpython-310.pyc,, +pandas/tests/resample/__pycache__/test_resampler_grouper.cpython-310.pyc,, +pandas/tests/resample/__pycache__/test_time_grouper.cpython-310.pyc,, +pandas/tests/resample/__pycache__/test_timedelta.cpython-310.pyc,, +pandas/tests/resample/conftest.py,sha256=XXj72zj-3AH2jPBUacVV6GSpY9Y4in_38g8cSf8UfYg,3355 +pandas/tests/resample/test_base.py,sha256=vKNx1a1KdSun8OIPEy62VUk390Eew6XyVgHQNbjPRFc,15475 +pandas/tests/resample/test_datetime_index.py,sha256=yQMqPpYkVcrH6MngHltPa9uVDd5n-Hv8jgy-jQVCIvs,74496 +pandas/tests/resample/test_period_index.py,sha256=zlaCtN0II7xAg9-sHDo6HdMNJhrmhCLVbSWe4QPZkR8,43093 +pandas/tests/resample/test_resample_api.py,sha256=QP9mj7ElUdWz7mMIfOJBLxYFsPhWugrzNZNGellLXTM,34082 
+pandas/tests/resample/test_resampler_grouper.py,sha256=j2WlubBPgs6CJ8u1nJHhhLOi9LxFkhd6Si2fg2M7yGc,23266 +pandas/tests/resample/test_time_grouper.py,sha256=7VGDIWdewbXeWGH80i_w0s0ffBPke0r-nqmv9_PC52s,11837 +pandas/tests/resample/test_timedelta.py,sha256=H_ZjEJhXN6fhWbpwEwuPsxFDWQermDwUvsM7oaE2pG0,7469 +pandas/tests/reshape/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/reshape/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_crosstab.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_cut.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_from_dummies.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_get_dummies.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_melt.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_pivot.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_pivot_multilevel.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_qcut.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_union_categoricals.cpython-310.pyc,, +pandas/tests/reshape/__pycache__/test_util.cpython-310.pyc,, +pandas/tests/reshape/concat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_append.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc,, +pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc,, +pandas/tests/reshape/concat/conftest.py,sha256=s94n_rOGHsQKdP2KbCAQEfZeQpesYmhH_d-RNNTkvYc,162 +pandas/tests/reshape/concat/test_append.py,sha256=mCBndbLvwmM8qTbwH7HoyZjFGLQWOsOMGjn1I1Mz8PA,14299 +pandas/tests/reshape/concat/test_append_common.py,sha256=Z2hBl4TyKpIJ-staPnWVmAbRMv9Wg0tQK_W8YpcIMXQ,27866 +pandas/tests/reshape/concat/test_categorical.py,sha256=37u7FkYgN0-HZX6z7_5MpAkgv4SCTX1xT4GfSgEfw5o,9531 +pandas/tests/reshape/concat/test_concat.py,sha256=tGbGgnotYE5XJLt0cG9D_FfziSflb9oNzlfqyeZbNL4,32440 +pandas/tests/reshape/concat/test_dataframe.py,sha256=-vObBDtkJ7N_eeIFgjpOVVrMJf_bB9KKknHZg1DbG7k,8864 +pandas/tests/reshape/concat/test_datetimes.py,sha256=dZc65JXlR1l5ulBaQrVzkLv0z8LgwXBlrBFxOxRSBZk,21584 +pandas/tests/reshape/concat/test_empty.py,sha256=wyQDnoujsaY-_dz5MlE-fpXqZyESi1mp0g8BFLQ3kyw,10242 +pandas/tests/reshape/concat/test_index.py,sha256=cauuUpDyWBOAmwmvaSnehWuve5XpFtGoNj2xuEFwdp4,17453 +pandas/tests/reshape/concat/test_invalid.py,sha256=E7InfrzodepcICRP_zFyg11CMs-2SmNrxFY3f8bhqjA,1608 +pandas/tests/reshape/concat/test_series.py,sha256=af0lLNaUEvGml86Ziy-VLJt-wQ-rwQZuQoFROulm9Z8,6061 +pandas/tests/reshape/concat/test_sort.py,sha256=RuXIJduLa56IJDmUQaCwyYOz_U0KXMDWf04WEzi8y7E,4350 +pandas/tests/reshape/merge/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/reshape/merge/__pycache__/__init__.cpython-310.pyc,, 
+pandas/tests/reshape/merge/__pycache__/test_join.cpython-310.pyc,, +pandas/tests/reshape/merge/__pycache__/test_merge.cpython-310.pyc,, +pandas/tests/reshape/merge/__pycache__/test_merge_asof.cpython-310.pyc,, +pandas/tests/reshape/merge/__pycache__/test_merge_cross.cpython-310.pyc,, +pandas/tests/reshape/merge/__pycache__/test_merge_index_as_string.cpython-310.pyc,, +pandas/tests/reshape/merge/__pycache__/test_merge_ordered.cpython-310.pyc,, +pandas/tests/reshape/merge/__pycache__/test_multi.cpython-310.pyc,, +pandas/tests/reshape/merge/test_join.py,sha256=uCi2kLp2Liq430VMue_iNsG49vML1J5DtIFKxs_yRyc,37570 +pandas/tests/reshape/merge/test_merge.py,sha256=2R-652Kz2iamzZiT6o-kgsgHEUNg9i_iQmC7p5ZIdSU,106202 +pandas/tests/reshape/merge/test_merge_asof.py,sha256=Gib-41Z735fnPUh0Ipn6V_XyxKhPL3zm1iFFTNywclo,121614 +pandas/tests/reshape/merge/test_merge_cross.py,sha256=9BVH6HWJRh-dHKDTBy8Q2it97gjVW79FgPC99HNLIc4,3146 +pandas/tests/reshape/merge/test_merge_index_as_string.py,sha256=w_9BccpqfB7yPhy_TBlMGx2BPOBwPhfg-pYRKA4HEC8,5357 +pandas/tests/reshape/merge/test_merge_ordered.py,sha256=Y4GLA6hxUoUdo6XhJ5inFBf867JJ8XqiaMi7GY4tsNY,7731 +pandas/tests/reshape/merge/test_multi.py,sha256=kV5tUCNAljJ78IPNrhaeDX9AyKtN2KdF8ZpNMTeDyzY,31130 +pandas/tests/reshape/test_crosstab.py,sha256=fJTqrjVg45YUp8aPCcpgRzrNEoXibZIAz8Tmz2cTM7k,32578 +pandas/tests/reshape/test_cut.py,sha256=vr9TM1AwpJc1c_roHi43ydZ3cMDPBvNv29qqYiypbDk,24554 +pandas/tests/reshape/test_from_dummies.py,sha256=-EzZAKwOfAIdfmAf36a9yJoXb9EDee5s8b3Niz0QXSQ,13272 +pandas/tests/reshape/test_get_dummies.py,sha256=EwXZfFJkidYfSSfV3b22rjxl87oO4IzHlL7_anciR1g,27650 +pandas/tests/reshape/test_melt.py,sha256=myoJF1JEbXammo_jC8SoxfWBSMDUoybuAYyleerElJ0,42211 +pandas/tests/reshape/test_pivot.py,sha256=52TJ3gtJ4K79Q4_kc0GCCAKD3SD0j9niybWoxmI8Z_U,93311 +pandas/tests/reshape/test_pivot_multilevel.py,sha256=DYp3BZ0h80UEgqFs0sNVqnUWBWgYU4622wp62SdCDdI,7549 +pandas/tests/reshape/test_qcut.py,sha256=0XO-B9XmAGiWLhEFW8wujFo-VR1r62SZP7MT-DBz1VE,8477 +pandas/tests/reshape/test_union_categoricals.py,sha256=-5HAPWXufLo52xxRMFedZjSfadNv9GFy4c-OKvN8GBA,15207 +pandas/tests/reshape/test_util.py,sha256=mk60VTWL9YPWNPAmVBHwkOAOtrHIDU6L3EAnlasx6IQ,2897 +pandas/tests/scalar/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/scalar/__pycache__/test_na_scalar.cpython-310.pyc,, +pandas/tests/scalar/__pycache__/test_nat.cpython-310.pyc,, +pandas/tests/scalar/interval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/interval/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/scalar/interval/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/scalar/interval/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/scalar/interval/__pycache__/test_contains.cpython-310.pyc,, +pandas/tests/scalar/interval/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/scalar/interval/__pycache__/test_interval.cpython-310.pyc,, +pandas/tests/scalar/interval/__pycache__/test_overlaps.cpython-310.pyc,, +pandas/tests/scalar/interval/test_arithmetic.py,sha256=qrUOEDp9dOkOoEfuuUHhmzKTZuPbj727p2PxO1kgxxM,5937 +pandas/tests/scalar/interval/test_constructors.py,sha256=DI5iRKoIg51lI_-FysKQyyaJnwrd8CqLjk7b7iqFIp0,1599 +pandas/tests/scalar/interval/test_contains.py,sha256=MSjo5U7KLuqugnEtURC8znpldI3-cLIfXQlIhNvQLI4,2354 +pandas/tests/scalar/interval/test_formats.py,sha256=Ep7692gGQMdrYiCxxudqXX-CA6S1sO3L2P2I4NHIreo,344 
+pandas/tests/scalar/interval/test_interval.py,sha256=W54SKFbFSlsvFwoXkNhb6JK52klz8is2ww2ZQ7AIjUs,2656 +pandas/tests/scalar/interval/test_overlaps.py,sha256=2FHG23scoclsfZZAngK9sesna_3xgbjgSKoUzlMxHro,2274 +pandas/tests/scalar/period/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/period/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/scalar/period/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/scalar/period/__pycache__/test_asfreq.cpython-310.pyc,, +pandas/tests/scalar/period/__pycache__/test_period.cpython-310.pyc,, +pandas/tests/scalar/period/test_arithmetic.py,sha256=YYt1270I1WxtnQqGck_49ECYtrpw__lX8qx8t-GuIZM,16775 +pandas/tests/scalar/period/test_asfreq.py,sha256=dbmg35zwFwPSiYR-5OuSA790slBEct8N6C1jkEXchBs,38445 +pandas/tests/scalar/period/test_period.py,sha256=zjHRVTyPeR7y2SgMn1UsUM1M37EfT1kypoPuqjxsFGI,40121 +pandas/tests/scalar/test_na_scalar.py,sha256=0t4r9nDTQtXUSeXRBxDfgWegznLM6TvMk2pK0gLScJc,7227 +pandas/tests/scalar/test_nat.py,sha256=pUhNNUxLBv4_D-l2tsHICFiT5ruDjvlj24oEkNZycxk,19972 +pandas/tests/scalar/timedelta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/scalar/timedelta/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc,, +pandas/tests/scalar/timedelta/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/scalar/timedelta/methods/__pycache__/test_as_unit.cpython-310.pyc,, +pandas/tests/scalar/timedelta/methods/__pycache__/test_round.cpython-310.pyc,, +pandas/tests/scalar/timedelta/methods/test_as_unit.py,sha256=Ut-_d5xcdAq9eD5_dknpSsnhjndzRyilGuT7PxOYl5s,2518 +pandas/tests/scalar/timedelta/methods/test_round.py,sha256=kAqNhW8GJMKvaACF1b6eKhO9DOvYUJuRrMyoxG2-nHM,6338 +pandas/tests/scalar/timedelta/test_arithmetic.py,sha256=mYTdK4okwMitWPPh335LY3wzy5hXncEXPnxLd1XrDXA,38156 +pandas/tests/scalar/timedelta/test_constructors.py,sha256=49f8ARiuEAbImuDasW9-NowtijVRPyoY6ARtX6iuNnM,22433 +pandas/tests/scalar/timedelta/test_formats.py,sha256=_5svunXjM1H4X5tMqgT7aO9CoDR96XgybUYHXNdcyDo,4161 +pandas/tests/scalar/timedelta/test_timedelta.py,sha256=VAEnw5O0egqtlazzAy6oJkgFGHCKDXp3NwRyBEQ19as,23413 +pandas/tests/scalar/timestamp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/timestamp/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/scalar/timestamp/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/scalar/timestamp/__pycache__/test_comparisons.cpython-310.pyc,, +pandas/tests/scalar/timestamp/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/scalar/timestamp/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/scalar/timestamp/__pycache__/test_timestamp.cpython-310.pyc,, +pandas/tests/scalar/timestamp/__pycache__/test_timezones.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/timestamp/methods/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/__pycache__/test_as_unit.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/__pycache__/test_normalize.cpython-310.pyc,, 
+pandas/tests/scalar/timestamp/methods/__pycache__/test_replace.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/__pycache__/test_round.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/__pycache__/test_timestamp_method.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/__pycache__/test_to_julian_date.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/__pycache__/test_to_pydatetime.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/__pycache__/test_tz_convert.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/__pycache__/test_tz_localize.cpython-310.pyc,, +pandas/tests/scalar/timestamp/methods/test_as_unit.py,sha256=Od0YhrglrVPaad4kzpjPKoVf-pBz0_lTbdaj7cpD7eU,2706 +pandas/tests/scalar/timestamp/methods/test_normalize.py,sha256=NMQXgPRwSB8Z8YtQLrU4qNbxhaq1InqKqwS8veJ_Cts,831 +pandas/tests/scalar/timestamp/methods/test_replace.py,sha256=JT-qoGosdZa0tgjg2AtKrniJnT6-o1YIXQrq-pFDL5E,7055 +pandas/tests/scalar/timestamp/methods/test_round.py,sha256=mA1FyUI8-J14yZ1Vf5Se0OeW2u4nv9-1s0r9eOmOxnE,13027 +pandas/tests/scalar/timestamp/methods/test_timestamp_method.py,sha256=JlFBfEixuZiw96lRZc88wXR9-5uOt74gBCUql321H6w,1017 +pandas/tests/scalar/timestamp/methods/test_to_julian_date.py,sha256=izPqS1f7lJ3Tqkiz65t3NjZqtgxu1_jbSg-LmZheiD4,810 +pandas/tests/scalar/timestamp/methods/test_to_pydatetime.py,sha256=duSR43OjYJiMOHjt7lLVrSdBZa74GQRqwJz5RPdbQ5M,2871 +pandas/tests/scalar/timestamp/methods/test_tz_convert.py,sha256=yw1GiCOn7F8ZDof9d7IvG6T28e6nsB-_XswfO0HN-Dc,1710 +pandas/tests/scalar/timestamp/methods/test_tz_localize.py,sha256=drtq_N4h6E-25vsQuJJO4Sc5dUXyCwIWTHM0ozIc8gI,12774 +pandas/tests/scalar/timestamp/test_arithmetic.py,sha256=4exZrHW0m6i4mCzKVFhehECC232IJYyc3IW1f-YzPbM,10852 +pandas/tests/scalar/timestamp/test_comparisons.py,sha256=zxzSqDtYxP7Fc4vXcIqxYq0Yg7KeKEdAn3iwbgAv-ns,10059 +pandas/tests/scalar/timestamp/test_constructors.py,sha256=qC0ZLNT77BDnBQ1atxBN20AG06mi10ur8-4BP9zEKDg,39486 +pandas/tests/scalar/timestamp/test_formats.py,sha256=TKn4H02mIrLpoWm4YuDsA3gUy87bYVqNLu8SgnckZA0,6864 +pandas/tests/scalar/timestamp/test_timestamp.py,sha256=c0ZhIgkRq9JfpohnixtM-n2frtyF2fR2pnUFjFER8fY,31042 +pandas/tests/scalar/timestamp/test_timezones.py,sha256=dXCPtLiGfQ9B2pg_s_YK7fvWwUW-CbVOPYUn9paFosk,666 +pandas/tests/series/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/series/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_api.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_arithmetic.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_constructors.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_cumulative.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_formats.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_iteration.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_logical_ops.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_missing.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_npfuncs.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_reductions.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_subclass.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_ufunc.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_unary.cpython-310.pyc,, +pandas/tests/series/__pycache__/test_validate.cpython-310.pyc,, +pandas/tests/series/accessors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/series/accessors/__pycache__/__init__.cpython-310.pyc,, 
+pandas/tests/series/accessors/__pycache__/test_cat_accessor.cpython-310.pyc,, +pandas/tests/series/accessors/__pycache__/test_dt_accessor.cpython-310.pyc,, +pandas/tests/series/accessors/__pycache__/test_list_accessor.cpython-310.pyc,, +pandas/tests/series/accessors/__pycache__/test_sparse_accessor.cpython-310.pyc,, +pandas/tests/series/accessors/__pycache__/test_str_accessor.cpython-310.pyc,, +pandas/tests/series/accessors/__pycache__/test_struct_accessor.cpython-310.pyc,, +pandas/tests/series/accessors/test_cat_accessor.py,sha256=1-ZRI4h_lsBclkXljCrYFwGIYXbhrpE1iET-MjNKngk,9611 +pandas/tests/series/accessors/test_dt_accessor.py,sha256=wL3MFsru8nLxaY2KLmCFfZvdpvtIYHJie44Ff6V7eSE,29886 +pandas/tests/series/accessors/test_list_accessor.py,sha256=7OsgwSCkXFDSRh81g5WKniPsv_zcTosuGicGPSemBqo,3425 +pandas/tests/series/accessors/test_sparse_accessor.py,sha256=yPxK1Re7RDPLi5v2r9etrgsUfSL9NN45CAvuR3tYVwA,296 +pandas/tests/series/accessors/test_str_accessor.py,sha256=M29X62c2ekvH1FTv56yye2TLcXyYUCM5AegAQVWLFc8,853 +pandas/tests/series/accessors/test_struct_accessor.py,sha256=Yg_Z1GjJf92XaXOnT0aUaeEtp7AOcQqWPT4guJKGfEg,5443 +pandas/tests/series/indexing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/series/indexing/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_datetime.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_delitem.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_get.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_getitem.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_indexing.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_mask.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_set_value.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_setitem.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_take.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_where.cpython-310.pyc,, +pandas/tests/series/indexing/__pycache__/test_xs.cpython-310.pyc,, +pandas/tests/series/indexing/test_datetime.py,sha256=1_yUGMkSFYGh7TJOeDN_-5FvqsVyV-rGdgBzOnyqqNk,14752 +pandas/tests/series/indexing/test_delitem.py,sha256=bqam_JdFo9bWPIIglt0Rvms-KJxG1wZ5znTtrAn5eaI,2063 +pandas/tests/series/indexing/test_get.py,sha256=-FooS4ocg7uqbXYDNEZwMvRpTCar5LJCgCqi_CpDoo0,5758 +pandas/tests/series/indexing/test_getitem.py,sha256=TLizXWrxrsUj5KtXGVB2kIxHK3ayq5IsjnjTDqFiPzY,24431 +pandas/tests/series/indexing/test_indexing.py,sha256=UJrjrjD_5-nqaPVcjSz90dQQRdtXeCD5QzZDTCVGjTw,16679 +pandas/tests/series/indexing/test_mask.py,sha256=ecPdJ-CM8HbaaZoGUfwcoOuo0eIz7aEq-x8wL0PZWbE,1711 +pandas/tests/series/indexing/test_set_value.py,sha256=UwVNpW3Fh3PKhNiFzZiVK07W871CmFM2fGtC6CTW5z0,991 +pandas/tests/series/indexing/test_setitem.py,sha256=DdaSjqIX0sTc-zXFv3dbuq0KRcq4qcxJcZEblgdtIyg,60265 +pandas/tests/series/indexing/test_take.py,sha256=574cgL0w0fj-YnZma9b188Y0mTWs-Go6ZzB9zQSdpAk,1353 +pandas/tests/series/indexing/test_where.py,sha256=eAUIGPRMumG78t6CMCtoe50hJCKLUFCWSe8mjHyA5Bo,13441 +pandas/tests/series/indexing/test_xs.py,sha256=8EKGIgnK86_hsBjPIY5lednYnzatv14O6rq3LjR_KxI,2760 +pandas/tests/series/methods/__init__.py,sha256=zVXqGxDIQ-ebxxcetI9KcJ9ZEHeIC4086CoDvyc8CNM,225 +pandas/tests/series/methods/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_add_prefix_suffix.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_align.cpython-310.pyc,, 
+pandas/tests/series/methods/__pycache__/test_argsort.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_asof.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_astype.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_autocorr.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_between.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_case_when.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_clip.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_combine.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_combine_first.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_compare.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_convert_dtypes.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_copy.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_count.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_cov_corr.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_describe.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_diff.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_drop.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_drop_duplicates.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_dropna.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_dtypes.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_duplicated.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_equals.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_explode.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_fillna.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_get_numeric_data.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_head_tail.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_infer_objects.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_info.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_interpolate.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_is_monotonic.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_is_unique.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_isin.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_isna.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_item.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_map.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_matmul.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_nlargest.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_nunique.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_pct_change.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_pop.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_quantile.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_rank.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_reindex.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_reindex_like.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_rename.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_rename_axis.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_repeat.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_replace.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_reset_index.cpython-310.pyc,, 
+pandas/tests/series/methods/__pycache__/test_round.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_searchsorted.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_set_name.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_size.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_sort_index.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_sort_values.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_to_csv.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_to_dict.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_to_frame.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_to_numpy.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_tolist.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_truncate.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_tz_localize.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_unique.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_unstack.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_update.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_value_counts.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_values.cpython-310.pyc,, +pandas/tests/series/methods/__pycache__/test_view.cpython-310.pyc,, +pandas/tests/series/methods/test_add_prefix_suffix.py,sha256=PeUIeDHa9rGggraEbVJRtLi2GcnNcXkrXb0otlthOC4,1556 +pandas/tests/series/methods/test_align.py,sha256=1qb66VDMKpMXYpivki_WWGR_aHtEishwVQlK1ZIJqHA,7700 +pandas/tests/series/methods/test_argsort.py,sha256=GSvtMvfeUktQkrOsl-bF4di5w8QPCo9GPza1OmeofeM,2871 +pandas/tests/series/methods/test_asof.py,sha256=CqRdyeXFhE7zVdkJB-TxVqK3XPyBNvtOAfb6_a0VGgM,6324 +pandas/tests/series/methods/test_astype.py,sha256=fPZRB30wa7fFvcY66odgPfA7f2_loj-nWthKLGkOHew,25472 +pandas/tests/series/methods/test_autocorr.py,sha256=SnxELB9bcE8H68tYUDN3UKMMPu-sEfbwTlLUn8WirV8,1015 +pandas/tests/series/methods/test_between.py,sha256=9w_8uWI5kcJOTfMwbEwmjGpU2j2cyuMtCYw4MrvgSM0,2584 +pandas/tests/series/methods/test_case_when.py,sha256=0YC-SaigIaoSO2l7h9sO4ebzCrxq0ma5FtiZKiwDMRs,4223 +pandas/tests/series/methods/test_clip.py,sha256=PuUarzkVXrwdYBF6pKqKbRw_GUuXdYsSPoNomgSDyzc,5220 +pandas/tests/series/methods/test_combine.py,sha256=ye8pwpjolpG_kUKSFTC8ZoRdj3ze8qtJXvDUZ5gpap4,627 +pandas/tests/series/methods/test_combine_first.py,sha256=84sHCHBNQIhQEtTRWNZQqAuq_3LuHiVbq7Xmp5pRjZo,5420 +pandas/tests/series/methods/test_compare.py,sha256=uRA4CKyOTPSzW3sihILLvxpxdSD1hb7mHrSydGFV2J4,4658 +pandas/tests/series/methods/test_convert_dtypes.py,sha256=OsSLvgRzG1MKutYTZXPqkHwiNy6-QzXqN-KtohZ7wYs,9724 +pandas/tests/series/methods/test_copy.py,sha256=im14SuY4pXfqYHvd4UamQSSTiXsK8GOP7Ga-5w-XRFs,3164 +pandas/tests/series/methods/test_count.py,sha256=mju3vjyHXg8qRH85cRLWvRL8lFnF7HGdETjt2e_pK7M,938 +pandas/tests/series/methods/test_cov_corr.py,sha256=NfmwlBV_Umm50xTwfuhJhKtNPmrUVEaJOt9GWTsb3DQ,5709 +pandas/tests/series/methods/test_describe.py,sha256=brDSZ2qicnLANI2ReYiYQiXzu6m9VxFr4DVULEyGgSA,6646 +pandas/tests/series/methods/test_diff.py,sha256=vEBvVglFS1cSDpllOLEZ9Dkdv1E02IYP9y6s6nsL6es,2538 +pandas/tests/series/methods/test_drop.py,sha256=nqTXYfvY76BZ2cl46kUb8mkkll5StdCzBaTn_YkGfIk,3394 +pandas/tests/series/methods/test_drop_duplicates.py,sha256=P6jHz77EAtuiI2IE25pNjBx3pXteUc0JUMoj2mWo8T4,9235 +pandas/tests/series/methods/test_dropna.py,sha256=fezc4siTNn-uOEQtOhaqNAOLYBoWN3Rh6STHAtOdk8U,3577 
+pandas/tests/series/methods/test_dtypes.py,sha256=IkYkFl0o2LQ5qurobwoPgp4jqi2uKU7phoAk3oZtiYo,209 +pandas/tests/series/methods/test_duplicated.py,sha256=ACzVs9IJY4lC2SQb6frHVe4dGd6YLFID5UAw4BuZa7c,2059 +pandas/tests/series/methods/test_equals.py,sha256=qo8h305o5ktv9ooQ7pMbMUnQFjzOGLWc5TwxL9wD5zg,4182 +pandas/tests/series/methods/test_explode.py,sha256=Pw0yPOLX6iHzLDld7Bo1tC2CjnMYGIo9cEQs1Q6wmDg,5110 +pandas/tests/series/methods/test_fillna.py,sha256=tjuKAfrmByzwY1H_xez3xSwKkZUDac1aSt47ZHP7llI,39985 +pandas/tests/series/methods/test_get_numeric_data.py,sha256=UPWNlzpl2a9Zez1JSfFP2EwsYfs4U4_Re4yOkqGpsl8,1178 +pandas/tests/series/methods/test_head_tail.py,sha256=1EWojjTzcLvYH34VvyvEHxczDy7zL3dMTyayFHsVSzY,343 +pandas/tests/series/methods/test_infer_objects.py,sha256=w0UyAVk4bHlCBX8Ot8BiV6Y0flw-70XiENsh0jsgyhg,1903 +pandas/tests/series/methods/test_info.py,sha256=hff1IZ3mbfwsJzNvLcFyFlnk3aubm3gcxMhROr-F-aI,4907 +pandas/tests/series/methods/test_interpolate.py,sha256=Y0pZXAceQWfdEylQi0Q78g3LLSvwv9qTr0ur9z-SED8,34267 +pandas/tests/series/methods/test_is_monotonic.py,sha256=vvyWZFxiSybq88peF0zN5dM16rH2SgCEEA-gT2rRSSY,838 +pandas/tests/series/methods/test_is_unique.py,sha256=d3aLS5q491IVZkfKx8HTc4jkgTtuN0SOaUVfkyBTImE,953 +pandas/tests/series/methods/test_isin.py,sha256=iOwKDqYVh8mFnkwcdc9oRiJVlxfDF87AwL2i7kBugqQ,8343 +pandas/tests/series/methods/test_isna.py,sha256=TzNID2_dMG6ChWSwOMIqlF9AWcc1UjtjCHLNmT0vlBE,940 +pandas/tests/series/methods/test_item.py,sha256=z9gMBXHmc-Xhpyad9O0fT2RySMhlTa6MSrz2jPSUHxc,1627 +pandas/tests/series/methods/test_map.py,sha256=nVhgNZdZvBhJfLXOckrslAK5AINuZlwtfqpkeEZSuBc,18772 +pandas/tests/series/methods/test_matmul.py,sha256=cIj2nJctMnOvEDgTefpB3jypWJ6-RHasqtxywrxXw0g,2767 +pandas/tests/series/methods/test_nlargest.py,sha256=oIkyZ6Z2NiUL09sSTvAFK7IlcfQDiVgwssFe6NtsyIE,8442 +pandas/tests/series/methods/test_nunique.py,sha256=6B7fs9niuN2QYyxjVNX33WLBJvF2SJZRCn6SInTIz0g,481 +pandas/tests/series/methods/test_pct_change.py,sha256=C_WTtvjTsvfT94CUt22jYodJCHd18nUrkCLorQPf_d8,4523 +pandas/tests/series/methods/test_pop.py,sha256=xr9ZuFCI7O2gTW8a3WBr-ooQcOhBzoUK4N1x0K5G380,295 +pandas/tests/series/methods/test_quantile.py,sha256=DrjNLdKWpR-Sy8htHn2roHNI4roGKtR-ziZ77mPBVo8,8284 +pandas/tests/series/methods/test_rank.py,sha256=PokA09Wyiil9JGQ5CBNqEtRP_uvZlwTWPd-8TsGsrfw,18104 +pandas/tests/series/methods/test_reindex.py,sha256=-AIJ2FzgeE2-1z_WPfkFcktucr78afWJWx_TTU-J-jM,14576 +pandas/tests/series/methods/test_reindex_like.py,sha256=e_nuGo4QLgsdpnZrC49xDVfcz_prTGAOXGyjEEbkKM4,1245 +pandas/tests/series/methods/test_rename.py,sha256=XzMLQMJZ4bYYOnmck8NidsW5DSfw2YlbvQmZidXVoWk,6321 +pandas/tests/series/methods/test_rename_axis.py,sha256=TqGeZdhB3Ektvj48JfbX2Jr_qsCovtoWimpfX_ViJyg,1520 +pandas/tests/series/methods/test_repeat.py,sha256=WvER_QkoVNYU4bg5hQbLdCXIWxqVnSmJ6K3_3OLLLAI,1274 +pandas/tests/series/methods/test_replace.py,sha256=xyO2g1XBJhY9ApbZK4YPhj-3FwlH7vBwRzdJ8o1vEH8,31936 +pandas/tests/series/methods/test_reset_index.py,sha256=D7fTW37psSXR22ZQHrxID1NWgeFkCZU83QLBo1Cb7xI,7848 +pandas/tests/series/methods/test_round.py,sha256=eQ6kSu0XLBX9NSA1A8V4eXEHIgqoxKbSJ79dDGcYxi8,2651 +pandas/tests/series/methods/test_searchsorted.py,sha256=2nk-hXPbFjgZfKm4bO_TiKm2xjd4hj0L9hiqR4nZ2Ss,2493 +pandas/tests/series/methods/test_set_name.py,sha256=rt1BK8BnWMd8D8vrO7yQNN4o-Fnapq5bRmlHyrYpxk4,595 +pandas/tests/series/methods/test_size.py,sha256=3-LfpWtTLM_dPAHFG_mmCxAk3dJY9WIe13czw1d9Fn4,566 
+pandas/tests/series/methods/test_sort_index.py,sha256=XIiu2aL5NayZoQDsBRdBbx6po5_pW4pq4us2utrSY2c,12634 +pandas/tests/series/methods/test_sort_values.py,sha256=jIvHYYMz-RySUtJnB9aFLR88s-M20-B5E5PwK9VQhns,9372 +pandas/tests/series/methods/test_to_csv.py,sha256=baTGH5GpQJOz4rpQmRMgClwBXxwVcTpEmM870qCZ2zs,6488 +pandas/tests/series/methods/test_to_dict.py,sha256=XGdcF1jD4R0a_vWAQXwal3IVJoNwEANa1tU7qHtpIGA,1178 +pandas/tests/series/methods/test_to_frame.py,sha256=nUkHQTpMTffkpDR7w3EcQvQAevEfflD6tHm3pTBxpTI,1992 +pandas/tests/series/methods/test_to_numpy.py,sha256=pEB2B08IdIPRYp5n7USYFX9HQbClJl4xOegjVd7mYLc,1321 +pandas/tests/series/methods/test_tolist.py,sha256=5F0VAYJTPDUTlqb5zDNEec-BeBY25ZjnjqYHFQq5GPU,1115 +pandas/tests/series/methods/test_truncate.py,sha256=suMKI1jMEVVSd_b5rlLM2iqsQ08c8a9CbN8mbNKdNEU,2307 +pandas/tests/series/methods/test_tz_localize.py,sha256=chP4Dnhzfg5zphKiHwZpN-43o_p6jf0wqgid3a-ZB-Y,4336 +pandas/tests/series/methods/test_unique.py,sha256=MQB5s4KVopor1V1CgvF6lZNUSX6ZcOS2_H5JRYf7emU,2219 +pandas/tests/series/methods/test_unstack.py,sha256=ahI7kSZhf9rX-TLfy7ymFLqM4m4t2niaD7p-9TV02XE,5116 +pandas/tests/series/methods/test_update.py,sha256=deGclG13lOOd_xEkKYEfFUDge0Iiudp9MJwuv7Yis-M,5339 +pandas/tests/series/methods/test_value_counts.py,sha256=LNmYx4OpzjjbLsjYHOrd4vxJZjKm9pEntq63I3mWttc,10109 +pandas/tests/series/methods/test_values.py,sha256=Q2jACWauws0GxIc_QzxbAOgMrJR6Qs7oyx_6LK7zVt8,747 +pandas/tests/series/methods/test_view.py,sha256=JipUTX6cC-NU4nVaDsyklmpRvfvf_HvUQE_fgYFqxPU,1851 +pandas/tests/series/test_api.py,sha256=_BaBWtliETwBqhysCrXr7x_1bc3r3SIAcIFOcjZ3F_A,10339 +pandas/tests/series/test_arithmetic.py,sha256=ifsEkN9NlowAT5PU5Foi0x68t1NkHkmVdMBQjtvsFl8,33812 +pandas/tests/series/test_constructors.py,sha256=ThOK2BIx6XPEhDiDFaB5qGJdPrurlrPzod80zN67PwQ,85250 +pandas/tests/series/test_cumulative.py,sha256=lYFRlmwTQBWBP-svJnt6e55b_wnCdDVZVhuvP0ezcR8,5034 +pandas/tests/series/test_formats.py,sha256=0zdlyYIuExdMVngGrYOatSq7M5ersVQe5rPZfG48KVs,17122 +pandas/tests/series/test_iteration.py,sha256=LKCUh0-OueVvxOr7uEG8U9cQxrAk7X-WDwfgEIKUekI,1408 +pandas/tests/series/test_logical_ops.py,sha256=4gXWtkqxNRA2ge7QjsomTkXVPnhd4u1t-50rH7L10KY,20199 +pandas/tests/series/test_missing.py,sha256=6TtIBFZgw-vrOYqRzSxhYCIBngoVX8r8-sT5jFgkWKM,3277 +pandas/tests/series/test_npfuncs.py,sha256=BxhxkI2uWC-ygB3DJK_-FX2TOxcuqDUHX4tRQqD9CfU,1093 +pandas/tests/series/test_reductions.py,sha256=hgPH62fS-Ha6Czk8WhKwJ8yy2KoSmG2Jx_ebpDysDxs,6519 +pandas/tests/series/test_subclass.py,sha256=aL5tgGGXZPPIXWIgpCPBrc7Q5KS8h1ipZNKCwciw-jY,2667 +pandas/tests/series/test_ufunc.py,sha256=uo0FJLsk2WFgOIMfKBlsuySEKzwkGYtcTPCRPmJt2qY,14758 +pandas/tests/series/test_unary.py,sha256=Xktw6w940LXm38OKLW-LRqpMZSA9EB5feCt9FMLh-E4,1620 +pandas/tests/series/test_validate.py,sha256=ziCmKi_jYuGyxcnsVaJpVgwSCjBgpHDJ0dbzWLa1-kA,668 +pandas/tests/strings/__init__.py,sha256=_uWelCEA7j9QwfQkgZomjbpFbuB_FlQO1sdMXak8Zn4,367 +pandas/tests/strings/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/strings/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/strings/__pycache__/test_api.cpython-310.pyc,, +pandas/tests/strings/__pycache__/test_case_justify.cpython-310.pyc,, +pandas/tests/strings/__pycache__/test_cat.cpython-310.pyc,, +pandas/tests/strings/__pycache__/test_extract.cpython-310.pyc,, +pandas/tests/strings/__pycache__/test_find_replace.cpython-310.pyc,, +pandas/tests/strings/__pycache__/test_get_dummies.cpython-310.pyc,, +pandas/tests/strings/__pycache__/test_split_partition.cpython-310.pyc,, 
+pandas/tests/strings/__pycache__/test_string_array.cpython-310.pyc,, +pandas/tests/strings/__pycache__/test_strings.cpython-310.pyc,, +pandas/tests/strings/conftest.py,sha256=M-9nIdAAynMJ7FvFFTHXJEUZFT8uOTbizf5ZOnOJ-Tk,3960 +pandas/tests/strings/test_api.py,sha256=eW1Z06Ghgx6bRdL7Kmd-YOi9TGHJ7zAUOTFp8I8hOEM,6353 +pandas/tests/strings/test_case_justify.py,sha256=b7vTpbbvc3MSR5F-Bhge2NqUVGPkOlauOuaHwr09W28,13545 +pandas/tests/strings/test_cat.py,sha256=zCJBBRtmaOxMGwXeS4evfDtAVccO3EmloEUn-dMi0ho,13575 +pandas/tests/strings/test_extract.py,sha256=LuGkboI2Q6d60kQgwMDudy-5eEbixaaCGP78CwHli6c,26463 +pandas/tests/strings/test_find_replace.py,sha256=u_XxsoYO6onAYZWc52ATYjEp9RpRoAoInMvxshaPVTE,35016 +pandas/tests/strings/test_get_dummies.py,sha256=LyWHwMrb5pgX69t4b9ouHflXKp4gBXadTCkaZSk_HB4,1608 +pandas/tests/strings/test_split_partition.py,sha256=vi8PvUAnLQgWsWehN0VpR7zfIOShPce0svmGhRNht5U,23234 +pandas/tests/strings/test_string_array.py,sha256=I2Y1NMM_iOn9K6068sRNP_mcXHJYpxqIsDmhk1B2avQ,3558 +pandas/tests/strings/test_strings.py,sha256=rwLDRm3JWax5Nsbnl0v65e7RYSgK70lLWmDULUVrvx4,25502 +pandas/tests/test_aggregation.py,sha256=-9GlIUg7qPr3Ppj_TNbBF85oKjSIMAv056hfcYZvhWw,2779 +pandas/tests/test_algos.py,sha256=-OriW5Hjib-BwwHpO19liYsC-SpkYIhjnMJ3tHfz1Zs,77987 +pandas/tests/test_common.py,sha256=SHkM8XyjSNxUJquSiEDa3lqE0GJ7tLsfwdro0x2leAg,7695 +pandas/tests/test_downstream.py,sha256=--pbHUtoMwjpQB0_gsyOQpHkKAzKrZwOZlrHt-RsZW8,10501 +pandas/tests/test_errors.py,sha256=4WVxQSyv6okTRVQC9LC9thX5ZjXVMrX-3l93bEd9KZ8,2789 +pandas/tests/test_expressions.py,sha256=fyTafylKNf7Wb3qzwlvIGbM4MdlJB7V4yGJrgiMRE5w,14256 +pandas/tests/test_flags.py,sha256=Dsu6pvQ5A6Manyt1VlQLK8pRpZtr-S2T3ubJvRQaRlA,1550 +pandas/tests/test_multilevel.py,sha256=3-Gmz-7nEzWFDYT5k_nzRL17xLCj2ZF3q69dzHO5sL8,12206 +pandas/tests/test_nanops.py,sha256=NWzcF6_g_IT0HQRG9ETV3kimAAKVmoFohuGymqsDLPI,42042 +pandas/tests/test_optional_dependency.py,sha256=wnDdNm9tlr2MFSOwB9EWAPUf1_H3L0GUTbGeZyGUqL8,3159 +pandas/tests/test_register_accessor.py,sha256=L2cU-H7UU1M36_7DU7p69SvGEFWZXpMpUJ8NZS2yOTI,2671 +pandas/tests/test_sorting.py,sha256=0rqJWWFq1kVX8m-W0X7dXdl9XoaYxZKuGHtBiJIn3nQ,16595 +pandas/tests/test_take.py,sha256=YSMLvpggEaY_MOT3PkVtQYUw0MfwN4bVvI3EgmOgxfA,11539 +pandas/tests/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tools/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/tools/__pycache__/test_to_datetime.cpython-310.pyc,, +pandas/tests/tools/__pycache__/test_to_numeric.cpython-310.pyc,, +pandas/tests/tools/__pycache__/test_to_time.cpython-310.pyc,, +pandas/tests/tools/__pycache__/test_to_timedelta.cpython-310.pyc,, +pandas/tests/tools/test_to_datetime.py,sha256=9-TkN1fCZiz4hBMUOvCo7YI7bQgHjXib2Nh2H51DmqA,147160 +pandas/tests/tools/test_to_numeric.py,sha256=R9fTxZIebRQp-yNh2oDsHYF8xgszrVLNqlVDYGwnajM,29480 +pandas/tests/tools/test_to_time.py,sha256=e-QmGu5nAe9clT8n9bda5aEwHBH4ZaXqBzs5-mKWMYQ,2417 +pandas/tests/tools/test_to_timedelta.py,sha256=sA-q01yavNfamRKB0JZ08ou3PN-G38PZ1Tuk5KOL8iI,12454 +pandas/tests/tseries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tseries/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/tseries/frequencies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tseries/frequencies/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/tseries/frequencies/__pycache__/test_freq_code.cpython-310.pyc,, +pandas/tests/tseries/frequencies/__pycache__/test_frequencies.cpython-310.pyc,, 
+pandas/tests/tseries/frequencies/__pycache__/test_inference.cpython-310.pyc,, +pandas/tests/tseries/frequencies/test_freq_code.py,sha256=hvQl37z3W6CwcLOAqrgc2acqtjOJIbqVbnXkEUBY4cM,1727 +pandas/tests/tseries/frequencies/test_frequencies.py,sha256=tyI9e6ve7sEXdALy9GYjMV3mAQHmQF2IqW-xFzPdgjY,821 +pandas/tests/tseries/frequencies/test_inference.py,sha256=o8bZEapedbcC1zoj_slbggdZkzxX9Z1oh6VuCly8PU4,15111 +pandas/tests/tseries/holiday/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tseries/holiday/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/tseries/holiday/__pycache__/test_calendar.cpython-310.pyc,, +pandas/tests/tseries/holiday/__pycache__/test_federal.cpython-310.pyc,, +pandas/tests/tseries/holiday/__pycache__/test_holiday.cpython-310.pyc,, +pandas/tests/tseries/holiday/__pycache__/test_observance.cpython-310.pyc,, +pandas/tests/tseries/holiday/test_calendar.py,sha256=SdMzzgTizQ88wJBRVTmVIgxE8E20_sgLFunP3WHlkZU,3622 +pandas/tests/tseries/holiday/test_federal.py,sha256=ukOOSRoUdcfUOlAT10AWVj8uxiD-88_H8xd--WpOsG0,1948 +pandas/tests/tseries/holiday/test_holiday.py,sha256=0NsEkl5wr2ckwvGiXnrYhluZZRpCc_Ede6SqdrFGc7I,11173 +pandas/tests/tseries/holiday/test_observance.py,sha256=GJBqIF4W6QG4k3Yzz6_13WMOR4nHSVzPbixHxO8Tukw,2723 +pandas/tests/tseries/offsets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tseries/offsets/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/common.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_business_day.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_business_hour.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_business_month.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_business_quarter.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_business_year.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_common.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_custom_business_day.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_custom_business_hour.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_custom_business_month.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_dst.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_easter.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_fiscal.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_index.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_month.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_offsets.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_offsets_properties.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_quarter.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_ticks.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_week.cpython-310.pyc,, +pandas/tests/tseries/offsets/__pycache__/test_year.cpython-310.pyc,, +pandas/tests/tseries/offsets/common.py,sha256=D3D8mcwwzW2kSEB8uX8gO6ARX4dB4PEu3_953APlRmk,900 +pandas/tests/tseries/offsets/test_business_day.py,sha256=dqOwIoAq3Mcxrc0EEeqJnnDvJYCFz5lA0JewVuODhBc,6808 +pandas/tests/tseries/offsets/test_business_hour.py,sha256=PV5Ddc4vEsQXrXhCKyDIcKptcNhXgIe-KiY14zsbVE0,58452 +pandas/tests/tseries/offsets/test_business_month.py,sha256=ZQlcBF15WTMq5w8uC7QeQ6QYVWN8hmfu1PtJvW-ebYU,6717 
+pandas/tests/tseries/offsets/test_business_quarter.py,sha256=Tvp5J5r5uDBh8Y9yW65JItTp-B5fdJ4T9G0fxelHYaw,12591 +pandas/tests/tseries/offsets/test_business_year.py,sha256=OBs55t5gGKSPhTsnGafi5Uqsrjmq1cKpfuwWLUBR8Uo,6436 +pandas/tests/tseries/offsets/test_common.py,sha256=HpiuRR_ktnWLWSoFtMe87AVUCedpRcqxoTeVrfCg7is,7406 +pandas/tests/tseries/offsets/test_custom_business_day.py,sha256=YNN53-HvTW4JrbLYwyUiM10rQqIof1iA_W1uYkiHw7w,3180 +pandas/tests/tseries/offsets/test_custom_business_hour.py,sha256=UXa57Q-ZYPDMv307t7UKQGOIE32CH_FmCNY3hX8dcN4,12312 +pandas/tests/tseries/offsets/test_custom_business_month.py,sha256=WBgCVPO6PUa4oX0bDSDk_UE5hOeYbIo2sduIM9X3ASI,13362 +pandas/tests/tseries/offsets/test_dst.py,sha256=0s6bpzEFkVfUKN6lAkeFTiyzMwYRQwrZs49WAu-LK4o,9139 +pandas/tests/tseries/offsets/test_easter.py,sha256=oZlJ3lESuLTEv6A_chVDsD3Pa_cqgbVc4_zxrEE7cvc,1150 +pandas/tests/tseries/offsets/test_fiscal.py,sha256=p_rXA9wPnKZwDp40kaB8uGjq2fpHPCRU5PFF-1rClbA,26732 +pandas/tests/tseries/offsets/test_index.py,sha256=aeW6vyuME-22oikOhiE6q6nrLkIc22TjV3wPxpWXjIk,1147 +pandas/tests/tseries/offsets/test_month.py,sha256=EHsmRpEhG_CLSNEUOtA48auiJxFnr8sPsHQTyZeuu2g,23243 +pandas/tests/tseries/offsets/test_offsets.py,sha256=0yEFO27kh9uvdu4-MYW9bp5OX9Wb3lIKdiC4Jcna-2o,40623 +pandas/tests/tseries/offsets/test_offsets_properties.py,sha256=P_16zBX7ocaGN-br0pEQBGTlewfiDpJsnf5R1ei83JQ,1971 +pandas/tests/tseries/offsets/test_quarter.py,sha256=VBRsOqNS6xzYV63UVrPU3Z3_eAZQw4WefK2gPNfKork,11839 +pandas/tests/tseries/offsets/test_ticks.py,sha256=1n9PC1iEDQwnUKJivCaC6Wms3r8Je8ZKcGua_ySLLqE,11548 +pandas/tests/tseries/offsets/test_week.py,sha256=EUTDq6l4YT8xbBhQb0iHyNfJEme2jVZdjzaeg-Qj75g,12330 +pandas/tests/tseries/offsets/test_year.py,sha256=EM9DThnH2c6CMw518YpxkrpJixPmH3OVQ_Qp8iMIHPQ,10455 +pandas/tests/tslibs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tslibs/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_api.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_array_to_datetime.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_ccalendar.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_conversion.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_fields.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_libfrequencies.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_liboffsets.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_np_datetime.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_npy_units.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_parse_iso8601.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_parsing.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_period.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_resolution.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_strptime.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_timedeltas.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_timezones.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_to_offset.cpython-310.pyc,, +pandas/tests/tslibs/__pycache__/test_tzconversion.cpython-310.pyc,, +pandas/tests/tslibs/test_api.py,sha256=ooEY2RyO9oL8Wcbsc958sGrBjveqTQZPauLeBN3n9xc,1525 +pandas/tests/tslibs/test_array_to_datetime.py,sha256=uQOT4gOHQr35s3R6d8GxDdCH21db6rJZzXKQYrh89y0,11871 +pandas/tests/tslibs/test_ccalendar.py,sha256=Rl2OjoB8pHaOyXW5MmshsHmm8nNMuHQvS_Du1L6ODqw,1903 +pandas/tests/tslibs/test_conversion.py,sha256=rgtB7pIs6VvpkNakcew9PFQ8oVHtwCwwBtu2gCFqbh4,4555 
+pandas/tests/tslibs/test_fields.py,sha256=BQKlBXOC4LsXe7eT2CK5mRGR_25g9qYykQZ6ojoGjbE,1352 +pandas/tests/tslibs/test_libfrequencies.py,sha256=Ai6deDiGlwUHR9mVvlkIbXYzWZADHuPLlaBjDK0R2wU,717 +pandas/tests/tslibs/test_liboffsets.py,sha256=958cVv4vva5nawrYcmSinfu62NIL7lYOXOHN7yU-gAE,5108 +pandas/tests/tslibs/test_np_datetime.py,sha256=n7MNYHw7i03w4ZcVTM6GkoRN7Y7UIGxnshjHph2eDPs,7889 +pandas/tests/tslibs/test_npy_units.py,sha256=d9NFsygcKGtp-pw-ZpOvIxMhpsRqd1uPBVlqejHkNmU,922 +pandas/tests/tslibs/test_parse_iso8601.py,sha256=XGQ_GBOCosTiOFFjK4rYoDDZcIBitnyIb_0SXxKF9yo,4535 +pandas/tests/tslibs/test_parsing.py,sha256=5b-ObA324ikkn2AjKTS3-666i8bKhiXtTICi3APdBGQ,13889 +pandas/tests/tslibs/test_period.py,sha256=l1xiNGDhMIJFG21BcAcE8Gkd6GODs-dPVOXcNuw6XTA,3424 +pandas/tests/tslibs/test_resolution.py,sha256=YC6IpOJsIHrsn7DUGi_LKdQrAuZgAqofNeW0DU2gays,1544 +pandas/tests/tslibs/test_strptime.py,sha256=DqjYyJ9t-cpSFDRyF3RepxMSZ4qvPllEjvarqvQKw1E,3896 +pandas/tests/tslibs/test_timedeltas.py,sha256=DaaxCrPg5Usv1UtpaVWpiYWixUtNT1FqjtS26MJq9PI,4662 +pandas/tests/tslibs/test_timezones.py,sha256=Hb56aLljCgRtBmXp7N_TaXM55ODLs6Mvl851dncnpsQ,4724 +pandas/tests/tslibs/test_to_offset.py,sha256=GaUG1VE0HhjMFjIj3aAP1LtzqFBCVx5_e0GUX1alIIU,5873 +pandas/tests/tslibs/test_tzconversion.py,sha256=6Ouplo1p8ArDrxCzPNyH9xpYkxERNPvbd4C_-WmTNd4,953 +pandas/tests/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/util/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/util/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_almost_equal.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_attr_equal.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_categorical_equal.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_extension_array_equal.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_frame_equal.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_index_equal.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_interval_array_equal.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_numpy_array_equal.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_produces_warning.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_assert_series_equal.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_deprecate.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_deprecate_kwarg.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_deprecate_nonkeyword_arguments.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_doc.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_hashing.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_numba.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_rewrite_warning.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_shares_memory.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_show_versions.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_util.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_validate_args.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_validate_args_and_kwargs.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_validate_inclusive.cpython-310.pyc,, +pandas/tests/util/__pycache__/test_validate_kwargs.cpython-310.pyc,, +pandas/tests/util/conftest.py,sha256=loEbQsEtHtv-T4Umeq_UeV6R7s8SO01GHbW6gn8lvlo,476 +pandas/tests/util/test_assert_almost_equal.py,sha256=K1-2c3XrbAb3jU23Dl9T79ueRfE32_Va7CNPfvopOYo,16803 +pandas/tests/util/test_assert_attr_equal.py,sha256=ZXTojP4V5Kle96QOFhxCZjq-dQf6gHvNOorYyOuFP1I,1045 
+pandas/tests/util/test_assert_categorical_equal.py,sha256=yDmVzU22k5k5txSHixGfRJ4nKeP46FdNoh3CY1xEwEM,2728 +pandas/tests/util/test_assert_extension_array_equal.py,sha256=quw84fCgsrwtUMu-TcvHmrq5-08J7l1ZzS_3h1Eh3qw,3887 +pandas/tests/util/test_assert_frame_equal.py,sha256=ds6rGI2yrNUfU4tZfXT1KocldOkcOk0kRpBlLUk4S30,13382 +pandas/tests/util/test_assert_index_equal.py,sha256=V0rKjnd0r3Lpas1UF45kVaFxLvKDBVpQmkCg2nfvErU,10157 +pandas/tests/util/test_assert_interval_array_equal.py,sha256=ITqL0Z8AAy5D1knACPOHodI64AHxmNzxiG-i9FeU0b8,2158 +pandas/tests/util/test_assert_numpy_array_equal.py,sha256=fgb8GdUwX4EYiR3PWbjJULNfAJz4DfJ8RJXchssygO4,6624 +pandas/tests/util/test_assert_produces_warning.py,sha256=A-pN3V12hnIqlbFYArYbdU-992RgJ-fqsaKbM0yvYPw,8412 +pandas/tests/util/test_assert_series_equal.py,sha256=4_pRYe67lrrpjhcm5ceU4XBq9umgbczf7BnpvcuEQ8E,15081 +pandas/tests/util/test_deprecate.py,sha256=1hGoeUQTew5o0DnCjLV5-hOfEuSoIGOXGByq5KpAP7A,1617 +pandas/tests/util/test_deprecate_kwarg.py,sha256=7T2QkCxXUoJHhCxUjAH_5_hM-BHC6nPWG635LFY35lo,2043 +pandas/tests/util/test_deprecate_nonkeyword_arguments.py,sha256=0UkqIi4ehxD3aoA3z7y8-3dpOs6o30_Gp8rZvFX1W9Q,3623 +pandas/tests/util/test_doc.py,sha256=u0fxCg4zZWhB4SkJYc2huQ0xv7sKKAt0OlpWldmhh_M,1492 +pandas/tests/util/test_hashing.py,sha256=ZjoFCs6MoAhGV1j2WyjjEJkqyO9WQgRqwS6xx-3n0oE,13857 +pandas/tests/util/test_numba.py,sha256=6eOVcokESth7h6yyeehVizx61FtwDdVbF8wV8j3t-Ic,308 +pandas/tests/util/test_rewrite_warning.py,sha256=AUHz_OT0HS6kXs-9e59GflBCP3Tb5jy8jl9FxBg5rDs,1151 +pandas/tests/util/test_shares_memory.py,sha256=-ksI1I3vK3PR6jMqcQn_yFyJ5P0v3eLsiMI9vjZVMi4,789 +pandas/tests/util/test_show_versions.py,sha256=FjYUrUMAF7hOzphaXED__8yjeF0HTccZS6q05__rH44,2096 +pandas/tests/util/test_util.py,sha256=4UacWPLyjRQZU697jBxBWO6V1gUgkE4E-KKF6H6aXuE,1463 +pandas/tests/util/test_validate_args.py,sha256=9Z4zTqnKAWn1q9KZNvuO3DF6oszHjQrQgtOOimurWcs,1907 +pandas/tests/util/test_validate_args_and_kwargs.py,sha256=d_XcMRAQ9r--yIAAWSdJML6KeWgksy5qRNFXaY1BMQA,2456 +pandas/tests/util/test_validate_inclusive.py,sha256=w2twetJgIedm6KGQ4WmdmGC_6-RShFjXBMBVxR0gcME,896 +pandas/tests/util/test_validate_kwargs.py,sha256=NAZi-4Z0DrlQKZkkcKrWxoHxzWuKFxY8iphCBweA9jk,1808 +pandas/tests/window/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/window/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/window/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_api.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_apply.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_base_indexer.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_cython_aggregations.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_dtypes.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_ewm.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_expanding.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_groupby.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_numba.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_online.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_pairwise.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_rolling.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_rolling_functions.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_rolling_quantile.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_rolling_skew_kurt.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_timeseries_window.cpython-310.pyc,, +pandas/tests/window/__pycache__/test_win_type.cpython-310.pyc,, 
+pandas/tests/window/conftest.py,sha256=rlS3eILzfTByRmmm7HLjk-FHEIbdTVVE9c0Dq-nfxa4,3137 +pandas/tests/window/moments/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/window/moments/__pycache__/__init__.cpython-310.pyc,, +pandas/tests/window/moments/__pycache__/conftest.cpython-310.pyc,, +pandas/tests/window/moments/__pycache__/test_moments_consistency_ewm.cpython-310.pyc,, +pandas/tests/window/moments/__pycache__/test_moments_consistency_expanding.cpython-310.pyc,, +pandas/tests/window/moments/__pycache__/test_moments_consistency_rolling.cpython-310.pyc,, +pandas/tests/window/moments/conftest.py,sha256=xSkyyVltsAkJETLDHJSksjRkjcVHsnhfyCiNvhsQ3no,1595 +pandas/tests/window/moments/test_moments_consistency_ewm.py,sha256=4FPmIGVQuOUg13aT5c9l_DN7j7K3J9QEU0KXeO2Qrt0,8107 +pandas/tests/window/moments/test_moments_consistency_expanding.py,sha256=e4Vn3nE02q-UeRH2aWLOSMv0QN4nN04iePKst5N-Vbo,5537 +pandas/tests/window/moments/test_moments_consistency_rolling.py,sha256=UBQL1mWD1qIB3fNb4tizqv-q4xlAz4tGT1nC1G-9RWM,7821 +pandas/tests/window/test_api.py,sha256=QzFr1mgU99ETdYjqoucENyzJLmruPOO-dGR41MCARsY,13192 +pandas/tests/window/test_apply.py,sha256=v9YC4aORGX7yA50RFMjZqMx93SWp9o4Vpjo32xTROx0,9865 +pandas/tests/window/test_base_indexer.py,sha256=Fz81kU5x1g6OnNmRra6PRarPpq5HEYuA8XX0sR_y6LI,15954 +pandas/tests/window/test_cython_aggregations.py,sha256=wPAk76yfrG9D1-IzI0kDklpiTVqgp4xsEGjONe9lCY4,3967 +pandas/tests/window/test_dtypes.py,sha256=a3Xnqcq_jO0kczZmhmuBKkmCsKHOOufy9h6yNCPHlMk,5785 +pandas/tests/window/test_ewm.py,sha256=F1BB5E3_n5i5IzDNTMZeZzmG3aZqxC1jp_Pj-bWcozU,23020 +pandas/tests/window/test_expanding.py,sha256=Kz-2wSWxj4E31kd6y4jo7T7gE7aSe7yGHMYE7b4Bq18,24239 +pandas/tests/window/test_groupby.py,sha256=KXxA5gESxTSJEjmgnIz29Kz1jJRz1PYQsD64GSsoDz0,46719 +pandas/tests/window/test_numba.py,sha256=cd3uGoexUCkQ3BRNrGhfjzBb0DYGJbJ67_PJjnIxU8Q,16046 +pandas/tests/window/test_online.py,sha256=OuVpQr2NExZQ36Fl5RW4cm-2sDF3_CgEhbP-3W2xjUM,3346 +pandas/tests/window/test_pairwise.py,sha256=BXJLxRbolFs00FxTMp3uIFDNpZkciv8VGyAXFMw3zHI,16141 +pandas/tests/window/test_rolling.py,sha256=PzPkVsNDBUh6wgzFZvq_YNba2bdmwSO_H8BUK9ZxAys,61158 +pandas/tests/window/test_rolling_functions.py,sha256=xmaaXFaMq22o1s0Ba4NieIkTZtKWi9WOYae6z8i_rBo,17877 +pandas/tests/window/test_rolling_quantile.py,sha256=AvsqMR5YrVAlAFfhL0lHHAZIazXnzI1VkoVuPuiDEro,5516 +pandas/tests/window/test_rolling_skew_kurt.py,sha256=Emw9AJhTZyuVnxPg-nfYxpRNGJToWJ-he7obTSOy8iU,7807 +pandas/tests/window/test_timeseries_window.py,sha256=I0hk72tAFP4RJUaGesfUrjR5HC_bxBWwcXW7mxgslfg,24250 +pandas/tests/window/test_win_type.py,sha256=GRu_7tF1tQAEH8hcb6kZPSG2FJihUTE1_85tH1iYaN8,17522 +pandas/tseries/__init__.py,sha256=CM1Forog6FJC_5YY4IueiWfQ9cATlSDJ4hF23RTniBQ,293 +pandas/tseries/__pycache__/__init__.cpython-310.pyc,, +pandas/tseries/__pycache__/api.cpython-310.pyc,, +pandas/tseries/__pycache__/frequencies.cpython-310.pyc,, +pandas/tseries/__pycache__/holiday.cpython-310.pyc,, +pandas/tseries/__pycache__/offsets.cpython-310.pyc,, +pandas/tseries/api.py,sha256=0Tms-OsqaHcpWH7a2F4mqKqEV-G5btiZKte3cUnEWQM,234 +pandas/tseries/frequencies.py,sha256=HNmBHzxRPhtlnpZF6iBSvq6e2du9J1JZ9gQ2c48Bvv0,17686 +pandas/tseries/holiday.py,sha256=G9kQvaBMzdNUoCs4WApAcxzSkOozFEyfDYFFjL8ZlZc,18596 +pandas/tseries/offsets.py,sha256=wLWH1_fg7dYGDsHDRyBxc62788G9CDhLcpDeZHt5ixI,1531 +pandas/util/__init__.py,sha256=tXNVCMKcgkFf4GETkpUx_UYvN56-54tYCCM0-04OIn4,827 +pandas/util/__pycache__/__init__.cpython-310.pyc,, 
+pandas/util/__pycache__/_decorators.cpython-310.pyc,, +pandas/util/__pycache__/_doctools.cpython-310.pyc,, +pandas/util/__pycache__/_exceptions.cpython-310.pyc,, +pandas/util/__pycache__/_print_versions.cpython-310.pyc,, +pandas/util/__pycache__/_test_decorators.cpython-310.pyc,, +pandas/util/__pycache__/_tester.cpython-310.pyc,, +pandas/util/__pycache__/_validators.cpython-310.pyc,, +pandas/util/_decorators.py,sha256=n1OyKRRG-dcCRUSmyejpKTyfP_iu2kVF0TJ_9yIJkeo,17106 +pandas/util/_doctools.py,sha256=Es1FLqrmsOLpJ_7Y24q_vqdXGw5Vy6vcajcfbIi_FCo,6819 +pandas/util/_exceptions.py,sha256=H6Tz6X1PqPVp6wG_7OsjHEqTvTM9I3SebF5-WcTdZOc,2876 +pandas/util/_print_versions.py,sha256=eHw3wpaF-l66uzupWfl_x2jjXz8WTedHZdH4FFKtWo0,4636 +pandas/util/_test_decorators.py,sha256=KEhS1cMaBbf4U0R0KMRXZl-CcCkPfNqxpVz8BTtb0zY,5079 +pandas/util/_tester.py,sha256=Mluqpd_YwVdcdgZfSu-_oVdadk_JjX9FuPGFjn_S6ZA,1462 +pandas/util/_validators.py,sha256=VGKuOFzz0rY5g2dmbKpWV8vZb5Jb1RV5w-HTVi1GMY0,14300 +pandas/util/version/__init__.py,sha256=57SNOildSF8ehHn99uGwCZeAkTEuA6YMw6cYxjEyQ2I,16394 +pandas/util/version/__pycache__/__init__.cpython-310.pyc,, diff --git a/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/REQUESTED b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/WHEEL b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..4e4c38ae320920b8f083b87f408214cdecd350d2 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: meson +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64 +Tag: cp310-cp310-manylinux2014_x86_64 + diff --git a/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/entry_points.txt b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..3c1b523d70758fbd0080e21ca4c7ce6d9c9d9bd5 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pandas-2.2.3.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[pandas_plotting_backends] +matplotlib = pandas:plotting._matplotlib + diff --git a/evalkit_tf437/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1800 b/evalkit_tf437/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1800 new file mode 100644 index 0000000000000000000000000000000000000000..41bf2c4d2148e68ddc23f9b5da2fb6d662f0e774 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1800 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a9588bbb6ed632ec93e4268425f77cd7c26a148d6a83bfab2c9e37ee3de0a73 +size 5338320 diff --git a/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/INSTALLER b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/LICENSE b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/LICENSE new file mode 100644 index 
0000000000000000000000000000000000000000..67db8588217f266eb561f75fae738656325deac9 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
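The RECORD file in the next hunk follows the installed-distributions metadata format (PEP 376, updated by PEP 627): each entry is `path,sha256=<urlsafe-base64 digest>,size`, with the base64 padding stripped (compiled `.pyc` entries carry empty hash and size fields). A minimal sketch of how such an entry can be re-derived from a file on disk (the path is illustrative):

    import base64
    import hashlib

    def record_digest(path):
        # RECORD stores SHA-256 digests urlsafe-base64 encoded, without '=' padding.
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    # e.g. record_digest("requests/api.py") should reproduce the digest recorded below.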
diff --git a/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/RECORD b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..6da255187d444f5c2652e7c8774ceb8606a589b3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/RECORD @@ -0,0 +1,43 @@ +requests-2.32.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +requests-2.32.3.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 +requests-2.32.3.dist-info/METADATA,sha256=ZY7oRUweLnb7jCEnEW9hFWs7IpQbNVnAA4ncpwA4WBo,4610 +requests-2.32.3.dist-info/RECORD,, +requests-2.32.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +requests-2.32.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +requests-2.32.3.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9 +requests/__init__.py,sha256=4xaAERmPDIBPsa2PsjpU9r06yooK-2mZKHTZAhWRWts,5072 +requests/__pycache__/__init__.cpython-310.pyc,, +requests/__pycache__/__version__.cpython-310.pyc,, +requests/__pycache__/_internal_utils.cpython-310.pyc,, +requests/__pycache__/adapters.cpython-310.pyc,, +requests/__pycache__/api.cpython-310.pyc,, +requests/__pycache__/auth.cpython-310.pyc,, +requests/__pycache__/certs.cpython-310.pyc,, +requests/__pycache__/compat.cpython-310.pyc,, +requests/__pycache__/cookies.cpython-310.pyc,, +requests/__pycache__/exceptions.cpython-310.pyc,, +requests/__pycache__/help.cpython-310.pyc,, +requests/__pycache__/hooks.cpython-310.pyc,, +requests/__pycache__/models.cpython-310.pyc,, +requests/__pycache__/packages.cpython-310.pyc,, +requests/__pycache__/sessions.cpython-310.pyc,, +requests/__pycache__/status_codes.cpython-310.pyc,, +requests/__pycache__/structures.cpython-310.pyc,, +requests/__pycache__/utils.cpython-310.pyc,, +requests/__version__.py,sha256=FVfglgZmNQnmYPXpOohDU58F5EUb_-VnSTaAesS187g,435 +requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495 +requests/adapters.py,sha256=KIcecscqam6reOCXRl4DwP4jX8Jcl8sd57ft17KR2cQ,27451 +requests/api.py,sha256=_Zb9Oa7tzVIizTKwFrPjDEY9ejtm_OnSRERnADxGsQs,6449 +requests/auth.py,sha256=kF75tqnLctZ9Mf_hm9TZIj4cQWnN5uxRz8oWsx5wmR0,10186 +requests/certs.py,sha256=Z9Sb410Anv6jUFTyss0jFFhU6xst8ctELqfy8Ev23gw,429 +requests/compat.py,sha256=C5w_DPLSurXPgcdWU78fora0APmbYkX2G89QvH5xzPA,1817 +requests/cookies.py,sha256=bNi-iqEj4NPZ00-ob-rHvzkvObzN3lEpgw3g6paS3Xw,18590 +requests/exceptions.py,sha256=jJPS1UWATs86ShVUaLorTiJb1SaGuoNEWgICJep-VkY,4260 +requests/help.py,sha256=gPX5d_H7Xd88aDABejhqGgl9B1VFRTt5BmiYvL3PzIQ,3875 +requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733 +requests/models.py,sha256=k42roXzC8u_OagAPQi9U4MkfO7i4r2FdaqvMqstPehc,35418 +requests/packages.py,sha256=_g0gZ681UyAlKHRjH6kanbaoxx2eAb6qzcXiODyTIoc,904 +requests/sessions.py,sha256=ykTI8UWGSltOfH07HKollH7kTBGw4WhiBVaQGmckTw4,30495 +requests/status_codes.py,sha256=iJUAeA25baTdw-6PfD0eF4qhpINDJRJI-yaMqxs4LEI,4322 +requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912 +requests/utils.py,sha256=HiQC6Nq_Da3ktaMiFzQkh-dCk3iQHHKEsYS5kDc-8Cw,33619 diff --git a/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/REQUESTED b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/WHEEL b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/top_level.txt b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..f2293605cf1b01dca72aad0a15c45b72ed5429a2 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/requests-2.32.3.dist-info/top_level.txt @@ -0,0 +1 @@ +requests diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34aaea702787cce1989eeca58272ebc901549b13 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/__init__.py @@ -0,0 +1,162 @@ +"""Configure global settings and get information about the working environment.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +# Machine learning module for Python +# ================================== +# +# sklearn is a Python module integrating classical machine +# learning algorithms in the tightly-knit world of scientific Python +# packages (numpy, scipy, matplotlib). +# +# It aims to provide simple and efficient solutions to learning problems +# that are accessible to everybody and reusable in various contexts: +# machine-learning as a versatile tool for science and engineering. +# +# See https://scikit-learn.org for complete documentation. + +import importlib as _importlib +import logging +import os +import random + +from ._config import config_context, get_config, set_config + +logger = logging.getLogger(__name__) + + +# PEP 440 compatible formatted version, see: +# https://www.python.org/dev/peps/pep-0440/ +# +# Generic release markers: +# X.Y.0 # For first release after an increment in Y +# X.Y.Z # For bugfix releases +# +# Admissible pre-release markers: +# X.Y.ZaN # Alpha release +# X.Y.ZbN # Beta release +# X.Y.ZrcN # Release Candidate +# X.Y.Z # Final release +# +# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. +# 'X.Y.dev0' is the canonical version of 'X.Y.dev' +# +__version__ = "1.6.1" + + +# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded +# simultaneously. This can happen for instance when calling BLAS inside a +# prange. Setting the following environment variable allows multiple OpenMP +# libraries to be loaded. It should not degrade performance since we manually +# take care of potential over-subscription performance issues, in sections of +# the code where nested OpenMP loops can happen, by dynamically reconfiguring +# the inner OpenMP runtime to temporarily disable it while under the scope of +# the outer OpenMP parallel section. +os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True") + +# Workaround issue discovered in intel-openmp 2019.5: +# https://github.com/ContinuumIO/anaconda-issues/issues/11294 +os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") + +# `_distributor_init` allows distributors to run custom init code.
+# For instance, for the Windows wheel, this is used to pre-load the +# vcomp shared library runtime for OpenMP embedded in the sklearn/.libs +# sub-folder. +# It is necessary to do this prior to importing show_versions as the +# latter is linked to the OpenMP runtime to make it possible to introspect +# it, and importing it first would fail if the OpenMP dll cannot be found. +from . import ( # noqa: F401 E402 + __check_build, + _distributor_init, +) +from .base import clone # noqa: E402 +from .utils._show_versions import show_versions # noqa: E402 + +_submodules = [ + "calibration", + "cluster", + "covariance", + "cross_decomposition", + "datasets", + "decomposition", + "dummy", + "ensemble", + "exceptions", + "experimental", + "externals", + "feature_extraction", + "feature_selection", + "frozen", + "gaussian_process", + "inspection", + "isotonic", + "kernel_approximation", + "kernel_ridge", + "linear_model", + "manifold", + "metrics", + "mixture", + "model_selection", + "multiclass", + "multioutput", + "naive_bayes", + "neighbors", + "neural_network", + "pipeline", + "preprocessing", + "random_projection", + "semi_supervised", + "svm", + "tree", + "discriminant_analysis", + "impute", + "compose", +] + +__all__ = _submodules + [ + # Non-modules: + "clone", + "get_config", + "set_config", + "config_context", + "show_versions", +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + if name in _submodules: + return _importlib.import_module(f"sklearn.{name}") + else: + try: + return globals()[name] + except KeyError: + raise AttributeError(f"Module 'sklearn' has no attribute '{name}'") + + +_BUILT_WITH_MESON = False +try: + import sklearn._built_with_meson # noqa: F401 + + _BUILT_WITH_MESON = True +except ModuleNotFoundError: + pass + + +def setup_module(module): + """Fixture for the tests to ensure globally controllable seeding of RNGs""" + + import numpy as np + + # Check if a random seed exists in the environment; if not, create one.
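The `__getattr__`/`__dir__` pair defined above implements PEP 562 module-level attribute hooks: submodules are imported lazily on first access, so `import sklearn` itself stays cheap. A stripped-down sketch of the same pattern for a hypothetical package `mypkg` (illustrative, not part of the vendored file):

    # mypkg/__init__.py
    import importlib as _importlib

    _submodules = ["cluster", "metrics"]
    __all__ = list(_submodules)

    def __dir__():
        return __all__

    def __getattr__(name):
        # Only called when normal module attribute lookup fails (PEP 562).
        if name in _submodules:
            return _importlib.import_module(f"mypkg.{name}")
        raise AttributeError(f"Module 'mypkg' has no attribute '{name}'")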
+ _random_seed = os.environ.get("SKLEARN_SEED", None) + if _random_seed is None: + _random_seed = np.random.uniform() * np.iinfo(np.int32).max + _random_seed = int(_random_seed) + print("I: Seeding RNGs with %r" % _random_seed) + np.random.seed(_random_seed) + random.seed(_random_seed) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_built_with_meson.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_built_with_meson.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_config.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_config.py new file mode 100644 index 0000000000000000000000000000000000000000..05549c88a9ddc896b67fe19ee2fe43ff443d31d0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_config.py @@ -0,0 +1,376 @@ +"""Global configuration state and functions for management""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import os +import threading +from contextlib import contextmanager as contextmanager + +_global_config = { + "assume_finite": bool(os.environ.get("SKLEARN_ASSUME_FINITE", False)), + "working_memory": int(os.environ.get("SKLEARN_WORKING_MEMORY", 1024)), + "print_changed_only": True, + "display": "diagram", + "pairwise_dist_chunk_size": int( + os.environ.get("SKLEARN_PAIRWISE_DIST_CHUNK_SIZE", 256) + ), + "enable_cython_pairwise_dist": True, + "array_api_dispatch": False, + "transform_output": "default", + "enable_metadata_routing": False, + "skip_parameter_validation": False, +} +_threadlocal = threading.local() + + +def _get_threadlocal_config(): + """Get a threadlocal **mutable** configuration. If the configuration + does not exist, copy the default global configuration.""" + if not hasattr(_threadlocal, "global_config"): + _threadlocal.global_config = _global_config.copy() + return _threadlocal.global_config + + +def get_config(): + """Retrieve current values for configuration set by :func:`set_config`. + + Returns + ------- + config : dict + Keys are parameter names that can be passed to :func:`set_config`. + + See Also + -------- + config_context : Context manager for global scikit-learn configuration. + set_config : Set global scikit-learn configuration. + + Examples + -------- + >>> import sklearn + >>> config = sklearn.get_config() + >>> config.keys() + dict_keys([...]) + """ + # Return a copy of the threadlocal configuration so that users will + # not be able to modify the configuration with the returned dict. + return _get_threadlocal_config().copy() + + +def set_config( + assume_finite=None, + working_memory=None, + print_changed_only=None, + display=None, + pairwise_dist_chunk_size=None, + enable_cython_pairwise_dist=None, + array_api_dispatch=None, + transform_output=None, + enable_metadata_routing=None, + skip_parameter_validation=None, +): + """Set global scikit-learn configuration. + + .. versionadded:: 0.19 + + Parameters + ---------- + assume_finite : bool, default=None + If True, validation for finiteness will be skipped, + saving time, but leading to potential crashes. If + False, validation for finiteness will be performed, + avoiding error. Global default: False. + + .. 
versionadded:: 0.19 + + working_memory : int, default=None + If set, scikit-learn will attempt to limit the size of temporary arrays + to this number of MiB (per job when parallelised), often saving both + computation time and memory on expensive operations that can be + performed in chunks. Global default: 1024. + + .. versionadded:: 0.20 + + print_changed_only : bool, default=None + If True, only the parameters that were set to non-default + values will be printed when printing an estimator. For example, + ``print(SVC())`` while True will only print 'SVC()' while the default + behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with + all the non-changed parameters. + + .. versionadded:: 0.21 + + display : {'text', 'diagram'}, default=None + If 'diagram', estimators will be displayed as a diagram in a Jupyter + lab or notebook context. If 'text', estimators will be displayed as + text. Default is 'diagram'. + + .. versionadded:: 0.23 + + pairwise_dist_chunk_size : int, default=None + The number of row vectors per chunk for the accelerated pairwise- + distances reduction backend. Default is 256 (suitable for most of + modern laptops' caches and architectures). + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + enable_cython_pairwise_dist : bool, default=None + Use the accelerated pairwise-distances reduction backend when + possible. Global default: True. + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + array_api_dispatch : bool, default=None + Use Array API dispatching when inputs follow the Array API standard. + Default is False. + + See the :ref:`User Guide ` for more details. + + .. versionadded:: 1.2 + + transform_output : str, default=None + Configure output of `transform` and `fit_transform`. + + See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` + for an example on how to use the API. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.2 + .. versionadded:: 1.4 + `"polars"` option was added. + + enable_metadata_routing : bool, default=None + Enable metadata routing. By default this feature is disabled. + + Refer to :ref:`metadata routing user guide ` for more + details. + + - `True`: Metadata routing is enabled + - `False`: Metadata routing is disabled, use the old syntax. + - `None`: Configuration is unchanged + + .. versionadded:: 1.3 + + skip_parameter_validation : bool, default=None + If `True`, disable the validation of the hyper-parameters' types and values in + the fit method of estimators and for arguments passed to public helper + functions. It can save time in some situations but can lead to low level + crashes and exceptions with confusing error messages. + + Note that for data parameters, such as `X` and `y`, only type validation is + skipped but validation with `check_array` will continue to run. + + .. versionadded:: 1.3 + + See Also + -------- + config_context : Context manager for global scikit-learn configuration. + get_config : Retrieve current values of the global configuration. 
+ + Examples + -------- + >>> from sklearn import set_config + >>> set_config(display='diagram') # doctest: +SKIP + """ + local_config = _get_threadlocal_config() + + if assume_finite is not None: + local_config["assume_finite"] = assume_finite + if working_memory is not None: + local_config["working_memory"] = working_memory + if print_changed_only is not None: + local_config["print_changed_only"] = print_changed_only + if display is not None: + local_config["display"] = display + if pairwise_dist_chunk_size is not None: + local_config["pairwise_dist_chunk_size"] = pairwise_dist_chunk_size + if enable_cython_pairwise_dist is not None: + local_config["enable_cython_pairwise_dist"] = enable_cython_pairwise_dist + if array_api_dispatch is not None: + from .utils._array_api import _check_array_api_dispatch + + _check_array_api_dispatch(array_api_dispatch) + local_config["array_api_dispatch"] = array_api_dispatch + if transform_output is not None: + local_config["transform_output"] = transform_output + if enable_metadata_routing is not None: + local_config["enable_metadata_routing"] = enable_metadata_routing + if skip_parameter_validation is not None: + local_config["skip_parameter_validation"] = skip_parameter_validation + + +@contextmanager +def config_context( + *, + assume_finite=None, + working_memory=None, + print_changed_only=None, + display=None, + pairwise_dist_chunk_size=None, + enable_cython_pairwise_dist=None, + array_api_dispatch=None, + transform_output=None, + enable_metadata_routing=None, + skip_parameter_validation=None, +): + """Context manager for global scikit-learn configuration. + + Parameters + ---------- + assume_finite : bool, default=None + If True, validation for finiteness will be skipped, + saving time, but leading to potential crashes. If + False, validation for finiteness will be performed, + avoiding error. If None, the existing value won't change. + The default value is False. + + working_memory : int, default=None + If set, scikit-learn will attempt to limit the size of temporary arrays + to this number of MiB (per job when parallelised), often saving both + computation time and memory on expensive operations that can be + performed in chunks. If None, the existing value won't change. + The default value is 1024. + + print_changed_only : bool, default=None + If True, only the parameters that were set to non-default + values will be printed when printing an estimator. For example, + ``print(SVC())`` while True will only print 'SVC()', but would print + 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters + when False. If None, the existing value won't change. + The default value is True. + + .. versionchanged:: 0.23 + Default changed from False to True. + + display : {'text', 'diagram'}, default=None + If 'diagram', estimators will be displayed as a diagram in a Jupyter + lab or notebook context. If 'text', estimators will be displayed as + text. If None, the existing value won't change. + The default value is 'diagram'. + + .. versionadded:: 0.23 + + pairwise_dist_chunk_size : int, default=None + The number of row vectors per chunk for the accelerated pairwise- + distances reduction backend. Default is 256 (suitable for most of + modern laptops' caches and architectures). + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. 
versionadded:: 1.1 + + enable_cython_pairwise_dist : bool, default=None + Use the accelerated pairwise-distances reduction backend when + possible. Global default: True. + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + array_api_dispatch : bool, default=None + Use Array API dispatching when inputs follow the Array API standard. + Default is False. + + See the :ref:`User Guide ` for more details. + + .. versionadded:: 1.2 + + transform_output : str, default=None + Configure output of `transform` and `fit_transform`. + + See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` + for an example on how to use the API. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.2 + .. versionadded:: 1.4 + `"polars"` option was added. + + enable_metadata_routing : bool, default=None + Enable metadata routing. By default this feature is disabled. + + Refer to :ref:`metadata routing user guide ` for more + details. + + - `True`: Metadata routing is enabled + - `False`: Metadata routing is disabled, use the old syntax. + - `None`: Configuration is unchanged + + .. versionadded:: 1.3 + + skip_parameter_validation : bool, default=None + If `True`, disable the validation of the hyper-parameters' types and values in + the fit method of estimators and for arguments passed to public helper + functions. It can save time in some situations but can lead to low level + crashes and exceptions with confusing error messages. + + Note that for data parameters, such as `X` and `y`, only type validation is + skipped but validation with `check_array` will continue to run. + + .. versionadded:: 1.3 + + Yields + ------ + None. + + See Also + -------- + set_config : Set global scikit-learn configuration. + get_config : Retrieve current values of the global configuration. + + Notes + ----- + All settings, not just those presently modified, will be returned to + their previous values when the context manager is exited. + + Examples + -------- + >>> import sklearn + >>> from sklearn.utils.validation import assert_all_finite + >>> with sklearn.config_context(assume_finite=True): + ... assert_all_finite([float('nan')]) + >>> with sklearn.config_context(assume_finite=True): + ... with sklearn.config_context(assume_finite=False): + ... assert_all_finite([float('nan')]) + Traceback (most recent call last): + ... + ValueError: Input contains NaN... 
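Note that the configuration store shown above is a `threading.local`: `_get_threadlocal_config` copies the module-level defaults on a thread's first access, so `set_config` only affects the calling thread. A small sketch of that consequence (assuming `SKLEARN_ASSUME_FINITE` is unset in the environment):

    import threading
    import sklearn

    sklearn.set_config(assume_finite=True)  # mutates this thread's copy only

    seen = {}
    t = threading.Thread(target=lambda: seen.update(sklearn.get_config()))
    t.start()
    t.join()
    assert seen["assume_finite"] is False  # worker threads start from the global defaults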
+ """ + old_config = get_config() + set_config( + assume_finite=assume_finite, + working_memory=working_memory, + print_changed_only=print_changed_only, + display=display, + pairwise_dist_chunk_size=pairwise_dist_chunk_size, + enable_cython_pairwise_dist=enable_cython_pairwise_dist, + array_api_dispatch=array_api_dispatch, + transform_output=transform_output, + enable_metadata_routing=enable_metadata_routing, + skip_parameter_validation=skip_parameter_validation, + ) + + try: + yield + finally: + set_config(**old_config) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_distributor_init.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_distributor_init.py new file mode 100644 index 0000000000000000000000000000000000000000..d66d5d36955c1ca5debf5821e2a2f265ef1f98ed --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_distributor_init.py @@ -0,0 +1,13 @@ +"""Distributor init file + +Distributors: you can add custom code here to support particular distributions +of scikit-learn. + +For example, this is a good place to put any checks for hardware requirements. + +The scikit-learn standard source distribution will not put code in this file, +so you can safely replace this file with your own version. +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_isotonic.pyx b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_isotonic.pyx new file mode 100644 index 0000000000000000000000000000000000000000..31489f1107645afd20bf977d54c0ce9b75e1343a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_isotonic.pyx @@ -0,0 +1,115 @@ +# Author: Nelle Varoquaux, Andrew Tulloch, Antony Lee + +# Uses the pool adjacent violators algorithm (PAVA), with the +# enhancement of searching for the longest decreasing subsequence to +# pool at each step. + +import numpy as np +from cython cimport floating + + +def _inplace_contiguous_isotonic_regression(floating[::1] y, floating[::1] w): + cdef: + Py_ssize_t n = y.shape[0], i, k + floating prev_y, sum_wy, sum_w + Py_ssize_t[::1] target = np.arange(n, dtype=np.intp) + + # target describes a list of blocks. At any time, if [i..j] (inclusive) is + # an active block, then target[i] := j and target[j] := i. + + # For "active" indices (block starts): + # w[i] := sum{w_orig[j], j=[i..target[i]]} + # y[i] := sum{y_orig[j]*w_orig[j], j=[i..target[i]]} / w[i] + + with nogil: + i = 0 + while i < n: + k = target[i] + 1 + if k == n: + break + if y[i] < y[k]: + i = k + continue + sum_wy = w[i] * y[i] + sum_w = w[i] + while True: + # We are within a decreasing subsequence. + prev_y = y[k] + sum_wy += w[k] * y[k] + sum_w += w[k] + k = target[k] + 1 + if k == n or prev_y < y[k]: + # Non-singleton decreasing subsequence is finished, + # update first entry. + y[i] = sum_wy / sum_w + w[i] = sum_w + target[i] = k - 1 + target[k - 1] = i + if i > 0: + # Backtrack if we can. This makes the algorithm + # single-pass and ensures O(n) complexity. + i = target[i - 1] + # Otherwise, restart from the same point. + break + # Reconstruct the solution. + i = 0 + while i < n: + k = target[i] + 1 + y[i + 1 : k] = y[i] + i = k + + +def _make_unique(const floating[::1] X, + const floating[::1] y, + const floating[::1] sample_weights): + """Average targets for duplicate X, drop duplicates. + + Aggregates duplicate X values into a single X value where + the target y is a (sample_weighted) average of the individual + targets. 
+ + Assumes that X is ordered, so that all duplicates follow each other. + """ + unique_values = len(np.unique(X)) + + if floating is float: + dtype = np.float32 + else: + dtype = np.float64 + + cdef floating[::1] y_out = np.empty(unique_values, dtype=dtype) + cdef floating[::1] x_out = np.empty_like(y_out) + cdef floating[::1] weights_out = np.empty_like(y_out) + + cdef floating current_x = X[0] + cdef floating current_y = 0 + cdef floating current_weight = 0 + cdef int i = 0 + cdef int j + cdef floating x + cdef int n_samples = len(X) + cdef floating eps = np.finfo(dtype).resolution + + for j in range(n_samples): + x = X[j] + if x - current_x >= eps: + # next unique value + x_out[i] = current_x + weights_out[i] = current_weight + y_out[i] = current_y / current_weight + i += 1 + current_x = x + current_weight = sample_weights[j] + current_y = y[j] * sample_weights[j] + else: + current_weight += sample_weights[j] + current_y += y[j] * sample_weights[j] + + x_out[i] = current_x + weights_out[i] = current_weight + y_out[i] = current_y / current_weight + return( + np.asarray(x_out[:i+1]), + np.asarray(y_out[:i+1]), + np.asarray(weights_out[:i+1]), + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_min_dependencies.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_min_dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..42d1ffbcc2d123b5af4e80ce20cc63eb772b2865 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_min_dependencies.py @@ -0,0 +1,75 @@ +"""All minimum dependencies for scikit-learn.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import argparse +from collections import defaultdict + +# scipy and cython should be in sync with pyproject.toml +NUMPY_MIN_VERSION = "1.19.5" +SCIPY_MIN_VERSION = "1.6.0" +JOBLIB_MIN_VERSION = "1.2.0" +THREADPOOLCTL_MIN_VERSION = "3.1.0" +PYTEST_MIN_VERSION = "7.1.2" +CYTHON_MIN_VERSION = "3.0.10" + + +# 'build' and 'install' are included to have structured metadata for CI.
+# It will NOT be included in setup's extras_require +# The values are (version_spec, comma separated tags) +dependent_packages = { + "numpy": (NUMPY_MIN_VERSION, "build, install"), + "scipy": (SCIPY_MIN_VERSION, "build, install"), + "joblib": (JOBLIB_MIN_VERSION, "install"), + "threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"), + "cython": (CYTHON_MIN_VERSION, "build"), + "meson-python": ("0.16.0", "build"), + "matplotlib": ("3.3.4", "benchmark, docs, examples, tests"), + "scikit-image": ("0.17.2", "docs, examples, tests"), + "pandas": ("1.1.5", "benchmark, docs, examples, tests"), + "seaborn": ("0.9.0", "docs, examples"), + "memory_profiler": ("0.57.0", "benchmark, docs"), + "pytest": (PYTEST_MIN_VERSION, "tests"), + "pytest-cov": ("2.9.0", "tests"), + "ruff": ("0.5.1", "tests"), + "black": ("24.3.0", "tests"), + "mypy": ("1.9", "tests"), + "pyamg": ("4.0.0", "tests"), + "polars": ("0.20.30", "docs, tests"), + "pyarrow": ("12.0.0", "tests"), + "sphinx": ("7.3.7", "docs"), + "sphinx-copybutton": ("0.5.2", "docs"), + "sphinx-gallery": ("0.17.1", "docs"), + "numpydoc": ("1.2.0", "docs, tests"), + "Pillow": ("7.1.2", "docs"), + "pooch": ("1.6.0", "docs, examples, tests"), + "sphinx-prompt": ("1.4.0", "docs"), + "sphinxext-opengraph": ("0.9.1", "docs"), + "plotly": ("5.14.0", "docs, examples"), + "sphinxcontrib-sass": ("0.3.4", "docs"), + "sphinx-remove-toctrees": ("1.0.0.post1", "docs"), + "sphinx-design": ("0.6.0", "docs"), + "pydata-sphinx-theme": ("0.15.3", "docs"), + "towncrier": ("24.8.0", "docs"), + # XXX: Pin conda-lock to the latest released version (needs manual update + # from time to time) + "conda-lock": ("2.5.6", "maintenance"), +} + + +# create inverse mapping for setuptools +tag_to_packages: dict = defaultdict(list) +for package, (min_version, extras) in dependent_packages.items(): + for extra in extras.split(", "): + tag_to_packages[extra].append("{}>={}".format(package, min_version)) + + +# Used by CI to get the min dependencies +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Get min dependencies for a package") + + parser.add_argument("package", choices=dependent_packages) + args = parser.parse_args() + min_version = dependent_packages[args.package][0] + print(min_version) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/base.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/base.py new file mode 100644 index 0000000000000000000000000000000000000000..3343caa05ca02cc5de1b807a466ddbca54da0343 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/base.py @@ -0,0 +1,1393 @@ +"""Base classes for all estimators and various utility functions.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import copy +import functools +import inspect +import platform +import re +import warnings +from collections import defaultdict + +import numpy as np + +from . 
import __version__ +from ._config import config_context, get_config +from .exceptions import InconsistentVersionWarning +from .utils._estimator_html_repr import _HTMLDocumentationLinkMixin, estimator_html_repr +from .utils._metadata_requests import _MetadataRequester, _routing_enabled +from .utils._param_validation import validate_parameter_constraints +from .utils._set_output import _SetOutputMixin +from .utils._tags import ( + ClassifierTags, + RegressorTags, + Tags, + TargetTags, + TransformerTags, + get_tags, +) +from .utils.fixes import _IS_32BIT +from .utils.validation import ( + _check_feature_names, + _check_feature_names_in, + _check_n_features, + _generate_get_feature_names_out, + _is_fitted, + check_array, + check_is_fitted, + validate_data, +) + + +def clone(estimator, *, safe=True): + """Construct a new unfitted estimator with the same parameters. + + Clone does a deep copy of the model in an estimator + without actually copying attached data. It returns a new estimator + with the same parameters that has not been fitted on any data. + + .. versionchanged:: 1.3 + Delegates to `estimator.__sklearn_clone__` if the method exists. + + Parameters + ---------- + estimator : {list, tuple, set} of estimator instance or a single \ + estimator instance + The estimator or group of estimators to be cloned. + safe : bool, default=True + If safe is False, clone will fall back to a deep copy on objects + that are not estimators. Ignored if `estimator.__sklearn_clone__` + exists. + + Returns + ------- + estimator : object + The deep copy of the input, an estimator if input is an estimator. + + Notes + ----- + If the estimator's `random_state` parameter is an integer (or if the + estimator doesn't have a `random_state` parameter), an *exact clone* is + returned: the clone and the original estimator will give the exact same + results. Otherwise, *statistical clone* is returned: the clone might + return different results from the original estimator. More details can be + found in :ref:`randomness`. + + Examples + -------- + >>> from sklearn.base import clone + >>> from sklearn.linear_model import LogisticRegression + >>> X = [[-1, 0], [0, 1], [0, -1], [1, 0]] + >>> y = [0, 0, 1, 1] + >>> classifier = LogisticRegression().fit(X, y) + >>> cloned_classifier = clone(classifier) + >>> hasattr(classifier, "classes_") + True + >>> hasattr(cloned_classifier, "classes_") + False + >>> classifier is cloned_classifier + False + """ + if hasattr(estimator, "__sklearn_clone__") and not inspect.isclass(estimator): + return estimator.__sklearn_clone__() + return _clone_parametrized(estimator, safe=safe) + + +def _clone_parametrized(estimator, *, safe=True): + """Default implementation of clone. See :func:`sklearn.base.clone` for details.""" + + estimator_type = type(estimator) + if estimator_type is dict: + return {k: clone(v, safe=safe) for k, v in estimator.items()} + elif estimator_type in (list, tuple, set, frozenset): + return estimator_type([clone(e, safe=safe) for e in estimator]) + elif not hasattr(estimator, "get_params") or isinstance(estimator, type): + if not safe: + return copy.deepcopy(estimator) + else: + if isinstance(estimator, type): + raise TypeError( + "Cannot clone object. " + + "You should provide an instance of " + + "scikit-learn estimator instead of a class." + ) + else: + raise TypeError( + "Cannot clone object '%s' (type %s): " + "it does not seem to be a scikit-learn " + "estimator as it does not implement a " + "'get_params' method." 
% (repr(estimator), type(estimator)) + ) + + klass = estimator.__class__ + new_object_params = estimator.get_params(deep=False) + for name, param in new_object_params.items(): + new_object_params[name] = clone(param, safe=False) + + new_object = klass(**new_object_params) + try: + new_object._metadata_request = copy.deepcopy(estimator._metadata_request) + except AttributeError: + pass + + params_set = new_object.get_params(deep=False) + + # quick sanity check of the parameters of the clone + for name in new_object_params: + param1 = new_object_params[name] + param2 = params_set[name] + if param1 is not param2: + raise RuntimeError( + "Cannot clone object %s, as the constructor " + "either does not set or modifies parameter %s" % (estimator, name) + ) + + # _sklearn_output_config is used by `set_output` to configure the output + # container of an estimator. + if hasattr(estimator, "_sklearn_output_config"): + new_object._sklearn_output_config = copy.deepcopy( + estimator._sklearn_output_config + ) + return new_object + + +class BaseEstimator(_HTMLDocumentationLinkMixin, _MetadataRequester): + """Base class for all estimators in scikit-learn. + + Inheriting from this class provides default implementations of: + + - setting and getting parameters used by `GridSearchCV` and friends; + - textual and HTML representation displayed in terminals and IDEs; + - estimator serialization; + - parameters validation; + - data validation; + - feature names validation. + + Read more in the :ref:`User Guide `. + + + Notes + ----- + All estimators should specify all the parameters that can be set + at the class level in their ``__init__`` as explicit keyword + arguments (no ``*args`` or ``**kwargs``). + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator + >>> class MyEstimator(BaseEstimator): + ... def __init__(self, *, param=1): + ... self.param = param + ... def fit(self, X, y=None): + ... self.is_fitted_ = True + ... return self + ... def predict(self, X): + ... return np.full(shape=X.shape[0], fill_value=self.param) + >>> estimator = MyEstimator(param=2) + >>> estimator.get_params() + {'param': 2} + >>> X = np.array([[1, 2], [2, 3], [3, 4]]) + >>> y = np.array([1, 0, 1]) + >>> estimator.fit(X, y).predict(X) + array([2, 2, 2]) + >>> estimator.set_params(param=3).fit(X, y).predict(X) + array([3, 3, 3]) + """ + + @classmethod + def _get_param_names(cls): + """Get parameter names for the estimator""" + # fetch the constructor or the original constructor before + # deprecation wrapping if any + init = getattr(cls.__init__, "deprecated_original", cls.__init__) + if init is object.__init__: + # No explicit constructor to introspect + return [] + + # introspect the constructor arguments to find the model parameters + # to represent + init_signature = inspect.signature(init) + # Consider the constructor parameters excluding 'self' + parameters = [ + p + for p in init_signature.parameters.values() + if p.name != "self" and p.kind != p.VAR_KEYWORD + ] + for p in parameters: + if p.kind == p.VAR_POSITIONAL: + raise RuntimeError( + "scikit-learn estimators should always " + "specify their parameters in the signature" + " of their __init__ (no varargs)." + " %s with constructor %s doesn't " + " follow this convention." % (cls, init_signature) + ) + # Extract and sort argument names excluding 'self' + return sorted([p.name for p in parameters]) + + def get_params(self, deep=True): + """ + Get parameters for this estimator. 
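The parameter sanity check at the end of `_clone_parametrized` above is the reason scikit-learn constructors must store their arguments untouched: an `__init__` that rewrites a parameter makes the clone's parameters disagree with the original's. A minimal failing sketch (the estimator name is made up):

    from sklearn.base import BaseEstimator, clone

    class BadEstimator(BaseEstimator):
        def __init__(self, alpha=1.0):
            self.alpha = alpha + 1.0  # modifies the constructor argument

    clone(BadEstimator())  # RuntimeError: constructor either does not set or modifies parameter alpha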
+ + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + out = dict() + for key in self._get_param_names(): + value = getattr(self, key) + if deep and hasattr(value, "get_params") and not isinstance(value, type): + deep_items = value.get_params().items() + out.update((key + "__" + k, val) for k, val in deep_items) + out[key] = value + return out + + def set_params(self, **params): + """Set the parameters of this estimator. + + The method works on simple estimators as well as on nested objects + (such as :class:`~sklearn.pipeline.Pipeline`). The latter have + parameters of the form ``__`` so that it's + possible to update each component of a nested object. + + Parameters + ---------- + **params : dict + Estimator parameters. + + Returns + ------- + self : estimator instance + Estimator instance. + """ + if not params: + # Simple optimization to gain speed (inspect is slow) + return self + valid_params = self.get_params(deep=True) + + nested_params = defaultdict(dict) # grouped by prefix + for key, value in params.items(): + key, delim, sub_key = key.partition("__") + if key not in valid_params: + local_valid_params = self._get_param_names() + raise ValueError( + f"Invalid parameter {key!r} for estimator {self}. " + f"Valid parameters are: {local_valid_params!r}." + ) + + if delim: + nested_params[key][sub_key] = value + else: + setattr(self, key, value) + valid_params[key] = value + + for key, sub_params in nested_params.items(): + valid_params[key].set_params(**sub_params) + + return self + + def __sklearn_clone__(self): + return _clone_parametrized(self) + + def __repr__(self, N_CHAR_MAX=700): + # N_CHAR_MAX is the (approximate) maximum number of non-blank + # characters to render. We pass it as an optional parameter to ease + # the tests. + + from .utils._pprint import _EstimatorPrettyPrinter + + N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences + + # use ellipsis for sequences with a lot of elements + pp = _EstimatorPrettyPrinter( + compact=True, + indent=1, + indent_at_name=True, + n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW, + ) + + repr_ = pp.pformat(self) + + # Use bruteforce ellipsis when there are a lot of non-blank characters + n_nonblank = len("".join(repr_.split())) + if n_nonblank > N_CHAR_MAX: + lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends + regex = r"^(\s*\S){%d}" % lim + # The regex '^(\s*\S){%d}' % n + # matches from the start of the string until the nth non-blank + # character: + # - ^ matches the start of string + # - (pattern){n} matches n repetitions of pattern + # - \s*\S matches a non-blank char following zero or more blanks + left_lim = re.match(regex, repr_).end() + right_lim = re.match(regex, repr_[::-1]).end() + + if "\n" in repr_[left_lim:-right_lim]: + # The left side and right side aren't on the same line. + # To avoid weird cuts, e.g.: + # categoric...ore', + # we need to start the right side with an appropriate newline + # character so that it renders properly as: + # categoric... + # handle_unknown='ignore', + # so we add [^\n]*\n which matches until the next \n + regex += r"[^\n]*\n" + right_lim = re.match(regex, repr_[::-1]).end() + + ellipsis = "..." + if left_lim + len(ellipsis) < len(repr_) - right_lim: + # Only add ellipsis if it results in a shorter repr + repr_ = repr_[:left_lim] + "..." 
+ repr_[-right_lim:] + + return repr_ + + def __getstate__(self): + if getattr(self, "__slots__", None): + raise TypeError( + "You cannot use `__slots__` in objects inheriting from " + "`sklearn.base.BaseEstimator`." + ) + + try: + state = super().__getstate__() + if state is None: + # For Python 3.11+, empty instance (no `__slots__`, + # and `__dict__`) will return a state equal to `None`. + state = self.__dict__.copy() + except AttributeError: + # Python < 3.11 + state = self.__dict__.copy() + + if type(self).__module__.startswith("sklearn."): + return dict(state.items(), _sklearn_version=__version__) + else: + return state + + def __setstate__(self, state): + if type(self).__module__.startswith("sklearn."): + pickle_version = state.pop("_sklearn_version", "pre-0.18") + if pickle_version != __version__: + warnings.warn( + InconsistentVersionWarning( + estimator_name=self.__class__.__name__, + current_sklearn_version=__version__, + original_sklearn_version=pickle_version, + ), + ) + try: + super().__setstate__(state) + except AttributeError: + self.__dict__.update(state) + + # TODO(1.7): Remove this method + def _more_tags(self): + """This code should never be reached since our `get_tags` will fall back on + `__sklearn_tags__` implemented below. We keep it for backward compatibility. + It is tested in `test_base_estimator_more_tags` in + `sklearn/utils/testing/test_tags.py`.""" + from sklearn.utils._tags import _to_old_tags, default_tags + + warnings.warn( + "The `_more_tags` method is deprecated in 1.6 and will be removed in " + "1.7. Please implement the `__sklearn_tags__` method.", + category=DeprecationWarning, + ) + return _to_old_tags(default_tags(self)) + + # TODO(1.7): Remove this method + def _get_tags(self): + from sklearn.utils._tags import _to_old_tags, get_tags + + warnings.warn( + "The `_get_tags` method is deprecated in 1.6 and will be removed in " + "1.7. Please implement the `__sklearn_tags__` method.", + category=DeprecationWarning, + ) + + return _to_old_tags(get_tags(self)) + + def __sklearn_tags__(self): + return Tags( + estimator_type=None, + target_tags=TargetTags(required=False), + transformer_tags=None, + regressor_tags=None, + classifier_tags=None, + ) + + def _validate_params(self): + """Validate types and values of constructor parameters. + + The expected type and values must be defined in the `_parameter_constraints` + class attribute, which is a dictionary `param_name: list of constraints`. See + the docstring of `validate_parameter_constraints` for a description of the + accepted constraints. + """ + validate_parameter_constraints( + self._parameter_constraints, + self.get_params(deep=False), + caller_name=self.__class__.__name__, + ) + + @property + def _repr_html_(self): + """HTML representation of estimator. + + This is redundant with the logic of `_repr_mimebundle_`. The latter + should be favored in the long term; `_repr_html_` is only + implemented for consumers who do not interpret `_repr_mimebundle_`. + """ + if get_config()["display"] != "diagram": + raise AttributeError( + "_repr_html_ is only defined when the " + "'display' configuration option is set to " + "'diagram'" + ) + return self._repr_html_inner + + def _repr_html_inner(self): + """This function is returned by the @property `_repr_html_` to make + `hasattr(estimator, "_repr_html_")` return `True` or `False` depending + on `get_config()["display"]`.
+ """ + return estimator_html_repr(self) + + def _repr_mimebundle_(self, **kwargs): + """Mime bundle used by jupyter kernels to display estimator""" + output = {"text/plain": repr(self)} + if get_config()["display"] == "diagram": + output["text/html"] = estimator_html_repr(self) + return output + + # TODO(1.7): Remove this method + def _validate_data(self, *args, **kwargs): + warnings.warn( + "`BaseEstimator._validate_data` is deprecated in 1.6 and will be removed " + "in 1.7. Use `sklearn.utils.validation.validate_data` instead. This " + "function becomes public and is part of the scikit-learn developer API.", + FutureWarning, + ) + return validate_data(self, *args, **kwargs) + + # TODO(1.7): Remove this method + def _check_n_features(self, *args, **kwargs): + warnings.warn( + "`BaseEstimator._check_n_features` is deprecated in 1.6 and will be " + "removed in 1.7. Use `sklearn.utils.validation._check_n_features` instead.", + FutureWarning, + ) + _check_n_features(self, *args, **kwargs) + + # TODO(1.7): Remove this method + def _check_feature_names(self, *args, **kwargs): + warnings.warn( + "`BaseEstimator._check_feature_names` is deprecated in 1.6 and will be " + "removed in 1.7. Use `sklearn.utils.validation._check_feature_names` " + "instead.", + FutureWarning, + ) + _check_feature_names(self, *args, **kwargs) + + +class ClassifierMixin: + """Mixin class for all classifiers in scikit-learn. + + This mixin defines the following functionality: + + - set estimator type to `"classifier"` through the `estimator_type` tag; + - `score` method that default to :func:`~sklearn.metrics.accuracy_score`. + - enforce that `fit` requires `y` to be passed through the `requires_y` tag, + which is done by setting the classifier type tag. + + Read more in the :ref:`User Guide `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, ClassifierMixin + >>> # Mixin classes should always be on the left-hand side for a correct MRO + >>> class MyEstimator(ClassifierMixin, BaseEstimator): + ... def __init__(self, *, param=1): + ... self.param = param + ... def fit(self, X, y=None): + ... self.is_fitted_ = True + ... return self + ... def predict(self, X): + ... return np.full(shape=X.shape[0], fill_value=self.param) + >>> estimator = MyEstimator(param=1) + >>> X = np.array([[1, 2], [2, 3], [3, 4]]) + >>> y = np.array([1, 0, 1]) + >>> estimator.fit(X, y).predict(X) + array([1, 1, 1]) + >>> estimator.score(X, y) + 0.66... + """ + + # TODO(1.8): Remove this attribute + _estimator_type = "classifier" + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.estimator_type = "classifier" + tags.classifier_tags = ClassifierTags() + tags.target_tags.required = True + return tags + + def score(self, X, y, sample_weight=None): + """ + Return the mean accuracy on the given test data and labels. + + In multi-label classification, this is the subset accuracy + which is a harsh metric since you require for each sample that + each label set be correctly predicted. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True labels for `X`. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + Mean accuracy of ``self.predict(X)`` w.r.t. `y`. 
+ """ + from .metrics import accuracy_score + + return accuracy_score(y, self.predict(X), sample_weight=sample_weight) + + +class RegressorMixin: + """Mixin class for all regression estimators in scikit-learn. + + This mixin defines the following functionality: + + - set estimator type to `"regressor"` through the `estimator_type` tag; + - `score` method that default to :func:`~sklearn.metrics.r2_score`. + - enforce that `fit` requires `y` to be passed through the `requires_y` tag, + which is done by setting the regressor type tag. + + Read more in the :ref:`User Guide `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, RegressorMixin + >>> # Mixin classes should always be on the left-hand side for a correct MRO + >>> class MyEstimator(RegressorMixin, BaseEstimator): + ... def __init__(self, *, param=1): + ... self.param = param + ... def fit(self, X, y=None): + ... self.is_fitted_ = True + ... return self + ... def predict(self, X): + ... return np.full(shape=X.shape[0], fill_value=self.param) + >>> estimator = MyEstimator(param=0) + >>> X = np.array([[1, 2], [2, 3], [3, 4]]) + >>> y = np.array([-1, 0, 1]) + >>> estimator.fit(X, y).predict(X) + array([0, 0, 0]) + >>> estimator.score(X, y) + 0.0 + """ + + # TODO(1.8): Remove this attribute + _estimator_type = "regressor" + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.estimator_type = "regressor" + tags.regressor_tags = RegressorTags() + tags.target_tags.required = True + return tags + + def score(self, X, y, sample_weight=None): + """Return the coefficient of determination of the prediction. + + The coefficient of determination :math:`R^2` is defined as + :math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual + sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v` + is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``. + The best possible score is 1.0 and it can be negative (because the + model can be arbitrarily worse). A constant model that always predicts + the expected value of `y`, disregarding the input features, would get + a :math:`R^2` score of 0.0. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. For some estimators this may be a precomputed + kernel matrix or a list of generic objects instead with shape + ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted`` + is the number of samples used in the fitting for the estimator. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True values for `X`. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + :math:`R^2` of ``self.predict(X)`` w.r.t. `y`. + + Notes + ----- + The :math:`R^2` score used when calling ``score`` on a regressor uses + ``multioutput='uniform_average'`` from version 0.23 to keep consistent + with default value of :func:`~sklearn.metrics.r2_score`. + This influences the ``score`` method of all the multioutput + regressors (except for + :class:`~sklearn.multioutput.MultiOutputRegressor`). + """ + + from .metrics import r2_score + + y_pred = self.predict(X) + return r2_score(y, y_pred, sample_weight=sample_weight) + + +class ClusterMixin: + """Mixin class for all cluster estimators in scikit-learn. + + - set estimator type to `"clusterer"` through the `estimator_type` tag; + - `fit_predict` method returning the cluster labels associated to each sample. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, ClusterMixin + >>> class MyClusterer(ClusterMixin, BaseEstimator): + ... def fit(self, X, y=None): + ... self.labels_ = np.ones(shape=(len(X),), dtype=np.int64) + ... return self + >>> X = [[1, 2], [2, 3], [3, 4]] + >>> MyClusterer().fit_predict(X) + array([1, 1, 1]) + """ + + # TODO(1.8): Remove this attribute + _estimator_type = "clusterer" + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.estimator_type = "clusterer" + if tags.transformer_tags is not None: + tags.transformer_tags.preserves_dtype = [] + return tags + + def fit_predict(self, X, y=None, **kwargs): + """ + Perform clustering on `X` and returns cluster labels. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + y : Ignored + Not used, present for API consistency by convention. + + **kwargs : dict + Arguments to be passed to ``fit``. + + .. versionadded:: 1.4 + + Returns + ------- + labels : ndarray of shape (n_samples,), dtype=np.int64 + Cluster labels. + """ + # non-optimized default implementation; override when a better + # method is possible for a given clustering algorithm + self.fit(X, **kwargs) + return self.labels_ + + +class BiclusterMixin: + """Mixin class for all bicluster estimators in scikit-learn. + + This mixin defines the following functionality: + + - `biclusters_` property that returns the row and column indicators; + - `get_indices` method that returns the row and column indices of a bicluster; + - `get_shape` method that returns the shape of a bicluster; + - `get_submatrix` method that returns the submatrix corresponding to a bicluster. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, BiclusterMixin + >>> class DummyBiClustering(BiclusterMixin, BaseEstimator): + ... def fit(self, X, y=None): + ... self.rows_ = np.ones(shape=(1, X.shape[0]), dtype=bool) + ... self.columns_ = np.ones(shape=(1, X.shape[1]), dtype=bool) + ... return self + >>> X = np.array([[1, 1], [2, 1], [1, 0], + ... [4, 7], [3, 5], [3, 6]]) + >>> bicluster = DummyBiClustering().fit(X) + >>> hasattr(bicluster, "biclusters_") + True + >>> bicluster.get_indices(0) + (array([0, 1, 2, 3, 4, 5]), array([0, 1])) + """ + + @property + def biclusters_(self): + """Convenient way to get row and column indicators together. + + Returns the ``rows_`` and ``columns_`` members. + """ + return self.rows_, self.columns_ + + def get_indices(self, i): + """Row and column indices of the `i`'th bicluster. + + Only works if ``rows_`` and ``columns_`` attributes exist. + + Parameters + ---------- + i : int + The index of the cluster. + + Returns + ------- + row_ind : ndarray, dtype=np.intp + Indices of rows in the dataset that belong to the bicluster. + col_ind : ndarray, dtype=np.intp + Indices of columns in the dataset that belong to the bicluster. + """ + rows = self.rows_[i] + columns = self.columns_[i] + return np.nonzero(rows)[0], np.nonzero(columns)[0] + + def get_shape(self, i): + """Shape of the `i`'th bicluster. + + Parameters + ---------- + i : int + The index of the cluster. + + Returns + ------- + n_rows : int + Number of rows in the bicluster. + + n_cols : int + Number of columns in the bicluster. + """ + indices = self.get_indices(i) + return tuple(len(i) for i in indices) + + def get_submatrix(self, i, data): + """Return the submatrix corresponding to bicluster `i`. + + Parameters + ---------- + i : int + The index of the cluster. 
+ data : array-like of shape (n_samples, n_features) + The data. + + Returns + ------- + submatrix : ndarray of shape (n_rows, n_cols) + The submatrix corresponding to bicluster `i`. + + Notes + ----- + Works with sparse matrices. Only works if ``rows_`` and + ``columns_`` attributes exist. + """ + + data = check_array(data, accept_sparse="csr") + row_ind, col_ind = self.get_indices(i) + return data[row_ind[:, np.newaxis], col_ind] + + +class TransformerMixin(_SetOutputMixin): + """Mixin class for all transformers in scikit-learn. + + This mixin defines the following functionality: + + - a `fit_transform` method that delegates to `fit` and `transform`; + - a `set_output` method to output `X` as a specific container type. + + If :term:`get_feature_names_out` is defined, then :class:`BaseEstimator` will + automatically wrap `transform` and `fit_transform` to follow the `set_output` + API. See the :ref:`developer_api_set_output` for details. + + :class:`OneToOneFeatureMixin` and + :class:`ClassNamePrefixFeaturesOutMixin` are helpful mixins for + defining :term:`get_feature_names_out`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, TransformerMixin + >>> class MyTransformer(TransformerMixin, BaseEstimator): + ... def __init__(self, *, param=1): + ... self.param = param + ... def fit(self, X, y=None): + ... return self + ... def transform(self, X): + ... return np.full(shape=len(X), fill_value=self.param) + >>> transformer = MyTransformer() + >>> X = [[1, 2], [2, 3], [3, 4]] + >>> transformer.fit_transform(X) + array([1, 1, 1]) + """ + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.transformer_tags = TransformerTags() + return tags + + def fit_transform(self, X, y=None, **fit_params): + """ + Fit to data, then transform it. + + Fits transformer to `X` and `y` with optional parameters `fit_params` + and returns a transformed version of `X`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input samples. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + **fit_params : dict + Additional fit parameters. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_features_new) + Transformed array. + """ + # non-optimized default implementation; override when a better + # method is possible for a given estimator + + # we do not route parameters here, since consumers don't route. But + # since it's possible for a `transform` method to also consume + # metadata, we check if that's the case, and we raise a warning telling + # users that they should implement a custom `fit_transform` method + # to forward metadata to `transform` as well. + # + # For that, we calculate routing and check if anything would be routed + # to `transform` if we were to route them. + if _routing_enabled(): + transform_params = self.get_metadata_routing().consumes( + method="transform", params=fit_params.keys() + ) + if transform_params: + warnings.warn( + ( + f"This object ({self.__class__.__name__}) has a `transform`" + " method which consumes metadata, but `fit_transform` does not" + " forward metadata to `transform`. Please implement a custom" + " `fit_transform` method to forward metadata to `transform` as" + " well. Alternatively, you can explicitly do" + " `set_transform_request` and set all values to `False` to" + " disable metadata routed to `transform`, if that's an option."
+ ), + UserWarning, + ) + + if y is None: + # fit method of arity 1 (unsupervised transformation) + return self.fit(X, **fit_params).transform(X) + else: + # fit method of arity 2 (supervised transformation) + return self.fit(X, y, **fit_params).transform(X) + + +class OneToOneFeatureMixin: + """Provides `get_feature_names_out` for simple transformers. + + This mixin assumes there's a 1-to-1 correspondence between input features + and output features, such as :class:`~sklearn.preprocessing.StandardScaler`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import OneToOneFeatureMixin, BaseEstimator + >>> class MyEstimator(OneToOneFeatureMixin, BaseEstimator): + ... def fit(self, X, y=None): + ... self.n_features_in_ = X.shape[1] + ... return self + >>> X = np.array([[1, 2], [3, 4]]) + >>> MyEstimator().fit(X).get_feature_names_out() + array(['x0', 'x1'], dtype=object) + """ + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Same as input features. + """ + # Note that passing attributes="n_features_in_" forces check_is_fitted + # to check if the attribute is present. Otherwise it will pass on + # stateless estimators (requires_fit=False) + check_is_fitted(self, attributes="n_features_in_") + return _check_feature_names_in(self, input_features) + + +class ClassNamePrefixFeaturesOutMixin: + """Mixin class for transformers that generate their own names by prefixing. + + This mixin is useful when the transformer needs to generate its own feature + names out, such as :class:`~sklearn.decomposition.PCA`. For example, if + :class:`~sklearn.decomposition.PCA` outputs 3 features, then the generated feature + names out are: `["pca0", "pca1", "pca2"]`. + + This mixin assumes that a `_n_features_out` attribute is defined when the + transformer is fitted. `_n_features_out` is the number of output features + that the transformer will return in `transform` or `fit_transform`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import ClassNamePrefixFeaturesOutMixin, BaseEstimator + >>> class MyEstimator(ClassNamePrefixFeaturesOutMixin, BaseEstimator): + ... def fit(self, X, y=None): + ... self._n_features_out = X.shape[1] + ... return self + >>> X = np.array([[1, 2], [3, 4]]) + >>> MyEstimator().fit(X).get_feature_names_out() + array(['myestimator0', 'myestimator1'], dtype=object) + """ + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + The feature names out will be prefixed by the lowercased class name. For + example, if the transformer outputs 3 features, then the feature names + out are: `["class_name0", "class_name1", "class_name2"]`. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Only used to validate feature names with the names seen in `fit`. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names.
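Editor's aside, looking back at `TransformerMixin.fit_transform` above: a hedged sketch of the custom `fit_transform` the routing warning asks for, assuming a hypothetical transformer whose `fit` and `transform` both consume a `sample_weight` metadata parameter. This forwards the same metadata to both steps, which is the simple case; splitting per-method metadata would need explicit routing.

from sklearn.base import BaseEstimator, TransformerMixin

class MyTransformer(TransformerMixin, BaseEstimator):  # hypothetical
    def fit(self, X, y=None, sample_weight=None):
        return self
    def transform(self, X, sample_weight=None):
        return X
    def fit_transform(self, X, y=None, **params):
        # forward metadata to both `fit` and `transform`, as the warning suggests
        return self.fit(X, y, **params).transform(X, **params)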
+ """ + check_is_fitted(self, "_n_features_out") + return _generate_get_feature_names_out( + self, self._n_features_out, input_features=input_features + ) + + +class DensityMixin: + """Mixin class for all density estimators in scikit-learn. + + This mixin defines the following functionality: + + - sets estimator type to `"density_estimator"` through the `estimator_type` tag; + - `score` method that default that do no-op. + + Examples + -------- + >>> from sklearn.base import DensityMixin + >>> class MyEstimator(DensityMixin): + ... def fit(self, X, y=None): + ... self.is_fitted_ = True + ... return self + >>> estimator = MyEstimator() + >>> hasattr(estimator, "score") + True + """ + + # TODO(1.8): Remove this attribute + _estimator_type = "DensityEstimator" + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.estimator_type = "density_estimator" + return tags + + def score(self, X, y=None): + """Return the score of the model on the data `X`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + score : float + """ + pass + + +class OutlierMixin: + """Mixin class for all outlier detection estimators in scikit-learn. + + This mixin defines the following functionality: + + - set estimator type to `"outlier_detector"` through the `estimator_type` tag; + - `fit_predict` method that default to `fit` and `predict`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.base import BaseEstimator, OutlierMixin + >>> class MyEstimator(OutlierMixin): + ... def fit(self, X, y=None): + ... self.is_fitted_ = True + ... return self + ... def predict(self, X): + ... return np.ones(shape=len(X)) + >>> estimator = MyEstimator() + >>> X = np.array([[1, 2], [2, 3], [3, 4]]) + >>> estimator.fit_predict(X) + array([1., 1., 1.]) + """ + + # TODO(1.8): Remove this attribute + _estimator_type = "outlier_detector" + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.estimator_type = "outlier_detector" + return tags + + def fit_predict(self, X, y=None, **kwargs): + """Perform fit on X and returns labels for X. + + Returns -1 for outliers and 1 for inliers. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. + + y : Ignored + Not used, present for API consistency by convention. + + **kwargs : dict + Arguments to be passed to ``fit``. + + .. versionadded:: 1.4 + + Returns + ------- + y : ndarray of shape (n_samples,) + 1 for inliers, -1 for outliers. + """ + # we do not route parameters here, since consumers don't route. But + # since it's possible for a `predict` method to also consume + # metadata, we check if that's the case, and we raise a warning telling + # users that they should implement a custom `fit_predict` method + # to forward metadata to `predict` as well. + # + # For that, we calculate routing and check if anything would be routed + # to `predict` if we were to route them. + if _routing_enabled(): + transform_params = self.get_metadata_routing().consumes( + method="predict", params=kwargs.keys() + ) + if transform_params: + warnings.warn( + ( + f"This object ({self.__class__.__name__}) has a `predict` " + "method which consumes metadata, but `fit_predict` does not " + "forward metadata to `predict`. Please implement a custom " + "`fit_predict` method to forward metadata to `predict` as well." 
+ "Alternatively, you can explicitly do `set_predict_request`" + "and set all values to `False` to disable metadata routed to " + "`predict`, if that's an option." + ), + UserWarning, + ) + + # override for transductive outlier detectors like LocalOulierFactor + return self.fit(X, **kwargs).predict(X) + + +class MetaEstimatorMixin: + """Mixin class for all meta estimators in scikit-learn. + + This mixin is empty, and only exists to indicate that the estimator is a + meta-estimator. + + .. versionchanged:: 1.6 + The `_required_parameters` is now removed and is unnecessary since tests are + refactored and don't use this anymore. + + Examples + -------- + >>> from sklearn.base import MetaEstimatorMixin + >>> from sklearn.datasets import load_iris + >>> from sklearn.linear_model import LogisticRegression + >>> class MyEstimator(MetaEstimatorMixin): + ... def __init__(self, *, estimator=None): + ... self.estimator = estimator + ... def fit(self, X, y=None): + ... if self.estimator is None: + ... self.estimator_ = LogisticRegression() + ... else: + ... self.estimator_ = self.estimator + ... return self + >>> X, y = load_iris(return_X_y=True) + >>> estimator = MyEstimator().fit(X, y) + >>> estimator.estimator_ + LogisticRegression() + """ + + +class MultiOutputMixin: + """Mixin to mark estimators that support multioutput.""" + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.target_tags.multi_output = True + return tags + + +class _UnstableArchMixin: + """Mark estimators that are non-determinstic on 32bit or PowerPC""" + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.non_deterministic = _IS_32BIT or platform.machine().startswith( + ("ppc", "powerpc") + ) + return tags + + +def is_classifier(estimator): + """Return True if the given estimator is (probably) a classifier. + + Parameters + ---------- + estimator : object + Estimator object to test. + + Returns + ------- + out : bool + True if estimator is a classifier and False otherwise. + + Examples + -------- + >>> from sklearn.base import is_classifier + >>> from sklearn.cluster import KMeans + >>> from sklearn.svm import SVC, SVR + >>> classifier = SVC() + >>> regressor = SVR() + >>> kmeans = KMeans() + >>> is_classifier(classifier) + True + >>> is_classifier(regressor) + False + >>> is_classifier(kmeans) + False + """ + # TODO(1.8): Remove this check + if isinstance(estimator, type): + warnings.warn( + f"passing a class to {print(inspect.stack()[0][3])} is deprecated and " + "will be removed in 1.8. Use an instance of the class instead.", + FutureWarning, + ) + return getattr(estimator, "_estimator_type", None) == "classifier" + + return get_tags(estimator).estimator_type == "classifier" + + +def is_regressor(estimator): + """Return True if the given estimator is (probably) a regressor. + + Parameters + ---------- + estimator : estimator instance + Estimator object to test. + + Returns + ------- + out : bool + True if estimator is a regressor and False otherwise. + + Examples + -------- + >>> from sklearn.base import is_regressor + >>> from sklearn.cluster import KMeans + >>> from sklearn.svm import SVC, SVR + >>> classifier = SVC() + >>> regressor = SVR() + >>> kmeans = KMeans() + >>> is_regressor(classifier) + False + >>> is_regressor(regressor) + True + >>> is_regressor(kmeans) + False + """ + # TODO(1.8): Remove this check + if isinstance(estimator, type): + warnings.warn( + f"passing a class to {print(inspect.stack()[0][3])} is deprecated and " + "will be removed in 1.8. 
Use an instance of the class instead.", + FutureWarning, + ) + return getattr(estimator, "_estimator_type", None) == "regressor" + + return get_tags(estimator).estimator_type == "regressor" + + +def is_clusterer(estimator): + """Return True if the given estimator is (probably) a clusterer. + + .. versionadded:: 1.6 + + Parameters + ---------- + estimator : object + Estimator object to test. + + Returns + ------- + out : bool + True if estimator is a clusterer and False otherwise. + + Examples + -------- + >>> from sklearn.base import is_clusterer + >>> from sklearn.cluster import KMeans + >>> from sklearn.svm import SVC, SVR + >>> classifier = SVC() + >>> regressor = SVR() + >>> kmeans = KMeans() + >>> is_clusterer(classifier) + False + >>> is_clusterer(regressor) + False + >>> is_clusterer(kmeans) + True + """ + # TODO(1.8): Remove this check + if isinstance(estimator, type): + warnings.warn( + f"passing a class to {inspect.stack()[0][3]} is deprecated and " + "will be removed in 1.8. Use an instance of the class instead.", + FutureWarning, + ) + return getattr(estimator, "_estimator_type", None) == "clusterer" + + return get_tags(estimator).estimator_type == "clusterer" + + +def is_outlier_detector(estimator): + """Return True if the given estimator is (probably) an outlier detector. + + Parameters + ---------- + estimator : estimator instance + Estimator object to test. + + Returns + ------- + out : bool + True if estimator is an outlier detector and False otherwise. + """ + # TODO(1.8): Remove this check + if isinstance(estimator, type): + warnings.warn( + f"passing a class to {inspect.stack()[0][3]} is deprecated and " + "will be removed in 1.8. Use an instance of the class instead.", + FutureWarning, + ) + return getattr(estimator, "_estimator_type", None) == "outlier_detector" + + return get_tags(estimator).estimator_type == "outlier_detector" + + +def _fit_context(*, prefer_skip_nested_validation): + """Decorator to run the fit methods of estimators within context managers. + + Parameters + ---------- + prefer_skip_nested_validation : bool + If True, the validation of parameters of inner estimators or functions + called during fit will be skipped. + + This is useful to avoid validating many times the parameters passed by the + user from the public facing API. It's also useful to avoid validating + parameters that we pass internally to inner functions that are guaranteed to + be valid by the test suite. + + It should be set to True for most estimators, except for those that receive + non-validated objects as parameters, such as meta-estimators that are given + estimator objects. + + Returns + ------- + decorated_fit : method + The decorated fit method.
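Editor's aside, ahead of the `_fit_context` implementation that follows: a hedged usage sketch. `_fit_context` is a private helper, so this mirrors how scikit-learn's own estimators use it rather than a public API; `MyEstimator` and its `alpha` constraint are illustrative.

from numbers import Real
from sklearn.base import BaseEstimator, _fit_context  # private helper

class MyEstimator(BaseEstimator):  # hypothetical estimator
    _parameter_constraints: dict = {"alpha": [Real]}

    def __init__(self, alpha=1.0):
        self.alpha = alpha

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        # constraints were validated before this body runs
        self.fitted_ = True
        return self

MyEstimator(alpha=1.0).fit([[0.0], [1.0]])  # passes validation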
+ """ + + def decorator(fit_method): + @functools.wraps(fit_method) + def wrapper(estimator, *args, **kwargs): + global_skip_validation = get_config()["skip_parameter_validation"] + + # we don't want to validate again for each call to partial_fit + partial_fit_and_fitted = ( + fit_method.__name__ == "partial_fit" and _is_fitted(estimator) + ) + + if not global_skip_validation and not partial_fit_and_fitted: + estimator._validate_params() + + with config_context( + skip_parameter_validation=( + prefer_skip_nested_validation or global_skip_validation + ) + ): + return fit_method(estimator, *args, **kwargs) + + return wrapper + + return decorator diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/calibration.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/calibration.py new file mode 100644 index 0000000000000000000000000000000000000000..1a39315ba65574ec056ff4d94063e59d530c187f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/calibration.py @@ -0,0 +1,1423 @@ +"""Methods for calibrating predicted probabilities.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from inspect import signature +from math import log +from numbers import Integral, Real + +import numpy as np +from scipy.optimize import minimize +from scipy.special import expit + +from sklearn.utils import Bunch + +from ._loss import HalfBinomialLoss +from .base import ( + BaseEstimator, + ClassifierMixin, + MetaEstimatorMixin, + RegressorMixin, + _fit_context, + clone, +) +from .frozen import FrozenEstimator +from .isotonic import IsotonicRegression +from .model_selection import LeaveOneOut, check_cv, cross_val_predict +from .preprocessing import LabelEncoder, label_binarize +from .svm import LinearSVC +from .utils import _safe_indexing, column_or_1d, get_tags, indexable +from .utils._param_validation import ( + HasMethods, + Hidden, + Interval, + StrOptions, + validate_params, +) +from .utils._plotting import _BinaryClassifierCurveDisplayMixin, _validate_style_kwargs +from .utils._response import _get_response_values, _process_predict_proba +from .utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _routing_enabled, + process_routing, +) +from .utils.multiclass import check_classification_targets +from .utils.parallel import Parallel, delayed +from .utils.validation import ( + _check_method_params, + _check_pos_label_consistency, + _check_response_method, + _check_sample_weight, + _num_samples, + check_consistent_length, + check_is_fitted, +) + + +class CalibratedClassifierCV(ClassifierMixin, MetaEstimatorMixin, BaseEstimator): + """Probability calibration with isotonic regression or logistic regression. + + This class uses cross-validation to both estimate the parameters of a + classifier and subsequently calibrate a classifier. With default + `ensemble=True`, for each cv split it + fits a copy of the base estimator to the training subset, and calibrates it + using the testing subset. For prediction, predicted probabilities are + averaged across these individual calibrated classifiers. When + `ensemble=False`, cross-validation is used to obtain unbiased predictions, + via :func:`~sklearn.model_selection.cross_val_predict`, which are then + used for calibration. For prediction, the base estimator, trained using all + the data, is used. This is the prediction method implemented when + `probabilities=True` for :class:`~sklearn.svm.SVC` and :class:`~sklearn.svm.NuSVC` + estimators (see :ref:`User Guide ` for details). 
+ + Already fitted classifiers can be calibrated by wrapping the model in a + :class:`~sklearn.frozen.FrozenEstimator`. In this case all provided + data is used for calibration. The user has to take care manually that data + for model fitting and calibration are disjoint. + + The calibration is based on the :term:`decision_function` method of the + `estimator` if it exists, else on :term:`predict_proba`. + + Read more in the :ref:`User Guide `. + In order to learn more about the CalibratedClassifierCV class, see the + following calibration examples: + :ref:`sphx_glr_auto_examples_calibration_plot_calibration.py`, + :ref:`sphx_glr_auto_examples_calibration_plot_calibration_curve.py`, and + :ref:`sphx_glr_auto_examples_calibration_plot_calibration_multiclass.py`. + + Parameters + ---------- + estimator : estimator instance, default=None + The classifier whose output needs to be calibrated to provide more + accurate `predict_proba` outputs. The default classifier is + a :class:`~sklearn.svm.LinearSVC`. + + .. versionadded:: 1.2 + + method : {'sigmoid', 'isotonic'}, default='sigmoid' + The method to use for calibration. Can be 'sigmoid' which + corresponds to Platt's method (i.e. a logistic regression model) or + 'isotonic' which is a non-parametric approach. It is not advised to + use isotonic calibration with too few calibration samples + ``(<<1000)`` since it tends to overfit. + + cv : int, cross-validation generator, or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if ``y`` is binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is + neither binary nor multiclass, :class:`~sklearn.model_selection.KFold` + is used. + + Refer to the :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + .. versionchanged:: 1.6 + `"prefit"` is deprecated. Use :class:`~sklearn.frozen.FrozenEstimator` + instead. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. + + Base estimator clones are fitted in parallel across cross-validation + iterations. Therefore parallelism happens only when `cv != "prefit"`. + + See :term:`Glossary ` for more details. + + .. versionadded:: 0.24 + + ensemble : bool, or "auto", default="auto" + Determines how the calibrator is fitted. + + "auto" will use `False` if the `estimator` is a + :class:`~sklearn.frozen.FrozenEstimator`, and `True` otherwise. + + If `True`, the `estimator` is fitted using training data, and + calibrated using testing data, for each `cv` fold. The final estimator + is an ensemble of `n_cv` fitted classifier and calibrator pairs, where + `n_cv` is the number of cross-validation folds. The output is the + average predicted probabilities of all pairs. + + If `False`, `cv` is used to compute unbiased predictions, via + :func:`~sklearn.model_selection.cross_val_predict`, which are then + used for calibration. At prediction time, the classifier used is the + `estimator` trained on all the data.
+ Note that this method is also internally implemented in + :mod:`sklearn.svm` estimators with the `probability=True` parameter. + + .. versionadded:: 0.24 + + .. versionchanged:: 1.6 + `"auto"` option is added and is the default. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + The class labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 1.0 + + calibrated_classifiers_ : list (len() equal to cv or 1 if `ensemble=False`) + The list of classifier and calibrator pairs. + + - When `ensemble=True`, `n_cv` fitted `estimator` and calibrator pairs. + `n_cv` is the number of cross-validation folds. + - When `ensemble=False`, the `estimator`, fitted on all the data, and fitted + calibrator. + + .. versionchanged:: 0.24 + Single calibrated classifier case when `ensemble=False`. + + See Also + -------- + calibration_curve : Compute true and predicted probabilities + for a calibration curve. + + References + ---------- + .. [1] Obtaining calibrated probability estimates from decision trees + and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001 + + .. [2] Transforming Classifier Scores into Accurate Multiclass + Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002) + + .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to + Regularized Likelihood Methods, J. Platt, (1999) + + .. [4] Predicting Good Probabilities with Supervised Learning, + A. Niculescu-Mizil & R. Caruana, ICML 2005 + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.naive_bayes import GaussianNB + >>> from sklearn.calibration import CalibratedClassifierCV + >>> X, y = make_classification(n_samples=100, n_features=2, + ... n_redundant=0, random_state=42) + >>> base_clf = GaussianNB() + >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv=3) + >>> calibrated_clf.fit(X, y) + CalibratedClassifierCV(...) + >>> len(calibrated_clf.calibrated_classifiers_) + 3 + >>> calibrated_clf.predict_proba(X)[:5, :] + array([[0.110..., 0.889...], + [0.072..., 0.927...], + [0.928..., 0.071...], + [0.928..., 0.071...], + [0.071..., 0.928...]]) + >>> from sklearn.model_selection import train_test_split + >>> X, y = make_classification(n_samples=100, n_features=2, + ... n_redundant=0, random_state=42) + >>> X_train, X_calib, y_train, y_calib = train_test_split( + ... X, y, random_state=42 + ... ) + >>> base_clf = GaussianNB() + >>> base_clf.fit(X_train, y_train) + GaussianNB() + >>> from sklearn.frozen import FrozenEstimator + >>> calibrated_clf = CalibratedClassifierCV(FrozenEstimator(base_clf)) + >>> calibrated_clf.fit(X_calib, y_calib) + CalibratedClassifierCV(...)
+ >>> len(calibrated_clf.calibrated_classifiers_) + 1 + >>> calibrated_clf.predict_proba([[-0.5, 0.5]]) + array([[0.936..., 0.063...]]) + """ + + _parameter_constraints: dict = { + "estimator": [ + HasMethods(["fit", "predict_proba"]), + HasMethods(["fit", "decision_function"]), + None, + ], + "method": [StrOptions({"isotonic", "sigmoid"})], + "cv": ["cv_object", Hidden(StrOptions({"prefit"}))], + "n_jobs": [Integral, None], + "ensemble": ["boolean", StrOptions({"auto"})], + } + + def __init__( + self, + estimator=None, + *, + method="sigmoid", + cv=None, + n_jobs=None, + ensemble="auto", + ): + self.estimator = estimator + self.method = method + self.cv = cv + self.n_jobs = n_jobs + self.ensemble = ensemble + + def _get_estimator(self): + """Resolve which estimator to return (default is LinearSVC)""" + if self.estimator is None: + # we want all classifiers that don't expose a random_state + # to be deterministic (and we don't want to expose this one). + estimator = LinearSVC(random_state=0) + if _routing_enabled(): + estimator.set_fit_request(sample_weight=True) + else: + estimator = self.estimator + + return estimator + + @_fit_context( + # CalibratedClassifierCV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None, **fit_params): + """Fit the calibrated model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + + **fit_params : dict + Parameters to pass to the `fit` method of the underlying + classifier. + + Returns + ------- + self : object + Returns an instance of self. + """ + check_classification_targets(y) + X, y = indexable(X, y) + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + estimator = self._get_estimator() + + _ensemble = self.ensemble + if _ensemble == "auto": + _ensemble = not isinstance(estimator, FrozenEstimator) + + self.calibrated_classifiers_ = [] + if self.cv == "prefit": + # TODO(1.8): Remove this code branch and cv='prefit' + warnings.warn( + "The `cv='prefit'` option is deprecated in 1.6 and will be removed in" + " 1.8. You can use CalibratedClassifierCV(FrozenEstimator(estimator))" + " instead." 
+ ) + # `classes_` should be consistent with that of estimator + check_is_fitted(self.estimator, attributes=["classes_"]) + self.classes_ = self.estimator.classes_ + + predictions, _ = _get_response_values( + estimator, + X, + response_method=["decision_function", "predict_proba"], + ) + if predictions.ndim == 1: + # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` + predictions = predictions.reshape(-1, 1) + + calibrated_classifier = _fit_calibrator( + estimator, + predictions, + y, + self.classes_, + self.method, + sample_weight, + ) + self.calibrated_classifiers_.append(calibrated_classifier) + else: + # Set `classes_` using all `y` + label_encoder_ = LabelEncoder().fit(y) + self.classes_ = label_encoder_.classes_ + + if _routing_enabled(): + routed_params = process_routing( + self, + "fit", + sample_weight=sample_weight, + **fit_params, + ) + else: + # sample_weight checks + fit_parameters = signature(estimator.fit).parameters + supports_sw = "sample_weight" in fit_parameters + if sample_weight is not None and not supports_sw: + estimator_name = type(estimator).__name__ + warnings.warn( + f"Since {estimator_name} does not appear to accept" + " sample_weight, sample weights will only be used for the" + " calibration itself. This can be caused by a limitation of" + " the current scikit-learn API. See the following issue for" + " more details:" + " https://github.com/scikit-learn/scikit-learn/issues/21134." + " Be warned that the result of the calibration is likely to be" + " incorrect." + ) + routed_params = Bunch() + routed_params.splitter = Bunch(split={}) # no routing for splitter + routed_params.estimator = Bunch(fit=fit_params) + if sample_weight is not None and supports_sw: + routed_params.estimator.fit["sample_weight"] = sample_weight + + # Check that each cross-validation fold can have at least one + # example per class + if isinstance(self.cv, int): + n_folds = self.cv + elif hasattr(self.cv, "n_splits"): + n_folds = self.cv.n_splits + else: + n_folds = None + if n_folds and np.any(np.unique(y, return_counts=True)[1] < n_folds): + raise ValueError( + f"Requesting {n_folds}-fold " + "cross-validation but provided fewer than " + f"{n_folds} examples for at least one class." + ) + if isinstance(self.cv, LeaveOneOut): + raise ValueError( + "LeaveOneOut cross-validation does not allow " + "all classes to be present in test splits. " + "Please use a cross-validation generator that allows " + "all classes to appear in every test and train split."
+ ) + cv = check_cv(self.cv, y, classifier=True) + + if _ensemble: + parallel = Parallel(n_jobs=self.n_jobs) + self.calibrated_classifiers_ = parallel( + delayed(_fit_classifier_calibrator_pair)( + clone(estimator), + X, + y, + train=train, + test=test, + method=self.method, + classes=self.classes_, + sample_weight=sample_weight, + fit_params=routed_params.estimator.fit, + ) + for train, test in cv.split(X, y, **routed_params.splitter.split) + ) + else: + this_estimator = clone(estimator) + method_name = _check_response_method( + this_estimator, + ["decision_function", "predict_proba"], + ).__name__ + predictions = cross_val_predict( + estimator=this_estimator, + X=X, + y=y, + cv=cv, + method=method_name, + n_jobs=self.n_jobs, + params=routed_params.estimator.fit, + ) + if len(self.classes_) == 2: + # Ensure shape (n_samples, 1) in the binary case + if method_name == "predict_proba": + # Select the probability column of the positive class + predictions = _process_predict_proba( + y_pred=predictions, + target_type="binary", + classes=self.classes_, + pos_label=self.classes_[1], + ) + predictions = predictions.reshape(-1, 1) + + this_estimator.fit(X, y, **routed_params.estimator.fit) + # Note: Here we don't pass on fit_params because the supported + # calibrators don't support fit_params anyway + calibrated_classifier = _fit_calibrator( + this_estimator, + predictions, + y, + self.classes_, + self.method, + sample_weight, + ) + self.calibrated_classifiers_.append(calibrated_classifier) + + first_clf = self.calibrated_classifiers_[0].estimator + if hasattr(first_clf, "n_features_in_"): + self.n_features_in_ = first_clf.n_features_in_ + if hasattr(first_clf, "feature_names_in_"): + self.feature_names_in_ = first_clf.feature_names_in_ + return self + + def predict_proba(self, X): + """Calibrated probabilities of classification. + + This function returns calibrated probabilities of classification + according to each class on an array of test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The samples, as accepted by `estimator.predict_proba`. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + The predicted probabilities. + """ + check_is_fitted(self) + # Compute the arithmetic mean of the predictions of the calibrated + # classifiers + mean_proba = np.zeros((_num_samples(X), len(self.classes_))) + for calibrated_classifier in self.calibrated_classifiers_: + proba = calibrated_classifier.predict_proba(X) + mean_proba += proba + + mean_proba /= len(self.calibrated_classifiers_) + + return mean_proba + + def predict(self, X): + """Predict the target of new samples. + + The predicted class is the class that has the highest probability, + and can thus be different from the prediction of the uncalibrated classifier. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The samples, as accepted by `estimator.predict`. + + Returns + ------- + C : ndarray of shape (n_samples,) + The predicted class. + """ + check_is_fitted(self) + return self.classes_[np.argmax(self.predict_proba(X), axis=1)] + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information.
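Editor's aside: a hedged check of the ensemble averaging performed by `predict_proba` above, reusing `GaussianNB` and `make_classification` purely for illustration. With `cv=3` and the default `ensemble="auto"` (resolving to `True` here), `calibrated_classifiers_` holds three pairs, and their arithmetic mean reproduces `predict_proba` exactly.

import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.naive_bayes import GaussianNB

X, y = make_classification(n_samples=100, random_state=0)
calibrated_clf = CalibratedClassifierCV(GaussianNB(), cv=3).fit(X, y)
probas = [c.predict_proba(X) for c in calibrated_clf.calibrated_classifiers_]
assert np.allclose(np.mean(probas, axis=0), calibrated_clf.predict_proba(X))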
+ """ + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + estimator=self._get_estimator(), + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + .add( + splitter=self.cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + ) + return router + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = get_tags(self._get_estimator()).input_tags.sparse + return tags + + +def _fit_classifier_calibrator_pair( + estimator, + X, + y, + train, + test, + method, + classes, + sample_weight=None, + fit_params=None, +): + """Fit a classifier/calibration pair on a given train/test split. + + Fit the classifier on the train set, compute its predictions on the test + set and use the predictions as input to fit the calibrator along with the + test labels. + + Parameters + ---------- + estimator : estimator instance + Cloned base estimator. + + X : array-like, shape (n_samples, n_features) + Sample data. + + y : array-like, shape (n_samples,) + Targets. + + train : ndarray, shape (n_train_indices,) + Indices of the training subset. + + test : ndarray, shape (n_test_indices,) + Indices of the testing subset. + + method : {'sigmoid', 'isotonic'} + Method to use for calibration. + + classes : ndarray, shape (n_classes,) + The target classes. + + sample_weight : array-like, default=None + Sample weights for `X`. + + fit_params : dict, default=None + Parameters to pass to the `fit` method of the underlying + classifier. + + Returns + ------- + calibrated_classifier : _CalibratedClassifier instance + """ + fit_params_train = _check_method_params(X, params=fit_params, indices=train) + X_train, y_train = _safe_indexing(X, train), _safe_indexing(y, train) + X_test, y_test = _safe_indexing(X, test), _safe_indexing(y, test) + + estimator.fit(X_train, y_train, **fit_params_train) + + predictions, _ = _get_response_values( + estimator, + X_test, + response_method=["decision_function", "predict_proba"], + ) + if predictions.ndim == 1: + # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` + predictions = predictions.reshape(-1, 1) + + sw_test = None if sample_weight is None else _safe_indexing(sample_weight, test) + calibrated_classifier = _fit_calibrator( + estimator, predictions, y_test, classes, method, sample_weight=sw_test + ) + return calibrated_classifier + + +def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None): + """Fit calibrator(s) and return a `_CalibratedClassifier` + instance. + + `n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted. + However, if `n_classes` equals 2, one calibrator is fitted. + + Parameters + ---------- + clf : estimator instance + Fitted classifier. + + predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \ + when binary. + Raw predictions returned by the un-calibrated base classifier. + + y : array-like, shape (n_samples,) + The targets. + + classes : ndarray, shape (n_classes,) + All the prediction classes. + + method : {'sigmoid', 'isotonic'} + The method to use for calibration. + + sample_weight : ndarray, shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. 
+ + Returns + ------- + pipeline : _CalibratedClassifier instance + """ + Y = label_binarize(y, classes=classes) + label_encoder = LabelEncoder().fit(classes) + pos_class_indices = label_encoder.transform(clf.classes_) + calibrators = [] + for class_idx, this_pred in zip(pos_class_indices, predictions.T): + if method == "isotonic": + calibrator = IsotonicRegression(out_of_bounds="clip") + else: # "sigmoid" + calibrator = _SigmoidCalibration() + calibrator.fit(this_pred, Y[:, class_idx], sample_weight) + calibrators.append(calibrator) + + pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes) + return pipeline + + +class _CalibratedClassifier: + """Pipeline-like chaining a fitted classifier and its fitted calibrators. + + Parameters + ---------- + estimator : estimator instance + Fitted classifier. + + calibrators : list of fitted estimator instances + List of fitted calibrators (either 'IsotonicRegression' or + '_SigmoidCalibration'). The number of calibrators equals the number of + classes. However, if there are 2 classes, the list contains only one + fitted calibrator. + + classes : array-like of shape (n_classes,) + All the prediction classes. + + method : {'sigmoid', 'isotonic'}, default='sigmoid' + The method to use for calibration. Can be 'sigmoid' which + corresponds to Platt's method or 'isotonic' which is a + non-parametric approach based on isotonic regression. + """ + + def __init__(self, estimator, calibrators, *, classes, method="sigmoid"): + self.estimator = estimator + self.calibrators = calibrators + self.classes = classes + self.method = method + + def predict_proba(self, X): + """Calculate calibrated probabilities. + + Calculates classification calibrated probabilities + for each class, in a one-vs-all manner, for `X`. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + The sample data. + + Returns + ------- + proba : array, shape (n_samples, n_classes) + The predicted probabilities. Can be exact zeros. + """ + predictions, _ = _get_response_values( + self.estimator, + X, + response_method=["decision_function", "predict_proba"], + ) + if predictions.ndim == 1: + # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` + predictions = predictions.reshape(-1, 1) + + n_classes = len(self.classes) + + label_encoder = LabelEncoder().fit(self.classes) + pos_class_indices = label_encoder.transform(self.estimator.classes_) + + proba = np.zeros((_num_samples(X), n_classes)) + for class_idx, this_pred, calibrator in zip( + pos_class_indices, predictions.T, self.calibrators + ): + if n_classes == 2: + # When binary, `predictions` consists only of predictions for + # clf.classes_[1] but `pos_class_indices` = 0 + class_idx += 1 + proba[:, class_idx] = calibrator.predict(this_pred) + + # Normalize the probabilities + if n_classes == 2: + proba[:, 0] = 1.0 - proba[:, 1] + else: + denominator = np.sum(proba, axis=1)[:, np.newaxis] + # In the edge case where for each class calibrator returns a null + # probability for a given sample, use the uniform distribution + # instead. 
+ uniform_proba = np.full_like(proba, 1 / n_classes) + proba = np.divide( + proba, denominator, out=uniform_proba, where=denominator != 0 + ) + + # Deal with cases where the predicted probability minimally exceeds 1.0 + proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0 + + return proba + + +# The max_abs_prediction_threshold was approximated using +# logit(np.finfo(np.float64).eps) which is about -36 +def _sigmoid_calibration( + predictions, y, sample_weight=None, max_abs_prediction_threshold=30 +): + """Probability Calibration with sigmoid method (Platt 2000) + + Parameters + ---------- + predictions : ndarray of shape (n_samples,) + The decision function or predict proba for the samples. + + y : ndarray of shape (n_samples,) + The targets. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + + Returns + ------- + a : float + The slope. + + b : float + The intercept. + + References + ---------- + Platt, "Probabilistic Outputs for Support Vector Machines" + """ + predictions = column_or_1d(predictions) + y = column_or_1d(y) + + F = predictions # F follows Platt's notations + + scale_constant = 1.0 + max_prediction = np.max(np.abs(F)) + + # If the predictions have large values we scale them in order to bring + # them within a suitable range. This has no effect on the final + # (prediction) result because linear models like Logistic Regression + # without a penalty are invariant to multiplying the features by a + # constant. + if max_prediction >= max_abs_prediction_threshold: + scale_constant = max_prediction + # We rescale the features in a copy: inplace rescaling could confuse + # the caller and make the code harder to reason about. + F = F / scale_constant + + # Bayesian priors (see Platt end of section 2.2): + # It corresponds to the number of samples, taking into account the + # `sample_weight`. + mask_negative_samples = y <= 0 + if sample_weight is not None: + prior0 = (sample_weight[mask_negative_samples]).sum() + prior1 = (sample_weight[~mask_negative_samples]).sum() + else: + prior0 = float(np.sum(mask_negative_samples)) + prior1 = y.shape[0] - prior0 + T = np.zeros_like(y, dtype=predictions.dtype) + T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0) + T[y <= 0] = 1.0 / (prior0 + 2.0) + + bin_loss = HalfBinomialLoss() + + def loss_grad(AB): + # .astype below is needed to ensure y_true and raw_prediction have the + # same dtype. With result = np.float64(0) * np.array([1, 2], dtype=np.float32) + # - in Numpy 2, result.dtype is float64 + # - in Numpy<2, result.dtype is float32 + raw_prediction = -(AB[0] * F + AB[1]).astype(dtype=predictions.dtype) + l, g = bin_loss.loss_gradient( + y_true=T, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + loss = l.sum() + # TODO: Remove casting to np.float64 when minimum supported SciPy is 1.11.2 + # With SciPy >= 1.11.2, the LBFGS implementation will cast to float64 + # https://github.com/scipy/scipy/pull/18825. + # Here we cast to float64 to support SciPy < 1.11.2 + grad = np.asarray([-g @ F, -g.sum()], dtype=np.float64) + return loss, grad + + AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))]) + + opt_result = minimize( + loss_grad, + AB0, + method="L-BFGS-B", + jac=True, + options={ + "gtol": 1e-6, + "ftol": 64 * np.finfo(float).eps, + }, + ) + AB_ = opt_result.x + + # The tuned multiplicative parameter is converted back to the original + # input feature scale.
The offset parameter does not need rescaling since + # we did not rescale the outcome variable. + return AB_[0] / scale_constant, AB_[1] + + +class _SigmoidCalibration(RegressorMixin, BaseEstimator): + """Sigmoid regression model. + + Attributes + ---------- + a_ : float + The slope. + + b_ : float + The intercept. + """ + + def fit(self, X, y, sample_weight=None): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples,) + Training data. + + y : array-like of shape (n_samples,) + Training target. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, then samples are equally weighted. + + Returns + ------- + self : object + Returns an instance of self. + """ + X = column_or_1d(X) + y = column_or_1d(y) + X, y = indexable(X, y) + + self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight) + return self + + def predict(self, T): + """Predict new data with the fitted sigmoid. + + Parameters + ---------- + T : array-like of shape (n_samples,) + Data to predict from. + + Returns + ------- + T_ : ndarray of shape (n_samples,) + The predicted data. + """ + T = column_or_1d(T) + return expit(-(self.a_ * T + self.b_)) + + +@validate_params( + { + "y_true": ["array-like"], + "y_prob": ["array-like"], + "pos_label": [Real, str, "boolean", None], + "n_bins": [Interval(Integral, 1, None, closed="left")], + "strategy": [StrOptions({"uniform", "quantile"})], + }, + prefer_skip_nested_validation=True, +) +def calibration_curve( + y_true, + y_prob, + *, + pos_label=None, + n_bins=5, + strategy="uniform", +): + """Compute true and predicted probabilities for a calibration curve. + + The method assumes the inputs come from a binary classifier, and + discretizes the [0, 1] interval into bins. + + Calibration curves may also be referred to as reliability diagrams. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True targets. + + y_prob : array-like of shape (n_samples,) + Probabilities of the positive class. + + pos_label : int, float, bool or str, default=None + The label of the positive class. + + .. versionadded:: 1.1 + + n_bins : int, default=5 + Number of bins to discretize the [0, 1] interval. A bigger number + requires more data. Bins with no samples (i.e. without + corresponding values in `y_prob`) will not be returned, thus the + returned arrays may have fewer than `n_bins` values. + + strategy : {'uniform', 'quantile'}, default='uniform' + Strategy used to define the widths of the bins. + + uniform + The bins have identical widths. + quantile + The bins have the same number of samples and depend on `y_prob`. + + Returns + ------- + prob_true : ndarray of shape (n_bins,) or smaller + The proportion of samples whose class is the positive class, in each + bin (fraction of positives). + + prob_pred : ndarray of shape (n_bins,) or smaller + The mean predicted probability in each bin. + + References + ---------- + Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good + Probabilities With Supervised Learning, in Proceedings of the 22nd + International Conference on Machine Learning (ICML). + See section 4 (Qualitative Analysis of Predictions).
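Editor's aside on `_SigmoidCalibration.predict` above: a hedged sketch of the Platt mapping `expit(-(a_ * T + b_))` applied by hand. The slope and intercept values here are illustrative stand-ins, not fitted outputs of `_sigmoid_calibration`.

import numpy as np
from scipy.special import expit

a_, b_ = 1.5, -0.1                 # illustrative Platt parameters
F = np.array([-2.0, 0.0, 3.0])     # decision_function-style scores
p = expit(-(a_ * F + b_))          # same formula as _SigmoidCalibration.predict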
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.calibration import calibration_curve + >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1]) + >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.]) + >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3) + >>> prob_true + array([0. , 0.5, 1. ]) + >>> prob_pred + array([0.2 , 0.525, 0.85 ]) + """ + y_true = column_or_1d(y_true) + y_prob = column_or_1d(y_prob) + check_consistent_length(y_true, y_prob) + pos_label = _check_pos_label_consistency(pos_label, y_true) + + if y_prob.min() < 0 or y_prob.max() > 1: + raise ValueError("y_prob has values outside [0, 1].") + + labels = np.unique(y_true) + if len(labels) > 2: + raise ValueError( + f"Only binary classification is supported. Provided labels {labels}." + ) + y_true = y_true == pos_label + + if strategy == "quantile": # Determine bin edges by distribution of data + quantiles = np.linspace(0, 1, n_bins + 1) + bins = np.percentile(y_prob, quantiles * 100) + elif strategy == "uniform": + bins = np.linspace(0.0, 1.0, n_bins + 1) + else: + raise ValueError( + "Invalid entry to 'strategy' input. Strategy " + "must be either 'quantile' or 'uniform'." + ) + + binids = np.searchsorted(bins[1:-1], y_prob) + + bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins)) + bin_true = np.bincount(binids, weights=y_true, minlength=len(bins)) + bin_total = np.bincount(binids, minlength=len(bins)) + + nonzero = bin_total != 0 + prob_true = bin_true[nonzero] / bin_total[nonzero] + prob_pred = bin_sums[nonzero] / bin_total[nonzero] + + return prob_true, prob_pred + + +class CalibrationDisplay(_BinaryClassifierCurveDisplayMixin): + """Calibration curve (also known as reliability diagram) visualization. + + It is recommended to use + :func:`~sklearn.calibration.CalibrationDisplay.from_estimator` or + :func:`~sklearn.calibration.CalibrationDisplay.from_predictions` + to create a `CalibrationDisplay`. All parameters are stored as attributes. + + Read more about calibration in the :ref:`User Guide ` and + more about the scikit-learn visualization API in :ref:`visualizations`. + + For an example on how to use the visualization, see + :ref:`sphx_glr_auto_examples_calibration_plot_calibration_curve.py`. + + .. versionadded:: 1.0 + + Parameters + ---------- + prob_true : ndarray of shape (n_bins,) + The proportion of samples whose class is the positive class (fraction + of positives), in each bin. + + prob_pred : ndarray of shape (n_bins,) + The mean predicted probability in each bin. + + y_prob : ndarray of shape (n_samples,) + Probability estimates for the positive class, for each sample. + + estimator_name : str, default=None + Name of estimator. If None, the estimator name is not shown. + + pos_label : int, float, bool or str, default=None + The positive class when computing the calibration curve. + By default, `pos_label` is set to `estimators.classes_[1]` when using + `from_estimator` and set to 1 when using `from_predictions`. + + .. versionadded:: 1.1 + + Attributes + ---------- + line_ : matplotlib Artist + Calibration curve. + + ax_ : matplotlib Axes + Axes with calibration curve. + + figure_ : matplotlib Figure + Figure containing the curve. + + See Also + -------- + calibration_curve : Compute true and predicted probabilities for a + calibration curve. + CalibrationDisplay.from_predictions : Plot calibration curve using true + and predicted labels. + CalibrationDisplay.from_estimator : Plot calibration curve using an + estimator and data. 
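Editor's aside: a hedged re-derivation of the 'uniform' binning inside `calibration_curve` above, with tiny illustrative arrays; it mirrors the `searchsorted`/`bincount` steps of the implementation.

import numpy as np

y_true = np.array([0, 0, 1, 1])
y_prob = np.array([0.1, 0.4, 0.35, 0.8])
n_bins = 2
bins = np.linspace(0.0, 1.0, n_bins + 1)
binids = np.searchsorted(bins[1:-1], y_prob)                    # bin index per sample
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = bin_true[nonzero] / bin_total[nonzero]              # fraction of positives per bin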
+ + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.calibration import calibration_curve, CalibrationDisplay + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = LogisticRegression(random_state=0) + >>> clf.fit(X_train, y_train) + LogisticRegression(random_state=0) + >>> y_prob = clf.predict_proba(X_test)[:, 1] + >>> prob_true, prob_pred = calibration_curve(y_test, y_prob, n_bins=10) + >>> disp = CalibrationDisplay(prob_true, prob_pred, y_prob) + >>> disp.plot() + <...> + """ + + def __init__( + self, prob_true, prob_pred, y_prob, *, estimator_name=None, pos_label=None + ): + self.prob_true = prob_true + self.prob_pred = prob_pred + self.y_prob = y_prob + self.estimator_name = estimator_name + self.pos_label = pos_label + + def plot(self, *, ax=None, name=None, ref_line=True, **kwargs): + """Plot visualization. + + Extra keyword arguments will be passed to + :func:`matplotlib.pyplot.plot`. + + Parameters + ---------- + ax : Matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + name : str, default=None + Name for labeling curve. If `None`, use `estimator_name` if + not `None`, otherwise no labeling is shown. + + ref_line : bool, default=True + If `True`, plots a reference line representing a perfectly + calibrated classifier. + + **kwargs : dict + Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`. + + Returns + ------- + display : :class:`~sklearn.calibration.CalibrationDisplay` + Object that stores computed values. + """ + self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name) + + info_pos_label = ( + f"(Positive class: {self.pos_label})" if self.pos_label is not None else "" + ) + + default_line_kwargs = {"marker": "s", "linestyle": "-"} + if name is not None: + default_line_kwargs["label"] = name + line_kwargs = _validate_style_kwargs(default_line_kwargs, kwargs) + + ref_line_label = "Perfectly calibrated" + existing_ref_line = ref_line_label in self.ax_.get_legend_handles_labels()[1] + if ref_line and not existing_ref_line: + self.ax_.plot([0, 1], [0, 1], "k:", label=ref_line_label) + self.line_ = self.ax_.plot(self.prob_pred, self.prob_true, **line_kwargs)[0] + + # We always have to show the legend for at least the reference line + self.ax_.legend(loc="lower right") + + xlabel = f"Mean predicted probability {info_pos_label}" + ylabel = f"Fraction of positives {info_pos_label}" + self.ax_.set(xlabel=xlabel, ylabel=ylabel) + + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + n_bins=5, + strategy="uniform", + pos_label=None, + name=None, + ref_line=True, + ax=None, + **kwargs, + ): + """Plot calibration curve using a binary classifier and data. + + A calibration curve, also known as a reliability diagram, uses inputs + from a binary classifier and plots the average predicted probability + for each bin against the fraction of positive classes, on the + y-axis. + + Extra keyword arguments will be passed to + :func:`matplotlib.pyplot.plot`. + + Read more about calibration in the :ref:`User Guide ` and + more about the scikit-learn visualization API in :ref:`visualizations`. + + .. 
versionadded:: 1.0 + + Parameters + ---------- + estimator : estimator instance + Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline` + in which the last estimator is a classifier. The classifier must + have a :term:`predict_proba` method. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input values. + + y : array-like of shape (n_samples,) + Binary target values. + + n_bins : int, default=5 + Number of bins to discretize the [0, 1] interval into when + calculating the calibration curve. A bigger number requires more + data. + + strategy : {'uniform', 'quantile'}, default='uniform' + Strategy used to define the widths of the bins. + + - `'uniform'`: The bins have identical widths. + - `'quantile'`: The bins have the same number of samples and depend + on predicted probabilities. + + pos_label : int, float, bool or str, default=None + The positive class when computing the calibration curve. + By default, `estimators.classes_[1]` is considered as the + positive class. + + .. versionadded:: 1.1 + + name : str, default=None + Name for labeling curve. If `None`, the name of the estimator is + used. + + ref_line : bool, default=True + If `True`, plots a reference line representing a perfectly + calibrated classifier. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + **kwargs : dict + Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`. + + Returns + ------- + display : :class:`~sklearn.calibration.CalibrationDisplay`. + Object that stores computed values. + + See Also + -------- + CalibrationDisplay.from_predictions : Plot calibration curve using true + and predicted labels. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.calibration import CalibrationDisplay + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = LogisticRegression(random_state=0) + >>> clf.fit(X_train, y_train) + LogisticRegression(random_state=0) + >>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test) + >>> plt.show() + """ + y_prob, pos_label, name = cls._validate_and_get_response_values( + estimator, + X, + y, + response_method="predict_proba", + pos_label=pos_label, + name=name, + ) + + return cls.from_predictions( + y, + y_prob, + n_bins=n_bins, + strategy=strategy, + pos_label=pos_label, + name=name, + ref_line=ref_line, + ax=ax, + **kwargs, + ) + + @classmethod + def from_predictions( + cls, + y_true, + y_prob, + *, + n_bins=5, + strategy="uniform", + pos_label=None, + name=None, + ref_line=True, + ax=None, + **kwargs, + ): + """Plot calibration curve using true labels and predicted probabilities. + + Calibration curve, also known as reliability diagram, uses inputs + from a binary classifier and plots the average predicted probability + for each bin against the fraction of positive classes, on the + y-axis. + + Extra keyword arguments will be passed to + :func:`matplotlib.pyplot.plot`. + + Read more about calibration in the :ref:`User Guide ` and + more about the scikit-learn visualization API in :ref:`visualizations`. + + .. versionadded:: 1.0 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True labels. 
+ + y_prob : array-like of shape (n_samples,) + The predicted probabilities of the positive class. + + n_bins : int, default=5 + Number of bins to discretize the [0, 1] interval into when + calculating the calibration curve. A bigger number requires more + data. + + strategy : {'uniform', 'quantile'}, default='uniform' + Strategy used to define the widths of the bins. + + - `'uniform'`: The bins have identical widths. + - `'quantile'`: The bins have the same number of samples and depend + on predicted probabilities. + + pos_label : int, float, bool or str, default=None + The positive class when computing the calibration curve. + By default `pos_label` is set to 1. + + .. versionadded:: 1.1 + + name : str, default=None + Name for labeling curve. + + ref_line : bool, default=True + If `True`, plots a reference line representing a perfectly + calibrated classifier. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + **kwargs : dict + Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`. + + Returns + ------- + display : :class:`~sklearn.calibration.CalibrationDisplay`. + Object that stores computed values. + + See Also + -------- + CalibrationDisplay.from_estimator : Plot calibration curve using an + estimator and data. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.calibration import CalibrationDisplay + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = LogisticRegression(random_state=0) + >>> clf.fit(X_train, y_train) + LogisticRegression(random_state=0) + >>> y_prob = clf.predict_proba(X_test)[:, 1] + >>> disp = CalibrationDisplay.from_predictions(y_test, y_prob) + >>> plt.show() + """ + pos_label_validated, name = cls._validate_from_predictions_params( + y_true, y_prob, sample_weight=None, pos_label=pos_label, name=name + ) + + prob_true, prob_pred = calibration_curve( + y_true, y_prob, n_bins=n_bins, strategy=strategy, pos_label=pos_label + ) + + disp = cls( + prob_true=prob_true, + prob_pred=prob_pred, + y_prob=y_prob, + estimator_name=name, + pos_label=pos_label_validated, + ) + return disp.plot(ax=ax, ref_line=ref_line, **kwargs) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/conftest.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..6c91c5340b4865bd13c94d28e4b107b5bb08f286 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/conftest.py @@ -0,0 +1,358 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import builtins +import platform +import sys +from contextlib import suppress +from functools import wraps +from os import environ +from unittest import SkipTest + +import joblib +import numpy as np +import pytest +from _pytest.doctest import DoctestItem +from threadpoolctl import threadpool_limits + +from sklearn import set_config +from sklearn._min_dependencies import PYTEST_MIN_VERSION +from sklearn.datasets import ( + fetch_20newsgroups, + fetch_20newsgroups_vectorized, + fetch_california_housing, + fetch_covtype, + fetch_kddcup99, + fetch_lfw_pairs, + fetch_lfw_people, + fetch_olivetti_faces, + fetch_rcv1, + fetch_species_distributions, +) +from 
sklearn.utils._testing import get_pytest_filterwarning_lines +from sklearn.utils.fixes import ( + _IS_32BIT, + np_base_version, + parse_version, + sp_version, +) + +if parse_version(pytest.__version__) < parse_version(PYTEST_MIN_VERSION): + raise ImportError( + f"Your version of pytest is too old. Got version {pytest.__version__}, you" + f" should have pytest >= {PYTEST_MIN_VERSION} installed." + ) + +scipy_datasets_require_network = sp_version >= parse_version("1.10") + + +def raccoon_face_or_skip(): + # SciPy >= 1.10 requires network to access to get data + if scipy_datasets_require_network: + run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0" + if not run_network_tests: + raise SkipTest("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0") + + try: + import pooch # noqa + except ImportError: + raise SkipTest("test requires pooch to be installed") + + from scipy.datasets import face + else: + from scipy.misc import face + + return face(gray=True) + + +dataset_fetchers = { + "fetch_20newsgroups_fxt": fetch_20newsgroups, + "fetch_20newsgroups_vectorized_fxt": fetch_20newsgroups_vectorized, + "fetch_california_housing_fxt": fetch_california_housing, + "fetch_covtype_fxt": fetch_covtype, + "fetch_kddcup99_fxt": fetch_kddcup99, + "fetch_lfw_pairs_fxt": fetch_lfw_pairs, + "fetch_lfw_people_fxt": fetch_lfw_people, + "fetch_olivetti_faces_fxt": fetch_olivetti_faces, + "fetch_rcv1_fxt": fetch_rcv1, + "fetch_species_distributions_fxt": fetch_species_distributions, +} + +if scipy_datasets_require_network: + dataset_fetchers["raccoon_face_fxt"] = raccoon_face_or_skip + +_SKIP32_MARK = pytest.mark.skipif( + environ.get("SKLEARN_RUN_FLOAT32_TESTS", "0") != "1", + reason="Set SKLEARN_RUN_FLOAT32_TESTS=1 to run float32 dtype tests", +) + + +# Global fixtures +@pytest.fixture(params=[pytest.param(np.float32, marks=_SKIP32_MARK), np.float64]) +def global_dtype(request): + yield request.param + + +def _fetch_fixture(f): + """Fetch dataset (download if missing and requested by environment).""" + download_if_missing = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0" + + @wraps(f) + def wrapped(*args, **kwargs): + kwargs["download_if_missing"] = download_if_missing + try: + return f(*args, **kwargs) + except OSError as e: + if str(e) != "Data not found and `download_if_missing` is False": + raise + pytest.skip("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0") + + return pytest.fixture(lambda: wrapped) + + +# Adds fixtures for fetching data +fetch_20newsgroups_fxt = _fetch_fixture(fetch_20newsgroups) +fetch_20newsgroups_vectorized_fxt = _fetch_fixture(fetch_20newsgroups_vectorized) +fetch_california_housing_fxt = _fetch_fixture(fetch_california_housing) +fetch_covtype_fxt = _fetch_fixture(fetch_covtype) +fetch_kddcup99_fxt = _fetch_fixture(fetch_kddcup99) +fetch_lfw_pairs_fxt = _fetch_fixture(fetch_lfw_pairs) +fetch_lfw_people_fxt = _fetch_fixture(fetch_lfw_people) +fetch_olivetti_faces_fxt = _fetch_fixture(fetch_olivetti_faces) +fetch_rcv1_fxt = _fetch_fixture(fetch_rcv1) +fetch_species_distributions_fxt = _fetch_fixture(fetch_species_distributions) +raccoon_face_fxt = pytest.fixture(raccoon_face_or_skip) + + +def pytest_collection_modifyitems(config, items): + """Called after collect is completed. 
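The `_fetch_fixture` factory above is the key pattern in this conftest; a pared-down restatement (same environment variable, same `OSError` message check, hypothetical name `make_fetch_fixture`) shows how a plain dataset fetcher becomes a skip-aware pytest fixture.

```python
import os
from functools import wraps

import pytest


def make_fetch_fixture(fetcher):
    """Wrap a dataset fetcher so offline runs skip instead of downloading."""
    download = os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"

    @wraps(fetcher)
    def wrapped(*args, **kwargs):
        kwargs["download_if_missing"] = download
        try:
            return fetcher(*args, **kwargs)
        except OSError as exc:
            if str(exc) != "Data not found and `download_if_missing` is False":
                raise
            pytest.skip("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0")

    return pytest.fixture(lambda: wrapped)
```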
+ + Parameters + ---------- + config : pytest config + items : list of collected items + """ + run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0" + skip_network = pytest.mark.skip( + reason="test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0" + ) + + # download datasets during collection to avoid thread unsafe behavior + # when running pytest in parallel with pytest-xdist + dataset_features_set = set(dataset_fetchers) + datasets_to_download = set() + + for item in items: + if isinstance(item, DoctestItem) and "fetch_" in item.name: + fetcher_function_name = item.name.split(".")[-1] + dataset_fetchers_key = f"{fetcher_function_name}_fxt" + dataset_to_fetch = set([dataset_fetchers_key]) & dataset_features_set + elif not hasattr(item, "fixturenames"): + continue + else: + item_fixtures = set(item.fixturenames) + dataset_to_fetch = item_fixtures & dataset_features_set + + if not dataset_to_fetch: + continue + + if run_network_tests: + datasets_to_download |= dataset_to_fetch + else: + # network tests are skipped + item.add_marker(skip_network) + + # Only download datasets on the first worker spawned by pytest-xdist + # to avoid thread unsafe behavior. If pytest-xdist is not used, we still + # download before tests run. + worker_id = environ.get("PYTEST_XDIST_WORKER", "gw0") + if worker_id == "gw0" and run_network_tests: + for name in datasets_to_download: + with suppress(SkipTest): + dataset_fetchers[name]() + + for item in items: + # Known failure on with GradientBoostingClassifier on ARM64 + if ( + item.name.endswith("GradientBoostingClassifier") + and platform.machine() == "aarch64" + ): + marker = pytest.mark.xfail( + reason=( + "know failure. See " + "https://github.com/scikit-learn/scikit-learn/issues/17797" # noqa + ) + ) + item.add_marker(marker) + + skip_doctests = False + try: + import matplotlib # noqa + except ImportError: + skip_doctests = True + reason = "matplotlib is required to run the doctests" + + if _IS_32BIT: + reason = "doctest are only run when the default numpy int is 64 bits." + skip_doctests = True + elif sys.platform.startswith("win32"): + reason = ( + "doctests are not run for Windows because numpy arrays " + "repr is inconsistent across platforms." + ) + skip_doctests = True + + if np_base_version < parse_version("2"): + # TODO: configure numpy to output scalar arrays as regular Python scalars + # once possible to improve readability of the tests docstrings. + # https://numpy.org/neps/nep-0051-scalar-representation.html#implementation + reason = "Due to NEP 51 numpy scalar repr has changed in numpy 2" + skip_doctests = True + + if sp_version < parse_version("1.14"): + reason = "Scipy sparse matrix repr has changed in scipy 1.14" + skip_doctests = True + + # Normally doctest has the entire module's scope. Here we set globs to an empty dict + # to remove the module's scope: + # https://docs.python.org/3/library/doctest.html#what-s-the-execution-context + for item in items: + if isinstance(item, DoctestItem): + item.dtest.globs = {} + + if skip_doctests: + skip_marker = pytest.mark.skip(reason=reason) + + for item in items: + if isinstance(item, DoctestItem): + # work-around an internal error with pytest if adding a skip + # mark to a doctest in a contextmanager, see + # https://github.com/pytest-dev/pytest/issues/8796 for more + # details. 
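The worker gate in `pytest_collection_modifyitems` is easy to miss; condensed into a function (the `fetchers` argument is a hypothetical stand-in for the fetcher set gathered during collection), it amounts to:

```python
import os
from contextlib import suppress
from unittest import SkipTest


def download_once(fetchers, run_network_tests):
    # Only the first xdist worker ("gw0", also the default when xdist is
    # absent) downloads, so parallel workers never race on the disk cache.
    worker_id = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    if worker_id == "gw0" and run_network_tests:
        for fetch in fetchers:
            with suppress(SkipTest):  # a fetcher may skip while offline
                fetch()
```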
+ if item.name != "sklearn._config.config_context": + item.add_marker(skip_marker) + try: + import PIL # noqa + + pillow_installed = True + except ImportError: + pillow_installed = False + + if not pillow_installed: + skip_marker = pytest.mark.skip(reason="pillow (or PIL) not installed!") + for item in items: + if item.name in [ + "sklearn.feature_extraction.image.PatchExtractor", + "sklearn.feature_extraction.image.extract_patches_2d", + ]: + item.add_marker(skip_marker) + + +@pytest.fixture(scope="function") +def pyplot(): + """Setup and teardown fixture for matplotlib. + + This fixture checks if we can import matplotlib. If not, the tests will be + skipped. Otherwise, we close the figures before and after running the + functions. + + Returns + ------- + pyplot : module + The ``matplotlib.pyplot`` module. + """ + pyplot = pytest.importorskip("matplotlib.pyplot") + pyplot.close("all") + yield pyplot + pyplot.close("all") + + +def pytest_generate_tests(metafunc): + """Parametrization of global_random_seed fixture + + based on the SKLEARN_TESTS_GLOBAL_RANDOM_SEED environment variable. + + The goal of this fixture is to prevent tests that use it to be sensitive + to a specific seed value while still being deterministic by default. + + See the documentation for the SKLEARN_TESTS_GLOBAL_RANDOM_SEED + variable for instructions on how to use this fixture. + + https://scikit-learn.org/dev/computing/parallelism.html#sklearn-tests-global-random-seed + + """ + # When using pytest-xdist this function is called in the xdist workers. + # We rely on SKLEARN_TESTS_GLOBAL_RANDOM_SEED environment variable which is + # set in before running pytest and is available in xdist workers since they + # are subprocesses. + RANDOM_SEED_RANGE = list(range(100)) # All seeds in [0, 99] should be valid. + random_seed_var = environ.get("SKLEARN_TESTS_GLOBAL_RANDOM_SEED") + + default_random_seeds = [42] + + if random_seed_var is None: + random_seeds = default_random_seeds + elif random_seed_var == "all": + random_seeds = RANDOM_SEED_RANGE + else: + if "-" in random_seed_var: + start, stop = random_seed_var.split("-") + random_seeds = list(range(int(start), int(stop) + 1)) + else: + random_seeds = [int(random_seed_var)] + + if min(random_seeds) < 0 or max(random_seeds) > 99: + raise ValueError( + "The value(s) of the environment variable " + "SKLEARN_TESTS_GLOBAL_RANDOM_SEED must be in the range [0, 99] " + f"(or 'all'), got: {random_seed_var}" + ) + + if "global_random_seed" in metafunc.fixturenames: + metafunc.parametrize("global_random_seed", random_seeds) + + +def pytest_configure(config): + # Use matplotlib agg backend during the tests including doctests + try: + import matplotlib + + matplotlib.use("agg") + except ImportError: + pass + + allowed_parallelism = joblib.cpu_count(only_physical_cores=True) + xdist_worker_count = environ.get("PYTEST_XDIST_WORKER_COUNT") + if xdist_worker_count is not None: + # Set the number of OpenMP and BLAS threads based on the number of workers + # xdist is using to prevent oversubscription. + allowed_parallelism = max(allowed_parallelism // int(xdist_worker_count), 1) + threadpool_limits(allowed_parallelism) + + if environ.get("SKLEARN_WARNINGS_AS_ERRORS", "0") != "0": + # This seems like the only way to programmatically change the config + # filterwarnings. 
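The seed parsing in `pytest_generate_tests` above accepts a single seed, an inclusive `start-stop` range, or `all`; restated as a small function for clarity (the real hook additionally rejects values outside [0, 99]):

```python
def parse_seeds(value):
    # Mirrors SKLEARN_TESTS_GLOBAL_RANDOM_SEED handling in pytest_generate_tests.
    if value is None:
        return [42]                    # deterministic default
    if value == "all":
        return list(range(100))        # every seed in [0, 99]
    if "-" in value:
        start, stop = value.split("-")
        return list(range(int(start), int(stop) + 1))  # inclusive range
    return [int(value)]


assert parse_seeds(None) == [42]
assert parse_seeds("7") == [7]
assert parse_seeds("40-42") == [40, 41, 42]
```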
This was suggested in + # https://github.com/pytest-dev/pytest/issues/3311#issuecomment-373177592 + for line in get_pytest_filterwarning_lines(): + config.addinivalue_line("filterwarnings", line) + + +@pytest.fixture +def hide_available_pandas(monkeypatch): + """Pretend pandas was not installed.""" + import_orig = builtins.__import__ + + def mocked_import(name, *args, **kwargs): + if name == "pandas": + raise ImportError() + return import_orig(name, *args, **kwargs) + + monkeypatch.setattr(builtins, "__import__", mocked_import) + + +@pytest.fixture +def print_changed_only_false(): + """Set `print_changed_only` to False for the duration of the test.""" + set_config(print_changed_only=False) + yield + set_config(print_changed_only=True) # reset to default diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/discriminant_analysis.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/discriminant_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..6a851c07dc896eaf785974bb025981adec43a8b3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/discriminant_analysis.py @@ -0,0 +1,1129 @@ +"""Linear and quadratic discriminant analysis.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.linalg +from scipy import linalg + +from .base import ( + BaseEstimator, + ClassifierMixin, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from .covariance import empirical_covariance, ledoit_wolf, shrunk_covariance +from .linear_model._base import LinearClassifierMixin +from .preprocessing import StandardScaler +from .utils._array_api import _expit, device, get_namespace, size +from .utils._param_validation import HasMethods, Interval, StrOptions +from .utils.extmath import softmax +from .utils.multiclass import check_classification_targets, unique_labels +from .utils.validation import check_is_fitted, validate_data + +__all__ = ["LinearDiscriminantAnalysis", "QuadraticDiscriminantAnalysis"] + + +def _cov(X, shrinkage=None, covariance_estimator=None): + """Estimate covariance matrix (using optional covariance_estimator). + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + shrinkage : {'empirical', 'auto'} or float, default=None + Shrinkage parameter, possible values: + - None or 'empirical': no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + Shrinkage parameter is ignored if `covariance_estimator` + is not None. + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying on the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in :mod:`sklearn.covariance``. + if None the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Returns + ------- + s : ndarray of shape (n_features, n_features) + Estimated covariance matrix. 
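The three shrinkage modes `_cov` dispatches on can be exercised directly through the public covariance helpers; note that the real `_cov` additionally standardizes the features before Ledoit-Wolf in the `'auto'` branch and rescales the result afterwards.

```python
import numpy as np
from sklearn.covariance import empirical_covariance, ledoit_wolf, shrunk_covariance

rng = np.random.RandomState(0)
X = rng.randn(50, 3)

s_emp = empirical_covariance(X)                    # shrinkage=None/'empirical'
s_fixed = shrunk_covariance(s_emp, shrinkage=0.2)  # (1-a)*S + a*tr(S)/p * I
s_auto, a_hat = ledoit_wolf(X)                     # 'auto': data-driven a
print(s_emp.shape, s_fixed.shape, s_auto.shape, a_hat)
```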
+ """ + if covariance_estimator is None: + shrinkage = "empirical" if shrinkage is None else shrinkage + if isinstance(shrinkage, str): + if shrinkage == "auto": + sc = StandardScaler() # standardize features + X = sc.fit_transform(X) + s = ledoit_wolf(X)[0] + # rescale + s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] + elif shrinkage == "empirical": + s = empirical_covariance(X) + elif isinstance(shrinkage, Real): + s = shrunk_covariance(empirical_covariance(X), shrinkage) + else: + if shrinkage is not None and shrinkage != 0: + raise ValueError( + "covariance_estimator and shrinkage parameters " + "are not None. Only one of the two can be set." + ) + covariance_estimator.fit(X) + if not hasattr(covariance_estimator, "covariance_"): + raise ValueError( + "%s does not have a covariance_ attribute" + % covariance_estimator.__class__.__name__ + ) + s = covariance_estimator.covariance_ + return s + + +def _class_means(X, y): + """Compute class means. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + Returns + ------- + means : array-like of shape (n_classes, n_features) + Class means. + """ + xp, is_array_api_compliant = get_namespace(X) + classes, y = xp.unique_inverse(y) + means = xp.zeros((classes.shape[0], X.shape[1]), device=device(X), dtype=X.dtype) + + if is_array_api_compliant: + for i in range(classes.shape[0]): + means[i, :] = xp.mean(X[y == i], axis=0) + else: + # TODO: Explore the choice of using bincount + add.at as it seems sub optimal + # from a performance-wise + cnt = np.bincount(y) + np.add.at(means, y, X) + means /= cnt[:, None] + return means + + +def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None): + """Compute weighted within-class covariance matrix. + + The per-class covariance are weighted by the class priors. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + priors : array-like of shape (n_classes,) + Class priors. + + shrinkage : 'auto' or float, default=None + Shrinkage parameter, possible values: + - None: no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + Shrinkage parameter is ignored if `covariance_estimator` is not None. + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in sklearn.covariance. + If None, the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Returns + ------- + cov : array-like of shape (n_features, n_features) + Weighted within-class covariance matrix + """ + classes = np.unique(y) + cov = np.zeros(shape=(X.shape[1], X.shape[1])) + for idx, group in enumerate(classes): + Xg = X[y == group, :] + cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage, covariance_estimator)) + return cov + + +class DiscriminantAnalysisPredictionMixin: + """Mixin class for QuadraticDiscriminantAnalysis and NearestCentroid.""" + + def decision_function(self, X): + """Apply decision function to an array of samples. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array of samples (test vectors). + + Returns + ------- + y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes) + Decision function values related to each class, per sample. + In the two-class case, the shape is `(n_samples,)`, giving the + log likelihood ratio of the positive class. + """ + y_scores = self._decision_function(X) + if len(self.classes_) == 2: + return y_scores[:, 1] - y_scores[:, 0] + return y_scores + + def predict(self, X): + """Perform classification on an array of vectors `X`. + + Returns the class label for each sample. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Class label for each sample. + """ + scores = self._decision_function(X) + return self.classes_.take(scores.argmax(axis=1)) + + def predict_proba(self, X): + """Estimate class probabilities. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + y_proba : ndarray of shape (n_samples, n_classes) + Probability estimate of the sample for each class in the + model, where classes are ordered as they are in `self.classes_`. + """ + return np.exp(self.predict_log_proba(X)) + + def predict_log_proba(self, X): + """Estimate log class probabilities. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + y_log_proba : ndarray of shape (n_samples, n_classes) + Estimated log probabilities. + """ + scores = self._decision_function(X) + log_likelihood = scores - scores.max(axis=1)[:, np.newaxis] + return log_likelihood - np.log( + np.exp(log_likelihood).sum(axis=1)[:, np.newaxis] + ) + + +class LinearDiscriminantAnalysis( + ClassNamePrefixFeaturesOutMixin, + LinearClassifierMixin, + TransformerMixin, + BaseEstimator, +): + """Linear Discriminant Analysis. + + A classifier with a linear decision boundary, generated by fitting class + conditional densities to the data and using Bayes' rule. + + The model fits a Gaussian density to each class, assuming that all classes + share the same covariance matrix. + + The fitted model can also be used to reduce the dimensionality of the input + by projecting it to the most discriminative directions, using the + `transform` method. + + .. versionadded:: 0.17 + + For a comparison between + :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis` + and :class:`~sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis`, see + :ref:`sphx_glr_auto_examples_classification_plot_lda_qda.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + solver : {'svd', 'lsqr', 'eigen'}, default='svd' + Solver to use, possible values: + - 'svd': Singular value decomposition (default). + Does not compute the covariance matrix, therefore this solver is + recommended for data with a large number of features. + - 'lsqr': Least squares solution. + Can be combined with shrinkage or custom covariance estimator. + - 'eigen': Eigenvalue decomposition. + Can be combined with shrinkage or custom covariance estimator. + + .. versionchanged:: 1.2 + `solver="svd"` now has experimental Array API support. See the + :ref:`Array API User Guide ` for more details. 
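`predict_log_proba` above normalizes with a max shift before exponentiating; the short sketch below shows why, on scores that would overflow a naive softmax:

```python
import numpy as np

scores = np.array([[1000.0, 1001.0], [-5.0, 3.0]])   # naive exp overflows

shifted = scores - scores.max(axis=1, keepdims=True)
log_proba = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
print(np.exp(log_proba).sum(axis=1))                  # rows sum to 1.0
```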
+ + shrinkage : 'auto' or float, default=None + Shrinkage parameter, possible values: + - None: no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + This should be left to None if `covariance_estimator` is used. + Note that shrinkage works only with 'lsqr' and 'eigen' solvers. + + For a usage example, see + :ref:`sphx_glr_auto_examples_classification_plot_lda.py`. + + priors : array-like of shape (n_classes,), default=None + The class prior probabilities. By default, the class proportions are + inferred from the training data. + + n_components : int, default=None + Number of components (<= min(n_classes - 1, n_features)) for + dimensionality reduction. If None, will be set to + min(n_classes - 1, n_features). This parameter only affects the + `transform` method. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_pca_vs_lda.py`. + + store_covariance : bool, default=False + If True, explicitly compute the weighted within-class covariance + matrix when solver is 'svd'. The matrix is always computed + and stored for the other solvers. + + .. versionadded:: 0.17 + + tol : float, default=1.0e-4 + Absolute threshold for a singular value of X to be considered + significant, used to estimate the rank of X. Dimensions whose + singular values are non-significant are discarded. Only used if + solver is 'svd'. + + .. versionadded:: 0.17 + + covariance_estimator : covariance estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying on the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in :mod:`sklearn.covariance`. + if None the shrinkage parameter drives the estimate. + + This should be left to None if `shrinkage` is used. + Note that `covariance_estimator` works only with 'lsqr' and 'eigen' + solvers. + + .. versionadded:: 0.24 + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_classes, n_features) + Weight vector(s). + + intercept_ : ndarray of shape (n_classes,) + Intercept term. + + covariance_ : array-like of shape (n_features, n_features) + Weighted within-class covariance matrix. It corresponds to + `sum_k prior_k * C_k` where `C_k` is the covariance matrix of the + samples in class `k`. The `C_k` are estimated using the (potentially + shrunk) biased estimator of covariance. If solver is 'svd', only + exists when `store_covariance` is True. + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + If ``n_components`` is not set then all components are stored and the + sum of explained variances is equal to 1.0. Only available when eigen + or svd solver is used. + + means_ : array-like of shape (n_classes, n_features) + Class-wise means. + + priors_ : array-like of shape (n_classes,) + Class priors (sum to 1). + + scalings_ : array-like of shape (rank, n_classes - 1) + Scaling of the features in the space spanned by the class centroids. + Only available for 'svd' and 'eigen' solvers. + + xbar_ : array-like of shape (n_features,) + Overall mean. Only present if solver is 'svd'. + + classes_ : array-like of shape (n_classes,) + Unique class labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + QuadraticDiscriminantAnalysis : Quadratic Discriminant Analysis. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> y = np.array([1, 1, 1, 2, 2, 2]) + >>> clf = LinearDiscriminantAnalysis() + >>> clf.fit(X, y) + LinearDiscriminantAnalysis() + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + "solver": [StrOptions({"svd", "lsqr", "eigen"})], + "shrinkage": [StrOptions({"auto"}), Interval(Real, 0, 1, closed="both"), None], + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "priors": ["array-like", None], + "store_covariance": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + "covariance_estimator": [HasMethods("fit"), None], + } + + def __init__( + self, + solver="svd", + shrinkage=None, + priors=None, + n_components=None, + store_covariance=False, + tol=1e-4, + covariance_estimator=None, + ): + self.solver = solver + self.shrinkage = shrinkage + self.priors = priors + self.n_components = n_components + self.store_covariance = store_covariance # used only in svd solver + self.tol = tol # used only in svd solver + self.covariance_estimator = covariance_estimator + + def _solve_lstsq(self, X, y, shrinkage, covariance_estimator): + """Least squares solver. + + The least squares solver computes a straightforward solution of the + optimal decision rule based directly on the discriminant functions. It + can only be used for classification (with any covariance estimator), + because + estimation of eigenvectors is not performed. Therefore, dimensionality + reduction with the transform is not supported. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_classes) + Target values. + + shrinkage : 'auto', float or None + Shrinkage parameter, possible values: + - None: no shrinkage. + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + Shrinkage parameter is ignored if `covariance_estimator` i + not None + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in sklearn.covariance. + if None the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Notes + ----- + This solver is based on [1]_, section 2.6.2, pp. 39-41. + + References + ---------- + .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification + (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN + 0-471-05669-3. + """ + self.means_ = _class_means(X, y) + self.covariance_ = _class_cov( + X, y, self.priors_, shrinkage, covariance_estimator + ) + self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T + self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log( + self.priors_ + ) + + def _solve_eigen(self, X, y, shrinkage, covariance_estimator): + """Eigenvalue solver. 
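`_solve_lstsq` boils down to the closed-form LDA discriminant: `coef_k = Sigma^{-1} mu_k` and `intercept_k = -0.5 * mu_k^T Sigma^{-1} mu_k + log(prior_k)`. A toy sketch with hand-picked means and a shared covariance:

```python
import numpy as np
from scipy import linalg

means = np.array([[0.0, 0.0], [2.0, 1.0]])   # one row per class
cov = np.array([[1.0, 0.2], [0.2, 1.0]])     # shared within-class covariance
priors = np.array([0.5, 0.5])

coef = linalg.lstsq(cov, means.T)[0].T       # rows are Sigma^{-1} mu_k
intercept = -0.5 * np.diag(means @ coef.T) + np.log(priors)

x = np.array([1.5, 0.5])
print((x @ coef.T + intercept).argmax())     # -> 1 (closer to class 1 mean)
```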
+ + The eigenvalue solver computes the optimal solution of the Rayleigh + coefficient (basically the ratio of between class scatter to within + class scatter). This solver supports both classification and + dimensionality reduction (with any covariance estimator). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + shrinkage : 'auto', float or None + Shrinkage parameter, possible values: + - None: no shrinkage. + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage constant. + + Shrinkage parameter is ignored if `covariance_estimator` i + not None + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in sklearn.covariance. + if None the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Notes + ----- + This solver is based on [1]_, section 3.8.3, pp. 121-124. + + References + ---------- + .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification + (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN + 0-471-05669-3. + """ + self.means_ = _class_means(X, y) + self.covariance_ = _class_cov( + X, y, self.priors_, shrinkage, covariance_estimator + ) + + Sw = self.covariance_ # within scatter + St = _cov(X, shrinkage, covariance_estimator) # total scatter + Sb = St - Sw # between scatter + + evals, evecs = linalg.eigh(Sb, Sw) + self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][ + : self._max_components + ] + evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors + + self.scalings_ = evecs + self.coef_ = np.dot(self.means_, evecs).dot(evecs.T) + self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log( + self.priors_ + ) + + def _solve_svd(self, X, y): + """SVD solver. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. 
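`_solve_eigen` relies on `scipy.linalg.eigh` solving the generalized problem `Sb v = lambda Sw v`; with toy scatter matrices:

```python
import numpy as np
from scipy import linalg

Sw = np.array([[1.0, 0.0], [0.0, 2.0]])  # within-class scatter (toy, SPD)
Sb = np.array([[2.0, 0.5], [0.5, 0.5]])  # between-class scatter (toy)

evals, evecs = linalg.eigh(Sb, Sw)       # generalized: Sb v = lambda Sw v
order = np.argsort(evals)[::-1]          # most discriminative direction first
print(evals[order])
print(evecs[:, order])
```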
+ """ + xp, is_array_api_compliant = get_namespace(X) + + if is_array_api_compliant: + svd = xp.linalg.svd + else: + svd = scipy.linalg.svd + + n_samples, n_features = X.shape + n_classes = self.classes_.shape[0] + + self.means_ = _class_means(X, y) + if self.store_covariance: + self.covariance_ = _class_cov(X, y, self.priors_) + + Xc = [] + for idx, group in enumerate(self.classes_): + Xg = X[y == group] + Xc.append(Xg - self.means_[idx, :]) + + self.xbar_ = self.priors_ @ self.means_ + + Xc = xp.concat(Xc, axis=0) + + # 1) within (univariate) scaling by with classes std-dev + std = xp.std(Xc, axis=0) + # avoid division by zero in normalization + std[std == 0] = 1.0 + fac = xp.asarray(1.0 / (n_samples - n_classes), dtype=X.dtype) + + # 2) Within variance scaling + X = xp.sqrt(fac) * (Xc / std) + # SVD of centered (within)scaled data + U, S, Vt = svd(X, full_matrices=False) + + rank = xp.sum(xp.astype(S > self.tol, xp.int32)) + # Scaling of within covariance is: V' 1/S + scalings = (Vt[:rank, :] / std).T / S[:rank] + fac = 1.0 if n_classes == 1 else 1.0 / (n_classes - 1) + + # 3) Between variance scaling + # Scale weighted centers + X = ( + (xp.sqrt((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T + ).T @ scalings + # Centers are living in a space with n_classes-1 dim (maximum) + # Use SVD to find projection in the space spanned by the + # (n_classes) centers + _, S, Vt = svd(X, full_matrices=False) + + if self._max_components == 0: + self.explained_variance_ratio_ = xp.empty((0,), dtype=S.dtype) + else: + self.explained_variance_ratio_ = (S**2 / xp.sum(S**2))[ + : self._max_components + ] + + rank = xp.sum(xp.astype(S > self.tol * S[0], xp.int32)) + self.scalings_ = scalings @ Vt.T[:, :rank] + coef = (self.means_ - self.xbar_) @ self.scalings_ + self.intercept_ = -0.5 * xp.sum(coef**2, axis=1) + xp.log(self.priors_) + self.coef_ = coef @ self.scalings_.T + self.intercept_ -= self.xbar_ @ self.coef_.T + + @_fit_context( + # LinearDiscriminantAnalysis.covariance_estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the Linear Discriminant Analysis model. + + .. versionchanged:: 0.19 + `store_covariance` and `tol` has been moved to main constructor. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + Returns + ------- + self : object + Fitted estimator. + """ + xp, _ = get_namespace(X) + + X, y = validate_data( + self, X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32] + ) + self.classes_ = unique_labels(y) + n_samples, _ = X.shape + n_classes = self.classes_.shape[0] + + if n_samples == n_classes: + raise ValueError( + "The number of samples must be more than the number of classes." + ) + + if self.priors is None: # estimate priors from sample + _, cnts = xp.unique_counts(y) # non-negative ints + self.priors_ = xp.astype(cnts, X.dtype) / float(y.shape[0]) + else: + self.priors_ = xp.asarray(self.priors, dtype=X.dtype) + + if xp.any(self.priors_ < 0): + raise ValueError("priors must be non-negative") + + if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-5: + warnings.warn("The priors do not sum to 1. 
Renormalizing", UserWarning) + self.priors_ = self.priors_ / self.priors_.sum() + + # Maximum number of components no matter what n_components is + # specified: + max_components = min(n_classes - 1, X.shape[1]) + + if self.n_components is None: + self._max_components = max_components + else: + if self.n_components > max_components: + raise ValueError( + "n_components cannot be larger than min(n_features, n_classes - 1)." + ) + self._max_components = self.n_components + + if self.solver == "svd": + if self.shrinkage is not None: + raise NotImplementedError("shrinkage not supported with 'svd' solver.") + if self.covariance_estimator is not None: + raise ValueError( + "covariance estimator " + "is not supported " + "with svd solver. Try another solver" + ) + self._solve_svd(X, y) + elif self.solver == "lsqr": + self._solve_lstsq( + X, + y, + shrinkage=self.shrinkage, + covariance_estimator=self.covariance_estimator, + ) + elif self.solver == "eigen": + self._solve_eigen( + X, + y, + shrinkage=self.shrinkage, + covariance_estimator=self.covariance_estimator, + ) + if size(self.classes_) == 2: # treat binary case as a special case + coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype) + self.coef_ = xp.reshape(coef_, (1, -1)) + intercept_ = xp.asarray( + self.intercept_[1] - self.intercept_[0], dtype=X.dtype + ) + self.intercept_ = xp.reshape(intercept_, (1,)) + self._n_features_out = self._max_components + return self + + def transform(self, X): + """Project data to maximize class separation. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) or \ + (n_samples, min(rank, n_components)) + Transformed data. In the case of the 'svd' solver, the shape + is (n_samples, min(rank, n_components)). + """ + if self.solver == "lsqr": + raise NotImplementedError( + "transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')." + ) + check_is_fitted(self) + xp, _ = get_namespace(X) + X = validate_data(self, X, reset=False) + + if self.solver == "svd": + X_new = (X - self.xbar_) @ self.scalings_ + elif self.solver == "eigen": + X_new = X @ self.scalings_ + + return X_new[:, : self._max_components] + + def predict_proba(self, X): + """Estimate probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Estimated probabilities. + """ + check_is_fitted(self) + xp, is_array_api_compliant = get_namespace(X) + decision = self.decision_function(X) + if size(self.classes_) == 2: + proba = _expit(decision, xp) + return xp.stack([1 - proba, proba], axis=1) + else: + return softmax(decision) + + def predict_log_proba(self, X): + """Estimate log probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Estimated log probabilities. + """ + xp, _ = get_namespace(X) + prediction = self.predict_proba(X) + + info = xp.finfo(prediction.dtype) + if hasattr(info, "smallest_normal"): + smallest_normal = info.smallest_normal + else: + # smallest_normal was introduced in NumPy 1.22 + smallest_normal = info.tiny + + prediction[prediction == 0.0] += smallest_normal + return xp.log(prediction) + + def decision_function(self, X): + """Apply decision function to an array of samples. 
+ + The decision function is equal (up to a constant factor) to the + log-posterior of the model, i.e. `log p(y = k | x)`. In a binary + classification setting this instead corresponds to the difference + `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples (test vectors). + + Returns + ------- + y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes) + Decision function values related to each class, per sample. + In the two-class case, the shape is `(n_samples,)`, giving the + log likelihood ratio of the positive class. + """ + # Only override for the doc + return super().decision_function(X) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.array_api_support = True + return tags + + +class QuadraticDiscriminantAnalysis( + DiscriminantAnalysisPredictionMixin, ClassifierMixin, BaseEstimator +): + """Quadratic Discriminant Analysis. + + A classifier with a quadratic decision boundary, generated + by fitting class conditional densities to the data + and using Bayes' rule. + + The model fits a Gaussian density to each class. + + .. versionadded:: 0.17 + + For a comparison between + :class:`~sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis` + and :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`, see + :ref:`sphx_glr_auto_examples_classification_plot_lda_qda.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + priors : array-like of shape (n_classes,), default=None + Class priors. By default, the class proportions are inferred from the + training data. + + reg_param : float, default=0.0 + Regularizes the per-class covariance estimates by transforming S2 as + ``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``, + where S2 corresponds to the `scaling_` attribute of a given class. + + store_covariance : bool, default=False + If True, the class covariance matrices are explicitly computed and + stored in the `self.covariance_` attribute. + + .. versionadded:: 0.17 + + tol : float, default=1.0e-4 + Absolute threshold for the covariance matrix to be considered rank + deficient after applying some regularization (see `reg_param`) to each + `Sk` where `Sk` represents covariance matrix for k-th class. This + parameter does not affect the predictions. It controls when a warning + is raised if the covariance matrix is not full rank. + + .. versionadded:: 0.17 + + Attributes + ---------- + covariance_ : list of len n_classes of ndarray \ + of shape (n_features, n_features) + For each class, gives the covariance matrix estimated using the + samples of that class. The estimations are unbiased. Only present if + `store_covariance` is True. + + means_ : array-like of shape (n_classes, n_features) + Class-wise means. + + priors_ : array-like of shape (n_classes,) + Class priors (sum to 1). + + rotations_ : list of len n_classes of ndarray of shape (n_features, n_k) + For each class k an array of shape (n_features, n_k), where + ``n_k = min(n_features, number of elements in class k)`` + It is the rotation of the Gaussian distribution, i.e. its + principal axis. It corresponds to `V`, the matrix of eigenvectors + coming from the SVD of `Xk = U S Vt` where `Xk` is the centered + matrix of samples from class k. + + scalings_ : list of len n_classes of ndarray of shape (n_k,) + For each class, contains the scaling of + the Gaussian distributions along its principal axes, i.e. 
the + variance in the rotated coordinate system. It corresponds to `S^2 / + (n_samples - 1)`, where `S` is the diagonal matrix of singular values + from the SVD of `Xk`, where `Xk` is the centered matrix of samples + from class k. + + classes_ : ndarray of shape (n_classes,) + Unique class labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + LinearDiscriminantAnalysis : Linear Discriminant Analysis. + + Examples + -------- + >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis + >>> import numpy as np + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> y = np.array([1, 1, 1, 2, 2, 2]) + >>> clf = QuadraticDiscriminantAnalysis() + >>> clf.fit(X, y) + QuadraticDiscriminantAnalysis() + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + "priors": ["array-like", None], + "reg_param": [Interval(Real, 0, 1, closed="both")], + "store_covariance": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, *, priors=None, reg_param=0.0, store_covariance=False, tol=1.0e-4 + ): + self.priors = priors + self.reg_param = reg_param + self.store_covariance = store_covariance + self.tol = tol + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model according to the given training data and parameters. + + .. versionchanged:: 0.19 + ``store_covariances`` has been moved to main constructor as + ``store_covariance``. + + .. versionchanged:: 0.19 + ``tol`` has been moved to main constructor. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values (integers). + + Returns + ------- + self : object + Fitted estimator. + """ + X, y = validate_data(self, X, y) + check_classification_targets(y) + self.classes_, y = np.unique(y, return_inverse=True) + n_samples, n_features = X.shape + n_classes = len(self.classes_) + if n_classes < 2: + raise ValueError( + "The number of classes has to be greater than one; got %d class" + % (n_classes) + ) + if self.priors is None: + self.priors_ = np.bincount(y) / float(n_samples) + else: + self.priors_ = np.array(self.priors) + + cov = None + store_covariance = self.store_covariance + if store_covariance: + cov = [] + means = [] + scalings = [] + rotations = [] + for ind in range(n_classes): + Xg = X[y == ind, :] + meang = Xg.mean(0) + means.append(meang) + if len(Xg) == 1: + raise ValueError( + "y has only 1 sample in class %s, covariance is ill defined." + % str(self.classes_[ind]) + ) + Xgc = Xg - meang + # Xgc = U * S * V.T + _, S, Vt = np.linalg.svd(Xgc, full_matrices=False) + S2 = (S**2) / (len(Xg) - 1) + S2 = ((1 - self.reg_param) * S2) + self.reg_param + rank = np.sum(S2 > self.tol) + if rank < n_features: + warnings.warn( + f"The covariance matrix of class {ind} is not full rank. 
" + "Increasing the value of parameter `reg_param` might help" + " reducing the collinearity.", + linalg.LinAlgWarning, + ) + if self.store_covariance or store_covariance: + # cov = V * (S^2 / (n-1)) * V.T + cov.append(np.dot(S2 * Vt.T, Vt)) + scalings.append(S2) + rotations.append(Vt.T) + if self.store_covariance or store_covariance: + self.covariance_ = cov + self.means_ = np.asarray(means) + self.scalings_ = scalings + self.rotations_ = rotations + return self + + def _decision_function(self, X): + # return log posterior, see eq (4.12) p. 110 of the ESL. + check_is_fitted(self) + + X = validate_data(self, X, reset=False) + norm2 = [] + for i in range(len(self.classes_)): + R = self.rotations_[i] + S = self.scalings_[i] + Xm = X - self.means_[i] + X2 = np.dot(Xm, R * (S ** (-0.5))) + norm2.append(np.sum(X2**2, axis=1)) + norm2 = np.array(norm2).T # shape = [len(X), n_classes] + u = np.asarray([np.sum(np.log(s)) for s in self.scalings_]) + return -0.5 * (norm2 + u) + np.log(self.priors_) + + def decision_function(self, X): + """Apply decision function to an array of samples. + + The decision function is equal (up to a constant factor) to the + log-posterior of the model, i.e. `log p(y = k | x)`. In a binary + classification setting this instead corresponds to the difference + `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples (test vectors). + + Returns + ------- + C : ndarray of shape (n_samples,) or (n_samples, n_classes) + Decision function values related to each class, per sample. + In the two-class case, the shape is `(n_samples,)`, giving the + log likelihood ratio of the positive class. + """ + return super().decision_function(X) + + def predict(self, X): + """Perform classification on an array of test vectors X. + + The predicted class C for each sample in X is returned. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Vector to be scored, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + C : ndarray of shape (n_samples,) + Estimated probabilities. + """ + return super().predict(X) + + def predict_proba(self, X): + """Return posterior probabilities of classification. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples/test vectors. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Posterior probabilities of classification per class. + """ + # compute the likelihood of the underlying gaussian models + # up to a multiplicative constant. + return super().predict_proba(X) + + def predict_log_proba(self, X): + """Return log of posterior probabilities of classification. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples/test vectors. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Posterior log-probabilities of classification per class. 
+ """ + # XXX : can do better to avoid precision overflows + return super().predict_log_proba(X) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/dummy.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/dummy.py new file mode 100644 index 0000000000000000000000000000000000000000..dbcb36c4c002583c5bb104b0d12450317e70ef93 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/dummy.py @@ -0,0 +1,702 @@ +"""Dummy estimators that implement simple rules of thumb.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp + +from .base import ( + BaseEstimator, + ClassifierMixin, + MultiOutputMixin, + RegressorMixin, + _fit_context, +) +from .utils import check_random_state +from .utils._param_validation import Interval, StrOptions +from .utils.multiclass import class_distribution +from .utils.random import _random_choice_csc +from .utils.stats import _weighted_percentile +from .utils.validation import ( + _check_sample_weight, + _num_samples, + check_array, + check_consistent_length, + check_is_fitted, + validate_data, +) + + +class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator): + """DummyClassifier makes predictions that ignore the input features. + + This classifier serves as a simple baseline to compare against other more + complex classifiers. + + The specific behavior of the baseline is selected with the `strategy` + parameter. + + All strategies make predictions that ignore the input feature values passed + as the `X` argument to `fit` and `predict`. The predictions, however, + typically depend on values observed in the `y` parameter passed to `fit`. + + Note that the "stratified" and "uniform" strategies lead to + non-deterministic predictions that can be rendered deterministic by setting + the `random_state` parameter if needed. The other strategies are naturally + deterministic and, once fit, always return the same constant prediction + for any value of `X`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + strategy : {"most_frequent", "prior", "stratified", "uniform", \ + "constant"}, default="prior" + Strategy to use to generate predictions. + + * "most_frequent": the `predict` method always returns the most + frequent class label in the observed `y` argument passed to `fit`. + The `predict_proba` method returns the matching one-hot encoded + vector. + * "prior": the `predict` method always returns the most frequent + class label in the observed `y` argument passed to `fit` (like + "most_frequent"). ``predict_proba`` always returns the empirical + class distribution of `y` also known as the empirical class prior + distribution. + * "stratified": the `predict_proba` method randomly samples one-hot + vectors from a multinomial distribution parametrized by the empirical + class prior probabilities. + The `predict` method returns the class label which got probability + one in the one-hot vector of `predict_proba`. + Each sampled row of both methods is therefore independent and + identically distributed. + * "uniform": generates predictions uniformly at random from the list + of unique classes observed in `y`, i.e. each class has equal + probability. + * "constant": always predicts a constant label that is provided by + the user. This is useful for metrics that evaluate a non-majority + class. + + .. 
versionchanged:: 0.24 + The default value of `strategy` has changed to "prior" in version + 0.24. + + random_state : int, RandomState instance or None, default=None + Controls the randomness to generate the predictions when + ``strategy='stratified'`` or ``strategy='uniform'``. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + constant : int or str or array-like of shape (n_outputs,), default=None + The explicit constant as predicted by the "constant" strategy. This + parameter is useful only for the "constant" strategy. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) or list of such arrays + Unique class labels observed in `y`. For multi-output classification + problems, this attribute is a list of arrays as each output has an + independent set of possible classes. + + n_classes_ : int or list of int + Number of labels for each output. + + class_prior_ : ndarray of shape (n_classes,) or list of such arrays + Frequency of each class observed in `y`. For multioutput classification + problems, this is computed independently for each output. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` has + feature names that are all strings. + + n_outputs_ : int + Number of outputs. + + sparse_output_ : bool + True if the array returned from predict is to be in sparse CSC format. + Is automatically set to True if the input `y` is passed in sparse + format. + + See Also + -------- + DummyRegressor : Regressor that makes predictions using simple rules. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.dummy import DummyClassifier + >>> X = np.array([-1, 1, 1, 1]) + >>> y = np.array([0, 1, 1, 1]) + >>> dummy_clf = DummyClassifier(strategy="most_frequent") + >>> dummy_clf.fit(X, y) + DummyClassifier(strategy='most_frequent') + >>> dummy_clf.predict(X) + array([1, 1, 1, 1]) + >>> dummy_clf.score(X, y) + 0.75 + """ + + _parameter_constraints: dict = { + "strategy": [ + StrOptions({"most_frequent", "prior", "stratified", "uniform", "constant"}) + ], + "random_state": ["random_state"], + "constant": [Integral, str, "array-like", None], + } + + def __init__(self, *, strategy="prior", random_state=None, constant=None): + self.strategy = strategy + self.random_state = random_state + self.constant = constant + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the baseline classifier. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Returns the instance itself. + """ + validate_data(self, X, skip_check_array=True) + + self._strategy = self.strategy + + if self._strategy == "uniform" and sp.issparse(y): + y = y.toarray() + warnings.warn( + ( + "A local copy of the target data has been converted " + "to a numpy array. Predicting on sparse target data " + "with the uniform strategy would not save memory " + "and would be slower."
+ ), + UserWarning, + ) + + self.sparse_output_ = sp.issparse(y) + + if not self.sparse_output_: + y = np.asarray(y) + y = np.atleast_1d(y) + + if y.ndim == 1: + y = np.reshape(y, (-1, 1)) + + self.n_outputs_ = y.shape[1] + + check_consistent_length(X, y) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if self._strategy == "constant": + if self.constant is None: + raise ValueError( + "Constant target value has to be specified " + "when the constant strategy is used." + ) + else: + constant = np.reshape(np.atleast_1d(self.constant), (-1, 1)) + if constant.shape[0] != self.n_outputs_: + raise ValueError( + "Constant target value should have shape (%d, 1)." + % self.n_outputs_ + ) + + (self.classes_, self.n_classes_, self.class_prior_) = class_distribution( + y, sample_weight + ) + + if self._strategy == "constant": + for k in range(self.n_outputs_): + if not any(constant[k][0] == c for c in self.classes_[k]): + # Checking in case of constant strategy if the constant + # provided by the user is in y. + err_msg = ( + "The constant target value must be present in " + "the training data. You provided constant={}. " + "Possible values are: {}.".format( + self.constant, self.classes_[k].tolist() + ) + ) + raise ValueError(err_msg) + + if self.n_outputs_ == 1: + self.n_classes_ = self.n_classes_[0] + self.classes_ = self.classes_[0] + self.class_prior_ = self.class_prior_[0] + + return self + + def predict(self, X): + """Perform classification on test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test data. + + Returns + ------- + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Predicted target values for X. + """ + check_is_fitted(self) + + # numpy random_state expects Python int and not long as size argument + # under Windows + n_samples = _num_samples(X) + rs = check_random_state(self.random_state) + + n_classes_ = self.n_classes_ + classes_ = self.classes_ + class_prior_ = self.class_prior_ + constant = self.constant + if self.n_outputs_ == 1: + # Get same type even for self.n_outputs_ == 1 + n_classes_ = [n_classes_] + classes_ = [classes_] + class_prior_ = [class_prior_] + constant = [constant] + # Compute probability only once + if self._strategy == "stratified": + proba = self.predict_proba(X) + if self.n_outputs_ == 1: + proba = [proba] + + if self.sparse_output_: + class_prob = None + if self._strategy in ("most_frequent", "prior"): + classes_ = [np.array([cp.argmax()]) for cp in class_prior_] + + elif self._strategy == "stratified": + class_prob = class_prior_ + + elif self._strategy == "uniform": + raise ValueError( + "Sparse target prediction is not " + "supported with the uniform strategy" + ) + + elif self._strategy == "constant": + classes_ = [np.array([c]) for c in constant] + + y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state) + else: + if self._strategy in ("most_frequent", "prior"): + y = np.tile( + [ + classes_[k][class_prior_[k].argmax()] + for k in range(self.n_outputs_) + ], + [n_samples, 1], + ) + + elif self._strategy == "stratified": + y = np.vstack( + [ + classes_[k][proba[k].argmax(axis=1)] + for k in range(self.n_outputs_) + ] + ).T + + elif self._strategy == "uniform": + ret = [ + classes_[k][rs.randint(n_classes_[k], size=n_samples)] + for k in range(self.n_outputs_) + ] + y = np.vstack(ret).T + + elif self._strategy == "constant": + y = np.tile(self.constant, (n_samples, 1)) + + if self.n_outputs_ == 1: + y = np.ravel(y) + + return y 
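A short illustrative sketch of the strategy dispatch implemented in `predict` above (the toy arrays are assumptions, not part of the diff):

import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((6, 1))              # features are ignored by every strategy
y = np.array([0, 0, 0, 0, 1, 1])

# "most_frequent" (and "prior") always predict the majority label seen in fit.
assert (DummyClassifier(strategy="most_frequent").fit(X, y).predict(X) == 0).all()

# "stratified" samples labels from the empirical class distribution; a fixed
# random_state makes the draws reproducible.
print(DummyClassifier(strategy="stratified", random_state=0).fit(X, y).predict(X))

# "constant" must name a label present in y, otherwise fit raises ValueError.
assert (DummyClassifier(strategy="constant", constant=1).fit(X, y).predict(X) == 1).all()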
+ + def predict_proba(self, X): + """ + Return probability estimates for the test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test data. + + Returns + ------- + P : ndarray of shape (n_samples, n_classes) or list of such arrays + Returns the probability of the sample for each class in + the model, where classes are ordered arithmetically, for each + output. + """ + check_is_fitted(self) + + # numpy random_state expects Python int and not long as size argument + # under Windows + n_samples = _num_samples(X) + rs = check_random_state(self.random_state) + + n_classes_ = self.n_classes_ + classes_ = self.classes_ + class_prior_ = self.class_prior_ + constant = self.constant + if self.n_outputs_ == 1: + # Get same type even for self.n_outputs_ == 1 + n_classes_ = [n_classes_] + classes_ = [classes_] + class_prior_ = [class_prior_] + constant = [constant] + + P = [] + for k in range(self.n_outputs_): + if self._strategy == "most_frequent": + ind = class_prior_[k].argmax() + out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) + out[:, ind] = 1.0 + elif self._strategy == "prior": + out = np.ones((n_samples, 1)) * class_prior_[k] + + elif self._strategy == "stratified": + out = rs.multinomial(1, class_prior_[k], size=n_samples) + out = out.astype(np.float64) + + elif self._strategy == "uniform": + out = np.ones((n_samples, n_classes_[k]), dtype=np.float64) + out /= n_classes_[k] + + elif self._strategy == "constant": + ind = np.where(classes_[k] == constant[k]) + out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) + out[:, ind] = 1.0 + + P.append(out) + + if self.n_outputs_ == 1: + P = P[0] + + return P + + def predict_log_proba(self, X): + """ + Return log probability estimates for the test vectors X. + + Parameters + ---------- + X : {array-like, object with finite length or shape} + Training data. + + Returns + ------- + P : ndarray of shape (n_samples, n_classes) or list of such arrays + Returns the log probability of the sample for each class in + the model, where classes are ordered arithmetically for each + output. + """ + proba = self.predict_proba(X) + if self.n_outputs_ == 1: + return np.log(proba) + else: + return [np.log(p) for p in proba] + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + tags.classifier_tags.poor_score = True + tags.no_validation = True + return tags + + def score(self, X, y, sample_weight=None): + """Return the mean accuracy on the given test data and labels. + + In multi-label classification, this is the subset accuracy + which is a harsh metric since you require for each sample that + each label set be correctly predicted. + + Parameters + ---------- + X : None or array-like of shape (n_samples, n_features) + Test samples. Passing None as test samples gives the same result + as passing real test samples, since DummyClassifier + operates independently of the sampled observations. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + Mean accuracy of self.predict(X) w.r.t. y. + """ + if X is None: + X = np.zeros(shape=(len(y), 1)) + return super().score(X, y, sample_weight) + + +class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator): + """Regressor that makes predictions using simple rules. + + This regressor is useful as a simple baseline to compare with other + (real) regressors. 
Do not use it for real problems. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + strategy : {"mean", "median", "quantile", "constant"}, default="mean" + Strategy to use to generate predictions. + + * "mean": always predicts the mean of the training set + * "median": always predicts the median of the training set + * "quantile": always predicts a specified quantile of the training set, + provided with the quantile parameter. + * "constant": always predicts a constant value that is provided by + the user. + + constant : int or float or array-like of shape (n_outputs,), default=None + The explicit constant as predicted by the "constant" strategy. This + parameter is useful only for the "constant" strategy. + + quantile : float in [0.0, 1.0], default=None + The quantile to predict using the "quantile" strategy. A quantile of + 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the + maximum. + + Attributes + ---------- + constant_ : ndarray of shape (1, n_outputs) + Mean or median or quantile of the training targets or constant value + given by the user. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` has + feature names that are all strings. + + n_outputs_ : int + Number of outputs. + + See Also + -------- + DummyClassifier: Classifier that makes predictions using simple rules. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.dummy import DummyRegressor + >>> X = np.array([1.0, 2.0, 3.0, 4.0]) + >>> y = np.array([2.0, 3.0, 5.0, 10.0]) + >>> dummy_regr = DummyRegressor(strategy="mean") + >>> dummy_regr.fit(X, y) + DummyRegressor() + >>> dummy_regr.predict(X) + array([5., 5., 5., 5.]) + >>> dummy_regr.score(X, y) + 0.0 + """ + + _parameter_constraints: dict = { + "strategy": [StrOptions({"mean", "median", "quantile", "constant"})], + "quantile": [Interval(Real, 0.0, 1.0, closed="both"), None], + "constant": [ + Interval(Real, None, None, closed="neither"), + "array-like", + None, + ], + } + + def __init__(self, *, strategy="mean", constant=None, quantile=None): + self.strategy = strategy + self.constant = constant + self.quantile = quantile + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the baseline regressor. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Fitted estimator. 
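Every strategy reduces to a single stored constant; a small illustrative sketch (toy data assumed, not part of the diff):

import numpy as np
from sklearn.dummy import DummyRegressor

X = np.zeros((5, 1))                        # ignored at predict time
y = np.array([1.0, 2.0, 3.0, 4.0, 100.0])

assert DummyRegressor(strategy="mean").fit(X, y).predict(X)[0] == 22.0
assert DummyRegressor(strategy="median").fit(X, y).predict(X)[0] == 3.0
# quantile=1.0 is the maximum; quantile=0.5 would reproduce the median.
assert DummyRegressor(strategy="quantile", quantile=1.0).fit(X, y).predict(X)[0] == 100.0
assert DummyRegressor(strategy="constant", constant=7.5).fit(X, y).predict(X)[0] == 7.5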
+ """ + validate_data(self, X, skip_check_array=True) + + y = check_array(y, ensure_2d=False, input_name="y") + if len(y) == 0: + raise ValueError("y must not be empty.") + + if y.ndim == 1: + y = np.reshape(y, (-1, 1)) + self.n_outputs_ = y.shape[1] + + check_consistent_length(X, y, sample_weight) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if self.strategy == "mean": + self.constant_ = np.average(y, axis=0, weights=sample_weight) + + elif self.strategy == "median": + if sample_weight is None: + self.constant_ = np.median(y, axis=0) + else: + self.constant_ = [ + _weighted_percentile(y[:, k], sample_weight, percentile=50.0) + for k in range(self.n_outputs_) + ] + + elif self.strategy == "quantile": + if self.quantile is None: + raise ValueError( + "When using `strategy='quantile', you have to specify the desired " + "quantile in the range [0, 1]." + ) + percentile = self.quantile * 100.0 + if sample_weight is None: + self.constant_ = np.percentile(y, axis=0, q=percentile) + else: + self.constant_ = [ + _weighted_percentile(y[:, k], sample_weight, percentile=percentile) + for k in range(self.n_outputs_) + ] + + elif self.strategy == "constant": + if self.constant is None: + raise TypeError( + "Constant target value has to be specified " + "when the constant strategy is used." + ) + + self.constant_ = check_array( + self.constant, + accept_sparse=["csr", "csc", "coo"], + ensure_2d=False, + ensure_min_samples=0, + ) + + if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]: + raise ValueError( + "Constant target value should have shape (%d, 1)." % y.shape[1] + ) + + self.constant_ = np.reshape(self.constant_, (1, -1)) + return self + + def predict(self, X, return_std=False): + """Perform classification on test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test data. + + return_std : bool, default=False + Whether to return the standard deviation of posterior prediction. + All zeros in this case. + + .. versionadded:: 0.20 + + Returns + ------- + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Predicted target values for X. + + y_std : array-like of shape (n_samples,) or (n_samples, n_outputs) + Standard deviation of predictive distribution of query points. + """ + check_is_fitted(self) + n_samples = _num_samples(X) + + y = np.full( + (n_samples, self.n_outputs_), + self.constant_, + dtype=np.array(self.constant_).dtype, + ) + y_std = np.zeros((n_samples, self.n_outputs_)) + + if self.n_outputs_ == 1: + y = np.ravel(y) + y_std = np.ravel(y_std) + + return (y, y_std) if return_std else y + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + tags.regressor_tags.poor_score = True + tags.no_validation = True + return tags + + def score(self, X, y, sample_weight=None): + """Return the coefficient of determination R^2 of the prediction. + + The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the + residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the + total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best + possible score is 1.0 and it can be negative (because the model can be + arbitrarily worse). A constant model that always predicts the expected + value of y, disregarding the input features, would get a R^2 score of + 0.0. + + Parameters + ---------- + X : None or array-like of shape (n_samples, n_features) + Test samples. 
Passing None as test samples gives the same result + as passing real test samples, since `DummyRegressor` + operates independently of the sampled observations. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True values for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + R^2 of `self.predict(X)` w.r.t. y. + """ + if X is None: + X = np.zeros(shape=(len(y), 1)) + return super().score(X, y, sample_weight) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/exceptions.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..1c9162dc760f9e0e6975e4bfd33c7bc3da8b5373 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/exceptions.py @@ -0,0 +1,249 @@ +"""Custom warnings and errors used across scikit-learn.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "NotFittedError", + "ConvergenceWarning", + "DataConversionWarning", + "DataDimensionalityWarning", + "EfficiencyWarning", + "FitFailedWarning", + "SkipTestWarning", + "UndefinedMetricWarning", + "PositiveSpectrumWarning", + "UnsetMetadataPassedError", + "EstimatorCheckFailedWarning", +] + + +class UnsetMetadataPassedError(ValueError): + """Exception class to raise if a metadata is passed which is not explicitly \ + requested (metadata=True) or not requested (metadata=False). + + .. versionadded:: 1.3 + + Parameters + ---------- + message : str + The message + + unrequested_params : dict + A dictionary of parameters and their values which are provided but not + requested. + + routed_params : dict + A dictionary of routed parameters. + """ + + def __init__(self, *, message, unrequested_params, routed_params): + super().__init__(message) + self.unrequested_params = unrequested_params + self.routed_params = routed_params + + +class NotFittedError(ValueError, AttributeError): + """Exception class to raise if estimator is used before fitting. + + This class inherits from both ValueError and AttributeError to help with + exception handling and backward compatibility. + + Examples + -------- + >>> from sklearn.svm import LinearSVC + >>> from sklearn.exceptions import NotFittedError + >>> try: + ... LinearSVC().predict([[1, 2], [2, 3], [3, 4]]) + ... except NotFittedError as e: + ... print(repr(e)) + NotFittedError("This LinearSVC instance is not fitted yet. Call 'fit' with + appropriate arguments before using this estimator."...) + + .. versionchanged:: 0.18 + Moved from sklearn.utils.validation. + """ + + +class ConvergenceWarning(UserWarning): + """Custom warning to capture convergence problems + + .. versionchanged:: 0.18 + Moved from sklearn.utils. + """ + + +class DataConversionWarning(UserWarning): + """Warning used to notify implicit data conversions happening in the code. + + This warning occurs when some input data needs to be converted or + interpreted in a way that may not match the user's expectations. + + For example, this warning may occur when the user + - passes an integer array to a function which expects float input and + will convert the input + - requests a non-copying operation, but a copy is required to meet the + implementation's data-type expectations; + - passes an input whose shape can be interpreted ambiguously. + + .. versionchanged:: 0.18 + Moved from sklearn.utils.validation. 
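Because `NotFittedError` subclasses both `ValueError` and `AttributeError`, pre-existing exception handlers keep working; a small illustrative sketch:

from sklearn.exceptions import NotFittedError
from sklearn.svm import LinearSVC

try:
    LinearSVC().predict([[1, 2], [2, 3]])
except NotFittedError as exc:
    print(f"caught: {exc}")
# The same call would also be caught by a legacy
# `except (ValueError, AttributeError):` clause, thanks to the dual inheritance.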
+ """ + + +class DataDimensionalityWarning(UserWarning): + """Custom warning to notify potential issues with data dimensionality. + + For example, in random projection, this warning is raised when the + number of components, which quantifies the dimensionality of the target + projection space, is higher than the number of features, which quantifies + the dimensionality of the original source space, to imply that the + dimensionality of the problem will not be reduced. + + .. versionchanged:: 0.18 + Moved from sklearn.utils. + """ + + +class EfficiencyWarning(UserWarning): + """Warning used to notify the user of inefficient computation. + + This warning notifies the user that the efficiency may not be optimal due + to some reason which may be included as a part of the warning message. + This may be subclassed into a more specific Warning class. + + .. versionadded:: 0.18 + """ + + +class FitFailedWarning(RuntimeWarning): + """Warning class used if there is an error while fitting the estimator. + + This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV + and the cross-validation helper function cross_val_score to warn when there + is an error while fitting the estimator. + + .. versionchanged:: 0.18 + Moved from sklearn.cross_validation. + """ + + +class SkipTestWarning(UserWarning): + """Warning class used to notify the user of a test that was skipped. + + For example, one of the estimator checks requires a pandas import. + If the pandas package cannot be imported, the test will be skipped rather + than register as a failure. + """ + + +class UndefinedMetricWarning(UserWarning): + """Warning used when the metric is invalid + + .. versionchanged:: 0.18 + Moved from sklearn.base. + """ + + +class PositiveSpectrumWarning(UserWarning): + """Warning raised when the eigenvalues of a PSD matrix have issues + + This warning is typically raised by ``_check_psd_eigenvalues`` when the + eigenvalues of a positive semidefinite (PSD) matrix such as a gram matrix + (kernel) present significant negative eigenvalues, or bad conditioning i.e. + very small non-zero eigenvalues compared to the largest eigenvalue. + + .. versionadded:: 0.22 + """ + + +class InconsistentVersionWarning(UserWarning): + """Warning raised when an estimator is unpickled with a inconsistent version. + + Parameters + ---------- + estimator_name : str + Estimator name. + + current_sklearn_version : str + Current scikit-learn version. + + original_sklearn_version : str + Original scikit-learn version. + """ + + def __init__( + self, *, estimator_name, current_sklearn_version, original_sklearn_version + ): + self.estimator_name = estimator_name + self.current_sklearn_version = current_sklearn_version + self.original_sklearn_version = original_sklearn_version + + def __str__(self): + return ( + f"Trying to unpickle estimator {self.estimator_name} from version" + f" {self.original_sklearn_version} when " + f"using version {self.current_sklearn_version}. This might lead to breaking" + " code or " + "invalid results. Use at your own risk. " + "For more info please refer to:\n" + "https://scikit-learn.org/stable/model_persistence.html" + "#security-maintainability-limitations" + ) + + +class EstimatorCheckFailedWarning(UserWarning): + """Warning raised when an estimator check from the common tests fails. + + Parameters + ---------- + estimator : estimator object + Estimator instance for which the test failed. + + check_name : str + Name of the check that failed. 
+ + exception : Exception + Exception raised by the failed check. + + status : str + Status of the check. + + expected_to_fail : bool + Whether the check was expected to fail. + + expected_to_fail_reason : str + Reason for the expected failure. + """ + + def __init__( + self, + *, + estimator, + check_name: str, + exception: Exception, + status: str, + expected_to_fail: bool, + expected_to_fail_reason: str, + ): + self.estimator = estimator + self.check_name = check_name + self.exception = exception + self.status = status + self.expected_to_fail = expected_to_fail + self.expected_to_fail_reason = expected_to_fail_reason + + def __repr__(self): + expected_to_fail_str = ( + f"Expected to fail: {self.expected_to_fail_reason}" + if self.expected_to_fail + else "Not expected to fail" + ) + return ( + f"Test {self.check_name} failed for estimator {self.estimator!r}.\n" + f"Expected to fail reason: {expected_to_fail_str}\n" + f"Exception: {self.exception}" + ) + + def __str__(self): + return self.__repr__() diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ca86d86bee68ee6ac74eabbef55182d0a0ee893 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/__init__.py @@ -0,0 +1,18 @@ +"""Feature extraction from raw data.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from . import image, text +from ._dict_vectorizer import DictVectorizer +from ._hash import FeatureHasher +from .image import grid_to_graph, img_to_graph + +__all__ = [ + "DictVectorizer", + "image", + "img_to_graph", + "grid_to_graph", + "text", + "FeatureHasher", +] diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd494b82ca51907b5ba4794b8e9675f917c9b094 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py new file mode 100644 index 0000000000000000000000000000000000000000..a754b9282458537d423ec6648a4c1df3761bf0d9 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_dict_vectorizer.py @@ -0,0 +1,459 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from array import array +from collections.abc import Iterable, Mapping +from numbers import Number +from operator import itemgetter + +import numpy as np +import scipy.sparse as sp + +from sklearn.utils import metadata_routing + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import check_array +from ..utils.validation import check_is_fitted + + +class DictVectorizer(TransformerMixin, BaseEstimator): + """Transforms lists of feature-value mappings to vectors. + + This transformer turns lists of mappings (dict-like objects) of feature + names to feature values into Numpy arrays or scipy.sparse matrices for use + with scikit-learn estimators. 
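A compact, illustrative sketch of the encodings described below (string values become one-hot features, numbers pass through; the toy dicts are assumptions):

from sklearn.feature_extraction import DictVectorizer

v = DictVectorizer(sparse=False)
D = [{"f": "ham", "temp": 20.0}, {"f": "spam", "temp": 25.0}]
X = v.fit_transform(D)
print(v.get_feature_names_out())  # ['f=ham' 'f=spam' 'temp']
print(X)                          # [[ 1.  0. 20.]
                                  #  [ 0.  1. 25.]]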
+ + When feature values are strings, this transformer will do a binary one-hot + (aka one-of-K) coding: one boolean-valued feature is constructed for each + of the possible string values that the feature can take on. For instance, + a feature "f" that can take on the values "ham" and "spam" will become two + features in the output, one signifying "f=ham", the other "f=spam". + + If a feature value is a sequence or set of strings, this transformer + will iterate over the values and will count the occurrences of each string + value. + + However, note that this transformer will only do a binary one-hot encoding + when feature values are of type string. If categorical features are + represented as numeric values such as int or iterables of strings, the + DictVectorizer can be followed by + :class:`~sklearn.preprocessing.OneHotEncoder` to complete + binary one-hot encoding. + + Features that do not occur in a sample (mapping) will have a zero value + in the resulting array/matrix. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + dtype : dtype, default=np.float64 + The type of feature values. Passed to Numpy array/scipy.sparse matrix + constructors as the dtype argument. + separator : str, default="=" + Separator string used when constructing new features for one-hot + coding. + sparse : bool, default=True + Whether transform should produce scipy.sparse matrices. + sort : bool, default=True + Whether ``feature_names_`` and ``vocabulary_`` should be + sorted when fitting. + + Attributes + ---------- + vocabulary_ : dict + A dictionary mapping feature names to feature indices. + + feature_names_ : list + A list of length n_features containing the feature names (e.g., "f=ham" + and "f=spam"). + + See Also + -------- + FeatureHasher : Performs vectorization using only a hash function. + sklearn.preprocessing.OrdinalEncoder : Handles nominal/categorical + features encoded as columns of arbitrary data types. + + Examples + -------- + >>> from sklearn.feature_extraction import DictVectorizer + >>> v = DictVectorizer(sparse=False) + >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] + >>> X = v.fit_transform(D) + >>> X + array([[2., 0., 1.], + [0., 1., 3.]]) + >>> v.inverse_transform(X) == [{'bar': 2.0, 'foo': 1.0}, + ... {'baz': 1.0, 'foo': 3.0}] + True + >>> v.transform({'foo': 4, 'unseen_feature': 3}) + array([[0., 0., 4.]]) + """ + + # This isn't something that people should be routing / using in a pipeline. + __metadata_request__inverse_transform = {"dict_type": metadata_routing.UNUSED} + + _parameter_constraints: dict = { + "dtype": "no_validation", # validation delegated to numpy, + "separator": [str], + "sparse": ["boolean"], + "sort": ["boolean"], + } + + def __init__(self, *, dtype=np.float64, separator="=", sparse=True, sort=True): + self.dtype = dtype + self.separator = separator + self.sparse = sparse + self.sort = sort + + def _add_iterable_element( + self, + f, + v, + feature_names, + vocab, + *, + fitting=True, + transforming=False, + indices=None, + values=None, + ): + """Add feature names for iterable of strings""" + for vv in v: + if isinstance(vv, str): + feature_name = "%s%s%s" % (f, self.separator, vv) + vv = 1 + else: + raise TypeError( + f"Unsupported type {type(vv)} in iterable " + "value. Only iterables of string are " + "supported." 
+ ) + if fitting and feature_name not in vocab: + vocab[feature_name] = len(feature_names) + feature_names.append(feature_name) + + if transforming and feature_name in vocab: + indices.append(vocab[feature_name]) + values.append(self.dtype(vv)) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Learn a list of feature name -> indices mappings. + + Parameters + ---------- + X : Mapping or iterable over Mappings + Dict(s) or Mapping(s) from feature names (arbitrary Python + objects) to feature values (strings or convertible to dtype). + + .. versionchanged:: 0.24 + Accepts multiple string values for one categorical feature. + + y : (ignored) + Ignored parameter. + + Returns + ------- + self : object + DictVectorizer class instance. + """ + feature_names = [] + vocab = {} + + for x in X: + for f, v in x.items(): + if isinstance(v, str): + feature_name = "%s%s%s" % (f, self.separator, v) + elif isinstance(v, Number) or (v is None): + feature_name = f + elif isinstance(v, Mapping): + raise TypeError( + f"Unsupported value type {type(v)} " + f"for {f}: {v}.\n" + "Mapping objects are not supported." + ) + elif isinstance(v, Iterable): + feature_name = None + self._add_iterable_element(f, v, feature_names, vocab) + + if feature_name is not None: + if feature_name not in vocab: + vocab[feature_name] = len(feature_names) + feature_names.append(feature_name) + + if self.sort: + feature_names.sort() + vocab = {f: i for i, f in enumerate(feature_names)} + + self.feature_names_ = feature_names + self.vocabulary_ = vocab + + return self + + def _transform(self, X, fitting): + # Sanity check: Python's array has no way of explicitly requesting the + # signed 32-bit integers that scipy.sparse needs, so we use the next + # best thing: typecode "i" (int). However, if that gives larger or + # smaller integers than 32-bit ones, np.frombuffer screws up. + assert array("i").itemsize == 4, ( + "sizeof(int) != 4 on your platform; please report this at" + " https://github.com/scikit-learn/scikit-learn/issues and" + " include the output from platform.platform() in your bug report" + ) + + dtype = self.dtype + if fitting: + feature_names = [] + vocab = {} + else: + feature_names = self.feature_names_ + vocab = self.vocabulary_ + + transforming = True + + # Process everything as sparse regardless of setting + X = [X] if isinstance(X, Mapping) else X + + indices = array("i") + indptr = [0] + # XXX we could change values to an array.array as well, but it + # would require (heuristic) conversion of dtype to typecode... + values = [] + + # collect all the possible feature names and build sparse matrix at + # same time + for x in X: + for f, v in x.items(): + if isinstance(v, str): + feature_name = "%s%s%s" % (f, self.separator, v) + v = 1 + elif isinstance(v, Number) or (v is None): + feature_name = f + elif not isinstance(v, Mapping) and isinstance(v, Iterable): + feature_name = None + self._add_iterable_element( + f, + v, + feature_names, + vocab, + fitting=fitting, + transforming=transforming, + indices=indices, + values=values, + ) + else: + raise TypeError( + f"Unsupported value Type {type(v)} " + f"for {f}: {v}.\n" + f"{type(v)} objects are not supported." 
+ ) + + if feature_name is not None: + if fitting and feature_name not in vocab: + vocab[feature_name] = len(feature_names) + feature_names.append(feature_name) + + if feature_name in vocab: + indices.append(vocab[feature_name]) + values.append(self.dtype(v)) + + indptr.append(len(indices)) + + if len(indptr) == 1: + raise ValueError("Sample sequence X is empty.") + + indices = np.frombuffer(indices, dtype=np.intc) + shape = (len(indptr) - 1, len(vocab)) + + result_matrix = sp.csr_matrix( + (values, indices, indptr), shape=shape, dtype=dtype + ) + + # Sort everything if asked + if fitting and self.sort: + feature_names.sort() + map_index = np.empty(len(feature_names), dtype=np.int32) + for new_val, f in enumerate(feature_names): + map_index[new_val] = vocab[f] + vocab[f] = new_val + result_matrix = result_matrix[:, map_index] + + if self.sparse: + result_matrix.sort_indices() + else: + result_matrix = result_matrix.toarray() + + if fitting: + self.feature_names_ = feature_names + self.vocabulary_ = vocab + + return result_matrix + + @_fit_context(prefer_skip_nested_validation=True) + def fit_transform(self, X, y=None): + """Learn a list of feature name -> indices mappings and transform X. + + Like fit(X) followed by transform(X), but does not require + materializing X in memory. + + Parameters + ---------- + X : Mapping or iterable over Mappings + Dict(s) or Mapping(s) from feature names (arbitrary Python + objects) to feature values (strings or convertible to dtype). + + .. versionchanged:: 0.24 + Accepts multiple string values for one categorical feature. + + y : (ignored) + Ignored parameter. + + Returns + ------- + Xa : {array, sparse matrix} + Feature vectors; always 2-d. + """ + return self._transform(X, fitting=True) + + def inverse_transform(self, X, dict_type=dict): + """Transform array or sparse matrix X back to feature mappings. + + X must have been produced by this DictVectorizer's transform or + fit_transform method; it may only have passed through transformers + that preserve the number of features and their order. + + In the case of one-hot/one-of-K coding, the constructed feature + names and values are returned rather than the original ones. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Sample matrix. + dict_type : type, default=dict + Constructor for feature mappings. Must conform to the + collections.Mapping API. + + Returns + ------- + D : list of dict_type objects of shape (n_samples,) + Feature mappings for the samples in X. + """ + check_is_fitted(self, "feature_names_") + + # COO matrix is not subscriptable + X = check_array(X, accept_sparse=["csr", "csc"]) + n_samples = X.shape[0] + + names = self.feature_names_ + dicts = [dict_type() for _ in range(n_samples)] + + if sp.issparse(X): + for i, j in zip(*X.nonzero()): + dicts[i][names[j]] = X[i, j] + else: + for i, d in enumerate(dicts): + for j, v in enumerate(X[i, :]): + if v != 0: + d[names[j]] = X[i, j] + + return dicts + + def transform(self, X): + """Transform feature->value dicts to array or sparse matrix. + + Named features not encountered during fit or fit_transform will be + silently ignored. + + Parameters + ---------- + X : Mapping or iterable over Mappings of shape (n_samples,) + Dict(s) or Mapping(s) from feature names (arbitrary Python + objects) to feature values (strings or convertible to dtype). + + Returns + ------- + Xa : {array, sparse matrix} + Feature vectors; always 2-d. 
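An illustrative round trip through `transform`/`inverse_transform` (toy dicts assumed; zero-valued entries are dropped on the way back):

from sklearn.feature_extraction import DictVectorizer

v = DictVectorizer(sparse=False)
X = v.fit_transform([{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}])
assert v.inverse_transform(X) == [{"bar": 2.0, "foo": 1.0}, {"baz": 1.0, "foo": 3.0}]
# Feature names unseen during fit are silently ignored by transform:
print(v.transform([{"foo": 4, "unseen": 9}]))  # [[0. 0. 4.]]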
+ """ + check_is_fitted(self, ["feature_names_", "vocabulary_"]) + return self._transform(X, fitting=False) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Not used, present here for API consistency by convention. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self, "feature_names_") + if any(not isinstance(name, str) for name in self.feature_names_): + feature_names = [str(name) for name in self.feature_names_] + else: + feature_names = self.feature_names_ + return np.asarray(feature_names, dtype=object) + + def restrict(self, support, indices=False): + """Restrict the features to those in support using feature selection. + + This function modifies the estimator in-place. + + Parameters + ---------- + support : array-like + Boolean mask or list of indices (as returned by the get_support + member of feature selectors). + indices : bool, default=False + Whether support is a list of indices. + + Returns + ------- + self : object + DictVectorizer class instance. + + Examples + -------- + >>> from sklearn.feature_extraction import DictVectorizer + >>> from sklearn.feature_selection import SelectKBest, chi2 + >>> v = DictVectorizer() + >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] + >>> X = v.fit_transform(D) + >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1]) + >>> v.get_feature_names_out() + array(['bar', 'baz', 'foo'], ...) + >>> v.restrict(support.get_support()) + DictVectorizer() + >>> v.get_feature_names_out() + array(['bar', 'foo'], ...) + """ + check_is_fitted(self, "feature_names_") + + if not indices: + support = np.where(support)[0] + + names = self.feature_names_ + new_vocab = {} + for i in support: + new_vocab[names[i]] = len(new_vocab) + + self.vocabulary_ = new_vocab + self.feature_names_ = [ + f for f, i in sorted(new_vocab.items(), key=itemgetter(1)) + ] + + return self + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.dict = True + tags.input_tags.two_d_array = False + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py new file mode 100644 index 0000000000000000000000000000000000000000..ac0bed3110c4e9089b5ba543729b4cbb02078754 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_hash.py @@ -0,0 +1,208 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from itertools import chain +from numbers import Integral + +import numpy as np +import scipy.sparse as sp + +from sklearn.utils import metadata_routing + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils._param_validation import Interval, StrOptions +from ._hashing_fast import transform as _hashing_transform + + +def _iteritems(d): + """Like d.iteritems, but accepts any collections.Mapping.""" + return d.iteritems() if hasattr(d, "iteritems") else d.items() + + +class FeatureHasher(TransformerMixin, BaseEstimator): + """Implements feature hashing, aka the hashing trick. + + This class turns sequences of symbolic feature names (strings) into + scipy.sparse matrices, using a hash function to compute the matrix column + corresponding to a name. The hash function employed is the signed 32-bit + version of Murmurhash3. 
+ + Feature names of type byte string are used as-is. Unicode strings are + converted to UTF-8 first, but no Unicode normalization is done. + Feature values must be (finite) numbers. + + This class is a low-memory alternative to DictVectorizer and + CountVectorizer, intended for large-scale (online) learning and situations + where memory is tight, e.g. when running prediction code on embedded + devices. + + For an efficiency comparison of the different feature extractors, see + :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + n_features : int, default=2**20 + The number of features (columns) in the output matrices. Small numbers + of features are likely to cause hash collisions, but large numbers + will cause larger coefficient dimensions in linear learners. + input_type : str, default='dict' + Choose a string from {'dict', 'pair', 'string'}. + Either "dict" (the default) to accept dictionaries over + (feature_name, value); "pair" to accept pairs of (feature_name, value); + or "string" to accept single strings. + feature_name should be a string, while value should be a number. + In the case of "string", a value of 1 is implied. + The feature_name is hashed to find the appropriate column for the + feature. The value's sign might be flipped in the output (but see + non_negative, below). + dtype : numpy dtype, default=np.float64 + The type of feature values. Passed to scipy.sparse matrix constructors + as the dtype argument. Do not set this to bool, np.boolean or any + unsigned integer type. + alternate_sign : bool, default=True + When True, an alternating sign is added to the features as to + approximately conserve the inner product in the hashed space even for + small n_features. This approach is similar to sparse random projection. + + .. versionchanged:: 0.19 + ``alternate_sign`` replaces the now deprecated ``non_negative`` + parameter. + + See Also + -------- + DictVectorizer : Vectorizes string-valued features using a hash table. + sklearn.preprocessing.OneHotEncoder : Handles nominal/categorical features. + + Notes + ----- + This estimator is :term:`stateless` and does not need to be fitted. + However, we recommend to call :meth:`fit_transform` instead of + :meth:`transform`, as parameter validation is only performed in + :meth:`fit`. 
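An illustrative check of the statelessness noted above: `fit` learns nothing, so `fit_transform` and a bare `transform` on a fresh instance agree (toy dicts assumed).

import numpy as np
from sklearn.feature_extraction import FeatureHasher

raw_X = [{"a": 1.0, "b": 2.0}, {"a": 3.0}]
X1 = FeatureHasher(n_features=8).fit_transform(raw_X)
X2 = FeatureHasher(n_features=8).transform(raw_X)
assert np.allclose(X1.toarray(), X2.toarray())
# Parameter validation happens in fit/fit_transform; e.g. n_features=0 is
# rejected there by the Interval constraint declared above.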
+ + Examples + -------- + >>> from sklearn.feature_extraction import FeatureHasher + >>> h = FeatureHasher(n_features=10) + >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}] + >>> f = h.transform(D) + >>> f.toarray() + array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.], + [ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]]) + + With `input_type="string"`, the input must be an iterable over iterables of + strings: + + >>> h = FeatureHasher(n_features=8, input_type="string") + >>> raw_X = [["dog", "cat", "snake"], ["snake", "dog"], ["cat", "bird"]] + >>> f = h.transform(raw_X) + >>> f.toarray() + array([[ 0., 0., 0., -1., 0., -1., 0., 1.], + [ 0., 0., 0., -1., 0., -1., 0., 0.], + [ 0., -1., 0., 0., 0., 0., 0., 1.]]) + """ + + # raw_X should have been called X + __metadata_request__transform = {"raw_X": metadata_routing.UNUSED} + + _parameter_constraints: dict = { + "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="both")], + "input_type": [StrOptions({"dict", "pair", "string"})], + "dtype": "no_validation", # delegate to numpy + "alternate_sign": ["boolean"], + } + + def __init__( + self, + n_features=(2**20), + *, + input_type="dict", + dtype=np.float64, + alternate_sign=True, + ): + self.dtype = dtype + self.input_type = input_type + self.n_features = n_features + self.alternate_sign = alternate_sign + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X=None, y=None): + """Only validates estimator's parameters. + + This method allows one to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : Ignored + Not used, present here for API consistency by convention. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + FeatureHasher class instance. + """ + return self + + def transform(self, raw_X): + """Transform a sequence of instances to a scipy.sparse matrix. + + Parameters + ---------- + raw_X : iterable over iterable over raw features, length = n_samples + Samples. Each sample must be an iterable (e.g., a list or tuple) + containing/generating feature names (and optionally values, see + the input_type constructor argument) which will be hashed. + raw_X need not support the len function, so it can be the result + of a generator; n_samples is determined on the fly. + + Returns + ------- + X : sparse matrix of shape (n_samples, n_features) + Feature matrix, for use with estimators or further transformers. + """ + raw_X = iter(raw_X) + if self.input_type == "dict": + raw_X = (_iteritems(d) for d in raw_X) + elif self.input_type == "string": + first_raw_X = next(raw_X) + if isinstance(first_raw_X, str): + raise ValueError( + "Samples can not be a single string. The input must be an iterable" + " over iterables of strings."
+ ) + raw_X_ = chain([first_raw_X], raw_X) + raw_X = (((f, 1) for f in x) for x in raw_X_) + + indices, indptr, values = _hashing_transform( + raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0 + ) + n_samples = indptr.shape[0] - 1 + + if n_samples == 0: + raise ValueError("Cannot vectorize empty sequence.") + + X = sp.csr_matrix( + (values, indices, indptr), + dtype=self.dtype, + shape=(n_samples, self.n_features), + ) + X.sum_duplicates() # also sorts the indices + + return X + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.two_d_array = False + if self.input_type == "string": + tags.input_tags.string = True + elif self.input_type == "dict": + tags.input_tags.dict = True + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py new file mode 100644 index 0000000000000000000000000000000000000000..6bc8e6d2f37dc06cf834cb42b363594901a86d1f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/_stop_words.py @@ -0,0 +1,328 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +# This list of English stop words is taken from the "Glasgow Information +# Retrieval Group". The original list can be found at +# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words +ENGLISH_STOP_WORDS = frozenset( + [ + "a", + "about", + "above", + "across", + "after", + "afterwards", + "again", + "against", + "all", + "almost", + "alone", + "along", + "already", + "also", + "although", + "always", + "am", + "among", + "amongst", + "amoungst", + "amount", + "an", + "and", + "another", + "any", + "anyhow", + "anyone", + "anything", + "anyway", + "anywhere", + "are", + "around", + "as", + "at", + "back", + "be", + "became", + "because", + "become", + "becomes", + "becoming", + "been", + "before", + "beforehand", + "behind", + "being", + "below", + "beside", + "besides", + "between", + "beyond", + "bill", + "both", + "bottom", + "but", + "by", + "call", + "can", + "cannot", + "cant", + "co", + "con", + "could", + "couldnt", + "cry", + "de", + "describe", + "detail", + "do", + "done", + "down", + "due", + "during", + "each", + "eg", + "eight", + "either", + "eleven", + "else", + "elsewhere", + "empty", + "enough", + "etc", + "even", + "ever", + "every", + "everyone", + "everything", + "everywhere", + "except", + "few", + "fifteen", + "fifty", + "fill", + "find", + "fire", + "first", + "five", + "for", + "former", + "formerly", + "forty", + "found", + "four", + "from", + "front", + "full", + "further", + "get", + "give", + "go", + "had", + "has", + "hasnt", + "have", + "he", + "hence", + "her", + "here", + "hereafter", + "hereby", + "herein", + "hereupon", + "hers", + "herself", + "him", + "himself", + "his", + "how", + "however", + "hundred", + "i", + "ie", + "if", + "in", + "inc", + "indeed", + "interest", + "into", + "is", + "it", + "its", + "itself", + "keep", + "last", + "latter", + "latterly", + "least", + "less", + "ltd", + "made", + "many", + "may", + "me", + "meanwhile", + "might", + "mill", + "mine", + "more", + "moreover", + "most", + "mostly", + "move", + "much", + "must", + "my", + "myself", + "name", + "namely", + "neither", + "never", + "nevertheless", + "next", + "nine", + "no", + "nobody", + "none", + "noone", + "nor", + "not", + "nothing", + "now", + "nowhere", + "of", + "off", + "often", + "on", + "once", + "one", + "only", + "onto", + 
"or", + "other", + "others", + "otherwise", + "our", + "ours", + "ourselves", + "out", + "over", + "own", + "part", + "per", + "perhaps", + "please", + "put", + "rather", + "re", + "same", + "see", + "seem", + "seemed", + "seeming", + "seems", + "serious", + "several", + "she", + "should", + "show", + "side", + "since", + "sincere", + "six", + "sixty", + "so", + "some", + "somehow", + "someone", + "something", + "sometime", + "sometimes", + "somewhere", + "still", + "such", + "system", + "take", + "ten", + "than", + "that", + "the", + "their", + "them", + "themselves", + "then", + "thence", + "there", + "thereafter", + "thereby", + "therefore", + "therein", + "thereupon", + "these", + "they", + "thick", + "thin", + "third", + "this", + "those", + "though", + "three", + "through", + "throughout", + "thru", + "thus", + "to", + "together", + "too", + "top", + "toward", + "towards", + "twelve", + "twenty", + "two", + "un", + "under", + "until", + "up", + "upon", + "us", + "very", + "via", + "was", + "we", + "well", + "were", + "what", + "whatever", + "when", + "whence", + "whenever", + "where", + "whereafter", + "whereas", + "whereby", + "wherein", + "whereupon", + "wherever", + "whether", + "which", + "while", + "whither", + "who", + "whoever", + "whole", + "whom", + "whose", + "why", + "will", + "with", + "within", + "without", + "would", + "yet", + "you", + "your", + "yours", + "yourself", + "yourselves", + ] +) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/image.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/image.py new file mode 100644 index 0000000000000000000000000000000000000000..ae7325d52822489fc11d577e5338eb8b4fc1ea83 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/image.py @@ -0,0 +1,685 @@ +"""Utilities to extract features from images.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from itertools import product +from numbers import Integral, Number, Real + +import numpy as np +from numpy.lib.stride_tricks import as_strided +from scipy import sparse + +from ..base import BaseEstimator, TransformerMixin, _fit_context +from ..utils import check_array, check_random_state +from ..utils._param_validation import Hidden, Interval, RealNotInt, validate_params + +__all__ = [ + "PatchExtractor", + "extract_patches_2d", + "grid_to_graph", + "img_to_graph", + "reconstruct_from_patches_2d", +] + +from ..utils.validation import validate_data + +############################################################################### +# From an image to a graph + + +def _make_edges_3d(n_x, n_y, n_z=1): + """Returns a list of edges for a 3D image. + + Parameters + ---------- + n_x : int + The size of the grid in the x direction. + n_y : int + The size of the grid in the y direction. 
+ n_z : integer, default=1 + The size of the grid in the z direction, defaults to 1 + """ + vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z)) + edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel())) + edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel())) + edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel())) + edges = np.hstack((edges_deep, edges_right, edges_down)) + return edges + + +def _compute_gradient_3d(edges, img): + _, n_y, n_z = img.shape + gradient = np.abs( + img[ + edges[0] // (n_y * n_z), + (edges[0] % (n_y * n_z)) // n_z, + (edges[0] % (n_y * n_z)) % n_z, + ] + - img[ + edges[1] // (n_y * n_z), + (edges[1] % (n_y * n_z)) // n_z, + (edges[1] % (n_y * n_z)) % n_z, + ] + ) + return gradient + + +# XXX: Why mask the image after computing the weights? + + +def _mask_edges_weights(mask, edges, weights=None): + """Apply a mask to edges (weighted or not)""" + inds = np.arange(mask.size) + inds = inds[mask.ravel()] + ind_mask = np.logical_and(np.isin(edges[0], inds), np.isin(edges[1], inds)) + edges = edges[:, ind_mask] + if weights is not None: + weights = weights[ind_mask] + if len(edges.ravel()): + maxval = edges.max() + else: + maxval = 0 + order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1)) + edges = order[edges] + if weights is None: + return edges + else: + return edges, weights + + +def _to_graph( + n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None +): + """Auxiliary function for img_to_graph and grid_to_graph""" + edges = _make_edges_3d(n_x, n_y, n_z) + + if dtype is None: # To not overwrite input dtype + if img is None: + dtype = int + else: + dtype = img.dtype + + if img is not None: + img = np.atleast_3d(img) + weights = _compute_gradient_3d(edges, img) + if mask is not None: + edges, weights = _mask_edges_weights(mask, edges, weights) + diag = img.squeeze()[mask] + else: + diag = img.ravel() + n_voxels = diag.size + else: + if mask is not None: + mask = mask.astype(dtype=bool, copy=False) + edges = _mask_edges_weights(mask, edges) + n_voxels = np.sum(mask) + else: + n_voxels = n_x * n_y * n_z + weights = np.ones(edges.shape[1], dtype=dtype) + diag = np.ones(n_voxels, dtype=dtype) + + diag_idx = np.arange(n_voxels) + i_idx = np.hstack((edges[0], edges[1])) + j_idx = np.hstack((edges[1], edges[0])) + graph = sparse.coo_matrix( + ( + np.hstack((weights, weights, diag)), + (np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx))), + ), + (n_voxels, n_voxels), + dtype=dtype, + ) + if return_as is np.ndarray: + return graph.toarray() + return return_as(graph) + + +@validate_params( + { + "img": ["array-like"], + "mask": [None, np.ndarray], + "return_as": [type], + "dtype": "no_validation", # validation delegated to numpy + }, + prefer_skip_nested_validation=True, +) +def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None): + """Graph of the pixel-to-pixel gradient connections. + + Edges are weighted with the gradient values. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + img : array-like of shape (height, width) or (height, width, channel) + 2D or 3D image. + mask : ndarray of shape (height, width) or \ + (height, width, channel), dtype=bool, default=None + An optional mask of the image, to consider only part of the + pixels. + return_as : np.ndarray or a sparse matrix class, \ + default=sparse.coo_matrix + The class to use to build the returned adjacency matrix. 
+ dtype : dtype, default=None + The data of the returned sparse matrix. By default it is the + dtype of img. + + Returns + ------- + graph : ndarray or a sparse matrix class + The computed adjacency matrix. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.feature_extraction.image import img_to_graph + >>> img = np.array([[0, 0], [0, 1]]) + >>> img_to_graph(img, return_as=np.ndarray) + array([[0, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, 0, 1], + [0, 1, 1, 1]]) + """ + img = np.atleast_3d(img) + n_x, n_y, n_z = img.shape + return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype) + + +@validate_params( + { + "n_x": [Interval(Integral, left=1, right=None, closed="left")], + "n_y": [Interval(Integral, left=1, right=None, closed="left")], + "n_z": [Interval(Integral, left=1, right=None, closed="left")], + "mask": [None, np.ndarray], + "return_as": [type], + "dtype": "no_validation", # validation delegated to numpy + }, + prefer_skip_nested_validation=True, +) +def grid_to_graph( + n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int +): + """Graph of the pixel-to-pixel connections. + + Edges exist if 2 voxels are connected. + + Parameters + ---------- + n_x : int + Dimension in x axis. + n_y : int + Dimension in y axis. + n_z : int, default=1 + Dimension in z axis. + mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None + An optional mask of the image, to consider only part of the + pixels. + return_as : np.ndarray or a sparse matrix class, \ default=sparse.coo_matrix + The class to use to build the returned adjacency matrix. + dtype : dtype, default=int + The data of the returned sparse matrix. By default it is int. + + Returns + ------- + graph : np.ndarray or a sparse matrix class + The computed adjacency matrix. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.feature_extraction.image import grid_to_graph + >>> shape_img = (4, 4, 1) + >>> mask = np.zeros(shape=shape_img, dtype=bool) + >>> mask[[1, 2], [1, 2], :] = True + >>> graph = grid_to_graph(*shape_img, mask=mask) + >>> print(graph) + + Coords Values + (0, 0) 1 + (1, 1) 1 + """ + return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype) + + +############################################################################### +# From an image to a set of small image patches + + +def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None): + """Compute the number of patches that will be extracted in an image. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + i_h : int + The image height + i_w : int + The image width + p_h : int + The height of a patch + p_w : int + The width of a patch + max_patches : int or float, default=None + The maximum number of patches to extract. If `max_patches` is a float + between 0 and 1, it is taken to be a proportion of the total number + of patches. If `max_patches` is None, all possible patches are extracted.
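The count is a simple sliding-window product; a worked check (the 427x640 figures match the `extract_patches_2d` docstring example further below):

# n_patches = (i_h - p_h + 1) * (i_w - p_w + 1) when max_patches is None
i_h, i_w, p_h, p_w = 427, 640, 2, 2
assert (i_h - p_h + 1) * (i_w - p_w + 1) == 272214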
+ """ + n_h = i_h - p_h + 1 + n_w = i_w - p_w + 1 + all_patches = n_h * n_w + + if max_patches: + if isinstance(max_patches, (Integral)) and max_patches < all_patches: + return max_patches + elif isinstance(max_patches, (Integral)) and max_patches >= all_patches: + return all_patches + elif isinstance(max_patches, (Real)) and 0 < max_patches < 1: + return int(max_patches * all_patches) + else: + raise ValueError("Invalid value for max_patches: %r" % max_patches) + else: + return all_patches + + +def _extract_patches(arr, patch_shape=8, extraction_step=1): + """Extracts patches of any n-dimensional array in place using strides. + + Given an n-dimensional array it will return a 2n-dimensional array with + the first n dimensions indexing patch position and the last n indexing + the patch content. This operation is immediate (O(1)). A reshape + performed on the first n dimensions will cause numpy to copy data, leading + to a list of extracted patches. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + arr : ndarray + n-dimensional array of which patches are to be extracted + + patch_shape : int or tuple of length arr.ndim.default=8 + Indicates the shape of the patches to be extracted. If an + integer is given, the shape will be a hypercube of + sidelength given by its value. + + extraction_step : int or tuple of length arr.ndim, default=1 + Indicates step size at which extraction shall be performed. + If integer is given, then the step is uniform in all dimensions. + + + Returns + ------- + patches : strided ndarray + 2n-dimensional array indexing patches on first n dimensions and + containing patches on the last n dimensions. These dimensions + are fake, but this way no data is copied. A simple reshape invokes + a copying operation to obtain a list of patches: + result.reshape([-1] + list(patch_shape)) + """ + + arr_ndim = arr.ndim + + if isinstance(patch_shape, Number): + patch_shape = tuple([patch_shape] * arr_ndim) + if isinstance(extraction_step, Number): + extraction_step = tuple([extraction_step] * arr_ndim) + + patch_strides = arr.strides + + slices = tuple(slice(None, None, st) for st in extraction_step) + indexing_strides = arr[slices].strides + + patch_indices_shape = ( + (np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step) + ) + 1 + + shape = tuple(list(patch_indices_shape) + list(patch_shape)) + strides = tuple(list(indexing_strides) + list(patch_strides)) + + patches = as_strided(arr, shape=shape, strides=strides) + return patches + + +@validate_params( + { + "image": [np.ndarray], + "patch_size": [tuple, list], + "max_patches": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(Integral, 1, None, closed="left"), + None, + ], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None): + """Reshape a 2D image into a collection of patches. + + The resulting patches are allocated in a dedicated array. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + image : ndarray of shape (image_height, image_width) or \ + (image_height, image_width, n_channels) + The original image data. For color images, the last dimension specifies + the channel: a RGB image would have `n_channels=3`. + + patch_size : tuple of int (patch_height, patch_width) + The dimensions of one patch. + + max_patches : int or float, default=None + The maximum number of patches to extract. 
If `max_patches` is a float + between 0 and 1, it is taken to be a proportion of the total number + of patches. If `max_patches` is None it corresponds to the total number + of patches that can be extracted. + + random_state : int, RandomState instance, default=None + Determines the random number generator used for random sampling when + `max_patches` is not None. Use an int to make the randomness + deterministic. + See :term:`Glossary `. + + Returns + ------- + patches : array of shape (n_patches, patch_height, patch_width) or \ + (n_patches, patch_height, patch_width, n_channels) + The collection of patches extracted from the image, where `n_patches` + is either `max_patches` or the total number of patches that can be + extracted. + + Examples + -------- + >>> from sklearn.datasets import load_sample_image + >>> from sklearn.feature_extraction import image + >>> # Use the array data from the first image in this dataset: + >>> one_image = load_sample_image("china.jpg") + >>> print('Image shape: {}'.format(one_image.shape)) + Image shape: (427, 640, 3) + >>> patches = image.extract_patches_2d(one_image, (2, 2)) + >>> print('Patches shape: {}'.format(patches.shape)) + Patches shape: (272214, 2, 2, 3) + >>> # Here are just two of these patches: + >>> print(patches[1]) + [[[174 201 231] + [174 201 231]] + [[173 200 230] + [173 200 230]]] + >>> print(patches[800]) + [[[187 214 243] + [188 215 244]] + [[187 214 243] + [188 215 244]]] + """ + i_h, i_w = image.shape[:2] + p_h, p_w = patch_size + + if p_h > i_h: + raise ValueError( + "Height of the patch should be less than the height of the image." + ) + + if p_w > i_w: + raise ValueError( + "Width of the patch should be less than the width of the image." + ) + + image = check_array(image, allow_nd=True) + image = image.reshape((i_h, i_w, -1)) + n_colors = image.shape[-1] + + extracted_patches = _extract_patches( + image, patch_shape=(p_h, p_w, n_colors), extraction_step=1 + ) + + n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches) + if max_patches: + rng = check_random_state(random_state) + i_s = rng.randint(i_h - p_h + 1, size=n_patches) + j_s = rng.randint(i_w - p_w + 1, size=n_patches) + patches = extracted_patches[i_s, j_s, 0] + else: + patches = extracted_patches + + patches = patches.reshape(-1, p_h, p_w, n_colors) + # remove the color dimension if useless + if patches.shape[-1] == 1: + return patches.reshape((n_patches, p_h, p_w)) + else: + return patches + + +@validate_params( + {"patches": [np.ndarray], "image_size": [tuple, Hidden(list)]}, + prefer_skip_nested_validation=True, +) +def reconstruct_from_patches_2d(patches, image_size): + """Reconstruct the image from all of its patches. + + Patches are assumed to overlap and the image is constructed by filling in + the patches from left to right, top to bottom, averaging the overlapping + regions. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + patches : ndarray of shape (n_patches, patch_height, patch_width) or \ + (n_patches, patch_height, patch_width, n_channels) + The complete set of patches. If the patches contain colour information, + channels are indexed along the last dimension: RGB patches would + have `n_channels=3`. + + image_size : tuple of int (image_height, image_width) or \ + (image_height, image_width, n_channels) + The size of the image that will be reconstructed. + + Returns + ------- + image : ndarray of shape image_size + The reconstructed image. 
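+ + Notes + ----- + A pixel at position ``(i, j)`` is covered by ``min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j)`` overlapping patches; the summed patch values at that pixel are divided by this count, which is the averaging step performed in the implementation.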
+ + Examples + -------- + >>> from sklearn.datasets import load_sample_image + >>> from sklearn.feature_extraction import image + >>> one_image = load_sample_image("china.jpg") + >>> print('Image shape: {}'.format(one_image.shape)) + Image shape: (427, 640, 3) + >>> image_patches = image.extract_patches_2d(image=one_image, patch_size=(10, 10)) + >>> print('Patches shape: {}'.format(image_patches.shape)) + Patches shape: (263758, 10, 10, 3) + >>> image_reconstructed = image.reconstruct_from_patches_2d( + ... patches=image_patches, + ... image_size=one_image.shape + ... ) + >>> print(f"Reconstructed shape: {image_reconstructed.shape}") + Reconstructed shape: (427, 640, 3) + """ + i_h, i_w = image_size[:2] + p_h, p_w = patches.shape[1:3] + img = np.zeros(image_size) + # compute the dimensions of the patches array + n_h = i_h - p_h + 1 + n_w = i_w - p_w + 1 + for p, (i, j) in zip(patches, product(range(n_h), range(n_w))): + img[i : i + p_h, j : j + p_w] += p + + for i in range(i_h): + for j in range(i_w): + # divide by the amount of overlap + # XXX: is this the most efficient way? memory-wise yes, cpu wise? + img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j)) + return img + + +class PatchExtractor(TransformerMixin, BaseEstimator): + """Extracts patches from a collection of images. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.9 + + Parameters + ---------- + patch_size : tuple of int (patch_height, patch_width), default=None + The dimensions of one patch. If set to None, the patch size will be + automatically set to `(img_height // 10, img_width // 10)`, where + `img_height` and `img_width` are the dimensions of the input images. + + max_patches : int or float, default=None + The maximum number of patches per image to extract. If `max_patches` is + a float in (0, 1), it is taken to mean a proportion of the total number + of patches. If set to None, extract all possible patches. + + random_state : int, RandomState instance, default=None + Determines the random number generator used for random sampling when + `max_patches is not None`. Use an int to make the randomness + deterministic. + See :term:`Glossary `. + + See Also + -------- + reconstruct_from_patches_2d : Reconstruct image from all of its patches. + + Notes + ----- + This estimator is stateless and does not need to be fitted. However, we + recommend to call :meth:`fit_transform` instead of :meth:`transform`, as + parameter validation is only performed in :meth:`fit`. + + Examples + -------- + >>> from sklearn.datasets import load_sample_images + >>> from sklearn.feature_extraction import image + >>> # Use the array data from the second image in this dataset: + >>> X = load_sample_images().images[1] + >>> X = X[None, ...] 
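+ >>> # the new leading axis is the sample axis expected by `transform`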
+ >>> print(f"Image shape: {X.shape}") + Image shape: (1, 427, 640, 3) + >>> pe = image.PatchExtractor(patch_size=(10, 10)) + >>> pe_trans = pe.transform(X) + >>> print(f"Patches shape: {pe_trans.shape}") + Patches shape: (263758, 10, 10, 3) + >>> X_reconstructed = image.reconstruct_from_patches_2d(pe_trans, X.shape[1:]) + >>> print(f"Reconstructed shape: {X_reconstructed.shape}") + Reconstructed shape: (427, 640, 3) + """ + + _parameter_constraints: dict = { + "patch_size": [tuple, None], + "max_patches": [ + None, + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(Integral, 1, None, closed="left"), + ], + "random_state": ["random_state"], + } + + def __init__(self, *, patch_size=None, max_patches=None, random_state=None): + self.patch_size = patch_size + self.max_patches = max_patches + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validate the parameters of the estimator. + + This method allows to: (i) validate the parameters of the estimator and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : ndarray of shape (n_samples, image_height, image_width) or \ + (n_samples, image_height, image_width, n_channels) + Array of images from which to extract patches. For color images, + the last dimension specifies the channel: a RGB image would have + `n_channels=3`. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + return self + + def transform(self, X): + """Transform the image samples in `X` into a matrix of patch data. + + Parameters + ---------- + X : ndarray of shape (n_samples, image_height, image_width) or \ + (n_samples, image_height, image_width, n_channels) + Array of images from which to extract patches. For color images, + the last dimension specifies the channel: a RGB image would have + `n_channels=3`. + + Returns + ------- + patches : array of shape (n_patches, patch_height, patch_width) or \ + (n_patches, patch_height, patch_width, n_channels) + The collection of patches extracted from the images, where + `n_patches` is either `n_samples * max_patches` or the total + number of patches that can be extracted. + """ + X = validate_data( + self, + X=X, + ensure_2d=False, + allow_nd=True, + ensure_min_samples=1, + ensure_min_features=1, + reset=False, + ) + random_state = check_random_state(self.random_state) + n_imgs, img_height, img_width = X.shape[:3] + if self.patch_size is None: + patch_size = img_height // 10, img_width // 10 + else: + if len(self.patch_size) != 2: + raise ValueError( + "patch_size must be a tuple of two integers. Got" + f" {self.patch_size} instead." 
+ ) + patch_size = self.patch_size + + n_imgs, img_height, img_width = X.shape[:3] + X = np.reshape(X, (n_imgs, img_height, img_width, -1)) + n_channels = X.shape[-1] + + # compute the dimensions of the patches array + patch_height, patch_width = patch_size + n_patches = _compute_n_patches( + img_height, img_width, patch_height, patch_width, self.max_patches + ) + patches_shape = (n_imgs * n_patches,) + patch_size + if n_channels > 1: + patches_shape += (n_channels,) + + # extract the patches + patches = np.empty(patches_shape) + for ii, image in enumerate(X): + patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d( + image, + patch_size, + max_patches=self.max_patches, + random_state=random_state, + ) + return patches + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.two_d_array = False + tags.input_tags.three_d_array = True + tags.requires_fit = False + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/meson.build b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..81732474de3b2e2c5895cd18a398789d9a66bc7c --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/meson.build @@ -0,0 +1,9 @@ +py.extension_module( + '_hashing_fast', + ['_hashing_fast.pyx', utils_cython_tree], + dependencies: [np_dep], + override_options: ['cython_language=cpp'], + cython_args: cython_args, + subdir: 'sklearn/feature_extraction', + install: true +) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_extraction/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/isotonic.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/isotonic.py new file mode 100644 index 0000000000000000000000000000000000000000..fb47ca1dde68fa1e9670003e667e7a7c53bfdf17 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/isotonic.py @@ -0,0 +1,517 @@ +"""Isotonic regression for obtaining monotonic fit to data.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import math +import warnings +from numbers import Real + +import numpy as np +from scipy import interpolate, optimize +from scipy.stats import spearmanr + +from sklearn.utils import metadata_routing + +from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique +from .base import BaseEstimator, RegressorMixin, TransformerMixin, _fit_context +from .utils import check_array, check_consistent_length +from .utils._param_validation import Interval, StrOptions, validate_params +from .utils.fixes import parse_version, sp_base_version +from .utils.validation import _check_sample_weight, check_is_fitted + +__all__ = ["check_increasing", "isotonic_regression", "IsotonicRegression"] + + +@validate_params( + { + "x": ["array-like"], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def check_increasing(x, y): + """Determine whether y is monotonically correlated with x. + + y is found increasing or decreasing with respect to x based on a Spearman + correlation test. + + Parameters + ---------- + x : array-like of shape (n_samples,) + Training data. + + y : array-like of shape (n_samples,) + Training target. 
+ + Returns + ------- + increasing_bool : boolean + True if the relationship is increasing, False if it is decreasing. + + Notes + ----- + The Spearman correlation coefficient is estimated from the data, and the + sign of the resulting estimate is used as the result. + + In the event that the 95% confidence interval based on Fisher transform + spans zero, a warning is raised. + + References + ---------- + Fisher transformation. Wikipedia. + https://en.wikipedia.org/wiki/Fisher_transformation + + Examples + -------- + >>> from sklearn.isotonic import check_increasing + >>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10] + >>> check_increasing(x, y) + np.True_ + >>> y = [10, 8, 6, 4, 2] + >>> check_increasing(x, y) + np.False_ + """ + + # Calculate Spearman rho estimate and set return accordingly. + rho, _ = spearmanr(x, y) + increasing_bool = rho >= 0 + + # Run Fisher transform to get the rho CI, but handle rho=+/-1 + if rho not in [-1.0, 1.0] and len(x) > 3: + F = 0.5 * math.log((1.0 + rho) / (1.0 - rho)) + F_se = 1 / math.sqrt(len(x) - 3) + + # Use a 95% CI, i.e., +/-1.96 S.E. + # https://en.wikipedia.org/wiki/Fisher_transformation + rho_0 = math.tanh(F - 1.96 * F_se) + rho_1 = math.tanh(F + 1.96 * F_se) + + # Warn if the CI spans zero. + if np.sign(rho_0) != np.sign(rho_1): + warnings.warn( + "Confidence interval of the Spearman " + "correlation coefficient spans zero. " + "Determination of ``increasing`` may be " + "suspect." + ) + + return increasing_bool + + +@validate_params( + { + "y": ["array-like"], + "sample_weight": ["array-like", None], + "y_min": [Interval(Real, None, None, closed="both"), None], + "y_max": [Interval(Real, None, None, closed="both"), None], + "increasing": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def isotonic_regression( + y, *, sample_weight=None, y_min=None, y_max=None, increasing=True +): + """Solve the isotonic regression model. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y : array-like of shape (n_samples,) + The data. + + sample_weight : array-like of shape (n_samples,), default=None + Weights on each point of the regression. + If None, weight is set to 1 (equal weights). + + y_min : float, default=None + Lower bound on the lowest predicted value (the minimum value may + still be higher). If not set, defaults to -inf. + + y_max : float, default=None + Upper bound on the highest predicted value (the maximum may still be + lower). If not set, defaults to +inf. + + increasing : bool, default=True + Whether ``y_`` should be computed as increasing (if set to True) or + decreasing (if set to False). + + Returns + ------- + y_ : ndarray of shape (n_samples,) + Isotonic fit of y. + + References + ---------- + "Active set algorithms for isotonic regression; A unifying framework" + by Michael J. Best and Nilotpal Chakravarti, section 3. + + Examples + -------- + >>> from sklearn.isotonic import isotonic_regression + >>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4]) + array([2.75 , 2.75 , 2.75 , 2.75 , 7.33..., + 7.33..., 7.33..., 7.33..., 7.33..., 7.33...]) + """ + y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32]) + if sp_base_version >= parse_version("1.12.0"): + res = optimize.isotonic_regression( + y=y, weights=sample_weight, increasing=increasing + ) + y = np.asarray(res.x, dtype=y.dtype) + else: + # TODO: remove this branch when Scipy 1.12 is the minimum supported version + # Also remove _inplace_contiguous_isotonic_regression.
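+ # Note: the in-place Cython solver only handles the increasing case; + # a decreasing fit is obtained by reversing the inputs, solving the + # increasing problem in place, and reversing the result back (the + # `order` slice below performs both reversals).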
+ order = np.s_[:] if increasing else np.s_[::-1] + y = np.array(y[order], dtype=y.dtype) + sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True) + sample_weight = np.ascontiguousarray(sample_weight[order]) + _inplace_contiguous_isotonic_regression(y, sample_weight) + y = y[order] + + if y_min is not None or y_max is not None: + # Older versions of np.clip don't accept None as a bound, so use np.inf + if y_min is None: + y_min = -np.inf + if y_max is None: + y_max = np.inf + np.clip(y, y_min, y_max, y) + return y + + +class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator): + """Isotonic regression model. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + y_min : float, default=None + Lower bound on the lowest predicted value (the minimum value may + still be higher). If not set, defaults to -inf. + + y_max : float, default=None + Upper bound on the highest predicted value (the maximum may still be + lower). If not set, defaults to +inf. + + increasing : bool or 'auto', default=True + Determines whether the predictions should be constrained to increase + or decrease with `X`. 'auto' will decide based on the Spearman + correlation estimate's sign. + + out_of_bounds : {'nan', 'clip', 'raise'}, default='nan' + Handles how `X` values outside of the training domain are handled + during prediction. + + - 'nan', predictions will be NaN. + - 'clip', predictions will be set to the value corresponding to + the nearest train interval endpoint. + - 'raise', a `ValueError` is raised. + + Attributes + ---------- + X_min_ : float + Minimum value of input array `X_` for left bound. + + X_max_ : float + Maximum value of input array `X_` for right bound. + + X_thresholds_ : ndarray of shape (n_thresholds,) + Unique ascending `X` values used to interpolate + the y = f(X) monotonic function. + + .. versionadded:: 0.24 + + y_thresholds_ : ndarray of shape (n_thresholds,) + De-duplicated `y` values suitable to interpolate the y = f(X) + monotonic function. + + .. versionadded:: 0.24 + + f_ : function + The stepwise interpolating function that covers the input domain ``X``. + + increasing_ : bool + Inferred value for ``increasing``. + + See Also + -------- + sklearn.linear_model.LinearRegression : Ordinary least squares Linear + Regression. + sklearn.ensemble.HistGradientBoostingRegressor : Gradient boosting that + is a non-parametric model accepting monotonicity constraints. + isotonic_regression : Function to solve the isotonic regression model. + + Notes + ----- + Ties are broken using the secondary method from de Leeuw, 1977. + + References + ---------- + Isotonic Median Regression: A Linear Programming Approach + Nilotpal Chakravarti + Mathematics of Operations Research + Vol. 14, No. 2 (May, 1989), pp. 
303-308 + + Isotone Optimization in R : Pool-Adjacent-Violators + Algorithm (PAVA) and Active Set Methods + de Leeuw, Hornik, Mair + Journal of Statistical Software 2009 + + Correctness of Kruskal's algorithms for monotone regression with ties + de Leeuw, Psychometrika, 1977 + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.isotonic import IsotonicRegression + >>> X, y = make_regression(n_samples=10, n_features=1, random_state=41) + >>> iso_reg = IsotonicRegression().fit(X, y) + >>> iso_reg.predict([.1, .2]) + array([1.8628..., 3.7256...]) + """ + + # T should have been called X + __metadata_request__predict = {"T": metadata_routing.UNUSED} + __metadata_request__transform = {"T": metadata_routing.UNUSED} + + _parameter_constraints: dict = { + "y_min": [Interval(Real, None, None, closed="both"), None], + "y_max": [Interval(Real, None, None, closed="both"), None], + "increasing": ["boolean", StrOptions({"auto"})], + "out_of_bounds": [StrOptions({"nan", "clip", "raise"})], + } + + def __init__(self, *, y_min=None, y_max=None, increasing=True, out_of_bounds="nan"): + self.y_min = y_min + self.y_max = y_max + self.increasing = increasing + self.out_of_bounds = out_of_bounds + + def _check_input_data_shape(self, X): + if not (X.ndim == 1 or (X.ndim == 2 and X.shape[1] == 1)): + msg = ( + "Isotonic regression input X should be a 1d array or " + "2d array with 1 feature" + ) + raise ValueError(msg) + + def _build_f(self, X, y): + """Build the f_ interp1d function.""" + + bounds_error = self.out_of_bounds == "raise" + if len(y) == 1: + # single y, constant prediction + self.f_ = lambda x: y.repeat(x.shape) + else: + self.f_ = interpolate.interp1d( + X, y, kind="linear", bounds_error=bounds_error + ) + + def _build_y(self, X, y, sample_weight, trim_duplicates=True): + """Build the y_ IsotonicRegression.""" + self._check_input_data_shape(X) + X = X.reshape(-1)  # use 1d view + + # Determine increasing if auto-determination requested + if self.increasing == "auto": + self.increasing_ = check_increasing(X, y) + else: + self.increasing_ = self.increasing + + # If sample_weight is passed, remove zero-weight values and clean + # the order + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + mask = sample_weight > 0 + X, y, sample_weight = X[mask], y[mask], sample_weight[mask] + + order = np.lexsort((y, X)) + X, y, sample_weight = [array[order] for array in [X, y, sample_weight]] + unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight) + + X = unique_X + y = isotonic_regression( + unique_y, + sample_weight=unique_sample_weight, + y_min=self.y_min, + y_max=self.y_max, + increasing=self.increasing_, + ) + + # Handle the left and right bounds on X + self.X_min_, self.X_max_ = np.min(X), np.max(X) + + if trim_duplicates: + # Remove unnecessary points for faster prediction + keep_data = np.ones((len(y),), dtype=bool) + # Aside from the 1st and last point, remove points whose y values + # are equal to both the point before and the point after it. + keep_data[1:-1] = np.logical_or( + np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:]) + ) + return X[keep_data], y[keep_data] + else: + # The ability to turn off trim_duplicates is only used to make it + # easier to unit test that removing duplicates in y does not have + # any impact on the resulting interpolation function (besides + # prediction speed).
+ return X, y + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples,) or (n_samples, 1) + Training data. + + .. versionchanged:: 0.24 + Also accepts 2d array with 1 feature. + + y : array-like of shape (n_samples,) + Training target. + + sample_weight : array-like of shape (n_samples,), default=None + Weights. If set to None, all weights will be set to 1 (equal + weights). + + Returns + ------- + self : object + Returns an instance of self. + + Notes + ----- + X is stored for future use, as :meth:`transform` needs X to interpolate + new input data. + """ + check_params = dict(accept_sparse=False, ensure_2d=False) + X = check_array( + X, input_name="X", dtype=[np.float64, np.float32], **check_params + ) + y = check_array(y, input_name="y", dtype=X.dtype, **check_params) + check_consistent_length(X, y, sample_weight) + + # Transform y by running the isotonic regression algorithm and + # transform X accordingly. + X, y = self._build_y(X, y, sample_weight) + + # It is necessary to store the non-redundant part of the training set + # on the model to make it possible to support model persistence via + # the pickle module as the object built by scipy.interp1d is not + # picklable directly. + self.X_thresholds_, self.y_thresholds_ = X, y + + # Build the interpolation function + self._build_f(X, y) + return self + + def _transform(self, T): + """`_transform` is called by both `transform` and `predict` methods. + + Since `transform` is wrapped to output arrays of specific types (e.g. + NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform` + directly. + + The above behaviour could be changed in the future, if we decide to output + other type of arrays when calling `predict`. + """ + if hasattr(self, "X_thresholds_"): + dtype = self.X_thresholds_.dtype + else: + dtype = np.float64 + + T = check_array(T, dtype=dtype, ensure_2d=False) + + self._check_input_data_shape(T) + T = T.reshape(-1) # use 1d view + + if self.out_of_bounds == "clip": + T = np.clip(T, self.X_min_, self.X_max_) + + res = self.f_(T) + + # on scipy 0.17, interp1d up-casts to float64, so we cast back + res = res.astype(T.dtype) + + return res + + def transform(self, T): + """Transform new data by linear interpolation. + + Parameters + ---------- + T : array-like of shape (n_samples,) or (n_samples, 1) + Data to transform. + + .. versionchanged:: 0.24 + Also accepts 2d array with 1 feature. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + The transformed data. + """ + return self._transform(T) + + def predict(self, T): + """Predict new data by linear interpolation. + + Parameters + ---------- + T : array-like of shape (n_samples,) or (n_samples, 1) + Data to transform. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Transformed data. + """ + return self._transform(T) + + # We implement get_feature_names_out here instead of using + # `ClassNamePrefixFeaturesOutMixin`` because `input_features` are ignored. + # `input_features` are ignored because `IsotonicRegression` accepts 1d + # arrays and the semantics of `feature_names_in_` are not clear for 1d arrays. + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Ignored. 
+ + Returns + ------- + feature_names_out : ndarray of str objects + An ndarray with one string, i.e. ["isotonicregression0"]. + """ + check_is_fitted(self, "f_") + class_name = self.__class__.__name__.lower() + return np.asarray([f"{class_name}0"], dtype=object) + + def __getstate__(self): + """Pickle-protocol - return state of the estimator.""" + state = super().__getstate__() + # remove interpolation method + state.pop("f_", None) + return state + + def __setstate__(self, state): + """Pickle-protocol - set state of the estimator. + + We need to rebuild the interpolation function. + """ + super().__setstate__(state) + if hasattr(self, "X_thresholds_") and hasattr(self, "y_thresholds_"): + self._build_f(self.X_thresholds_, self.y_thresholds_) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.one_d_array = True + tags.input_tags.two_d_array = False + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/kernel_approximation.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/kernel_approximation.py new file mode 100644 index 0000000000000000000000000000000000000000..35da4d08dcbf4e217de1f8a43a50265dcce6bfac --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/kernel_approximation.py @@ -0,0 +1,1106 @@ +"""Approximate kernel feature maps based on Fourier transforms and count sketches.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy.fft import fft, ifft +from scipy.linalg import svd + +from .base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from .metrics.pairwise import KERNEL_PARAMS, PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels +from .utils import check_random_state +from .utils._param_validation import Interval, StrOptions +from .utils.extmath import safe_sparse_dot +from .utils.validation import ( + _check_feature_names_in, + check_is_fitted, + validate_data, +) + + +class PolynomialCountSketch( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator +): + """Polynomial kernel approximation via Tensor Sketch. + + Implements Tensor Sketch, which approximates the feature map + of the polynomial kernel:: + + K(X, Y) = (gamma * <x, y> + coef0)^degree + + by efficiently computing a Count Sketch of the outer product of a + vector with itself using Fast Fourier Transforms (FFT). Read more in the + :ref:`User Guide `. + + .. versionadded:: 0.24 + + Parameters + ---------- + gamma : float, default=1.0 + Parameter of the polynomial kernel whose feature map + will be approximated. + + degree : int, default=2 + Degree of the polynomial kernel whose feature map + will be approximated. + + coef0 : int, default=0 + Constant term of the polynomial kernel whose feature map + will be approximated. + + n_components : int, default=100 + Dimensionality of the output feature space. Usually, `n_components` + should be greater than the number of features in input samples in + order to achieve good performance. The optimal score / run time + balance is typically achieved around `n_components` = 10 * `n_features`, + but this depends on the specific dataset being used. + + random_state : int, RandomState instance, default=None + Determines random number generation for indexHash and bitHash + initialization. Pass an int for reproducible results across multiple + function calls. See :term:`Glossary `.
+ + Attributes + ---------- + indexHash_ : ndarray of shape (degree, n_features), dtype=int64 + Array of indexes in range [0, n_components) used to represent + the 2-wise independent hash functions for Count Sketch computation. + + bitHash_ : ndarray of shape (degree, n_features), dtype=float32 + Array with random entries in {+1, -1}, used to represent + the 2-wise independent hash functions for Count Sketch computation. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. + Nystroem : Approximate a kernel map using a subset of the training data. + RBFSampler : Approximate a RBF kernel feature map using random Fourier + features. + SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel. + sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. + + Examples + -------- + >>> from sklearn.kernel_approximation import PolynomialCountSketch + >>> from sklearn.linear_model import SGDClassifier + >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]] + >>> y = [0, 0, 1, 1] + >>> ps = PolynomialCountSketch(degree=3, random_state=1) + >>> X_features = ps.fit_transform(X) + >>> clf = SGDClassifier(max_iter=10, tol=1e-3) + >>> clf.fit(X_features, y) + SGDClassifier(max_iter=10) + >>> clf.score(X_features, y) + 1.0 + + For a more detailed example of usage, see + :ref:`sphx_glr_auto_examples_kernel_approximation_plot_scalable_poly_kernels.py` + """ + + _parameter_constraints: dict = { + "gamma": [Interval(Real, 0, None, closed="left")], + "degree": [Interval(Integral, 1, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "n_components": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + def __init__( + self, *, gamma=1.0, degree=2, coef0=0, n_components=100, random_state=None + ): + self.gamma = gamma + self.degree = degree + self.coef0 = coef0 + self.n_components = n_components + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X. + + Initializes the internal variables. The method needs no information + about the distribution of data, so we only care about n_features in X. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the instance itself. + """ + X = validate_data(self, X, accept_sparse="csc") + random_state = check_random_state(self.random_state) + + n_features = X.shape[1] + if self.coef0 != 0: + n_features += 1 + + self.indexHash_ = random_state.randint( + 0, high=self.n_components, size=(self.degree, n_features) + ) + + self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features)) + self._n_features_out = self.n_components + return self + + def transform(self, X): + """Generate the feature map approximation for X. 
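+ + Each row of `X` is hashed into ``degree`` independent Count Sketches; their FFTs are multiplied element-wise and inverse-transformed, yielding a sketch of the ``degree``-fold tensor product of the row with itself.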
+ + Parameters + ---------- + X : {array-like}, shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + The approximate feature map of `X`. + """ + + check_is_fitted(self) + X = validate_data(self, X, accept_sparse="csc", reset=False) + + X_gamma = np.sqrt(self.gamma) * X + + if sp.issparse(X_gamma) and self.coef0 != 0: + X_gamma = sp.hstack( + [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))], + format="csc", + ) + + elif not sp.issparse(X_gamma) and self.coef0 != 0: + X_gamma = np.hstack( + [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))] + ) + + if X_gamma.shape[1] != self.indexHash_.shape[1]: + raise ValueError( + "Number of features of test samples does not" + " match that of training samples." + ) + + count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components)) + + if sp.issparse(X_gamma): + for j in range(X_gamma.shape[1]): + for d in range(self.degree): + iHashIndex = self.indexHash_[d, j] + iHashBit = self.bitHash_[d, j] + count_sketches[:, d, iHashIndex] += ( + (iHashBit * X_gamma[:, [j]]).toarray().ravel() + ) + + else: + for j in range(X_gamma.shape[1]): + for d in range(self.degree): + iHashIndex = self.indexHash_[d, j] + iHashBit = self.bitHash_[d, j] + count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j] + + # For each sample, compute a count sketch of phi(x) using the polynomial + # multiplication (via FFT) of p count sketches of x. + count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True) + count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1) + data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True)) + + return data_sketch + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags + + +class RBFSampler(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Approximate a RBF kernel feature map using random Fourier features. + + It implements a variant of Random Kitchen Sinks [1]. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + gamma : 'scale' or float, default=1.0 + Parameter of RBF kernel: exp(-gamma * x^2). + If ``gamma='scale'`` is passed then it uses + 1 / (n_features * X.var()) as value of gamma. + + .. versionadded:: 1.2 + The option `"scale"` was added in 1.2. + + n_components : int, default=100 + Number of Monte Carlo samples per original feature. + Equals the dimensionality of the computed feature space. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the generation of the random + weights and random offset when fitting the training data. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + random_offset_ : ndarray of shape (n_components,), dtype={np.float64, np.float32} + Random offset used to compute the projection in the `n_components` + dimensions of the feature space. + + random_weights_ : ndarray of shape (n_features, n_components),\ + dtype={np.float64, np.float32} + Random projection directions drawn from the Fourier transform + of the RBF kernel. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`.
Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. + Nystroem : Approximate a kernel map using a subset of the training data. + PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch. + SkewedChi2Sampler : Approximate feature map for + "skewed chi-squared" kernel. + sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. + + Notes + ----- + See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and + Benjamin Recht. + + [1] "Weighted Sums of Random Kitchen Sinks: Replacing + minimization with randomization in learning" by A. Rahimi and + Benjamin Recht. + (https://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf) + + Examples + -------- + >>> from sklearn.kernel_approximation import RBFSampler + >>> from sklearn.linear_model import SGDClassifier + >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]] + >>> y = [0, 0, 1, 1] + >>> rbf_feature = RBFSampler(gamma=1, random_state=1) + >>> X_features = rbf_feature.fit_transform(X) + >>> clf = SGDClassifier(max_iter=5, tol=1e-3) + >>> clf.fit(X_features, y) + SGDClassifier(max_iter=5) + >>> clf.score(X_features, y) + 1.0 + """ + + _parameter_constraints: dict = { + "gamma": [ + StrOptions({"scale"}), + Interval(Real, 0.0, None, closed="left"), + ], + "n_components": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + def __init__(self, *, gamma=1.0, n_components=100, random_state=None): + self.gamma = gamma + self.n_components = n_components + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X. + + Samples random projection according to n_features. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the instance itself. + """ + X = validate_data(self, X, accept_sparse="csr") + random_state = check_random_state(self.random_state) + n_features = X.shape[1] + sparse = sp.issparse(X) + if self.gamma == "scale": + # var = E[X^2] - E[X]^2 if sparse + X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var() + self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0 + else: + self._gamma = self.gamma + self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal( + size=(n_features, self.n_components) + ) + + self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) + + if X.dtype == np.float32: + # Setting the data type of the fitted attribute will ensure the + # output data type during `transform`. + self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False) + self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False) + + self._n_features_out = self.n_components + return self + + def transform(self, X): + """Apply the approximate feature map to X. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + The approximate feature map of `X`.
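+ + Notes + ----- + The map computed here is ``sqrt(2 / n_components) * cos(X @ random_weights_ + random_offset_)``, the standard random Fourier feature construction.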
+ """ + check_is_fitted(self) + + X = validate_data(self, X, accept_sparse="csr", reset=False) + projection = safe_sparse_dot(X, self.random_weights_) + projection += self.random_offset_ + np.cos(projection, projection) + projection *= (2.0 / self.n_components) ** 0.5 + return projection + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + tags.transformer_tags.preserves_dtype = ["float64", "float32"] + return tags + + +class SkewedChi2Sampler( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator +): + """Approximate feature map for "skewed chi-squared" kernel. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + skewedness : float, default=1.0 + "skewedness" parameter of the kernel. Needs to be cross-validated. + + n_components : int, default=100 + Number of Monte Carlo samples per original feature. + Equals the dimensionality of the computed feature space. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the generation of the random + weights and random offset when fitting the training data. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + random_weights_ : ndarray of shape (n_features, n_components) + Weight array, sampled from a secant hyperbolic distribution, which will + be used to linearly transform the log of the data. + + random_offset_ : ndarray of shape (n_features, n_components) + Bias term, which will be added to the data. It is uniformly distributed + between 0 and 2*pi. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. + Nystroem : Approximate a kernel map using a subset of the training data. + RBFSampler : Approximate a RBF kernel feature map using random Fourier + features. + SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel. + sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. + sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. + + References + ---------- + See "Random Fourier Approximations for Skewed Multiplicative Histogram + Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu. + + Examples + -------- + >>> from sklearn.kernel_approximation import SkewedChi2Sampler + >>> from sklearn.linear_model import SGDClassifier + >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]] + >>> y = [0, 0, 1, 1] + >>> chi2_feature = SkewedChi2Sampler(skewedness=.01, + ... n_components=10, + ... 
random_state=0) + >>> X_features = chi2_feature.fit_transform(X, y) + >>> clf = SGDClassifier(max_iter=10, tol=1e-3) + >>> clf.fit(X_features, y) + SGDClassifier(max_iter=10) + >>> clf.score(X_features, y) + 1.0 + """ + + _parameter_constraints: dict = { + "skewedness": [Interval(Real, None, None, closed="neither")], + "n_components": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + def __init__(self, *, skewedness=1.0, n_components=100, random_state=None): + self.skewedness = skewedness + self.n_components = n_components + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the model with X. + + Samples random projection according to n_features. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the instance itself. + """ + X = validate_data(self, X) + random_state = check_random_state(self.random_state) + n_features = X.shape[1] + uniform = random_state.uniform(size=(n_features, self.n_components)) + # transform by inverse CDF of sech + self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform)) + self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) + + if X.dtype == np.float32: + # Setting the data type of the fitted attribute will ensure the + # output data type during `transform`. + self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False) + self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False) + + self._n_features_out = self.n_components + return self + + def transform(self, X): + """Apply the approximate feature map to X. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + New data, where `n_samples` is the number of samples + and `n_features` is the number of features. All values of X must be + strictly greater than "-skewedness". + + Returns + ------- + X_new : array-like, shape (n_samples, n_components) + The approximate feature map of `X`. + """ + check_is_fitted(self) + X = validate_data( + self, X, copy=True, dtype=[np.float64, np.float32], reset=False + ) + if (X <= -self.skewedness).any(): + raise ValueError("X may not contain entries smaller than -skewedness.") + + X += self.skewedness + np.log(X, X) + projection = safe_sparse_dot(X, self.random_weights_) + projection += self.random_offset_ + np.cos(projection, projection) + projection *= np.sqrt(2.0) / np.sqrt(self.n_components) + return projection + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.transformer_tags.preserves_dtype = ["float64", "float32"] + return tags + + +class AdditiveChi2Sampler(TransformerMixin, BaseEstimator): + """Approximate feature map for additive chi2 kernel. + + Samples the Fourier transform of the kernel characteristic + at regular intervals. + + Since the kernel that is to be approximated is additive, the components of + the input vectors can be treated separately. Each entry in the original + space is transformed into 2*sample_steps-1 features, where sample_steps is + a parameter of the method. Typical values of sample_steps include 1, 2 and + 3.
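+ + For instance, with the default ``sample_steps=2`` each input feature is expanded into ``2 * 2 - 1 = 3`` output features, so a 64-feature input yields 192 transformed features.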
+ + Optimal choices for the sampling interval for certain data ranges can be + computed (see the reference). The default values should be reasonable. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + sample_steps : int, default=2 + Gives the number of (complex) sampling points. + + sample_interval : float, default=None + Sampling interval. Must be specified when sample_steps not in {1,2,3}. + + Attributes + ---------- + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of + the chi squared kernel. + + sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. + + sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi + squared kernel. + + Notes + ----- + This estimator approximates a slightly different version of the additive + chi squared kernel than ``metric.additive_chi2`` computes. + + This estimator is stateless and does not need to be fitted. However, we + recommend to call :meth:`fit_transform` instead of :meth:`transform`, as + parameter validation is only performed in :meth:`fit`. + + References + ---------- + See `"Efficient additive kernels via explicit feature maps" + `_ + A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, + 2011 + + Examples + -------- + >>> from sklearn.datasets import load_digits + >>> from sklearn.linear_model import SGDClassifier + >>> from sklearn.kernel_approximation import AdditiveChi2Sampler + >>> X, y = load_digits(return_X_y=True) + >>> chi2sampler = AdditiveChi2Sampler(sample_steps=2) + >>> X_transformed = chi2sampler.fit_transform(X, y) + >>> clf = SGDClassifier(max_iter=5, random_state=0, tol=1e-3) + >>> clf.fit(X_transformed, y) + SGDClassifier(max_iter=5, random_state=0) + >>> clf.score(X_transformed, y) + 0.9499... + """ + + _parameter_constraints: dict = { + "sample_steps": [Interval(Integral, 1, None, closed="left")], + "sample_interval": [Interval(Real, 0, None, closed="left"), None], + } + + def __init__(self, *, sample_steps=2, sample_interval=None): + self.sample_steps = sample_steps + self.sample_interval = sample_interval + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Only validates estimator's parameters. + + This method allows to: (i) validate the estimator's parameters and + (ii) be consistent with the scikit-learn transformer API. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the transformer. + """ + X = validate_data(self, X, accept_sparse="csr", ensure_non_negative=True) + + if self.sample_interval is None and self.sample_steps not in (1, 2, 3): + raise ValueError( + "If sample_steps is not in [1, 2, 3]," + " you need to provide sample_interval" + ) + + return self + + def transform(self, X): + """Apply approximate feature map to X.
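+ + Each nonzero entry ``x`` contributes ``sqrt(x * h)`` and, for each ``j`` in ``1, ..., sample_steps - 1``, the pair ``cos(j * h * log(x))`` and ``sin(j * h * log(x))`` scaled by ``sqrt(2 * x * h / cosh(pi * j * h))``, where ``h`` is the sampling interval.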
+ + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Returns + ------- + X_new : {ndarray, sparse matrix}, \ + shape = (n_samples, n_features * (2*sample_steps - 1)) + Whether the return value is an array or sparse matrix depends on + the type of the input X. + """ + X = validate_data( + self, X, accept_sparse="csr", reset=False, ensure_non_negative=True + ) + sparse = sp.issparse(X) + + if self.sample_interval is None: + # See figure 2 c) of "Efficient additive kernels via explicit feature maps" # noqa + # + # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, # noqa + # 2011 + if self.sample_steps == 1: + sample_interval = 0.8 + elif self.sample_steps == 2: + sample_interval = 0.5 + elif self.sample_steps == 3: + sample_interval = 0.4 + else: + raise ValueError( + "If sample_steps is not in [1, 2, 3]," + " you need to provide sample_interval" + ) + else: + sample_interval = self.sample_interval + + # zeroth component + # 1/cosh = sech + # cosh(0) = 1.0 + transf = self._transform_sparse if sparse else self._transform_dense + return transf(X, self.sample_steps, sample_interval) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Only used to validate feature names with the names seen in :meth:`fit`. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + # Note that passing attributes="n_features_in_" forces check_is_fitted + # to check if the attribute is present. Otherwise it will pass on this + # stateless estimator (requires_fit=False) + check_is_fitted(self, attributes="n_features_in_") + input_features = _check_feature_names_in( + self, input_features, generate_names=True + ) + est_name = self.__class__.__name__.lower() + + names_list = [f"{est_name}_{name}_sqrt" for name in input_features] + + for j in range(1, self.sample_steps): + cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features] + sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features] + names_list.extend(cos_names + sin_names) + + return np.asarray(names_list, dtype=object) + + @staticmethod + def _transform_dense(X, sample_steps, sample_interval): + non_zero = X != 0.0 + X_nz = X[non_zero] + + X_step = np.zeros_like(X) + X_step[non_zero] = np.sqrt(X_nz * sample_interval) + + X_new = [X_step] + + log_step_nz = sample_interval * np.log(X_nz) + step_nz = 2 * X_nz * sample_interval + + for j in range(1, sample_steps): + factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval)) + + X_step = np.zeros_like(X) + X_step[non_zero] = factor_nz * np.cos(j * log_step_nz) + X_new.append(X_step) + + X_step = np.zeros_like(X) + X_step[non_zero] = factor_nz * np.sin(j * log_step_nz) + X_new.append(X_step) + + return np.hstack(X_new) + + @staticmethod + def _transform_sparse(X, sample_steps, sample_interval): + indices = X.indices.copy() + indptr = X.indptr.copy() + + data_step = np.sqrt(X.data * sample_interval) + X_step = sp.csr_matrix( + (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False + ) + X_new = [X_step] + + log_step_nz = sample_interval * np.log(X.data) + step_nz = 2 * X.data * sample_interval + + for j in range(1, sample_steps): + factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval)) + + data_step = 
factor_nz * np.cos(j * log_step_nz) + X_step = sp.csr_matrix( + (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False + ) + X_new.append(X_step) + + data_step = factor_nz * np.sin(j * log_step_nz) + X_step = sp.csr_matrix( + (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False + ) + X_new.append(X_step) + + return sp.hstack(X_new) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.requires_fit = False + tags.input_tags.positive_only = True + tags.input_tags.sparse = True + return tags + + +class Nystroem(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): + """Approximate a kernel map using a subset of the training data. + + Constructs an approximate feature map for an arbitrary kernel + using a subset of the data as basis. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + kernel : str or callable, default='rbf' + Kernel map to be approximated. A callable should accept two arguments + and the keyword arguments passed to this object as `kernel_params`, and + should return a floating point number. + + gamma : float, default=None + Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 + and sigmoid kernels. Interpretation of the default value is left to + the kernel; see the documentation for sklearn.metrics.pairwise. + Ignored by other kernels. + + coef0 : float, default=None + Zero coefficient for polynomial and sigmoid kernels. + Ignored by other kernels. + + degree : float, default=None + Degree of the polynomial kernel. Ignored by other kernels. + + kernel_params : dict, default=None + Additional parameters (keyword arguments) for kernel function passed + as callable object. + + n_components : int, default=100 + Number of features to construct. + How many data points will be used to construct the mapping. + + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the uniform sampling without + replacement of `n_components` of the training data to construct the + basis kernel. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_jobs : int, default=None + The number of jobs to use for the computation. This works by breaking + down the kernel matrix into `n_jobs` even slices and computing them in + parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionadded:: 0.24 + + Attributes + ---------- + components_ : ndarray of shape (n_components, n_features) + Subset of training points used to construct the feature map. + + component_indices_ : ndarray of shape (n_components) + Indices of ``components_`` in the training set. + + normalization_ : ndarray of shape (n_components, n_components) + Normalization matrix needed for embedding. + Square root of the kernel matrix on ``components_``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. + PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch. + RBFSampler : Approximate a RBF kernel feature map using random Fourier + features. 
+ SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel. + sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. + + References + ---------- + * Williams, C.K.I. and Seeger, M. + "Using the Nystroem method to speed up kernel machines", + Advances in neural information processing systems 2001 + + * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou + "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical + Comparison", + Advances in Neural Information Processing Systems 2012 + + Examples + -------- + >>> from sklearn import datasets, svm + >>> from sklearn.kernel_approximation import Nystroem + >>> X, y = datasets.load_digits(n_class=9, return_X_y=True) + >>> data = X / 16. + >>> clf = svm.LinearSVC() + >>> feature_map_nystroem = Nystroem(gamma=.2, + ... random_state=1, + ... n_components=300) + >>> data_transformed = feature_map_nystroem.fit_transform(data) + >>> clf.fit(data_transformed, y) + LinearSVC() + >>> clf.score(data_transformed, y) + 0.9987... + """ + + _parameter_constraints: dict = { + "kernel": [ + StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}), + callable, + ], + "gamma": [Interval(Real, 0, None, closed="left"), None], + "coef0": [Interval(Real, None, None, closed="neither"), None], + "degree": [Interval(Real, 1, None, closed="left"), None], + "kernel_params": [dict, None], + "n_components": [Interval(Integral, 1, None, closed="left")], + "random_state": ["random_state"], + "n_jobs": [Integral, None], + } + + def __init__( + self, + kernel="rbf", + *, + gamma=None, + coef0=None, + degree=None, + kernel_params=None, + n_components=100, + random_state=None, + n_jobs=None, + ): + self.kernel = kernel + self.gamma = gamma + self.coef0 = coef0 + self.degree = degree + self.kernel_params = kernel_params + self.n_components = n_components + self.random_state = random_state + self.n_jobs = n_jobs + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit estimator to data. + + Samples a subset of training points, computes kernel + on these and computes normalization matrix. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ + default=None + Target values (None for unsupervised transformations). + + Returns + ------- + self : object + Returns the instance itself. + """ + X = validate_data(self, X, accept_sparse="csr") + rnd = check_random_state(self.random_state) + n_samples = X.shape[0] + + # get basis vectors + if self.n_components > n_samples: + # XXX should we just bail? + n_components = n_samples + warnings.warn( + "n_components > n_samples. This is not possible.\n" + "n_components was set to n_samples, which results" + " in inefficient evaluation of the full kernel." 
+ ) + + else: + n_components = self.n_components + n_components = min(n_samples, n_components) + inds = rnd.permutation(n_samples) + basis_inds = inds[:n_components] + basis = X[basis_inds] + + basis_kernel = pairwise_kernels( + basis, + metric=self.kernel, + filter_params=True, + n_jobs=self.n_jobs, + **self._get_kernel_params(), + ) + + # sqrt of kernel matrix on basis vectors + U, S, V = svd(basis_kernel) + S = np.maximum(S, 1e-12) + self.normalization_ = np.dot(U / np.sqrt(S), V) + self.components_ = basis + self.component_indices_ = basis_inds + self._n_features_out = n_components + return self + + def transform(self, X): + """Apply feature map to X. + + Computes an approximate feature map using the kernel + between some training points and X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to transform. + + Returns + ------- + X_transformed : ndarray of shape (n_samples, n_components) + Transformed data. + """ + check_is_fitted(self) + X = validate_data(self, X, accept_sparse="csr", reset=False) + + kernel_params = self._get_kernel_params() + embedded = pairwise_kernels( + X, + self.components_, + metric=self.kernel, + filter_params=True, + n_jobs=self.n_jobs, + **kernel_params, + ) + return np.dot(embedded, self.normalization_.T) + + def _get_kernel_params(self): + params = self.kernel_params + if params is None: + params = {} + if not callable(self.kernel) and self.kernel != "precomputed": + for param in KERNEL_PARAMS[self.kernel]: + if getattr(self, param) is not None: + params[param] = getattr(self, param) + else: + if ( + self.gamma is not None + or self.coef0 is not None + or self.degree is not None + ): + raise ValueError( + "Don't pass gamma, coef0 or degree to " + "Nystroem if using a callable " + "or precomputed kernel" + ) + + return params + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + tags.transformer_tags.preserves_dtype = ["float64", "float32"] + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/kernel_ridge.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/kernel_ridge.py new file mode 100644 index 0000000000000000000000000000000000000000..29e744647acc97d3ff7493f2cfc9af4f07ce1bdc --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/kernel_ridge.py @@ -0,0 +1,240 @@ +"""Kernel ridge regression.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from numbers import Real + +import numpy as np + +from .base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context +from .linear_model._ridge import _solve_cholesky_kernel +from .metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels +from .utils._param_validation import Interval, StrOptions +from .utils.validation import _check_sample_weight, check_is_fitted, validate_data + + +class KernelRidge(MultiOutputMixin, RegressorMixin, BaseEstimator): + """Kernel ridge regression. + + Kernel ridge regression (KRR) combines ridge regression (linear least + squares with l2-norm regularization) with the kernel trick. It thus + learns a linear function in the space induced by the respective kernel and + the data. For non-linear kernels, this corresponds to a non-linear + function in the original space. + + The form of the model learned by KRR is identical to support vector + regression (SVR). 
However, different loss functions are used: KRR uses + squared error loss while support vector regression uses epsilon-insensitive + loss, both combined with l2 regularization. In contrast to SVR, fitting a + KRR model can be done in closed-form and is typically faster for + medium-sized datasets. On the other hand, the learned model is non-sparse + and thus slower than SVR, which learns a sparse model for epsilon > 0, at + prediction-time. + + This estimator has built-in support for multi-variate regression + (i.e., when y is a 2d-array of shape [n_samples, n_targets]). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float or array-like of shape (n_targets,), default=1.0 + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. + Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are + assumed to be specific to the targets. Hence they must correspond in + number. See :ref:`ridge_regression` for formula. + + kernel : str or callable, default="linear" + Kernel mapping used internally. This parameter is directly passed to + :class:`~sklearn.metrics.pairwise.pairwise_kernels`. + If `kernel` is a string, it must be one of the metrics + in `pairwise.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed". + If `kernel` is "precomputed", X is assumed to be a kernel matrix. + Alternatively, if `kernel` is a callable function, it is called on + each pair of instances (rows) and the resulting value recorded. The + callable should take two rows from X as input and return the + corresponding kernel value as a single number. This means that + callables from :mod:`sklearn.metrics.pairwise` are not allowed, as + they operate on matrices, not single samples. Use the string + identifying the kernel instead. + + gamma : float, default=None + Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 + and sigmoid kernels. Interpretation of the default value is left to + the kernel; see the documentation for sklearn.metrics.pairwise. + Ignored by other kernels. + + degree : float, default=3 + Degree of the polynomial kernel. Ignored by other kernels. + + coef0 : float, default=1 + Zero coefficient for polynomial and sigmoid kernels. + Ignored by other kernels. + + kernel_params : dict, default=None + Additional parameters (keyword arguments) for kernel function passed + as callable object. + + Attributes + ---------- + dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets) + Representation of weight vector(s) in kernel space + + X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data, which is also required for prediction. If + kernel == "precomputed" this is instead the precomputed + training matrix, of shape (n_samples, n_samples). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.gaussian_process.GaussianProcessRegressor : Gaussian + Process regressor providing automatic kernel hyperparameters + tuning and predictions uncertainty. + sklearn.linear_model.Ridge : Linear ridge regression. 
+ sklearn.linear_model.RidgeCV : Ridge regression with built-in + cross-validation. + sklearn.svm.SVR : Support Vector Regression accepting a large variety + of kernels. + + References + ---------- + * Kevin P. Murphy + "Machine Learning: A Probabilistic Perspective", The MIT Press + chapter 14.4.3, pp. 492-493 + + Examples + -------- + >>> from sklearn.kernel_ridge import KernelRidge + >>> import numpy as np + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> krr = KernelRidge(alpha=1.0) + >>> krr.fit(X, y) + KernelRidge(alpha=1.0) + """ + + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], + "kernel": [ + StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}), + callable, + ], + "gamma": [Interval(Real, 0, None, closed="left"), None], + "degree": [Interval(Real, 0, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "kernel_params": [dict, None], + } + + def __init__( + self, + alpha=1, + *, + kernel="linear", + gamma=None, + degree=3, + coef0=1, + kernel_params=None, + ): + self.alpha = alpha + self.kernel = kernel + self.gamma = gamma + self.degree = degree + self.coef0 = coef0 + self.kernel_params = kernel_params + + def _get_kernel(self, X, Y=None): + if callable(self.kernel): + params = self.kernel_params or {} + else: + params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0} + return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + tags.input_tags.pairwise = self.kernel == "precomputed" + return tags + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Kernel Ridge regression model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. If kernel == "precomputed" this is instead + a precomputed kernel matrix, of shape (n_samples, n_samples). + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : float or array-like of shape (n_samples,), default=None + Individual weights for each sample, ignored if None is passed. + + Returns + ------- + self : object + Returns the instance itself. + """ + # Convert data + X, y = validate_data( + self, X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True + ) + if sample_weight is not None and not isinstance(sample_weight, float): + sample_weight = _check_sample_weight(sample_weight, X) + + K = self._get_kernel(X) + alpha = np.atleast_1d(self.alpha) + + ravel = False + if len(y.shape) == 1: + y = y.reshape(-1, 1) + ravel = True + + copy = self.kernel == "precomputed" + self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy) + if ravel: + self.dual_coef_ = self.dual_coef_.ravel() + + self.X_fit_ = X + + return self + + def predict(self, X): + """Predict using the kernel ridge model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. If kernel == "precomputed" this is instead a + precomputed kernel matrix, shape = [n_samples, + n_samples_fitted], where n_samples_fitted is the number of + samples used in the fitting for this estimator. + + Returns + ------- + C : ndarray of shape (n_samples,) or (n_samples, n_targets) + Returns predicted values. 
+ """ + check_is_fitted(self) + X = validate_data(self, X, accept_sparse=("csr", "csc"), reset=False) + K = self._get_kernel(X, self.X_fit_) + return np.dot(K, self.dual_coef_) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/meson.build b/evalkit_tf437/lib/python3.10/site-packages/sklearn/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..eaf1b98e60cc28b5e3b98583745200b4975d61de --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/meson.build @@ -0,0 +1,241 @@ +fs = import('fs') + +cython_args = [] + +# Platform detection +is_windows = host_machine.system() == 'windows' +is_mingw = is_windows and cc.get_id() == 'gcc' + +# Adapted from Scipy. mingw is untested and not officially supported. If you +# ever bump into issues when trying to compile for mingw, please open an issue +# in the scikit-learn issue tracker +if is_mingw + # For mingw-w64, link statically against the UCRT. + gcc_link_args = ['-lucrt', '-static'] + add_project_link_arguments(gcc_link_args, language: ['c', 'cpp']) + # Force gcc to float64 long doubles for compatibility with MSVC + # builds, for C only. + add_project_arguments('-mlong-double-64', language: 'c') +endif + +# Only check build dependencies version when not cross-compiling, as running +# Python interpreter can be tricky in cross-compilation settings. For more +# details, see https://docs.scipy.org/doc/scipy/building/cross_compilation.html +if not meson.is_cross_build() + if not py.version().version_compare('>=3.9') + error('scikit-learn requires Python>=3.9, got ' + py.version() + ' instead') + endif + + cython_min_version = run_command(py, ['_min_dependencies.py', 'cython'], check: true).stdout().strip() + if not cython.version().version_compare('>=' + cython_min_version) + error('scikit-learn requires Cython>=' + cython_min_version + ', got ' + cython.version() + ' instead') + endif + + numpy_version = run_command(py, + ['-c', 'import numpy; print(numpy.__version__)'], check: true).stdout().strip() + numpy_min_version = run_command(py, ['_min_dependencies.py', 'numpy'], check: true).stdout().strip() + if not numpy_version.version_compare('>=' + numpy_min_version) + error('scikit-learn requires numpy>=' + numpy_min_version + ', got ' + numpy_version + ' instead') + endif + + scipy_version = run_command(py, + ['-c', 'import scipy; print(scipy.__version__)'], check: true).stdout().strip() + scipy_min_version = run_command(py, ['_min_dependencies.py', 'scipy'], check: true).stdout().strip() + if not scipy_version.version_compare('>=' + scipy_min_version) + error('scikit-learn requires scipy>=' + scipy_min_version + ', got ' + scipy_version + ' instead') + endif + + # meson-python is required only when going through pip. Using meson directly + # should not check meson-python version. 
+ meson_python_version_command_result = run_command(py, + ['-c', 'import importlib.metadata; print(importlib.metadata.version("meson-python"))'], check: false) + meson_python_installed = meson_python_version_command_result.returncode() == 0 + if meson_python_installed + meson_python_version = meson_python_version_command_result.stdout().strip() + meson_python_min_version = run_command(py, ['_min_dependencies.py', 'meson-python'], check: true).stdout().strip() + if not meson_python_version.version_compare('>=' + meson_python_min_version) + error('scikit-learn requires meson-python>=' + meson_python_min_version + ', got ' + meson_python_version + ' instead') + endif + endif + +endif + +# Adapted from scipy, each project seems to have its own tweaks for this. One +# day using dependency('numpy') will be a thing, see +# https://github.com/mesonbuild/meson/issues/9598. +# NumPy include directory - needed in all submodules +# Relative paths are needed when for example a virtualenv is +# placed inside the source tree; Meson rejects absolute paths to places inside +# the source tree. The try-except is needed because when things are split +# across drives on Windows, there is no relative path and an exception gets +# raised. There may be other such cases, so add a catch-all and switch to +# an absolute path. +# For cross-compilation it is often not possible to run the Python interpreter +# in order to retrieve numpy's include directory. It can be specified in the +# cross file instead: +# [properties] +# numpy-include-dir = /abspath/to/host-pythons/site-packages/numpy/core/include +# +# This uses the path as is, and avoids running the interpreter. +incdir_numpy = meson.get_external_property('numpy-include-dir', 'not-given') +if incdir_numpy == 'not-given' + incdir_numpy = run_command(py, + [ + '-c', + ''' +import os +import numpy as np +try: + incdir = os.path.relpath(np.get_include()) +except Exception: + incdir = np.get_include() +print(incdir) +''' + ], + check: true + ).stdout().strip() +endif + +inc_np = include_directories(incdir_numpy) +# Don't use the deprecated NumPy C API. Define this to a fixed version instead of +# NPY_API_VERSION in order not to break compilation for released SciPy versions +# when NumPy introduces a new deprecation. +numpy_no_deprecated_api = ['-DNPY_NO_DEPRECATED_API=NPY_1_9_API_VERSION'] +np_dep = declare_dependency(include_directories: inc_np, compile_args: numpy_no_deprecated_api) + +openmp_dep = dependency('OpenMP', language: 'c', required: false) + +if not openmp_dep.found() + warn_about_missing_openmp = true + # On Apple Clang avoid a misleading warning if compiler variables are set. + # See https://github.com/scikit-learn/scikit-learn/issues/28710 for more + # details. This may be removed if the OpenMP detection on Apple Clang improves, + # see https://github.com/mesonbuild/meson/issues/7435#issuecomment-2047585466. + if host_machine.system() == 'darwin' and cc.get_id() == 'clang' + compiler_env_vars_with_openmp = run_command(py, + [ + '-c', + ''' +import os + +compiler_env_vars_to_check = ["CPPFLAGS", "CFLAGS", "CXXFLAGS"] + +compiler_env_vars_with_openmp = [ + var for var in compiler_env_vars_to_check if "-fopenmp" in os.getenv(var, "")] +print(compiler_env_vars_with_openmp) +'''], check: true).stdout().strip() + warn_about_missing_openmp = compiler_env_vars_with_openmp == '[]' + endif + if warn_about_missing_openmp + warning( +''' + *********** + * WARNING * + *********** + +It seems that scikit-learn cannot be built with OpenMP. 
+ +- Make sure you have followed the installation instructions: + + https://scikit-learn.org/dev/developers/advanced_installation.html + +- If your compiler supports OpenMP but you still see this + message, please submit a bug report at: + + https://github.com/scikit-learn/scikit-learn/issues + +- The build will continue with OpenMP-based parallelism + disabled. Note however that some estimators will run in + sequential mode instead of leveraging thread-based + parallelism. + + *** +''') + else + warning( +'''It looks like compiler environment variables were set to enable OpenMP support. +Check the output of "import sklearn; sklearn.show_versions()" after the build +to make sure that scikit-learn was actually built with OpenMP support. +''') + endif +endif + +# For now, we keep supporting SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES variable +# (see how it is done in sklearn/_build_utils/__init__.py when building with +# setuptools). Accessing environment variables in meson.build is discouraged, +# so once we drop setuptools this functionality should be behind a meson option +# or buildtype +boundscheck = run_command(py, + [ + '-c', + ''' +import os + +if os.environ.get("SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES", "0") != "0": + print(True) +else: + print(False) + ''' + ], + check: true + ).stdout().strip() + +scikit_learn_cython_args = [ + '-X language_level=3', '-X boundscheck=' + boundscheck, '-X wraparound=False', + '-X initializedcheck=False', '-X nonecheck=False', '-X cdivision=True', + '-X profile=False', + # Needed for cython imports across subpackages, e.g. cluster pyx that + # cimports metrics pxd + '--include-dir', meson.global_build_root(), +] +cython_args += scikit_learn_cython_args + +# Write file in Meson build dir to be able to figure out from Python code +# whether scikit-learn was built with Meson. Adapted from pandas +# _version_meson.py. +custom_target('write_built_with_meson_file', + output: '_built_with_meson.py', + command: [ + py, '-c', 'with open("sklearn/_built_with_meson.py", "w") as f: f.write("")' + ], + install: true, + install_dir: py.get_install_dir() / 'sklearn' +) + +extensions = ['_isotonic'] + +py.extension_module( + '_isotonic', + '_isotonic.pyx', + cython_args: cython_args, + install: true, + subdir: 'sklearn', +) + +# Need for Cython cimports across subpackages to work, i.e. avoid errors like +# relative cimport from non-package directory is not allowed +sklearn_root_cython_tree = [ + fs.copyfile('__init__.py') +] + +sklearn_dir = py.get_install_dir() / 'sklearn' + +# Subpackages are mostly in alphabetical order except to handle Cython +# dependencies across subpackages +subdir('__check_build') +subdir('_loss') +# utils needs to be early since plenty of other modules cimports utils .pxd +subdir('utils') +# metrics needs to be to be before cluster since cluster cimports metrics .pxd +subdir('metrics') +subdir('cluster') +subdir('datasets') +subdir('decomposition') +subdir('ensemble') +subdir('feature_extraction') +subdir('linear_model') +subdir('manifold') +subdir('neighbors') +subdir('preprocessing') +subdir('svm') +subdir('tree') diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/multiclass.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/multiclass.py new file mode 100644 index 0000000000000000000000000000000000000000..1ddb36ca4fa8fa2842026cd5a3227d9d6d1f41ad --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/multiclass.py @@ -0,0 +1,1285 @@ +"""Multiclass learning algorithms. 
+ +- one-vs-the-rest / one-vs-all +- one-vs-one +- error correcting output codes + +The estimators provided in this module are meta-estimators: they require a base +estimator to be provided in their constructor. For example, it is possible to +use these estimators to turn a binary classifier or a regressor into a +multiclass classifier. It is also possible to use these estimators with +multiclass estimators in the hope that their accuracy or runtime performance +improves. + +All classifiers in scikit-learn implement multiclass classification; you +only need to use this module if you want to experiment with custom multiclass +strategies. + +The one-vs-the-rest meta-classifier also implements a `predict_proba` method, +so long as such a method is implemented by the base classifier. This method +returns probabilities of class membership in both the single label and +multilabel case. Note that in the multilabel case, probabilities are the +marginal probability that a given sample falls in the given class. As such, in +the multilabel case the sum of these probabilities over all possible labels +for a given sample *will not* sum to unity, as they do in the single label +case. +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import array +import itertools +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp + +from .base import ( + BaseEstimator, + ClassifierMixin, + MetaEstimatorMixin, + MultiOutputMixin, + _fit_context, + clone, + is_classifier, + is_regressor, +) +from .metrics.pairwise import pairwise_distances_argmin +from .preprocessing import LabelBinarizer +from .utils import check_random_state +from .utils._param_validation import HasMethods, Interval +from .utils._tags import get_tags +from .utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + process_routing, +) +from .utils.metaestimators import _safe_split, available_if +from .utils.multiclass import ( + _check_partial_fit_first_call, + _ovr_decision_function, + check_classification_targets, +) +from .utils.parallel import Parallel, delayed +from .utils.validation import ( + _check_method_params, + _num_samples, + check_is_fitted, + validate_data, +) + +__all__ = [ + "OneVsRestClassifier", + "OneVsOneClassifier", + "OutputCodeClassifier", +] + + +def _fit_binary(estimator, X, y, fit_params, classes=None): + """Fit a single binary estimator.""" + unique_y = np.unique(y) + if len(unique_y) == 1: + if classes is not None: + if y[0] == -1: + c = 0 + else: + c = y[0] + warnings.warn( + "Label %s is present in all training examples." 
+                % str(classes[c])
+            )
+        estimator = _ConstantPredictor().fit(X, unique_y)
+    else:
+        estimator = clone(estimator)
+        estimator.fit(X, y, **fit_params)
+    return estimator
+
+
+def _partial_fit_binary(estimator, X, y, partial_fit_params):
+    """Partially fit a single binary estimator."""
+    estimator.partial_fit(X, y, classes=np.array((0, 1)), **partial_fit_params)
+    return estimator
+
+
+def _predict_binary(estimator, X):
+    """Make predictions using a single binary estimator."""
+    if is_regressor(estimator):
+        return estimator.predict(X)
+    try:
+        score = np.ravel(estimator.decision_function(X))
+    except (AttributeError, NotImplementedError):
+        # probabilities of the positive class
+        score = estimator.predict_proba(X)[:, 1]
+    return score
+
+
+def _threshold_for_binary_predict(estimator):
+    """Threshold for predictions from binary estimator."""
+    if hasattr(estimator, "decision_function") and is_classifier(estimator):
+        return 0.0
+    else:
+        # predict_proba threshold
+        return 0.5
+
+
+class _ConstantPredictor(BaseEstimator):
+    """Helper predictor to be used when only one class is present."""
+
+    def fit(self, X, y):
+        check_params = dict(
+            ensure_all_finite=False, dtype=None, ensure_2d=False, accept_sparse=True
+        )
+        validate_data(
+            self, X, y, reset=True, validate_separately=(check_params, check_params)
+        )
+        self.y_ = y
+        return self
+
+    def predict(self, X):
+        check_is_fitted(self)
+        validate_data(
+            self,
+            X,
+            ensure_all_finite=False,
+            dtype=None,
+            accept_sparse=True,
+            ensure_2d=False,
+            reset=False,
+        )
+
+        return np.repeat(self.y_, _num_samples(X))
+
+    def decision_function(self, X):
+        check_is_fitted(self)
+        validate_data(
+            self,
+            X,
+            ensure_all_finite=False,
+            dtype=None,
+            accept_sparse=True,
+            ensure_2d=False,
+            reset=False,
+        )
+
+        return np.repeat(self.y_, _num_samples(X))
+
+    def predict_proba(self, X):
+        check_is_fitted(self)
+        validate_data(
+            self,
+            X,
+            ensure_all_finite=False,
+            dtype=None,
+            accept_sparse=True,
+            ensure_2d=False,
+            reset=False,
+        )
+        y_ = self.y_.astype(np.float64)
+        return np.repeat([np.hstack([1 - y_, y_])], _num_samples(X), axis=0)
+
+
+def _estimators_has(attr):
+    """Check if self.estimator or self.estimators_[0] has attr.
+
+    If `self.estimators_[0]` has the attr, then it's safe to assume that other
+    estimators have it too. We raise the original `AttributeError` if `attr`
+    does not exist. This function is used together with `available_if`.
+    """
+
+    def check(self):
+        if hasattr(self, "estimators_"):
+            getattr(self.estimators_[0], attr)
+        else:
+            getattr(self.estimator, attr)
+
+        return True
+
+    return check
+
+
+class OneVsRestClassifier(
+    MultiOutputMixin,
+    ClassifierMixin,
+    MetaEstimatorMixin,
+    BaseEstimator,
+):
+    """One-vs-the-rest (OvR) multiclass strategy.
+
+    Also known as one-vs-all, this strategy consists in fitting one classifier
+    per class. For each classifier, the class is fitted against all the other
+    classes. In addition to its computational efficiency (only `n_classes`
+    classifiers are needed), one advantage of this approach is its
+    interpretability. Since each class is represented by one and only one
+    classifier, it is possible to gain knowledge about the class by inspecting
+    its corresponding classifier. This is the most commonly used strategy for
+    multiclass classification and is a fair default choice.
+
+    OneVsRestClassifier can also be used for multilabel classification. To use
+    this feature, provide an indicator matrix for the target `y` when calling
+    `.fit`.
In other words, the target labels should be formatted as a 2D + binary (0/1) matrix, where [i, j] == 1 indicates the presence of label j + in sample i. This estimator uses the binary relevance method to perform + multilabel classification, which involves training one binary classifier + independently for each label. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + A regressor or a classifier that implements :term:`fit`. + When a classifier is passed, :term:`decision_function` will be used + in priority and it will fallback to :term:`predict_proba` if it is not + available. + When a regressor is passed, :term:`predict` is used. + + n_jobs : int, default=None + The number of jobs to use for the computation: the `n_classes` + one-vs-rest problems are computed in parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: 0.20 + `n_jobs` default changed from 1 to None + + verbose : int, default=0 + The verbosity level, if non zero, progress messages are printed. + Below 50, the output is sent to stderr. Otherwise, the output is sent + to stdout. The frequency of the messages increases with the verbosity + level, reporting all iterations at 10. See :class:`joblib.Parallel` for + more details. + + .. versionadded:: 1.1 + + Attributes + ---------- + estimators_ : list of `n_classes` estimators + Estimators used for predictions. + + classes_ : array, shape = [`n_classes`] + Class labels. + + n_classes_ : int + Number of classes. + + label_binarizer_ : LabelBinarizer object + Object used to transform multiclass labels to binary labels and + vice-versa. + + multilabel_ : boolean + Whether a OneVsRestClassifier is a multilabel classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + OneVsOneClassifier : One-vs-one multiclass strategy. + OutputCodeClassifier : (Error-Correcting) Output-Code multiclass strategy. + sklearn.multioutput.MultiOutputClassifier : Alternate way of extending an + estimator for multilabel classification. + sklearn.preprocessing.MultiLabelBinarizer : Transform iterable of iterables + to binary indicator matrix. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.multiclass import OneVsRestClassifier + >>> from sklearn.svm import SVC + >>> X = np.array([ + ... [10, 10], + ... [8, 10], + ... [-5, 5.5], + ... [-5.4, 5.5], + ... [-20, -20], + ... [-15, -20] + ... ]) + >>> y = np.array([0, 0, 1, 1, 2, 2]) + >>> clf = OneVsRestClassifier(SVC()).fit(X, y) + >>> clf.predict([[-19, -20], [9, 9], [-5, 5]]) + array([2, 0, 1]) + """ + + _parameter_constraints = { + "estimator": [HasMethods(["fit"])], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + } + + def __init__(self, estimator, *, n_jobs=None, verbose=0): + self.estimator = estimator + self.n_jobs = n_jobs + self.verbose = verbose + + @_fit_context( + # OneVsRestClassifier.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit underlying estimators. 
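+
+        One binary problem is built per class, fitting that class against
+        all the others; the resulting `n_classes` estimators are trained in
+        parallel according to `n_jobs`.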
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Data.
+
+        y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes)
+            Multi-class targets. An indicator matrix turns on multilabel
+            classification.
+
+        **fit_params : dict
+            Parameters passed to the ``estimator.fit`` method of each
+            sub-estimator.
+
+            .. versionadded:: 1.4
+                Only available if `enable_metadata_routing=True`. See
+                :ref:`Metadata Routing User Guide ` for more
+                details.
+
+        Returns
+        -------
+        self : object
+            Instance of fitted estimator.
+        """
+        _raise_for_params(fit_params, self, "fit")
+
+        routed_params = process_routing(
+            self,
+            "fit",
+            **fit_params,
+        )
+        # A sparse LabelBinarizer, with sparse_output=True, has been shown to
+        # outperform or match a dense label binarizer in all cases and has also
+        # resulted in less or equal memory consumption in the fit_ovr function
+        # overall.
+        self.label_binarizer_ = LabelBinarizer(sparse_output=True)
+        Y = self.label_binarizer_.fit_transform(y)
+        Y = Y.tocsc()
+        self.classes_ = self.label_binarizer_.classes_
+        columns = (col.toarray().ravel() for col in Y.T)
+        # In cases where individual estimators are very fast to train, setting
+        # n_jobs > 1 can result in slower performance due to the overhead
+        # of spawning threads. See joblib issue #112.
+        self.estimators_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
+            delayed(_fit_binary)(
+                self.estimator,
+                X,
+                column,
+                fit_params=routed_params.estimator.fit,
+                classes=[
+                    "not %s" % self.label_binarizer_.classes_[i],
+                    self.label_binarizer_.classes_[i],
+                ],
+            )
+            for i, column in enumerate(columns)
+        )
+
+        if hasattr(self.estimators_[0], "n_features_in_"):
+            self.n_features_in_ = self.estimators_[0].n_features_in_
+        if hasattr(self.estimators_[0], "feature_names_in_"):
+            self.feature_names_in_ = self.estimators_[0].feature_names_in_
+
+        return self
+
+    @available_if(_estimators_has("partial_fit"))
+    @_fit_context(
+        # OneVsRestClassifier.estimator is not validated yet
+        prefer_skip_nested_validation=False
+    )
+    def partial_fit(self, X, y, classes=None, **partial_fit_params):
+        """Partially fit underlying estimators.
+
+        Should be used when memory is insufficient to train all the data at
+        once. Chunks of data can be passed in several iterations.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Data.
+
+        y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes)
+            Multi-class targets. An indicator matrix turns on multilabel
+            classification.
+
+        classes : array, shape (n_classes, )
+            Classes across all calls to partial_fit.
+            Can be obtained via `np.unique(y_all)`, where y_all is the
+            target vector of the entire dataset.
+            This argument is only required in the first call of partial_fit
+            and can be omitted in the subsequent calls.
+
+        **partial_fit_params : dict
+            Parameters passed to the ``estimator.partial_fit`` method of each
+            sub-estimator.
+
+            .. versionadded:: 1.4
+                Only available if `enable_metadata_routing=True`. See
+                :ref:`Metadata Routing User Guide ` for more
+                details.
+
+        Returns
+        -------
+        self : object
+            Instance of partially fitted estimator.
+ """ + _raise_for_params(partial_fit_params, self, "partial_fit") + + routed_params = process_routing( + self, + "partial_fit", + **partial_fit_params, + ) + + if _check_partial_fit_first_call(self, classes): + self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)] + + # A sparse LabelBinarizer, with sparse_output=True, has been + # shown to outperform or match a dense label binarizer in all + # cases and has also resulted in less or equal memory consumption + # in the fit_ovr function overall. + self.label_binarizer_ = LabelBinarizer(sparse_output=True) + self.label_binarizer_.fit(self.classes_) + + if len(np.setdiff1d(y, self.classes_)): + raise ValueError( + ( + "Mini-batch contains {0} while classes " + "must be subset of {1}" + ).format(np.unique(y), self.classes_) + ) + + Y = self.label_binarizer_.transform(y) + Y = Y.tocsc() + columns = (col.toarray().ravel() for col in Y.T) + + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_partial_fit_binary)( + estimator, + X, + column, + partial_fit_params=routed_params.estimator.partial_fit, + ) + for estimator, column in zip(self.estimators_, columns) + ) + + if hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + + return self + + def predict(self, X): + """Predict multi-class targets using underlying estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + Returns + ------- + y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) + Predicted multi-class targets. + """ + check_is_fitted(self) + + n_samples = _num_samples(X) + if self.label_binarizer_.y_type_ == "multiclass": + maxima = np.empty(n_samples, dtype=float) + maxima.fill(-np.inf) + argmaxima = np.zeros(n_samples, dtype=int) + for i, e in enumerate(self.estimators_): + pred = _predict_binary(e, X) + np.maximum(maxima, pred, out=maxima) + argmaxima[maxima == pred] = i + return self.classes_[argmaxima] + else: + thresh = _threshold_for_binary_predict(self.estimators_[0]) + indices = array.array("i") + indptr = array.array("i", [0]) + for e in self.estimators_: + indices.extend(np.where(_predict_binary(e, X) > thresh)[0]) + indptr.append(len(indices)) + data = np.ones(len(indices), dtype=int) + indicator = sp.csc_matrix( + (data, indices, indptr), shape=(n_samples, len(self.estimators_)) + ) + return self.label_binarizer_.inverse_transform(indicator) + + @available_if(_estimators_has("predict_proba")) + def predict_proba(self, X): + """Probability estimates. + + The returned estimates for all classes are ordered by label of classes. + + Note that in the multilabel case, each sample can have any number of + labels. This returns the marginal probability that the given sample has + the label in question. For example, it is entirely consistent that two + labels both have a 90% probability of applying to a given sample. + + In the single label multiclass case, the rows of the returned matrix + sum to 1. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + T : array-like of shape (n_samples, n_classes) + Returns the probability of the sample for each class in the model, + where classes are ordered as they are in `self.classes_`. + """ + check_is_fitted(self) + # Y[i, j] gives the probability that sample i has the label j. + # In the multi-label case, these are not disjoint. 
+ Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T + + if len(self.estimators_) == 1: + # Only one estimator, but we still want to return probabilities + # for two classes. + Y = np.concatenate(((1 - Y), Y), axis=1) + + if not self.multilabel_: + # Then, probabilities should be normalized to 1. + Y /= np.sum(Y, axis=1)[:, np.newaxis] + return Y + + @available_if(_estimators_has("decision_function")) + def decision_function(self, X): + """Decision function for the OneVsRestClassifier. + + Return the distance of each sample from the decision boundary for each + class. This can only be used with estimators which implement the + `decision_function` method. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + T : array-like of shape (n_samples, n_classes) or (n_samples,) for \ + binary classification. + Result of calling `decision_function` on the final estimator. + + .. versionchanged:: 0.19 + output shape changed to ``(n_samples,)`` to conform to + scikit-learn conventions for binary classification. + """ + check_is_fitted(self) + if len(self.estimators_) == 1: + return self.estimators_[0].decision_function(X) + return np.array( + [est.decision_function(X).ravel() for est in self.estimators_] + ).T + + @property + def multilabel_(self): + """Whether this is a multilabel classifier.""" + return self.label_binarizer_.y_type_.startswith("multilabel") + + @property + def n_classes_(self): + """Number of classes.""" + return len(self.classes_) + + def __sklearn_tags__(self): + """Indicate if wrapped estimator is using a precomputed Gram matrix""" + tags = super().__sklearn_tags__() + tags.input_tags.pairwise = get_tags(self.estimator).input_tags.pairwise + tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse + return tags + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
+ """ + + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(caller="fit", callee="fit") + .add(caller="partial_fit", callee="partial_fit"), + ) + ) + return router + + +def _fit_ovo_binary(estimator, X, y, i, j, fit_params): + """Fit a single binary estimator (one-vs-one).""" + cond = np.logical_or(y == i, y == j) + y = y[cond] + y_binary = np.empty(y.shape, int) + y_binary[y == i] = 0 + y_binary[y == j] = 1 + indcond = np.arange(_num_samples(X))[cond] + + fit_params_subset = _check_method_params(X, params=fit_params, indices=indcond) + return ( + _fit_binary( + estimator, + _safe_split(estimator, X, None, indices=indcond)[0], + y_binary, + fit_params=fit_params_subset, + classes=[i, j], + ), + indcond, + ) + + +def _partial_fit_ovo_binary(estimator, X, y, i, j, partial_fit_params): + """Partially fit a single binary estimator(one-vs-one).""" + + cond = np.logical_or(y == i, y == j) + y = y[cond] + if len(y) != 0: + y_binary = np.zeros_like(y) + y_binary[y == j] = 1 + partial_fit_params_subset = _check_method_params( + X, params=partial_fit_params, indices=cond + ) + return _partial_fit_binary( + estimator, X[cond], y_binary, partial_fit_params=partial_fit_params_subset + ) + return estimator + + +class OneVsOneClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): + """One-vs-one multiclass strategy. + + This strategy consists in fitting one classifier per class pair. + At prediction time, the class which received the most votes is selected. + Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers, + this method is usually slower than one-vs-the-rest, due to its + O(n_classes^2) complexity. However, this method may be advantageous for + algorithms such as kernel algorithms which don't scale well with + `n_samples`. This is because each individual learning problem only involves + a small subset of the data whereas, with one-vs-the-rest, the complete + dataset is used `n_classes` times. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + A regressor or a classifier that implements :term:`fit`. + When a classifier is passed, :term:`decision_function` will be used + in priority and it will fallback to :term:`predict_proba` if it is not + available. + When a regressor is passed, :term:`predict` is used. + + n_jobs : int, default=None + The number of jobs to use for the computation: the `n_classes * ( + n_classes - 1) / 2` OVO problems are computed in parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + estimators_ : list of ``n_classes * (n_classes - 1) / 2`` estimators + Estimators used for predictions. + + classes_ : numpy array of shape [n_classes] + Array containing labels. + + n_classes_ : int + Number of classes. + + pairwise_indices_ : list, length = ``len(estimators_)``, or ``None`` + Indices of samples used when training the estimators. + ``None`` when ``estimator``'s `pairwise` tag is False. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + OneVsRestClassifier : One-vs-all multiclass strategy. 
+    OutputCodeClassifier : (Error-Correcting) Output-Code multiclass strategy.
+
+    Examples
+    --------
+    >>> from sklearn.datasets import load_iris
+    >>> from sklearn.model_selection import train_test_split
+    >>> from sklearn.multiclass import OneVsOneClassifier
+    >>> from sklearn.svm import LinearSVC
+    >>> X, y = load_iris(return_X_y=True)
+    >>> X_train, X_test, y_train, y_test = train_test_split(
+    ...     X, y, test_size=0.33, shuffle=True, random_state=0)
+    >>> clf = OneVsOneClassifier(
+    ...     LinearSVC(random_state=0)).fit(X_train, y_train)
+    >>> clf.predict(X_test[:10])
+    array([2, 1, 0, 2, 0, 2, 0, 1, 1, 1])
+    """
+
+    _parameter_constraints: dict = {
+        "estimator": [HasMethods(["fit"])],
+        "n_jobs": [Integral, None],
+    }
+
+    def __init__(self, estimator, *, n_jobs=None):
+        self.estimator = estimator
+        self.n_jobs = n_jobs
+
+    @_fit_context(
+        # OneVsOneClassifier.estimator is not validated yet
+        prefer_skip_nested_validation=False
+    )
+    def fit(self, X, y, **fit_params):
+        """Fit underlying estimators.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Data.
+
+        y : array-like of shape (n_samples,)
+            Multi-class targets.
+
+        **fit_params : dict
+            Parameters passed to the ``estimator.fit`` method of each
+            sub-estimator.
+
+            .. versionadded:: 1.4
+                Only available if `enable_metadata_routing=True`. See
+                :ref:`Metadata Routing User Guide ` for more
+                details.
+
+        Returns
+        -------
+        self : object
+            The fitted underlying estimator.
+        """
+        _raise_for_params(fit_params, self, "fit")
+
+        routed_params = process_routing(
+            self,
+            "fit",
+            **fit_params,
+        )
+
+        # We need to validate the data because we do a safe_indexing later.
+        X, y = validate_data(
+            self, X, y, accept_sparse=["csr", "csc"], ensure_all_finite=False
+        )
+        check_classification_targets(y)
+
+        self.classes_ = np.unique(y)
+        if len(self.classes_) == 1:
+            raise ValueError(
+                "OneVsOneClassifier can not be fit when only one class is present."
+            )
+        n_classes = self.classes_.shape[0]
+        estimators_indices = list(
+            zip(
+                *(
+                    Parallel(n_jobs=self.n_jobs)(
+                        delayed(_fit_ovo_binary)(
+                            self.estimator,
+                            X,
+                            y,
+                            self.classes_[i],
+                            self.classes_[j],
+                            fit_params=routed_params.estimator.fit,
+                        )
+                        for i in range(n_classes)
+                        for j in range(i + 1, n_classes)
+                    )
+                )
+            )
+        )
+
+        self.estimators_ = estimators_indices[0]
+
+        pairwise = self.__sklearn_tags__().input_tags.pairwise
+        self.pairwise_indices_ = estimators_indices[1] if pairwise else None
+
+        return self
+
+    @available_if(_estimators_has("partial_fit"))
+    @_fit_context(
+        # OneVsOneClassifier.estimator is not validated yet
+        prefer_skip_nested_validation=False
+    )
+    def partial_fit(self, X, y, classes=None, **partial_fit_params):
+        """Partially fit underlying estimators.
+
+        Should be used when memory is insufficient to train all the data at
+        once. Chunks of data can be passed in several iterations, where the
+        first call should have an array of all target variables.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Data.
+
+        y : array-like of shape (n_samples,)
+            Multi-class targets.
+
+        classes : array, shape (n_classes, )
+            Classes across all calls to partial_fit.
+            Can be obtained via `np.unique(y_all)`, where y_all is the
+            target vector of the entire dataset.
+            This argument is only required in the first call of partial_fit
+            and can be omitted in the subsequent calls.
+
+        **partial_fit_params : dict
+            Parameters passed to the ``estimator.partial_fit`` method of each
+            sub-estimator.
+
+            ..
versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + self : object + The partially fitted underlying estimator. + """ + _raise_for_params(partial_fit_params, self, "partial_fit") + + routed_params = process_routing( + self, + "partial_fit", + **partial_fit_params, + ) + + first_call = _check_partial_fit_first_call(self, classes) + if first_call: + self.estimators_ = [ + clone(self.estimator) + for _ in range(self.n_classes_ * (self.n_classes_ - 1) // 2) + ] + + if len(np.setdiff1d(y, self.classes_)): + raise ValueError( + "Mini-batch contains {0} while it must be subset of {1}".format( + np.unique(y), self.classes_ + ) + ) + + X, y = validate_data( + self, + X, + y, + accept_sparse=["csr", "csc"], + ensure_all_finite=False, + reset=first_call, + ) + check_classification_targets(y) + combinations = itertools.combinations(range(self.n_classes_), 2) + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_partial_fit_ovo_binary)( + estimator, + X, + y, + self.classes_[i], + self.classes_[j], + partial_fit_params=routed_params.estimator.partial_fit, + ) + for estimator, (i, j) in zip(self.estimators_, (combinations)) + ) + + self.pairwise_indices_ = None + + if hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + + return self + + def predict(self, X): + """Estimate the best class label for each sample in X. + + This is implemented as ``argmax(decision_function(X), axis=1)`` which + will return the label of the class with most votes by estimators + predicting the outcome of a decision for each possible class pair. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + Returns + ------- + y : numpy array of shape [n_samples] + Predicted multi-class targets. + """ + Y = self.decision_function(X) + if self.n_classes_ == 2: + thresh = _threshold_for_binary_predict(self.estimators_[0]) + return self.classes_[(Y > thresh).astype(int)] + return self.classes_[Y.argmax(axis=1)] + + def decision_function(self, X): + """Decision function for the OneVsOneClassifier. + + The decision values for the samples are computed by adding the + normalized sum of pair-wise classification confidence levels to the + votes in order to disambiguate between the decision values when the + votes for all the classes are equal leading to a tie. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + Y : array-like of shape (n_samples, n_classes) or (n_samples,) + Result of calling `decision_function` on the final estimator. + + .. versionchanged:: 0.19 + output shape changed to ``(n_samples,)`` to conform to + scikit-learn conventions for binary classification. 
+ """ + check_is_fitted(self) + X = validate_data( + self, + X, + accept_sparse=True, + ensure_all_finite=False, + reset=False, + ) + + indices = self.pairwise_indices_ + if indices is None: + Xs = [X] * len(self.estimators_) + else: + Xs = [X[:, idx] for idx in indices] + + predictions = np.vstack( + [est.predict(Xi) for est, Xi in zip(self.estimators_, Xs)] + ).T + confidences = np.vstack( + [_predict_binary(est, Xi) for est, Xi in zip(self.estimators_, Xs)] + ).T + Y = _ovr_decision_function(predictions, confidences, len(self.classes_)) + if self.n_classes_ == 2: + return Y[:, 1] + return Y + + @property + def n_classes_(self): + """Number of classes.""" + return len(self.classes_) + + def __sklearn_tags__(self): + """Indicate if wrapped estimator is using a precomputed Gram matrix""" + tags = super().__sklearn_tags__() + tags.input_tags.pairwise = get_tags(self.estimator).input_tags.pairwise + tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse + return tags + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(caller="fit", callee="fit") + .add(caller="partial_fit", callee="partial_fit"), + ) + ) + return router + + +class OutputCodeClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): + """(Error-Correcting) Output-Code multiclass strategy. + + Output-code based strategies consist in representing each class with a + binary code (an array of 0s and 1s). At fitting time, one binary + classifier per bit in the code book is fitted. At prediction time, the + classifiers are used to project new points in the class space and the class + closest to the points is chosen. The main advantage of these strategies is + that the number of classifiers used can be controlled by the user, either + for compressing the model (0 < `code_size` < 1) or for making the model more + robust to errors (`code_size` > 1). See the documentation for more details. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + An estimator object implementing :term:`fit` and one of + :term:`decision_function` or :term:`predict_proba`. + + code_size : float, default=1.5 + Percentage of the number of classes to be used to create the code book. + A number between 0 and 1 will require fewer classifiers than + one-vs-the-rest. A number greater than 1 will require more classifiers + than one-vs-the-rest. + + random_state : int, RandomState instance, default=None + The generator used to initialize the codebook. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + n_jobs : int, default=None + The number of jobs to use for the computation: the multiclass problems + are computed in parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + estimators_ : list of `int(n_classes * code_size)` estimators + Estimators used for predictions. + + classes_ : ndarray of shape (n_classes,) + Array containing labels. 
+ + code_book_ : ndarray of shape (n_classes, `len(estimators_)`) + Binary array containing the code of each class. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + OneVsRestClassifier : One-vs-all multiclass strategy. + OneVsOneClassifier : One-vs-one multiclass strategy. + + References + ---------- + + .. [1] "Solving multiclass learning problems via error-correcting output + codes", + Dietterich T., Bakiri G., + Journal of Artificial Intelligence Research 2, + 1995. + + .. [2] "The error coding method and PICTs", + James G., Hastie T., + Journal of Computational and Graphical statistics 7, + 1998. + + .. [3] "The Elements of Statistical Learning", + Hastie T., Tibshirani R., Friedman J., page 606 (second-edition) + 2008. + + Examples + -------- + >>> from sklearn.multiclass import OutputCodeClassifier + >>> from sklearn.ensemble import RandomForestClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_samples=100, n_features=4, + ... n_informative=2, n_redundant=0, + ... random_state=0, shuffle=False) + >>> clf = OutputCodeClassifier( + ... estimator=RandomForestClassifier(random_state=0), + ... random_state=0).fit(X, y) + >>> clf.predict([[0, 0, 0, 0]]) + array([1]) + """ + + _parameter_constraints: dict = { + "estimator": [ + HasMethods(["fit", "decision_function"]), + HasMethods(["fit", "predict_proba"]), + ], + "code_size": [Interval(Real, 0.0, None, closed="neither")], + "random_state": ["random_state"], + "n_jobs": [Integral, None], + } + + def __init__(self, estimator, *, code_size=1.5, random_state=None, n_jobs=None): + self.estimator = estimator + self.code_size = code_size + self.random_state = random_state + self.n_jobs = n_jobs + + @_fit_context( + # OutputCodeClassifier.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit underlying estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + y : array-like of shape (n_samples,) + Multi-class targets. + + **fit_params : dict + Parameters passed to the ``estimator.fit`` method of each + sub-estimator. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + _raise_for_params(fit_params, self, "fit") + + routed_params = process_routing( + self, + "fit", + **fit_params, + ) + + y = validate_data(self, X="no_validation", y=y) + + random_state = check_random_state(self.random_state) + check_classification_targets(y) + + self.classes_ = np.unique(y) + n_classes = self.classes_.shape[0] + if n_classes == 0: + raise ValueError( + "OutputCodeClassifier can not be fit when no class is present." + ) + n_estimators = int(n_classes * self.code_size) + + # FIXME: there are more elaborate methods than generating the codebook + # randomly. 
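+        # Each class receives a random code word of length n_estimators: the
+        # uniform draws are thresholded at 0.5, and the "off" bits become -1
+        # when the base estimator exposes decision_function, 0 otherwise.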
+ self.code_book_ = random_state.uniform(size=(n_classes, n_estimators)) + self.code_book_[self.code_book_ > 0.5] = 1.0 + + if hasattr(self.estimator, "decision_function"): + self.code_book_[self.code_book_ != 1] = -1.0 + else: + self.code_book_[self.code_book_ != 1] = 0.0 + + classes_index = {c: i for i, c in enumerate(self.classes_)} + + Y = np.array( + [self.code_book_[classes_index[y[i]]] for i in range(_num_samples(y))], + dtype=int, + ) + + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_fit_binary)( + self.estimator, X, Y[:, i], fit_params=routed_params.estimator.fit + ) + for i in range(Y.shape[1]) + ) + + if hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + if hasattr(self.estimators_[0], "feature_names_in_"): + self.feature_names_in_ = self.estimators_[0].feature_names_in_ + + return self + + def predict(self, X): + """Predict multi-class targets using underlying estimators. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Data. + + Returns + ------- + y : ndarray of shape (n_samples,) + Predicted multi-class targets. + """ + check_is_fitted(self) + # ArgKmin only accepts C-contiguous array. The aggregated predictions need to be + # transposed. We therefore create a F-contiguous array to avoid a copy and have + # a C-contiguous array after the transpose operation. + Y = np.array( + [_predict_binary(e, X) for e in self.estimators_], + order="F", + dtype=np.float64, + ).T + pred = pairwise_distances_argmin(Y, self.code_book_, metric="euclidean") + return self.classes_[pred] + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + return router + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/multioutput.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/multioutput.py new file mode 100644 index 0000000000000000000000000000000000000000..38b6eb4a7e0ec6b3802348458c2e2b64960ce84f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/multioutput.py @@ -0,0 +1,1267 @@ +"""Multioutput regression and classification. + +The estimators provided in this module are meta-estimators: they require +a base estimator to be provided in their constructor. The meta-estimator +extends single output estimators to multioutput estimators. 
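+
+The meta-estimators implemented here are :class:`MultiOutputRegressor`,
+:class:`MultiOutputClassifier`, :class:`ClassifierChain` and
+:class:`RegressorChain`.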
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + + +from abc import ABCMeta, abstractmethod +from numbers import Integral + +import numpy as np +import scipy.sparse as sp + +from .base import ( + BaseEstimator, + ClassifierMixin, + MetaEstimatorMixin, + RegressorMixin, + _fit_context, + clone, + is_classifier, +) +from .model_selection import cross_val_predict +from .utils import Bunch, check_random_state, get_tags +from .utils._param_validation import HasMethods, StrOptions +from .utils._response import _get_response_values +from .utils._user_interface import _print_elapsed_time +from .utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from .utils.metaestimators import available_if +from .utils.multiclass import check_classification_targets +from .utils.parallel import Parallel, delayed +from .utils.validation import ( + _check_method_params, + _check_response_method, + check_is_fitted, + has_fit_parameter, + validate_data, +) + +__all__ = [ + "MultiOutputRegressor", + "MultiOutputClassifier", + "ClassifierChain", + "RegressorChain", +] + + +def _fit_estimator(estimator, X, y, sample_weight=None, **fit_params): + estimator = clone(estimator) + if sample_weight is not None: + estimator.fit(X, y, sample_weight=sample_weight, **fit_params) + else: + estimator.fit(X, y, **fit_params) + return estimator + + +def _partial_fit_estimator( + estimator, X, y, classes=None, partial_fit_params=None, first_time=True +): + partial_fit_params = {} if partial_fit_params is None else partial_fit_params + if first_time: + estimator = clone(estimator) + + if classes is not None: + estimator.partial_fit(X, y, classes=classes, **partial_fit_params) + else: + estimator.partial_fit(X, y, **partial_fit_params) + return estimator + + +def _available_if_estimator_has(attr): + """Return a function to check if the sub-estimator(s) has(have) `attr`. + + Helper for Chain implementations. + """ + + def _check(self): + if hasattr(self, "estimators_"): + return all(hasattr(est, attr) for est in self.estimators_) + + if hasattr(self.estimator, attr): + return True + + return False + + return available_if(_check) + + +class _MultiOutputEstimator(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta): + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit", "predict"])], + "n_jobs": [Integral, None], + } + + @abstractmethod + def __init__(self, estimator, *, n_jobs=None): + self.estimator = estimator + self.n_jobs = n_jobs + + @_available_if_estimator_has("partial_fit") + @_fit_context( + # MultiOutput*.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def partial_fit(self, X, y, classes=None, sample_weight=None, **partial_fit_params): + """Incrementally fit a separate model for each class output. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : {array-like, sparse matrix} of shape (n_samples, n_outputs) + Multi-output targets. + + classes : list of ndarray of shape (n_outputs,), default=None + Each array is unique classes for one output in str/int. + Can be obtained via + ``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where `y` + is the target matrix of the entire dataset. + This argument is required for the first call to partial_fit + and can be omitted in the subsequent calls. + Note that `y` doesn't need to contain all labels in `classes`. 
+ + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, then samples are equally weighted. + Only supported if the underlying regressor supports sample + weights. + + **partial_fit_params : dict of str -> object + Parameters passed to the ``estimator.partial_fit`` method of each + sub-estimator. + + Only available if `enable_metadata_routing=True`. See the + :ref:`User Guide `. + + .. versionadded:: 1.3 + + Returns + ------- + self : object + Returns a fitted instance. + """ + _raise_for_params(partial_fit_params, self, "partial_fit") + + first_time = not hasattr(self, "estimators_") + + y = validate_data(self, X="no_validation", y=y, multi_output=True) + + if y.ndim == 1: + raise ValueError( + "y must have at least two dimensions for " + "multi-output regression but has only one." + ) + + if _routing_enabled(): + if sample_weight is not None: + partial_fit_params["sample_weight"] = sample_weight + routed_params = process_routing( + self, + "partial_fit", + **partial_fit_params, + ) + else: + if sample_weight is not None and not has_fit_parameter( + self.estimator, "sample_weight" + ): + raise ValueError( + "Underlying estimator does not support sample weights." + ) + + if sample_weight is not None: + routed_params = Bunch( + estimator=Bunch(partial_fit=Bunch(sample_weight=sample_weight)) + ) + else: + routed_params = Bunch(estimator=Bunch(partial_fit=Bunch())) + + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_partial_fit_estimator)( + self.estimators_[i] if not first_time else self.estimator, + X, + y[:, i], + classes[i] if classes is not None else None, + partial_fit_params=routed_params.estimator.partial_fit, + first_time=first_time, + ) + for i in range(y.shape[1]) + ) + + if first_time and hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + if first_time and hasattr(self.estimators_[0], "feature_names_in_"): + self.feature_names_in_ = self.estimators_[0].feature_names_in_ + + return self + + @_fit_context( + # MultiOutput*.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None, **fit_params): + """Fit the model to data, separately for each output variable. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : {array-like, sparse matrix} of shape (n_samples, n_outputs) + Multi-output targets. An indicator matrix turns on multilabel + estimation. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, then samples are equally weighted. + Only supported if the underlying regressor supports sample + weights. + + **fit_params : dict of string -> object + Parameters passed to the ``estimator.fit`` method of each step. + + .. versionadded:: 0.23 + + Returns + ------- + self : object + Returns a fitted instance. + """ + if not hasattr(self.estimator, "fit"): + raise ValueError("The base estimator should implement a fit method") + + y = validate_data(self, X="no_validation", y=y, multi_output=True) + + if is_classifier(self): + check_classification_targets(y) + + if y.ndim == 1: + raise ValueError( + "y must have at least two dimensions for " + "multi-output regression but has only one." 
+ ) + + if _routing_enabled(): + if sample_weight is not None: + fit_params["sample_weight"] = sample_weight + routed_params = process_routing( + self, + "fit", + **fit_params, + ) + else: + if sample_weight is not None and not has_fit_parameter( + self.estimator, "sample_weight" + ): + raise ValueError( + "Underlying estimator does not support sample weights." + ) + + fit_params_validated = _check_method_params(X, params=fit_params) + routed_params = Bunch(estimator=Bunch(fit=fit_params_validated)) + if sample_weight is not None: + routed_params.estimator.fit["sample_weight"] = sample_weight + + self.estimators_ = Parallel(n_jobs=self.n_jobs)( + delayed(_fit_estimator)( + self.estimator, X, y[:, i], **routed_params.estimator.fit + ) + for i in range(y.shape[1]) + ) + + if hasattr(self.estimators_[0], "n_features_in_"): + self.n_features_in_ = self.estimators_[0].n_features_in_ + if hasattr(self.estimators_[0], "feature_names_in_"): + self.feature_names_in_ = self.estimators_[0].feature_names_in_ + + return self + + def predict(self, X): + """Predict multi-output variable using model for each target variable. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Returns + ------- + y : {array-like, sparse matrix} of shape (n_samples, n_outputs) + Multi-output targets predicted across multiple predictors. + Note: Separate models are generated for each predictor. + """ + check_is_fitted(self) + if not hasattr(self.estimators_[0], "predict"): + raise ValueError("The base estimator should implement a predict method") + + y = Parallel(n_jobs=self.n_jobs)( + delayed(e.predict)(X) for e in self.estimators_ + ) + + return np.asarray(y).T + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse + tags.target_tags.single_output = False + tags.target_tags.multi_output = True + return tags + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.3 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.estimator, + method_mapping=MethodMapping() + .add(caller="partial_fit", callee="partial_fit") + .add(caller="fit", callee="fit"), + ) + return router + + +class MultiOutputRegressor(RegressorMixin, _MultiOutputEstimator): + """Multi target regression. + + This strategy consists of fitting one regressor per target. This is a + simple strategy for extending regressors that do not natively support + multi-target regression. + + .. versionadded:: 0.18 + + Parameters + ---------- + estimator : estimator object + An estimator object implementing :term:`fit` and :term:`predict`. + + n_jobs : int or None, optional (default=None) + The number of jobs to run in parallel. + :meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported + by the passed estimator) will be parallelized for each target. + + When individual estimators are fast to train or predict, + using ``n_jobs > 1`` can result in slower performance due + to the parallelism overhead. + + ``None`` means `1` unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all available processes / threads. + See :term:`Glossary ` for more details. + + .. 
versionchanged:: 0.20 + `n_jobs` default changed from `1` to `None`. + + Attributes + ---------- + estimators_ : list of ``n_output`` estimators + Estimators used for predictions. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying `estimator` exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimators expose such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + RegressorChain : A multi-label model that arranges regressions into a + chain. + MultiOutputClassifier : Classifies each output independently rather than + chaining. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import load_linnerud + >>> from sklearn.multioutput import MultiOutputRegressor + >>> from sklearn.linear_model import Ridge + >>> X, y = load_linnerud(return_X_y=True) + >>> regr = MultiOutputRegressor(Ridge(random_state=123)).fit(X, y) + >>> regr.predict(X[[0]]) + array([[176..., 35..., 57...]]) + """ + + def __init__(self, estimator, *, n_jobs=None): + super().__init__(estimator, n_jobs=n_jobs) + + @_available_if_estimator_has("partial_fit") + def partial_fit(self, X, y, sample_weight=None, **partial_fit_params): + """Incrementally fit the model to data, for each output variable. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + y : {array-like, sparse matrix} of shape (n_samples, n_outputs) + Multi-output targets. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, then samples are equally weighted. + Only supported if the underlying regressor supports sample + weights. + + **partial_fit_params : dict of str -> object + Parameters passed to the ``estimator.partial_fit`` method of each + sub-estimator. + + Only available if `enable_metadata_routing=True`. See the + :ref:`User Guide `. + + .. versionadded:: 1.3 + + Returns + ------- + self : object + Returns a fitted instance. + """ + super().partial_fit(X, y, sample_weight=sample_weight, **partial_fit_params) + + +class MultiOutputClassifier(ClassifierMixin, _MultiOutputEstimator): + """Multi target classification. + + This strategy consists of fitting one classifier per target. This is a + simple strategy for extending classifiers that do not natively support + multi-target classification. + + Parameters + ---------- + estimator : estimator object + An estimator object implementing :term:`fit` and :term:`predict`. + A :term:`predict_proba` method will be exposed only if `estimator` implements + it. + + n_jobs : int or None, optional (default=None) + The number of jobs to run in parallel. + :meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported + by the passed estimator) will be parallelized for each target. + + When individual estimators are fast to train or predict, + using ``n_jobs > 1`` can result in slower performance due + to the parallelism overhead. + + ``None`` means `1` unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all available processes / threads. + See :term:`Glossary ` for more details. + + .. versionchanged:: 0.20 + `n_jobs` default changed from `1` to `None`. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes,) + Class labels. + + estimators_ : list of ``n_output`` estimators + Estimators used for predictions. 
+ + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying `estimator` exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimators expose such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + ClassifierChain : A multi-label model that arranges binary classifiers + into a chain. + MultiOutputRegressor : Fits one regressor per target variable. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_multilabel_classification + >>> from sklearn.multioutput import MultiOutputClassifier + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_multilabel_classification(n_classes=3, random_state=0) + >>> clf = MultiOutputClassifier(LogisticRegression()).fit(X, y) + >>> clf.predict(X[-2:]) + array([[1, 1, 1], + [1, 0, 1]]) + """ + + def __init__(self, estimator, *, n_jobs=None): + super().__init__(estimator, n_jobs=n_jobs) + + def fit(self, X, Y, sample_weight=None, **fit_params): + """Fit the model to data matrix X and targets Y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Y : array-like of shape (n_samples, n_classes) + The target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, then samples are equally weighted. + Only supported if the underlying classifier supports sample + weights. + + **fit_params : dict of string -> object + Parameters passed to the ``estimator.fit`` method of each step. + + .. versionadded:: 0.23 + + Returns + ------- + self : object + Returns a fitted instance. + """ + super().fit(X, Y, sample_weight=sample_weight, **fit_params) + self.classes_ = [estimator.classes_ for estimator in self.estimators_] + return self + + def _check_predict_proba(self): + if hasattr(self, "estimators_"): + # raise an AttributeError if `predict_proba` does not exist for + # each estimator + [getattr(est, "predict_proba") for est in self.estimators_] + return True + # raise an AttributeError if `predict_proba` does not exist for the + # unfitted estimator + getattr(self.estimator, "predict_proba") + return True + + @available_if(_check_predict_proba) + def predict_proba(self, X): + """Return prediction probabilities for each class of each output. + + This method will raise a ``ValueError`` if any of the + estimators do not have ``predict_proba``. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data. + + Returns + ------- + p : array of shape (n_samples, n_classes), or a list of n_outputs \ + such arrays if n_outputs > 1. + The class probabilities of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + + .. versionchanged:: 0.19 + This function now returns a list of arrays where the length of + the list is ``n_outputs``, and each array is (``n_samples``, + ``n_classes``) for that particular output. + """ + check_is_fitted(self) + results = [estimator.predict_proba(X) for estimator in self.estimators_] + return results + + def score(self, X, y): + """Return the mean accuracy on the given test data and labels. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples, n_outputs) + True values for X. 
+
+        Returns
+        -------
+        scores : float
+            Mean accuracy of predicted target versus true target.
+        """
+        check_is_fitted(self)
+        n_outputs_ = len(self.estimators_)
+        if y.ndim == 1:
+            raise ValueError(
+                "y must have at least two dimensions for "
+                "multi target classification but has only one"
+            )
+        if y.shape[1] != n_outputs_:
+            raise ValueError(
+                "The number of outputs of Y for fit {0} and"
+                " score {1} should be the same".format(n_outputs_, y.shape[1])
+            )
+        y_pred = self.predict(X)
+        return np.mean(np.all(y == y_pred, axis=1))
+
+    def __sklearn_tags__(self):
+        tags = super().__sklearn_tags__()
+        # FIXME
+        tags._skip_test = True
+        return tags
+
+
+def _available_if_base_estimator_has(attr):
+    """Return a function to check if `base_estimator` or `estimators_` has `attr`.
+
+    Helper for Chain implementations.
+    """
+
+    def _check(self):
+        return hasattr(self.base_estimator, attr) or all(
+            hasattr(est, attr) for est in self.estimators_
+        )
+
+    return available_if(_check)
+
+
+class _BaseChain(BaseEstimator, metaclass=ABCMeta):
+    _parameter_constraints: dict = {
+        "base_estimator": [HasMethods(["fit", "predict"])],
+        "order": ["array-like", StrOptions({"random"}), None],
+        "cv": ["cv_object", StrOptions({"prefit"})],
+        "random_state": ["random_state"],
+        "verbose": ["boolean"],
+    }
+
+    def __init__(
+        self, base_estimator, *, order=None, cv=None, random_state=None, verbose=False
+    ):
+        self.base_estimator = base_estimator
+        self.order = order
+        self.cv = cv
+        self.random_state = random_state
+        self.verbose = verbose
+
+    def _log_message(self, *, estimator_idx, n_estimators, processing_msg):
+        if not self.verbose:
+            return None
+        return f"({estimator_idx} of {n_estimators}) {processing_msg}"
+
+    def _get_predictions(self, X, *, output_method):
+        """Get predictions for each model in the chain."""
+        check_is_fitted(self)
+        X = validate_data(self, X, accept_sparse=True, reset=False)
+        Y_output_chain = np.zeros((X.shape[0], len(self.estimators_)))
+        Y_feature_chain = np.zeros((X.shape[0], len(self.estimators_)))
+
+        # `RegressorChain` does not have a `chain_method_` parameter so we
+        # default to "predict"
+        chain_method = getattr(self, "chain_method_", "predict")
+        hstack = sp.hstack if sp.issparse(X) else np.hstack
+        for chain_idx, estimator in enumerate(self.estimators_):
+            previous_predictions = Y_feature_chain[:, :chain_idx]
+            # if `X` is a scipy sparse dok_array, we convert it to a sparse
+            # coo_array format before hstacking, it's faster; see
+            # https://github.com/scipy/scipy/issues/20060#issuecomment-1937007039:
+            if sp.issparse(X) and not sp.isspmatrix(X) and X.format == "dok":
+                X = sp.coo_array(X)
+            X_aug = hstack((X, previous_predictions))
+
+            feature_predictions, _ = _get_response_values(
+                estimator,
+                X_aug,
+                response_method=chain_method,
+            )
+            Y_feature_chain[:, chain_idx] = feature_predictions
+
+            output_predictions, _ = _get_response_values(
+                estimator,
+                X_aug,
+                response_method=output_method,
+            )
+            Y_output_chain[:, chain_idx] = output_predictions
+
+        inv_order = np.empty_like(self.order_)
+        inv_order[self.order_] = np.arange(len(self.order_))
+        Y_output = Y_output_chain[:, inv_order]
+
+        return Y_output
+
+    @abstractmethod
+    def fit(self, X, Y, **fit_params):
+        """Fit the model to data matrix X and targets Y.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            The input data.
+
+        Y : array-like of shape (n_samples, n_classes)
+            The target values.
+ + **fit_params : dict of string -> object + Parameters passed to the `fit` method of each step. + + .. versionadded:: 0.23 + + Returns + ------- + self : object + Returns a fitted instance. + """ + X, Y = validate_data(self, X, Y, multi_output=True, accept_sparse=True) + + random_state = check_random_state(self.random_state) + self.order_ = self.order + if isinstance(self.order_, tuple): + self.order_ = np.array(self.order_) + + if self.order_ is None: + self.order_ = np.array(range(Y.shape[1])) + elif isinstance(self.order_, str): + if self.order_ == "random": + self.order_ = random_state.permutation(Y.shape[1]) + elif sorted(self.order_) != list(range(Y.shape[1])): + raise ValueError("invalid order") + + self.estimators_ = [clone(self.base_estimator) for _ in range(Y.shape[1])] + + if self.cv is None: + Y_pred_chain = Y[:, self.order_] + if sp.issparse(X): + X_aug = sp.hstack((X, Y_pred_chain), format="lil") + X_aug = X_aug.tocsr() + else: + X_aug = np.hstack((X, Y_pred_chain)) + + elif sp.issparse(X): + # TODO: remove this condition check when the minimum supported scipy version + # doesn't support sparse matrices anymore + if not sp.isspmatrix(X): + # if `X` is a scipy sparse dok_array, we convert it to a sparse + # coo_array format before hstacking, it's faster; see + # https://github.com/scipy/scipy/issues/20060#issuecomment-1937007039: + if X.format == "dok": + X = sp.coo_array(X) + # in case that `X` is a sparse array we create `Y_pred_chain` as a + # sparse array format: + Y_pred_chain = sp.coo_array((X.shape[0], Y.shape[1])) + else: + Y_pred_chain = sp.coo_matrix((X.shape[0], Y.shape[1])) + X_aug = sp.hstack((X, Y_pred_chain), format="lil") + + else: + Y_pred_chain = np.zeros((X.shape[0], Y.shape[1])) + X_aug = np.hstack((X, Y_pred_chain)) + + del Y_pred_chain + + if _routing_enabled(): + routed_params = process_routing(self, "fit", **fit_params) + else: + routed_params = Bunch(estimator=Bunch(fit=fit_params)) + + if hasattr(self, "chain_method"): + chain_method = _check_response_method( + self.base_estimator, + self.chain_method, + ).__name__ + self.chain_method_ = chain_method + else: + # `RegressorChain` does not have a `chain_method` parameter + chain_method = "predict" + + for chain_idx, estimator in enumerate(self.estimators_): + message = self._log_message( + estimator_idx=chain_idx + 1, + n_estimators=len(self.estimators_), + processing_msg=f"Processing order {self.order_[chain_idx]}", + ) + y = Y[:, self.order_[chain_idx]] + with _print_elapsed_time("Chain", message): + estimator.fit( + X_aug[:, : (X.shape[1] + chain_idx)], + y, + **routed_params.estimator.fit, + ) + + if self.cv is not None and chain_idx < len(self.estimators_) - 1: + col_idx = X.shape[1] + chain_idx + cv_result = cross_val_predict( + self.base_estimator, + X_aug[:, :col_idx], + y=y, + cv=self.cv, + method=chain_method, + ) + # `predict_proba` output is 2D, we use only output for classes[-1] + if cv_result.ndim > 1: + cv_result = cv_result[:, 1] + if sp.issparse(X_aug): + X_aug[:, col_idx] = np.expand_dims(cv_result, 1) + else: + X_aug[:, col_idx] = cv_result + + return self + + def predict(self, X): + """Predict on the data matrix X using the ClassifierChain model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Returns + ------- + Y_pred : array-like of shape (n_samples, n_classes) + The predicted values. 
+ """ + return self._get_predictions(X, output_method="predict") + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = get_tags(self.base_estimator).input_tags.sparse + return tags + + +class ClassifierChain(MetaEstimatorMixin, ClassifierMixin, _BaseChain): + """A multi-label model that arranges binary classifiers into a chain. + + Each model makes a prediction in the order specified by the chain using + all of the available features provided to the model plus the predictions + of models that are earlier in the chain. + + For an example of how to use ``ClassifierChain`` and benefit from its + ensemble, see + :ref:`ClassifierChain on a yeast dataset + ` example. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.19 + + Parameters + ---------- + base_estimator : estimator + The base estimator from which the classifier chain is built. + + order : array-like of shape (n_outputs,) or 'random', default=None + If `None`, the order will be determined by the order of columns in + the label matrix Y.:: + + order = [0, 1, 2, ..., Y.shape[1] - 1] + + The order of the chain can be explicitly set by providing a list of + integers. For example, for a chain of length 5.:: + + order = [1, 3, 2, 4, 0] + + means that the first model in the chain will make predictions for + column 1 in the Y matrix, the second model will make predictions + for column 3, etc. + + If order is `random` a random ordering will be used. + + cv : int, cross-validation generator or an iterable, default=None + Determines whether to use cross validated predictions or true + labels for the results of previous estimators in the chain. + Possible inputs for cv are: + + - None, to use true labels when fitting, + - integer, to specify the number of folds in a (Stratified)KFold, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + chain_method : {'predict', 'predict_proba', 'predict_log_proba', \ + 'decision_function'} or list of such str's, default='predict' + + Prediction method to be used by estimators in the chain for + the 'prediction' features of previous estimators in the chain. + + - if `str`, name of the method; + - if a list of `str`, provides the method names in order of + preference. The method used corresponds to the first method in + the list that is implemented by `base_estimator`. + + .. versionadded:: 1.5 + + random_state : int, RandomState instance or None, optional (default=None) + If ``order='random'``, determines random number generation for the + chain order. + In addition, it controls the random seed given at each `base_estimator` + at each chaining iteration. Thus, it is only used when `base_estimator` + exposes a `random_state`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + verbose : bool, default=False + If True, chain progress is output as each model is completed. + + .. versionadded:: 1.2 + + Attributes + ---------- + classes_ : list + A list of arrays of length ``len(estimators_)`` containing the + class labels for each estimator in the chain. + + estimators_ : list + A list of clones of base_estimator. + + order_ : list + The order of labels in the classifier chain. + + chain_method_ : str + Prediction method used by estimators in the chain for the prediction + features. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying `base_estimator` exposes such an attribute when fit. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + RegressorChain : Equivalent for regression. + MultiOutputClassifier : Classifies each output independently rather than + chaining. + + References + ---------- + Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank, "Classifier + Chains for Multi-label Classification", 2009. + + Examples + -------- + >>> from sklearn.datasets import make_multilabel_classification + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.multioutput import ClassifierChain + >>> X, Y = make_multilabel_classification( + ... n_samples=12, n_classes=3, random_state=0 + ... ) + >>> X_train, X_test, Y_train, Y_test = train_test_split( + ... X, Y, random_state=0 + ... ) + >>> base_lr = LogisticRegression(solver='lbfgs', random_state=0) + >>> chain = ClassifierChain(base_lr, order='random', random_state=0) + >>> chain.fit(X_train, Y_train).predict(X_test) + array([[1., 1., 0.], + [1., 0., 0.], + [0., 1., 0.]]) + >>> chain.predict_proba(X_test) + array([[0.8387..., 0.9431..., 0.4576...], + [0.8878..., 0.3684..., 0.2640...], + [0.0321..., 0.9935..., 0.0626...]]) + """ + + _parameter_constraints: dict = { + **_BaseChain._parameter_constraints, + "chain_method": [ + list, + tuple, + StrOptions( + {"predict", "predict_proba", "predict_log_proba", "decision_function"} + ), + ], + } + + def __init__( + self, + base_estimator, + *, + order=None, + cv=None, + chain_method="predict", + random_state=None, + verbose=False, + ): + super().__init__( + base_estimator, + order=order, + cv=cv, + random_state=random_state, + verbose=verbose, + ) + self.chain_method = chain_method + + @_fit_context( + # ClassifierChain.base_estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, Y, **fit_params): + """Fit the model to data matrix X and targets Y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Y : array-like of shape (n_samples, n_classes) + The target values. + + **fit_params : dict of string -> object + Parameters passed to the `fit` method of each step. + + Only available if `enable_metadata_routing=True`. See the + :ref:`User Guide `. + + .. versionadded:: 1.3 + + Returns + ------- + self : object + Class instance. + """ + _raise_for_params(fit_params, self, "fit") + + super().fit(X, Y, **fit_params) + self.classes_ = [estimator.classes_ for estimator in self.estimators_] + return self + + @_available_if_base_estimator_has("predict_proba") + def predict_proba(self, X): + """Predict probability estimates. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Returns + ------- + Y_prob : array-like of shape (n_samples, n_classes) + The predicted probabilities. + """ + return self._get_predictions(X, output_method="predict_proba") + + def predict_log_proba(self, X): + """Predict logarithm of probability estimates. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Returns + ------- + Y_log_prob : array-like of shape (n_samples, n_classes) + The predicted logarithm of the probabilities. 
+ """ + return np.log(self.predict_proba(X)) + + @_available_if_base_estimator_has("decision_function") + def decision_function(self, X): + """Evaluate the decision_function of the models in the chain. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data. + + Returns + ------- + Y_decision : array-like of shape (n_samples, n_classes) + Returns the decision function of the sample for each model + in the chain. + """ + return self._get_predictions(X, output_method="decision_function") + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.3 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.base_estimator, + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + return router + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + # FIXME + tags._skip_test = True + tags.target_tags.single_output = False + tags.target_tags.multi_output = True + return tags + + +class RegressorChain(MetaEstimatorMixin, RegressorMixin, _BaseChain): + """A multi-label model that arranges regressions into a chain. + + Each model makes a prediction in the order specified by the chain using + all of the available features provided to the model plus the predictions + of models that are earlier in the chain. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + base_estimator : estimator + The base estimator from which the regressor chain is built. + + order : array-like of shape (n_outputs,) or 'random', default=None + If `None`, the order will be determined by the order of columns in + the label matrix Y.:: + + order = [0, 1, 2, ..., Y.shape[1] - 1] + + The order of the chain can be explicitly set by providing a list of + integers. For example, for a chain of length 5.:: + + order = [1, 3, 2, 4, 0] + + means that the first model in the chain will make predictions for + column 1 in the Y matrix, the second model will make predictions + for column 3, etc. + + If order is 'random' a random ordering will be used. + + cv : int, cross-validation generator or an iterable, default=None + Determines whether to use cross validated predictions or true + labels for the results of previous estimators in the chain. + Possible inputs for cv are: + + - None, to use true labels when fitting, + - integer, to specify the number of folds in a (Stratified)KFold, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + random_state : int, RandomState instance or None, optional (default=None) + If ``order='random'``, determines random number generation for the + chain order. + In addition, it controls the random seed given at each `base_estimator` + at each chaining iteration. Thus, it is only used when `base_estimator` + exposes a `random_state`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + verbose : bool, default=False + If True, chain progress is output as each model is completed. + + .. versionadded:: 1.2 + + Attributes + ---------- + estimators_ : list + A list of clones of base_estimator. + + order_ : list + The order of labels in the classifier chain. + + n_features_in_ : int + Number of features seen during :term:`fit`. 
Only defined if the + underlying `base_estimator` exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + ClassifierChain : Equivalent for classification. + MultiOutputRegressor : Learns each output independently rather than + chaining. + + Examples + -------- + >>> from sklearn.multioutput import RegressorChain + >>> from sklearn.linear_model import LogisticRegression + >>> logreg = LogisticRegression(solver='lbfgs') + >>> X, Y = [[1, 0], [0, 1], [1, 1]], [[0, 2], [1, 1], [2, 0]] + >>> chain = RegressorChain(base_estimator=logreg, order=[0, 1]).fit(X, Y) + >>> chain.predict(X) + array([[0., 2.], + [1., 1.], + [2., 0.]]) + """ + + @_fit_context( + # RegressorChain.base_estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, Y, **fit_params): + """Fit the model to data matrix X and targets Y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input data. + + Y : array-like of shape (n_samples, n_classes) + The target values. + + **fit_params : dict of string -> object + Parameters passed to the `fit` method at each step + of the regressor chain. + + .. versionadded:: 0.23 + + Returns + ------- + self : object + Returns a fitted instance. + """ + super().fit(X, Y, **fit_params) + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.3 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + estimator=self.base_estimator, + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + return router + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.target_tags.single_output = False + tags.target_tags.multi_output = True + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/naive_bayes.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/naive_bayes.py new file mode 100644 index 0000000000000000000000000000000000000000..0bb2daab25d0b43f584527e6a588cb567fa92bbd --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/naive_bayes.py @@ -0,0 +1,1539 @@ +"""Naive Bayes algorithms. + +These are supervised learning methods based on applying Bayes' theorem with strong +(naive) feature independence assumptions. 
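+
+The implementations provided here are :class:`GaussianNB`, :class:`MultinomialNB`,
+:class:`ComplementNB`, :class:`BernoulliNB` and :class:`CategoricalNB`.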
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np +from scipy.special import logsumexp + +from .base import ( + BaseEstimator, + ClassifierMixin, + _fit_context, +) +from .preprocessing import LabelBinarizer, binarize, label_binarize +from .utils._param_validation import Interval +from .utils.extmath import safe_sparse_dot +from .utils.multiclass import _check_partial_fit_first_call +from .utils.validation import ( + _check_n_features, + _check_sample_weight, + check_is_fitted, + check_non_negative, + validate_data, +) + +__all__ = [ + "BernoulliNB", + "GaussianNB", + "MultinomialNB", + "ComplementNB", + "CategoricalNB", +] + + +class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta): + """Abstract base class for naive Bayes estimators""" + + @abstractmethod + def _joint_log_likelihood(self, X): + """Compute the unnormalized posterior log probability of X + + I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of + shape (n_samples, n_classes). + + Public methods predict, predict_proba, predict_log_proba, and + predict_joint_log_proba pass the input through _check_X before handing it + over to _joint_log_likelihood. The term "joint log likelihood" is used + interchangibly with "joint log probability". + """ + + @abstractmethod + def _check_X(self, X): + """To be overridden in subclasses with the actual checks. + + Only used in predict* methods. + """ + + def predict_joint_log_proba(self, X): + """Return joint log probability estimates for the test vector X. + + For each row x of X and class y, the joint log probability is given by + ``log P(x, y) = log P(y) + log P(x|y),`` + where ``log P(y)`` is the class prior probability and ``log P(x|y)`` is + the class-conditional probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Returns the joint log-probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + """ + check_is_fitted(self) + X = self._check_X(X) + return self._joint_log_likelihood(X) + + def predict(self, X): + """ + Perform classification on an array of test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + C : ndarray of shape (n_samples,) + Predicted target values for X. + """ + check_is_fitted(self) + X = self._check_X(X) + jll = self._joint_log_likelihood(X) + return self.classes_[np.argmax(jll, axis=1)] + + def predict_log_proba(self, X): + """ + Return log-probability estimates for the test vector X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + C : array-like of shape (n_samples, n_classes) + Returns the log-probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + """ + check_is_fitted(self) + X = self._check_X(X) + jll = self._joint_log_likelihood(X) + # normalize by P(x) = P(f_1, ..., f_n) + log_prob_x = logsumexp(jll, axis=1) + return jll - np.atleast_2d(log_prob_x).T + + def predict_proba(self, X): + """ + Return probability estimates for the test vector X. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Returns + ------- + C : array-like of shape (n_samples, n_classes) + Returns the probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + """ + return np.exp(self.predict_log_proba(X)) + + +class GaussianNB(_BaseNB): + """ + Gaussian Naive Bayes (GaussianNB). + + Can perform online updates to model parameters via :meth:`partial_fit`. + For details on algorithm used to update feature means and variance online, + see `Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque + `_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + priors : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. If specified, the priors are not + adjusted according to the data. + + var_smoothing : float, default=1e-9 + Portion of the largest variance of all features that is added to + variances for calculation stability. + + .. versionadded:: 0.20 + + Attributes + ---------- + class_count_ : ndarray of shape (n_classes,) + number of training samples observed in each class. + + class_prior_ : ndarray of shape (n_classes,) + probability of each class. + + classes_ : ndarray of shape (n_classes,) + class labels known to the classifier. + + epsilon_ : float + absolute additive value to variances. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + var_ : ndarray of shape (n_classes, n_features) + Variance of each feature per class. + + .. versionadded:: 1.0 + + theta_ : ndarray of shape (n_classes, n_features) + mean of each feature per class. + + See Also + -------- + BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. + CategoricalNB : Naive Bayes classifier for categorical features. + ComplementNB : Complement Naive Bayes classifier. + MultinomialNB : Naive Bayes classifier for multinomial models. + + Examples + -------- + >>> import numpy as np + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> Y = np.array([1, 1, 1, 2, 2, 2]) + >>> from sklearn.naive_bayes import GaussianNB + >>> clf = GaussianNB() + >>> clf.fit(X, Y) + GaussianNB() + >>> print(clf.predict([[-0.8, -1]])) + [1] + >>> clf_pf = GaussianNB() + >>> clf_pf.partial_fit(X, Y, np.unique(Y)) + GaussianNB() + >>> print(clf_pf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + "priors": ["array-like", None], + "var_smoothing": [Interval(Real, 0, None, closed="left")], + } + + def __init__(self, *, priors=None, var_smoothing=1e-9): + self.priors = priors + self.var_smoothing = var_smoothing + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Gaussian Naive Bayes according to X, y. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + .. 
versionadded:: 0.17 + Gaussian Naive Bayes supports fitting with *sample_weight*. + + Returns + ------- + self : object + Returns the instance itself. + """ + y = validate_data(self, y=y) + return self._partial_fit( + X, y, np.unique(y), _refit=True, sample_weight=sample_weight + ) + + def _check_X(self, X): + """Validate X, used only in predict* methods.""" + return validate_data(self, X, reset=False) + + @staticmethod + def _update_mean_variance(n_past, mu, var, X, sample_weight=None): + """Compute online update of Gaussian mean and variance. + + Given starting sample count, mean, and variance, a new set of + points X, and optionally sample weights, return the updated mean and + variance. (NB - each dimension (column) in X is treated as independent + -- you get variance, not covariance). + + Can take scalar mean and variance, or vector mean and variance to + simultaneously update a number of independent Gaussians. + + See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: + + http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf + + Parameters + ---------- + n_past : int + Number of samples represented in old mean and variance. If sample + weights were given, this should contain the sum of sample + weights represented in old mean and variance. + + mu : array-like of shape (number of Gaussians,) + Means for Gaussians in original set. + + var : array-like of shape (number of Gaussians,) + Variances for Gaussians in original set. + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + total_mu : array-like of shape (number of Gaussians,) + Updated mean for each Gaussian over the combined set. + + total_var : array-like of shape (number of Gaussians,) + Updated variance for each Gaussian over the combined set. + """ + if X.shape[0] == 0: + return mu, var + + # Compute (potentially weighted) mean and variance of new datapoints + if sample_weight is not None: + n_new = float(sample_weight.sum()) + if np.isclose(n_new, 0.0): + return mu, var + new_mu = np.average(X, axis=0, weights=sample_weight) + new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight) + else: + n_new = X.shape[0] + new_var = np.var(X, axis=0) + new_mu = np.mean(X, axis=0) + + if n_past == 0: + return new_mu, new_var + + n_total = float(n_past + n_new) + + # Combine mean of old and new data, taking into consideration + # (weighted) number of observations + total_mu = (n_new * new_mu + n_past * mu) / n_total + + # Combine variance of old and new data, taking into consideration + # (weighted) number of observations. This is achieved by combining + # the sum-of-squared-differences (ssd) + old_ssd = n_past * var + new_ssd = n_new * new_var + total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2 + total_var = total_ssd / n_total + + return total_mu, total_var + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, classes=None, sample_weight=None): + """Incremental fit on a batch of samples. + + This method is expected to be called several times consecutively + on different chunks of a dataset so as to implement out-of-core + or online learning. + + This is especially useful when the whole dataset is too big to fit in + memory at once. 
+ + This method has some performance and numerical stability overhead, + hence it is better to call partial_fit on chunks of data that are + as large as possible (as long as fitting in the memory budget) to + hide the overhead. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + classes : array-like of shape (n_classes,), default=None + List of all the classes that can possibly appear in the y vector. + + Must be provided at the first call to partial_fit, can be omitted + in subsequent calls. + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + .. versionadded:: 0.17 + + Returns + ------- + self : object + Returns the instance itself. + """ + return self._partial_fit( + X, y, classes, _refit=False, sample_weight=sample_weight + ) + + def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None): + """Actual implementation of Gaussian NB fitting. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. + + classes : array-like of shape (n_classes,), default=None + List of all the classes that can possibly appear in the y vector. + + Must be provided at the first call to partial_fit, can be omitted + in subsequent calls. + + _refit : bool, default=False + If true, act as though this were the first time we called + _partial_fit (ie, throw away any past fitting and start over). + + sample_weight : array-like of shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + self : object + """ + if _refit: + self.classes_ = None + + first_call = _check_partial_fit_first_call(self, classes) + X, y = validate_data(self, X, y, reset=first_call) + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + # If the ratio of data variance between dimensions is too small, it + # will cause numerical errors. To address this, we artificially + # boost the variance by epsilon, a small fraction of the standard + # deviation of the largest dimension. 
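+        # With the default var_smoothing=1e-9, epsilon_ is one billionth of
+        # the largest per-feature variance in the current batch; it is added
+        # to the per-class variances at the end of the update and subtracted
+        # again at the start of subsequent calls, so the smoothing does not
+        # accumulate across partial_fit batches.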
+ self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max() + + if first_call: + # This is the first call to partial_fit: + # initialize various cumulative counters + n_features = X.shape[1] + n_classes = len(self.classes_) + self.theta_ = np.zeros((n_classes, n_features)) + self.var_ = np.zeros((n_classes, n_features)) + + self.class_count_ = np.zeros(n_classes, dtype=np.float64) + + # Initialise the class prior + # Take into account the priors + if self.priors is not None: + priors = np.asarray(self.priors) + # Check that the provided prior matches the number of classes + if len(priors) != n_classes: + raise ValueError("Number of priors must match number of classes.") + # Check that the sum is 1 + if not np.isclose(priors.sum(), 1.0): + raise ValueError("The sum of the priors should be 1.") + # Check that the priors are non-negative + if (priors < 0).any(): + raise ValueError("Priors must be non-negative.") + self.class_prior_ = priors + else: + # Initialize the priors to zeros for each class + self.class_prior_ = np.zeros(len(self.classes_), dtype=np.float64) + else: + if X.shape[1] != self.theta_.shape[1]: + msg = "Number of features %d does not match previous data %d." + raise ValueError(msg % (X.shape[1], self.theta_.shape[1])) + # Put epsilon back in each time + self.var_[:, :] -= self.epsilon_ + + classes = self.classes_ + + unique_y = np.unique(y) + unique_y_in_classes = np.isin(unique_y, classes) + + if not np.all(unique_y_in_classes): + raise ValueError( + "The target label(s) %s in y do not exist in the initial classes %s" + % (unique_y[~unique_y_in_classes], classes) + ) + + for y_i in unique_y: + i = classes.searchsorted(y_i) + X_i = X[y == y_i, :] + + if sample_weight is not None: + sw_i = sample_weight[y == y_i] + N_i = sw_i.sum() + else: + sw_i = None + N_i = X_i.shape[0] + + new_theta, new_sigma = self._update_mean_variance( + self.class_count_[i], self.theta_[i, :], self.var_[i, :], X_i, sw_i + ) + + self.theta_[i, :] = new_theta + self.var_[i, :] = new_sigma + self.class_count_[i] += N_i + + self.var_[:, :] += self.epsilon_ + + # Update if only no priors is provided + if self.priors is None: + # Empirical prior, with sample_weight taken into account + self.class_prior_ = self.class_count_ / self.class_count_.sum() + + return self + + def _joint_log_likelihood(self, X): + joint_log_likelihood = [] + for i in range(np.size(self.classes_)): + jointi = np.log(self.class_prior_[i]) + n_ij = -0.5 * np.sum(np.log(2.0 * np.pi * self.var_[i, :])) + n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.var_[i, :]), 1) + joint_log_likelihood.append(jointi + n_ij) + + joint_log_likelihood = np.array(joint_log_likelihood).T + return joint_log_likelihood + + +class _BaseDiscreteNB(_BaseNB): + """Abstract base class for naive Bayes on discrete/categorical data + + Any estimator based on this class should provide: + + __init__ + _joint_log_likelihood(X) as per _BaseNB + _update_feature_log_prob(alpha) + _count(X, Y) + """ + + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], + "fit_prior": ["boolean"], + "class_prior": ["array-like", None], + "force_alpha": ["boolean"], + } + + def __init__(self, alpha=1.0, fit_prior=True, class_prior=None, force_alpha=True): + self.alpha = alpha + self.fit_prior = fit_prior + self.class_prior = class_prior + self.force_alpha = force_alpha + + @abstractmethod + def _count(self, X, Y): + """Update counts that are used to calculate probabilities. 
+
+        The counts make up a sufficient statistic extracted from the data.
+        Accordingly, this method is called each time `fit` or `partial_fit`
+        updates the model. `class_count_` and `feature_count_` must be updated
+        here along with any model specific counts.
+
+        Parameters
+        ----------
+        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
+            The input samples.
+        Y : ndarray of shape (n_samples, n_classes)
+            Binarized class labels.
+        """
+
+    @abstractmethod
+    def _update_feature_log_prob(self, alpha):
+        """Update feature log probabilities based on counts.
+
+        This method is called each time `fit` or `partial_fit` updates the
+        model.
+
+        Parameters
+        ----------
+        alpha : float
+            Smoothing parameter. See :meth:`_check_alpha`.
+        """
+
+    def _check_X(self, X):
+        """Validate X, used only in predict* methods."""
+        return validate_data(self, X, accept_sparse="csr", reset=False)
+
+    def _check_X_y(self, X, y, reset=True):
+        """Validate X and y in fit methods."""
+        return validate_data(self, X, y, accept_sparse="csr", reset=reset)
+
+    def _update_class_log_prior(self, class_prior=None):
+        """Update class log priors.
+
+        The class log priors are based on `class_prior`, class count or the
+        number of classes. This method is called each time `fit` or
+        `partial_fit` updates the model.
+        """
+        n_classes = len(self.classes_)
+        if class_prior is not None:
+            if len(class_prior) != n_classes:
+                raise ValueError("Number of priors must match number of classes.")
+            self.class_log_prior_ = np.log(class_prior)
+        elif self.fit_prior:
+            with warnings.catch_warnings():
+                # silence the warning when count is 0 because class was not yet
+                # observed
+                warnings.simplefilter("ignore", RuntimeWarning)
+                log_class_count = np.log(self.class_count_)
+
+            # empirical prior, with sample_weight taken into account
+            self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum())
+        else:
+            self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
+
+    def _check_alpha(self):
+        alpha = (
+            np.asarray(self.alpha) if not isinstance(self.alpha, Real) else self.alpha
+        )
+        alpha_min = np.min(alpha)
+        if isinstance(alpha, np.ndarray):
+            if not alpha.shape[0] == self.n_features_in_:
+                raise ValueError(
+                    "When alpha is an array, it should contain `n_features`. "
+                    f"Got {alpha.shape[0]} elements instead of {self.n_features_in_}."
+                )
+            # check that all alpha are positive
+            if alpha_min < 0:
+                raise ValueError("All values in alpha must be greater than 0.")
+        alpha_lower_bound = 1e-10
+        if alpha_min < alpha_lower_bound and not self.force_alpha:
+            warnings.warn(
+                "alpha too small will result in numeric errors, setting alpha ="
+                f" {alpha_lower_bound:.1e}. Use `force_alpha=True` to keep alpha"
+                " unchanged."
+            )
+            return np.maximum(alpha, alpha_lower_bound)
+        return alpha
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def partial_fit(self, X, y, classes=None, sample_weight=None):
+        """Incremental fit on a batch of samples.
+
+        This method is expected to be called several times consecutively
+        on different chunks of a dataset so as to implement out-of-core
+        or online learning.
+
+        This is especially useful when the whole dataset is too big to fit in
+        memory at once.
+
+        This method has some performance overhead hence it is better to call
+        partial_fit on chunks of data that are as large as possible
+        (as long as fitting in the memory budget) to hide the overhead.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Training vectors, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : array-like of shape (n_samples,)
+            Target values.
+
+        classes : array-like of shape (n_classes,), default=None
+            List of all the classes that can possibly appear in the y vector.
+
+            Must be provided at the first call to partial_fit, and can be
+            omitted in subsequent calls.
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            Weights applied to individual samples (1. for unweighted).
+
+        Returns
+        -------
+        self : object
+            Returns the instance itself.
+        """
+        first_call = not hasattr(self, "classes_")
+
+        X, y = self._check_X_y(X, y, reset=first_call)
+        _, n_features = X.shape
+
+        if _check_partial_fit_first_call(self, classes):
+            # This is the first call to partial_fit:
+            # initialize various cumulative counters
+            n_classes = len(classes)
+            self._init_counters(n_classes, n_features)
+
+        Y = label_binarize(y, classes=self.classes_)
+        if Y.shape[1] == 1:
+            if len(self.classes_) == 2:
+                Y = np.concatenate((1 - Y, Y), axis=1)
+            else:  # degenerate case: just one class
+                Y = np.ones_like(Y)
+
+        if X.shape[0] != Y.shape[0]:
+            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
+            raise ValueError(msg % (X.shape[0], y.shape[0]))
+
+        # label_binarize() returns arrays with dtype=np.int64.
+        # We convert it to np.float64 to support sample_weight consistently
+        Y = Y.astype(np.float64, copy=False)
+        if sample_weight is not None:
+            sample_weight = _check_sample_weight(sample_weight, X)
+            sample_weight = np.atleast_2d(sample_weight)
+            Y *= sample_weight.T
+
+        class_prior = self.class_prior
+
+        # Count raw events from data before updating the class log prior
+        # and feature log probas
+        self._count(X, Y)
+
+        # XXX: OPTIM: we could introduce a public finalization method to
+        # be called by the user explicitly just once after several consecutive
+        # calls to partial_fit and prior to any call to predict[_[log_]proba]
+        # to avoid computing the smooth log probas at each call to partial fit
+        alpha = self._check_alpha()
+        self._update_feature_log_prob(alpha)
+        self._update_class_log_prior(class_prior=class_prior)
+        return self
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y, sample_weight=None):
+        """Fit Naive Bayes classifier according to X, y.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Training vectors, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : array-like of shape (n_samples,)
+            Target values.
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            Weights applied to individual samples (1. for unweighted).
+
+        Returns
+        -------
+        self : object
+            Returns the instance itself.
+        """
+        X, y = self._check_X_y(X, y)
+        _, n_features = X.shape
+
+        labelbin = LabelBinarizer()
+        Y = labelbin.fit_transform(y)
+        self.classes_ = labelbin.classes_
+        if Y.shape[1] == 1:
+            if len(self.classes_) == 2:
+                Y = np.concatenate((1 - Y, Y), axis=1)
+            else:  # degenerate case: just one class
+                Y = np.ones_like(Y)
+
+        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
+ # We convert it to np.float64 to support sample_weight consistently; + # this means we also don't have to cast X to floating point + if sample_weight is not None: + Y = Y.astype(np.float64, copy=False) + sample_weight = _check_sample_weight(sample_weight, X) + sample_weight = np.atleast_2d(sample_weight) + Y *= sample_weight.T + + class_prior = self.class_prior + + # Count raw events from data before updating the class log prior + # and feature log probas + n_classes = Y.shape[1] + self._init_counters(n_classes, n_features) + self._count(X, Y) + alpha = self._check_alpha() + self._update_feature_log_prob(alpha) + self._update_class_log_prior(class_prior=class_prior) + return self + + def _init_counters(self, n_classes, n_features): + self.class_count_ = np.zeros(n_classes, dtype=np.float64) + self.feature_count_ = np.zeros((n_classes, n_features), dtype=np.float64) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + tags.classifier_tags.poor_score = True + return tags + + +class MultinomialNB(_BaseDiscreteNB): + """ + Naive Bayes classifier for multinomial models. + + The multinomial Naive Bayes classifier is suitable for classification with + discrete features (e.g., word counts for text classification). The + multinomial distribution normally requires integer feature counts. However, + in practice, fractional counts such as tf-idf may also work. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float or array-like of shape (n_features,), default=1.0 + Additive (Laplace/Lidstone) smoothing parameter + (set alpha=0 and force_alpha=True, for no smoothing). + + force_alpha : bool, default=True + If False and alpha is less than 1e-10, it will set alpha to + 1e-10. If True, alpha will remain unchanged. This may cause + numerical errors if alpha is too close to 0. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `force_alpha` changed to `True`. + + fit_prior : bool, default=True + Whether to learn class prior probabilities or not. + If false, a uniform prior will be used. + + class_prior : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. If specified, the priors are not + adjusted according to the data. + + Attributes + ---------- + class_count_ : ndarray of shape (n_classes,) + Number of samples encountered for each class during fitting. This + value is weighted by the sample weight when provided. + + class_log_prior_ : ndarray of shape (n_classes,) + Smoothed empirical log probability for each class. + + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier + + feature_count_ : ndarray of shape (n_classes, n_features) + Number of samples encountered for each (class, feature) + during fitting. This value is weighted by the sample weight when + provided. + + feature_log_prob_ : ndarray of shape (n_classes, n_features) + Empirical log probability of features + given a class, ``P(x_i|y)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. + CategoricalNB : Naive Bayes classifier for categorical features. + ComplementNB : Complement Naive Bayes classifier. + GaussianNB : Gaussian Naive Bayes. 
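+
+    Notes
+    -----
+    As a sketch of what ``_update_feature_log_prob`` computes below, the
+    smoothed feature log-probabilities are::
+
+        log P(x_i | y) = log(N_yi + alpha) - log(N_y + alpha * n_features)
+
+    where ``N_yi`` is ``feature_count_[y, i]`` and ``N_y`` is
+    ``feature_count_[y].sum()``.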
+ + References + ---------- + C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to + Information Retrieval. Cambridge University Press, pp. 234-265. + https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.RandomState(1) + >>> X = rng.randint(5, size=(6, 100)) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> from sklearn.naive_bayes import MultinomialNB + >>> clf = MultinomialNB() + >>> clf.fit(X, y) + MultinomialNB() + >>> print(clf.predict(X[2:3])) + [3] + """ + + def __init__( + self, *, alpha=1.0, force_alpha=True, fit_prior=True, class_prior=None + ): + super().__init__( + alpha=alpha, + fit_prior=fit_prior, + class_prior=class_prior, + force_alpha=force_alpha, + ) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.positive_only = True + return tags + + def _count(self, X, Y): + """Count and smooth feature occurrences.""" + check_non_negative(X, "MultinomialNB (input X)") + self.feature_count_ += safe_sparse_dot(Y.T, X) + self.class_count_ += Y.sum(axis=0) + + def _update_feature_log_prob(self, alpha): + """Apply smoothing to raw counts and recompute log probabilities""" + smoothed_fc = self.feature_count_ + alpha + smoothed_cc = smoothed_fc.sum(axis=1) + + self.feature_log_prob_ = np.log(smoothed_fc) - np.log( + smoothed_cc.reshape(-1, 1) + ) + + def _joint_log_likelihood(self, X): + """Calculate the posterior log probability of the samples X""" + return safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_ + + +class ComplementNB(_BaseDiscreteNB): + """The Complement Naive Bayes classifier described in Rennie et al. (2003). + + The Complement Naive Bayes classifier was designed to correct the "severe + assumptions" made by the standard Multinomial Naive Bayes classifier. It is + particularly suited for imbalanced data sets. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + alpha : float or array-like of shape (n_features,), default=1.0 + Additive (Laplace/Lidstone) smoothing parameter + (set alpha=0 and force_alpha=True, for no smoothing). + + force_alpha : bool, default=True + If False and alpha is less than 1e-10, it will set alpha to + 1e-10. If True, alpha will remain unchanged. This may cause + numerical errors if alpha is too close to 0. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `force_alpha` changed to `True`. + + fit_prior : bool, default=True + Only used in edge case with a single class in the training set. + + class_prior : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. Not used. + + norm : bool, default=False + Whether or not a second normalization of the weights is performed. The + default behavior mirrors the implementations found in Mahout and Weka, + which do not follow the full algorithm described in Table 9 of the + paper. + + Attributes + ---------- + class_count_ : ndarray of shape (n_classes,) + Number of samples encountered for each class during fitting. This + value is weighted by the sample weight when provided. + + class_log_prior_ : ndarray of shape (n_classes,) + Smoothed empirical log probability for each class. Only used in edge + case with a single class in the training set. + + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier + + feature_all_ : ndarray of shape (n_features,) + Number of samples encountered for each feature during fitting. 
This + value is weighted by the sample weight when provided. + + feature_count_ : ndarray of shape (n_classes, n_features) + Number of samples encountered for each (class, feature) during fitting. + This value is weighted by the sample weight when provided. + + feature_log_prob_ : ndarray of shape (n_classes, n_features) + Empirical weights for class complements. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. + CategoricalNB : Naive Bayes classifier for categorical features. + GaussianNB : Gaussian Naive Bayes. + MultinomialNB : Naive Bayes classifier for multinomial models. + + References + ---------- + Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003). + Tackling the poor assumptions of naive bayes text classifiers. In ICML + (Vol. 3, pp. 616-623). + https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.RandomState(1) + >>> X = rng.randint(5, size=(6, 100)) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> from sklearn.naive_bayes import ComplementNB + >>> clf = ComplementNB() + >>> clf.fit(X, y) + ComplementNB() + >>> print(clf.predict(X[2:3])) + [3] + """ + + _parameter_constraints: dict = { + **_BaseDiscreteNB._parameter_constraints, + "norm": ["boolean"], + } + + def __init__( + self, + *, + alpha=1.0, + force_alpha=True, + fit_prior=True, + class_prior=None, + norm=False, + ): + super().__init__( + alpha=alpha, + force_alpha=force_alpha, + fit_prior=fit_prior, + class_prior=class_prior, + ) + self.norm = norm + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.positive_only = True + return tags + + def _count(self, X, Y): + """Count feature occurrences.""" + check_non_negative(X, "ComplementNB (input X)") + self.feature_count_ += safe_sparse_dot(Y.T, X) + self.class_count_ += Y.sum(axis=0) + self.feature_all_ = self.feature_count_.sum(axis=0) + + def _update_feature_log_prob(self, alpha): + """Apply smoothing to raw counts and compute the weights.""" + comp_count = self.feature_all_ + alpha - self.feature_count_ + logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True)) + # _BaseNB.predict uses argmax, but ComplementNB operates with argmin. + if self.norm: + summed = logged.sum(axis=1, keepdims=True) + feature_log_prob = logged / summed + else: + feature_log_prob = -logged + self.feature_log_prob_ = feature_log_prob + + def _joint_log_likelihood(self, X): + """Calculate the class scores for the samples in X.""" + jll = safe_sparse_dot(X, self.feature_log_prob_.T) + if len(self.classes_) == 1: + jll += self.class_log_prior_ + return jll + + +class BernoulliNB(_BaseDiscreteNB): + """Naive Bayes classifier for multivariate Bernoulli models. + + Like MultinomialNB, this classifier is suitable for discrete data. The + difference is that while MultinomialNB works with occurrence counts, + BernoulliNB is designed for binary/boolean features. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float or array-like of shape (n_features,), default=1.0 + Additive (Laplace/Lidstone) smoothing parameter + (set alpha=0 and force_alpha=True, for no smoothing). 
+ + force_alpha : bool, default=True + If False and alpha is less than 1e-10, it will set alpha to + 1e-10. If True, alpha will remain unchanged. This may cause + numerical errors if alpha is too close to 0. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `force_alpha` changed to `True`. + + binarize : float or None, default=0.0 + Threshold for binarizing (mapping to booleans) of sample features. + If None, input is presumed to already consist of binary vectors. + + fit_prior : bool, default=True + Whether to learn class prior probabilities or not. + If false, a uniform prior will be used. + + class_prior : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. If specified, the priors are not + adjusted according to the data. + + Attributes + ---------- + class_count_ : ndarray of shape (n_classes,) + Number of samples encountered for each class during fitting. This + value is weighted by the sample weight when provided. + + class_log_prior_ : ndarray of shape (n_classes,) + Log probability of each class (smoothed). + + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier + + feature_count_ : ndarray of shape (n_classes, n_features) + Number of samples encountered for each (class, feature) + during fitting. This value is weighted by the sample weight when + provided. + + feature_log_prob_ : ndarray of shape (n_classes, n_features) + Empirical log probability of features given a class, P(x_i|y). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + CategoricalNB : Naive Bayes classifier for categorical features. + ComplementNB : The Complement Naive Bayes classifier + described in Rennie et al. (2003). + GaussianNB : Gaussian Naive Bayes (GaussianNB). + MultinomialNB : Naive Bayes classifier for multinomial models. + + References + ---------- + C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to + Information Retrieval. Cambridge University Press, pp. 234-265. + https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html + + A. McCallum and K. Nigam (1998). A comparison of event models for naive + Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for + Text Categorization, pp. 41-48. + + V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with + naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS). 
+ + Examples + -------- + >>> import numpy as np + >>> rng = np.random.RandomState(1) + >>> X = rng.randint(5, size=(6, 100)) + >>> Y = np.array([1, 2, 3, 4, 4, 5]) + >>> from sklearn.naive_bayes import BernoulliNB + >>> clf = BernoulliNB() + >>> clf.fit(X, Y) + BernoulliNB() + >>> print(clf.predict(X[2:3])) + [3] + """ + + _parameter_constraints: dict = { + **_BaseDiscreteNB._parameter_constraints, + "binarize": [None, Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + *, + alpha=1.0, + force_alpha=True, + binarize=0.0, + fit_prior=True, + class_prior=None, + ): + super().__init__( + alpha=alpha, + fit_prior=fit_prior, + class_prior=class_prior, + force_alpha=force_alpha, + ) + self.binarize = binarize + + def _check_X(self, X): + """Validate X, used only in predict* methods.""" + X = super()._check_X(X) + if self.binarize is not None: + X = binarize(X, threshold=self.binarize) + return X + + def _check_X_y(self, X, y, reset=True): + X, y = super()._check_X_y(X, y, reset=reset) + if self.binarize is not None: + X = binarize(X, threshold=self.binarize) + return X, y + + def _count(self, X, Y): + """Count and smooth feature occurrences.""" + self.feature_count_ += safe_sparse_dot(Y.T, X) + self.class_count_ += Y.sum(axis=0) + + def _update_feature_log_prob(self, alpha): + """Apply smoothing to raw counts and recompute log probabilities""" + smoothed_fc = self.feature_count_ + alpha + smoothed_cc = self.class_count_ + alpha * 2 + + self.feature_log_prob_ = np.log(smoothed_fc) - np.log( + smoothed_cc.reshape(-1, 1) + ) + + def _joint_log_likelihood(self, X): + """Calculate the posterior log probability of the samples X""" + n_features = self.feature_log_prob_.shape[1] + n_features_X = X.shape[1] + + if n_features_X != n_features: + raise ValueError( + "Expected input with %d features, got %d instead" + % (n_features, n_features_X) + ) + + neg_prob = np.log(1 - np.exp(self.feature_log_prob_)) + # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob + jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T) + jll += self.class_log_prior_ + neg_prob.sum(axis=1) + + return jll + + +class CategoricalNB(_BaseDiscreteNB): + """Naive Bayes classifier for categorical features. + + The categorical Naive Bayes classifier is suitable for classification with + discrete features that are categorically distributed. The categories of + each feature are drawn from a categorical distribution. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Additive (Laplace/Lidstone) smoothing parameter + (set alpha=0 and force_alpha=True, for no smoothing). + + force_alpha : bool, default=True + If False and alpha is less than 1e-10, it will set alpha to + 1e-10. If True, alpha will remain unchanged. This may cause + numerical errors if alpha is too close to 0. + + .. versionadded:: 1.2 + .. versionchanged:: 1.4 + The default value of `force_alpha` changed to `True`. + + fit_prior : bool, default=True + Whether to learn class prior probabilities or not. + If false, a uniform prior will be used. + + class_prior : array-like of shape (n_classes,), default=None + Prior probabilities of the classes. If specified, the priors are not + adjusted according to the data. + + min_categories : int or array-like of shape (n_features,), default=None + Minimum number of categories per feature. + + - integer: Sets the minimum number of categories per feature to + `n_categories` for each features. 
+ - array-like: shape (n_features,) where `n_categories[i]` holds the + minimum number of categories for the ith column of the input. + - None (default): Determines the number of categories automatically + from the training data. + + .. versionadded:: 0.24 + + Attributes + ---------- + category_count_ : list of arrays of shape (n_features,) + Holds arrays of shape (n_classes, n_categories of respective feature) + for each feature. Each array provides the number of samples + encountered for each class and category of the specific feature. + + class_count_ : ndarray of shape (n_classes,) + Number of samples encountered for each class during fitting. This + value is weighted by the sample weight when provided. + + class_log_prior_ : ndarray of shape (n_classes,) + Smoothed empirical log probability for each class. + + classes_ : ndarray of shape (n_classes,) + Class labels known to the classifier + + feature_log_prob_ : list of arrays of shape (n_features,) + Holds arrays of shape (n_classes, n_categories of respective feature) + for each feature. Each array provides the empirical log probability + of categories given the respective feature and class, ``P(x_i|y)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_categories_ : ndarray of shape (n_features,), dtype=np.int64 + Number of categories for each feature. This value is + inferred from the data or set by the minimum number of categories. + + .. versionadded:: 0.24 + + See Also + -------- + BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. + ComplementNB : Complement Naive Bayes classifier. + GaussianNB : Gaussian Naive Bayes. + MultinomialNB : Naive Bayes classifier for multinomial models. + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.RandomState(1) + >>> X = rng.randint(5, size=(6, 100)) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> from sklearn.naive_bayes import CategoricalNB + >>> clf = CategoricalNB() + >>> clf.fit(X, y) + CategoricalNB() + >>> print(clf.predict(X[2:3])) + [3] + """ + + _parameter_constraints: dict = { + **_BaseDiscreteNB._parameter_constraints, + "min_categories": [ + None, + "array-like", + Interval(Integral, 1, None, closed="left"), + ], + "alpha": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + *, + alpha=1.0, + force_alpha=True, + fit_prior=True, + class_prior=None, + min_categories=None, + ): + super().__init__( + alpha=alpha, + force_alpha=force_alpha, + fit_prior=fit_prior, + class_prior=class_prior, + ) + self.min_categories = min_categories + + def fit(self, X, y, sample_weight=None): + """Fit Naive Bayes classifier according to X, y. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. Here, each feature of X is + assumed to be from a different categorical distribution. + It is further assumed that all categories of each feature are + represented by the numbers 0, ..., n - 1, where n refers to the + total number of categories for the given feature. This can, for + instance, be achieved with the help of OrdinalEncoder. + + y : array-like of shape (n_samples,) + Target values. 
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            Weights applied to individual samples (1. for unweighted).
+
+        Returns
+        -------
+        self : object
+            Returns the instance itself.
+        """
+        return super().fit(X, y, sample_weight=sample_weight)
+
+    def partial_fit(self, X, y, classes=None, sample_weight=None):
+        """Incremental fit on a batch of samples.
+
+        This method is expected to be called several times consecutively
+        on different chunks of a dataset so as to implement out-of-core
+        or online learning.
+
+        This is especially useful when the whole dataset is too big to fit in
+        memory at once.
+
+        This method has some performance overhead, hence it is better to call
+        partial_fit on chunks of data that are as large as possible (as long
+        as they fit in the memory budget) to hide the overhead.
+
+        Parameters
+        ----------
+        X : {array-like, sparse matrix} of shape (n_samples, n_features)
+            Training vectors, where `n_samples` is the number of samples and
+            `n_features` is the number of features. Here, each feature of X is
+            assumed to be from a different categorical distribution.
+            It is further assumed that all categories of each feature are
+            represented by the numbers 0, ..., n - 1, where n refers to the
+            total number of categories for the given feature. This can, for
+            instance, be achieved with the help of OrdinalEncoder.
+
+        y : array-like of shape (n_samples,)
+            Target values.
+
+        classes : array-like of shape (n_classes,), default=None
+            List of all the classes that can possibly appear in the y vector.
+
+            Must be provided at the first call to partial_fit, and can be
+            omitted in subsequent calls.
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            Weights applied to individual samples (1. for unweighted).
+
+        Returns
+        -------
+        self : object
+            Returns the instance itself.
+        """
+        return super().partial_fit(X, y, classes, sample_weight=sample_weight)
+
+    def __sklearn_tags__(self):
+        tags = super().__sklearn_tags__()
+        tags.input_tags.sparse = False
+        tags.input_tags.positive_only = True
+        return tags
+
+    def _check_X(self, X):
+        """Validate X, used only in predict* methods."""
+        X = validate_data(
+            self,
+            X,
+            dtype="int",
+            accept_sparse=False,
+            ensure_all_finite=True,
+            reset=False,
+        )
+        check_non_negative(X, "CategoricalNB (input X)")
+        return X
+
+    def _check_X_y(self, X, y, reset=True):
+        X, y = validate_data(
+            self,
+            X,
+            y,
+            dtype="int",
+            accept_sparse=False,
+            ensure_all_finite=True,
+            reset=reset,
+        )
+        check_non_negative(X, "CategoricalNB (input X)")
+        return X, y
+
+    def _init_counters(self, n_classes, n_features):
+        self.class_count_ = np.zeros(n_classes, dtype=np.float64)
+        self.category_count_ = [np.zeros((n_classes, 0)) for _ in range(n_features)]
+
+    @staticmethod
+    def _validate_n_categories(X, min_categories):
+        # Categories are encoded as 0...n-1, so the number of categories per
+        # feature can be inferred from the per-column max.
+        n_categories_X = X.max(axis=0) + 1
+        min_categories_ = np.array(min_categories)
+        if min_categories is not None:
+            if not np.issubdtype(min_categories_.dtype, np.signedinteger):
+                raise ValueError(
+                    "'min_categories' should have integral type. Got "
+                    f"{min_categories_.dtype} instead."
+                )
+            n_categories_ = np.maximum(n_categories_X, min_categories_, dtype=np.int64)
+            if n_categories_.shape != n_categories_X.shape:
+                raise ValueError(
+                    f"'min_categories' should have shape ({X.shape[1]},"
+                    ") when an array-like is provided. Got"
+                    f" {min_categories_.shape} instead."
+ ) + return n_categories_ + else: + return n_categories_X + + def _count(self, X, Y): + def _update_cat_count_dims(cat_count, highest_feature): + diff = highest_feature + 1 - cat_count.shape[1] + if diff > 0: + # we append a column full of zeros for each new category + return np.pad(cat_count, [(0, 0), (0, diff)], "constant") + return cat_count + + def _update_cat_count(X_feature, Y, cat_count, n_classes): + for j in range(n_classes): + mask = Y[:, j].astype(bool) + if Y.dtype.type == np.int64: + weights = None + else: + weights = Y[mask, j] + counts = np.bincount(X_feature[mask], weights=weights) + indices = np.nonzero(counts)[0] + cat_count[j, indices] += counts[indices] + + self.class_count_ += Y.sum(axis=0) + self.n_categories_ = self._validate_n_categories(X, self.min_categories) + for i in range(self.n_features_in_): + X_feature = X[:, i] + self.category_count_[i] = _update_cat_count_dims( + self.category_count_[i], self.n_categories_[i] - 1 + ) + _update_cat_count( + X_feature, Y, self.category_count_[i], self.class_count_.shape[0] + ) + + def _update_feature_log_prob(self, alpha): + feature_log_prob = [] + for i in range(self.n_features_in_): + smoothed_cat_count = self.category_count_[i] + alpha + smoothed_class_count = smoothed_cat_count.sum(axis=1) + feature_log_prob.append( + np.log(smoothed_cat_count) - np.log(smoothed_class_count.reshape(-1, 1)) + ) + self.feature_log_prob_ = feature_log_prob + + def _joint_log_likelihood(self, X): + _check_n_features(self, X, reset=False) + jll = np.zeros((X.shape[0], self.class_count_.shape[0])) + for i in range(self.n_features_in_): + indices = X[:, i] + jll += self.feature_log_prob_[i][:, indices].T + total_ll = jll + self.class_log_prior_ + return total_ll diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_binary_tree.pxi.tp b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_binary_tree.pxi.tp new file mode 100644 index 0000000000000000000000000000000000000000..c25740c0d6f6c2decaba6b3fae58e983c9263e9c --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_binary_tree.pxi.tp @@ -0,0 +1,2481 @@ +{{py: + +# Generated file: _binary_tree.pxi + +implementation_specific_values = [ + # The values are arranged as follows: + # + # name_suffix, INPUT_DTYPE_t, INPUT_DTYPE, NPY_TYPE + # + ('64', 'float64_t', 'np.float64', 'cnp.NPY_DOUBLE'), + ('32', 'float32_t', 'np.float32', 'cnp.NPY_FLOAT') +] + +# KD Tree and Ball Tree +# ===================== +# +# Author: Jake Vanderplas , 2012-2013 +# Omar Salman +# +# License: BSD +# +# _binary_tree.pxi is generated and is then literally Cython included in +# ball_tree.pyx and kd_tree.pyx. See ball_tree.pyx.tp and kd_tree.pyx.tp. + +}} + + +# KD Tree and Ball Tree +# ===================== +# +# The routines here are the core algorithms of the KDTree and BallTree +# structures. If Cython supported polymorphism, we would be able to +# create a subclass and derive KDTree and BallTree from it. Because +# polymorphism is not an option, we use this single BinaryTree class +# as a literal include to avoid duplicating the entire file. +# +# A series of functions are implemented in kd_tree.pyx and ball_tree.pyx +# which use the information here to calculate the lower and upper bounds +# between a node and a point, and between two nodes. These functions are +# used here, and are all that are needed to differentiate between the two +# tree types. 
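+#
+# As a rough sketch of this arrangement (the authoritative versions live in
+# ball_tree.pyx.tp and kd_tree.pyx.tp), each generated .pyx file looks like:
+#
+#     include "_binary_tree.pxi"
+#
+#     cdef class KDTree64(BinaryTree64):
+#         ...
+#
+#     cdef float64_t min_dist(BinaryTree64 tree, intp_t i_node, float64_t* pt):
+#         ...  # tree-type-specific lower bound on distance to the node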
+# +# Description of Binary Tree Algorithms +# ------------------------------------- +# A binary tree can be thought of as a collection of nodes. The top node +# contains all the points. The next level consists of two nodes with half +# the points in each, and this continues recursively. Each node contains +# metadata which allow fast computation of distance bounds: in the case of +# a ball tree, the metadata is a center and a radius. In the case of a +# KD tree, the metadata is the minimum and maximum bound along each dimension. +# +# In a typical KD Tree or Ball Tree implementation, the nodes are implemented +# as dynamically allocated structures with pointers linking them. Here we +# take a different approach, storing all relevant data in a set of arrays +# so that the entire tree object can be saved in a pickle file. For efficiency, +# the data can be stored in such a way that explicit pointers are not +# necessary: for node data stored at index i, the two child nodes are at +# index (2 * i + 1) and (2 * i + 2); the parent node is (i - 1) // 2 +# (where // indicates integer division). +# +# The data arrays used here are as follows: +# data : the [n_samples x n_features] array of data from which the tree +# is built +# idx_array : the length n_samples array used to keep track of the indices +# of data within each node. Each node has values idx_start and +# idx_end: the points within the node are given by (using numpy +# syntax) data[idx_array[idx_start:idx_end]]. +# node_data : the length n_nodes array of structures which store the node +# indices, node radii, and leaf information for each node. +# node_bounds : the [* x n_nodes x n_features] array containing the node +# bound information. For ball tree, the first dimension is 1, and +# each row contains the centroid of the node. For kd tree, the first +# dimension is 2 and the rows for each point contain the arrays of +# lower bounds and upper bounds in each direction. +# +# The lack of dynamic allocation means the number of nodes must be computed +# before the building of the tree. This can be done assuming the points are +# divided equally between child nodes at each step; although this removes +# some flexibility in tree creation, it ensures a balanced tree and ensures +# that the number of nodes required can be computed beforehand. Given a +# specified leaf_size (the minimum number of points in any node), it is +# possible to show that a balanced tree will have +# +# n_levels = 1 + max(0, floor(log2((n_samples - 1) / leaf_size))) +# +# in order to satisfy +# +# leaf_size <= min(n_points) <= 2 * leaf_size +# +# with the exception of the special case where n_samples < leaf_size. +# for a given number of levels, the number of nodes in the tree is given by +# +# n_nodes = 2 ** n_levels - 1 +# +# both these results can be straightforwardly shown by induction. The +# following code uses these values in the construction of the tree. +# +# Distance Metrics +# ---------------- +# For flexibility, the trees can be built using a variety of distance metrics. +# The metrics are described in the DistanceMetric class: the standard +# Euclidean distance is the default, and is inlined to be faster than other +# metrics. In addition, each metric defines both a distance and a +# "reduced distance", which is often faster to compute, and is therefore +# used in the query architecture whenever possible. (For example, in the +# case of the standard Euclidean distance, the reduced distance is the +# squared-distance). 
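+#
+# As a quick worked example of the sizing formulas above: for
+# n_samples = 1000 and leaf_size = 40,
+#
+#     n_levels = 1 + max(0, floor(log2(999 / 40))) = 1 + 4 = 5
+#     n_nodes = 2 ** 5 - 1 = 31
+#
+# giving 2 ** 4 = 16 leaves of 62 or 63 points each, which indeed satisfies
+# leaf_size <= n_points <= 2 * leaf_size.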
+# +# Implementation Notes +# -------------------- +# This implementation uses the common object-oriented approach of having an +# abstract base class which is extended by the KDTree and BallTree +# specializations. +# +# The BinaryTree "base class" is defined here and then subclassed in the BallTree +# and KDTree pyx files. These files include implementations of the +# "abstract" methods. + +# Necessary Helper Functions +# -------------------------- +# These are the names and descriptions of the "abstract" functions which are +# defined in kd_tree.pyx and ball_tree.pyx: + +# cdef int allocate_data(BinaryTree tree, intp_t n_nodes, intp_t n_features): +# """Allocate arrays needed for the KD Tree""" + +# cdef int init_node(BinaryTree tree, intp_t i_node, +# intp_t idx_start, intp_t idx_end): +# """Initialize the node for the dataset stored in tree.data""" + +# cdef float64_t min_rdist(BinaryTree tree, intp_t i_node, float64_t* pt): +# """Compute the minimum reduced-distance between a point and a node""" + +# cdef float64_t min_dist(BinaryTree tree, intp_t i_node, float64_t* pt): +# """Compute the minimum distance between a point and a node""" + +# cdef float64_t max_rdist(BinaryTree tree, intp_t i_node, float64_t* pt): +# """Compute the maximum reduced-distance between a point and a node""" + +# cdef float64_t max_dist(BinaryTree tree, intp_t i_node, float64_t* pt): +# """Compute the maximum distance between a point and a node""" + +# cdef inline int min_max_dist(BinaryTree tree, intp_t i_node, float64_t* pt, +# float64_t* min_dist, float64_t* max_dist): +# """Compute the minimum and maximum distance between a point and a node""" + +# cdef inline float64_t min_rdist_dual(BinaryTree tree1, intp_t i_node1, +# BinaryTree tree2, intp_t i_node2): +# """Compute the minimum reduced distance between two nodes""" + +# cdef inline float64_t min_dist_dual(BinaryTree tree1, intp_t i_node1, +# BinaryTree tree2, intp_t i_node2): +# """Compute the minimum distance between two nodes""" + +# cdef inline float64_t max_rdist_dual(BinaryTree tree1, intp_t i_node1, +# BinaryTree tree2, intp_t i_node2): +# """Compute the maximum reduced distance between two nodes""" + +# cdef inline float64_t max_dist_dual(BinaryTree tree1, intp_t i_node1, +# BinaryTree tree2, intp_t i_node2): +# """Compute the maximum distance between two nodes""" + +cimport numpy as cnp +from cython cimport floating +from libc.math cimport fabs, sqrt, exp, cos, pow, log, lgamma +from libc.math cimport fmin, fmax +from libc.stdlib cimport calloc, malloc, free +from libc.string cimport memcpy + +import numpy as np +import warnings + +from ..metrics._dist_metrics cimport ( + DistanceMetric, + DistanceMetric64, + DistanceMetric32, + euclidean_dist64, + euclidean_dist32, + euclidean_rdist64, + euclidean_rdist32, + euclidean_dist_to_rdist64, + euclidean_dist_to_rdist32, +) + +from ._partition_nodes cimport partition_node_indices + +from ..utils import check_array +from ..utils._typedefs cimport float32_t, float64_t, intp_t +from ..utils._heap cimport heap_push +from ..utils._sorting cimport simultaneous_sort as _simultaneous_sort + +cnp.import_array() + + +# TODO: use cnp.PyArray_ENABLEFLAGS when Cython>=3.0 is used. 
+cdef extern from "numpy/arrayobject.h":
+    void PyArray_ENABLEFLAGS(cnp.ndarray arr, int flags)
+
+
+# some handy constants
+cdef float64_t INF = np.inf
+cdef float64_t NEG_INF = -np.inf
+cdef float64_t PI = np.pi
+cdef float64_t ROOT_2PI = sqrt(2 * PI)
+cdef float64_t LOG_PI = log(PI)
+cdef float64_t LOG_2PI = log(2 * PI)
+
+
+# Some compound datatypes used below:
+cdef struct NodeHeapData_t:
+    float64_t val
+    intp_t i1
+    intp_t i2
+
+# build the corresponding numpy dtype for NodeHeapData
+cdef NodeHeapData_t nhd_tmp
+NodeHeapData = np.asarray(<NodeHeapData_t[:1]>(&nhd_tmp)).dtype
+
+cdef struct NodeData_t:
+    intp_t idx_start
+    intp_t idx_end
+    intp_t is_leaf
+    float64_t radius
+
+# build the corresponding numpy dtype for NodeData
+cdef NodeData_t nd_tmp
+NodeData = np.asarray(<NodeData_t[:1]>(&nd_tmp)).dtype
+
+
+######################################################################
+# Define doc strings, substituting the appropriate class name using
+# the DOC_DICT variable defined in the pyx files.
+CLASS_DOC = """{BinaryTree} for fast generalized N-point problems
+
+Read more in the :ref:`User Guide `.
+
+Parameters
+----------
+X : array-like of shape (n_samples, n_features)
+    n_samples is the number of points in the data set, and
+    n_features is the dimension of the parameter space.
+    Note: if X is a C-contiguous array of doubles then data will
+    not be copied. Otherwise, an internal copy will be made.
+
+leaf_size : positive int, default=40
+    Number of points at which to switch to brute-force. Changing
+    leaf_size will not affect the results of a query, but can
+    significantly impact the speed of a query and the memory required
+    to store the constructed tree. The amount of memory needed to
+    store the tree scales as approximately n_samples / leaf_size.
+    For a specified ``leaf_size``, a leaf node is guaranteed to
+    satisfy ``leaf_size <= n_points <= 2 * leaf_size``, except in
+    the case that ``n_samples < leaf_size``.
+
+metric : str or DistanceMetric64 object, default='minkowski'
+    Metric to use for distance computation. Default is "minkowski", which
+    results in the standard Euclidean distance when p = 2.
+    A list of valid metrics for {BinaryTree} is given by the attribute
+    `valid_metrics`.
+    See the documentation of `scipy.spatial.distance `_ and
+    the metrics listed in :class:`~sklearn.metrics.pairwise.distance_metrics` for
+    more information on any distance metric.
+
+Additional keywords are passed to the distance metric class.
+Note: Callable functions in the metric parameter are NOT supported for KDTree
+and Ball Tree. Function call overhead will result in very poor performance.
+
+Attributes
+----------
+data : memory view
+    The training data
+valid_metrics: list of str
+    List of valid distance metrics.
+
+Examples
+--------
+Query for k-nearest neighbors
+
+    >>> import numpy as np
+    >>> from sklearn.neighbors import {BinaryTree}
+    >>> rng = np.random.RandomState(0)
+    >>> X = rng.random_sample((10, 3))  # 10 points in 3 dimensions
+    >>> tree = {BinaryTree}(X, leaf_size=2)  # doctest: +SKIP
+    >>> dist, ind = tree.query(X[:1], k=3)  # doctest: +SKIP
+    >>> print(ind)  # indices of 3 closest neighbors
+    [0 3 1]
+    >>> print(dist)  # distances to 3 closest neighbors
+    [ 0. 0.19662693 0.29473397]
+
+Pickle and Unpickle a tree. Note that the state of the tree is saved in the
+pickle operation: the tree need not be rebuilt upon unpickling.
+ + >>> import numpy as np + >>> import pickle + >>> rng = np.random.RandomState(0) + >>> X = rng.random_sample((10, 3)) # 10 points in 3 dimensions + >>> tree = {BinaryTree}(X, leaf_size=2) # doctest: +SKIP + >>> s = pickle.dumps(tree) # doctest: +SKIP + >>> tree_copy = pickle.loads(s) # doctest: +SKIP + >>> dist, ind = tree_copy.query(X[:1], k=3) # doctest: +SKIP + >>> print(ind) # indices of 3 closest neighbors + [0 3 1] + >>> print(dist) # distances to 3 closest neighbors + [ 0. 0.19662693 0.29473397] + +Query for neighbors within a given radius + + >>> import numpy as np + >>> rng = np.random.RandomState(0) + >>> X = rng.random_sample((10, 3)) # 10 points in 3 dimensions + >>> tree = {BinaryTree}(X, leaf_size=2) # doctest: +SKIP + >>> print(tree.query_radius(X[:1], r=0.3, count_only=True)) + 3 + >>> ind = tree.query_radius(X[:1], r=0.3) # doctest: +SKIP + >>> print(ind) # indices of neighbors within distance 0.3 + [3 0 1] + + +Compute a gaussian kernel density estimate: + + >>> import numpy as np + >>> rng = np.random.RandomState(42) + >>> X = rng.random_sample((100, 3)) + >>> tree = {BinaryTree}(X) # doctest: +SKIP + >>> tree.kernel_density(X[:3], h=0.1, kernel='gaussian') + array([ 6.94114649, 7.83281226, 7.2071716 ]) + +Compute a two-point auto-correlation function + + >>> import numpy as np + >>> rng = np.random.RandomState(0) + >>> X = rng.random_sample((30, 3)) + >>> r = np.linspace(0, 1, 5) + >>> tree = {BinaryTree}(X) # doctest: +SKIP + >>> tree.two_point_correlation(X, r) + array([ 30, 62, 278, 580, 820]) + +""" + + +###################################################################### +# Utility functions +cdef float64_t logaddexp(float64_t x1, float64_t x2): + """logaddexp(x1, x2) -> log(exp(x1) + exp(x2))""" + cdef float64_t a = fmax(x1, x2) + if a == NEG_INF: + return NEG_INF + else: + return a + log(exp(x1 - a) + exp(x2 - a)) + +cdef float64_t logsubexp(float64_t x1, float64_t x2): + """logsubexp(x1, x2) -> log(exp(x1) - exp(x2))""" + if x1 <= x2: + return NEG_INF + else: + return x1 + log(1 - exp(x2 - x1)) + + +###################################################################### +# Kernel functions +# +# Note: Kernels assume dist is non-negative and h is positive +# All kernel functions are normalized such that K(0, h) = 1. +# The fully normalized kernel is: +# K = exp[kernel_norm(h, d, kernel) + compute_kernel(dist, h, kernel)] +# The code only works with non-negative kernels: i.e. K(d, h) >= 0 +# for all valid d and h. Note that for precision, the log of both +# the kernel and kernel norm is returned. 
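+#
+# For example, for the gaussian kernel in d dimensions the normalized form is
+#
+#     K(dist, h) = (2 * pi * h ** 2) ** (-d / 2) * exp(-0.5 * dist ** 2 / h ** 2)
+#
+# so log_gaussian_kernel below returns -0.5 * dist ** 2 / h ** 2, and
+# _log_kernel_norm returns -0.5 * d * log(2 * pi) - d * log(h).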
+cdef enum KernelType:
+    GAUSSIAN_KERNEL = 1
+    TOPHAT_KERNEL = 2
+    EPANECHNIKOV_KERNEL = 3
+    EXPONENTIAL_KERNEL = 4
+    LINEAR_KERNEL = 5
+    COSINE_KERNEL = 6
+
+
+cdef inline float64_t log_gaussian_kernel(float64_t dist, float64_t h):
+    """log of the gaussian kernel for bandwidth h (unnormalized)"""
+    return -0.5 * (dist * dist) / (h * h)
+
+
+cdef inline float64_t log_tophat_kernel(float64_t dist, float64_t h):
+    """log of the tophat kernel for bandwidth h (unnormalized)"""
+    if dist < h:
+        return 0.0
+    else:
+        return NEG_INF
+
+
+cdef inline float64_t log_epanechnikov_kernel(float64_t dist, float64_t h):
+    """log of the epanechnikov kernel for bandwidth h (unnormalized)"""
+    if dist < h:
+        return log(1.0 - (dist * dist) / (h * h))
+    else:
+        return NEG_INF
+
+
+cdef inline float64_t log_exponential_kernel(float64_t dist, float64_t h):
+    """log of the exponential kernel for bandwidth h (unnormalized)"""
+    return -dist / h
+
+
+cdef inline float64_t log_linear_kernel(float64_t dist, float64_t h):
+    """log of the linear kernel for bandwidth h (unnormalized)"""
+    if dist < h:
+        return log(1 - dist / h)
+    else:
+        return NEG_INF
+
+
+cdef inline float64_t log_cosine_kernel(float64_t dist, float64_t h):
+    """log of the cosine kernel for bandwidth h (unnormalized)"""
+    if dist < h:
+        return log(cos(0.5 * PI * dist / h))
+    else:
+        return NEG_INF
+
+
+cdef inline float64_t compute_log_kernel(float64_t dist, float64_t h,
+                                         KernelType kernel):
+    """Given a KernelType enumeration, compute the appropriate log-kernel"""
+    if kernel == GAUSSIAN_KERNEL:
+        return log_gaussian_kernel(dist, h)
+    elif kernel == TOPHAT_KERNEL:
+        return log_tophat_kernel(dist, h)
+    elif kernel == EPANECHNIKOV_KERNEL:
+        return log_epanechnikov_kernel(dist, h)
+    elif kernel == EXPONENTIAL_KERNEL:
+        return log_exponential_kernel(dist, h)
+    elif kernel == LINEAR_KERNEL:
+        return log_linear_kernel(dist, h)
+    elif kernel == COSINE_KERNEL:
+        return log_cosine_kernel(dist, h)
+
+
+# ------------------------------------------------------------
+# Kernel norms are defined via the volume element V_n
+# and surface element S_(n-1) of an n-sphere.
+cdef float64_t logVn(intp_t n):
+    """V_n = pi^(n/2) / gamma(n/2 + 1)"""
+    return 0.5 * n * LOG_PI - lgamma(0.5 * n + 1)
+
+
+cdef float64_t logSn(intp_t n):
+    """V_(n+1) = int_0^1 S_n r^n dr"""
+    return LOG_2PI + logVn(n - 1)
+
+
+cdef float64_t _log_kernel_norm(float64_t h, intp_t d,
+                                KernelType kernel) except -1:
+    """Given a KernelType enumeration, compute the kernel normalization.
+
+    h is the bandwidth, d is the dimension.
+    """
+    cdef float64_t tmp, factor = 0
+    cdef intp_t k
+    if kernel == GAUSSIAN_KERNEL:
+        factor = 0.5 * d * LOG_2PI
+    elif kernel == TOPHAT_KERNEL:
+        factor = logVn(d)
+    elif kernel == EPANECHNIKOV_KERNEL:
+        factor = logVn(d) + log(2. / (d + 2.))
+    elif kernel == EXPONENTIAL_KERNEL:
+        factor = logSn(d - 1) + lgamma(d)
+    elif kernel == LINEAR_KERNEL:
+        factor = logVn(d) - log(d + 1.)
+    elif kernel == COSINE_KERNEL:
+        # this is derived from a chain rule integration
+        factor = 0
+        tmp = 2. / PI
+        for k in range(1, d + 1, 2):
+            factor += tmp
+            tmp *= -(d - k) * (d - k - 1) * (2. / PI) ** 2
+        factor = log(factor) + logSn(d - 1)
+    else:
+        raise ValueError("Kernel code not recognized")
+    return -factor - d * log(h)
+
+
+def kernel_norm(h, d, kernel, return_log=False):
+    """Given a string specification of a kernel, compute the normalization.
+
+    Parameters
+    ----------
+    h : float
+        The bandwidth of the kernel.
+ d : int + The dimension of the space in which the kernel norm is computed. + kernel : str + The kernel identifier. Must be one of + ['gaussian'|'tophat'|'epanechnikov'| + 'exponential'|'linear'|'cosine'] + return_log : bool, default=False + If True, return the log of the kernel norm. Otherwise, return the + kernel norm. + Returns + ------- + knorm or log_knorm : float + the kernel norm or logarithm of the kernel norm. + """ + if kernel == 'gaussian': + result = _log_kernel_norm(h, d, GAUSSIAN_KERNEL) + elif kernel == 'tophat': + result = _log_kernel_norm(h, d, TOPHAT_KERNEL) + elif kernel == 'epanechnikov': + result = _log_kernel_norm(h, d, EPANECHNIKOV_KERNEL) + elif kernel == 'exponential': + result = _log_kernel_norm(h, d, EXPONENTIAL_KERNEL) + elif kernel == 'linear': + result = _log_kernel_norm(h, d, LINEAR_KERNEL) + elif kernel == 'cosine': + result = _log_kernel_norm(h, d, COSINE_KERNEL) + else: + raise ValueError('kernel not recognized') + + if return_log: + return result + else: + return np.exp(result) + +{{for name_suffix, INPUT_DTYPE_t, INPUT_DTYPE, NPY_TYPE in implementation_specific_values}} + +cdef class NeighborsHeap{{name_suffix}}: + """A max-heap structure to keep track of distances/indices of neighbors + + This implements an efficient pre-allocated set of fixed-size heaps + for chasing neighbors, holding both an index and a distance. + When any row of the heap is full, adding an additional point will push + the furthest point off the heap. + + Parameters + ---------- + n_pts : int + the number of heaps to use + n_nbrs : int + the size of each heap. + """ + cdef {{INPUT_DTYPE_t}}[:, ::1] distances + cdef intp_t[:, ::1] indices + + def __cinit__(self): + # One-element arrays are used as placeholders to prevent + # any problem due to potential access to those attributes + # (e.g. assigning to NULL or a to value in another segment). + self.distances = np.zeros((1, 1), dtype={{INPUT_DTYPE}}, order='C') + self.indices = np.zeros((1, 1), dtype=np.intp, order='C') + + def __init__(self, n_pts, n_nbrs): + self.distances = np.full( + (n_pts, n_nbrs), np.inf, dtype={{INPUT_DTYPE}}, order='C' + ) + self.indices = np.zeros((n_pts, n_nbrs), dtype=np.intp, order='C') + + def get_arrays(self, sort=True): + """Get the arrays of distances and indices within the heap. + + If sort=True, then simultaneously sort the indices and distances, + so the closer points are listed first. 
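+
+        A minimal sketch of the expected behaviour (illustrative only,
+        using the Python-visible ``push``)::
+
+            heap = NeighborsHeap{{name_suffix}}(n_pts=1, n_nbrs=3)
+            for i, d in enumerate([0.5, 0.2, 0.9, 0.1]):
+                heap.push(0, d, i)
+            dist, ind = heap.get_arrays()
+            # dist[0] == [0.1, 0.2, 0.5], ind[0] == [3, 1, 0]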
+ """ + if sort: + self._sort() + return self.distances.base, self.indices.base + + cdef inline float64_t largest(self, intp_t row) except -1 nogil: + """Return the largest distance in the given row""" + return self.distances[row, 0] + + def push(self, intp_t row, float64_t val, intp_t i_val): + return self._push(row, val, i_val) + + cdef int _push(self, intp_t row, float64_t val, + intp_t i_val) except -1 nogil: + """push (val, i_val) into the given row""" + return heap_push( + values=&self.distances[row, 0], + indices=&self.indices[row, 0], + size=self.distances.shape[1], + val=val, + val_idx=i_val, + ) + + cdef int _sort(self) except -1: + """simultaneously sort the distances and indices""" + cdef intp_t row + for row in range(self.distances.shape[0]): + _simultaneous_sort( + dist=&self.distances[row, 0], + idx=&self.indices[row, 0], + size=self.distances.shape[1], + ) + return 0 + +{{endfor}} + +#------------------------------------------------------------ +# find_node_split_dim: +# this computes the equivalent of +# j_max = np.argmax(np.max(data, 0) - np.min(data, 0)) +cdef intp_t find_node_split_dim(const floating* data, + const intp_t* node_indices, + intp_t n_features, + intp_t n_points) except -1: + """Find the dimension with the largest spread. + + Parameters + ---------- + data : double pointer + Pointer to a 2D array of the training data, of shape [N, n_features]. + N must be greater than any of the values in node_indices. + node_indices : int pointer + Pointer to a 1D array of length n_points. This lists the indices of + each of the points within the current node. + + Returns + ------- + i_max : int + The index of the feature (dimension) within the node that has the + largest spread. + + Notes + ----- + In numpy, this operation is equivalent to + + def find_node_split_dim(data, node_indices): + return np.argmax(data[node_indices].max(0) - data[node_indices].min(0)) + + The cython version is much more efficient in both computation and memory. + """ + cdef float64_t min_val, max_val, val, spread, max_spread + cdef intp_t i, j, j_max + + j_max = 0 + max_spread = 0 + + for j in range(n_features): + max_val = data[node_indices[0] * n_features + j] + min_val = max_val + for i in range(1, n_points): + val = data[node_indices[i] * n_features + j] + max_val = fmax(max_val, val) + min_val = fmin(min_val, val) + spread = max_val - min_val + if spread > max_spread: + max_spread = spread + j_max = j + return j_max + + +###################################################################### +# NodeHeap : min-heap used to keep track of nodes during +# breadth-first query +cdef inline void swap_nodes(NodeHeapData_t* arr, intp_t i1, intp_t i2): + cdef NodeHeapData_t tmp = arr[i1] + arr[i1] = arr[i2] + arr[i2] = tmp + + +cdef class NodeHeap: + """NodeHeap + + This is a min-heap implementation for keeping track of nodes + during a breadth-first search. Unlike the NeighborsHeap above, + the NodeHeap does not have a fixed size and must be able to grow + as elements are added. + + Internally, the data is stored in a simple binary heap which meets + the min heap condition: + + heap[i].val < min(heap[2 * i + 1].val, heap[2 * i + 2].val) + """ + cdef NodeHeapData_t[:] data + cdef intp_t n + + def __cinit__(self): + # A one-elements array is used as a placeholder to prevent + # any problem due to potential access to this attribute + # (e.g. assigning to NULL or a to value in another segment). 
+ self.data = np.zeros(1, dtype=NodeHeapData, order='C') + + def __init__(self, size_guess=100): + size_guess = max(size_guess, 1) # need space for at least one item + self.data = np.zeros(size_guess, dtype=NodeHeapData, order='C') + self.n = size_guess + self.clear() + + cdef int resize(self, intp_t new_size) except -1: + """Resize the heap to be either larger or smaller""" + cdef: + NodeHeapData_t *data_ptr + NodeHeapData_t *new_data_ptr + intp_t i + intp_t size = self.data.shape[0] + NodeHeapData_t[:] new_data = np.zeros( + new_size, + dtype=NodeHeapData, + ) + + if size > 0 and new_size > 0: + data_ptr = &self.data[0] + new_data_ptr = &new_data[0] + for i in range(min(size, new_size)): + new_data_ptr[i] = data_ptr[i] + + if new_size < size: + self.n = new_size + + self.data = new_data + return 0 + + cdef int push(self, NodeHeapData_t data) except -1: + """Push a new item onto the heap""" + cdef intp_t i, i_parent + cdef NodeHeapData_t* data_arr + self.n += 1 + if self.n > self.data.shape[0]: + self.resize(2 * self.n) + + # put the new element at the end, + # and then perform swaps until the heap is in order + data_arr = &self.data[0] + i = self.n - 1 + data_arr[i] = data + + while i > 0: + i_parent = (i - 1) // 2 + if data_arr[i_parent].val <= data_arr[i].val: + break + else: + swap_nodes(data_arr, i, i_parent) + i = i_parent + return 0 + + cdef NodeHeapData_t peek(self): + """Peek at the root of the heap, without removing it""" + return self.data[0] + + cdef NodeHeapData_t pop(self): + """Remove the root of the heap, and update the remaining nodes""" + if self.n == 0: + raise ValueError('cannot pop on empty heap') + + cdef intp_t i, i_child1, i_child2, i_swap + cdef NodeHeapData_t* data_arr = &self.data[0] + cdef NodeHeapData_t popped_element = data_arr[0] + + # pop off the first element, move the last element to the front, + # and then perform swaps until the heap is back in order + data_arr[0] = data_arr[self.n - 1] + self.n -= 1 + + i = 0 + + while (i < self.n): + i_child1 = 2 * i + 1 + i_child2 = 2 * i + 2 + i_swap = 0 + + if i_child2 < self.n: + if data_arr[i_child1].val <= data_arr[i_child2].val: + i_swap = i_child1 + else: + i_swap = i_child2 + elif i_child1 < self.n: + i_swap = i_child1 + else: + break + + if (i_swap > 0) and (data_arr[i_swap].val <= data_arr[i].val): + swap_nodes(data_arr, i, i_swap) + i = i_swap + else: + break + + return popped_element + + cdef void clear(self): + """Clear the heap""" + self.n = 0 + + +###################################################################### +# newObj function +# this is a helper function for pickling +def newObj(obj): + return obj.__new__(obj) + + +{{for name_suffix, INPUT_DTYPE_t, INPUT_DTYPE, NPY_TYPE in implementation_specific_values}} + +###################################################################### +# define the reverse mapping of VALID_METRICS{{name_suffix}} +from sklearn.metrics._dist_metrics import get_valid_metric_ids +VALID_METRIC_IDS{{name_suffix}} = get_valid_metric_ids(VALID_METRICS{{name_suffix}}) + + +###################################################################### +# Binary Tree class +cdef class BinaryTree{{name_suffix}}: + + cdef readonly const {{INPUT_DTYPE_t}}[:, ::1] data + cdef readonly const {{INPUT_DTYPE_t}}[::1] sample_weight + cdef public float64_t sum_weight + + # TODO: idx_array and node_bounds must not be const, but this change needs + # to happen in a way which preserves pickling + # See also: https://github.com/cython/cython/issues/5639 + cdef public const intp_t[::1] idx_array + 
+    cdef public const NodeData_t[::1] node_data
+    cdef public const {{INPUT_DTYPE_t}}[:, :, ::1] node_bounds
+
+    cdef intp_t leaf_size
+    cdef intp_t n_levels
+    cdef intp_t n_nodes
+
+    cdef DistanceMetric{{name_suffix}} dist_metric
+    cdef int euclidean
+
+    # variables to keep track of building & querying stats
+    cdef int n_trims
+    cdef int n_leaves
+    cdef int n_splits
+    cdef int n_calls
+
+    valid_metrics = VALID_METRIC_IDS{{name_suffix}}
+
+    # Use cinit to initialize all arrays to empty: this will prevent memory
+    # errors and seg-faults in rare cases where __init__ is not called.
+    # A one-element array is used as a placeholder to prevent
+    # any problem due to potential access to this attribute
+    # (e.g. assigning to NULL or to a value in another segment).
+    def __cinit__(self):
+        self.data = np.empty((1, 1), dtype={{INPUT_DTYPE}}, order='C')
+        self.sample_weight = np.empty(1, dtype={{INPUT_DTYPE}}, order='C')
+        self.idx_array = np.empty(1, dtype=np.intp, order='C')
+        self.node_data = np.empty(1, dtype=NodeData, order='C')
+        self.node_bounds = np.empty((1, 1, 1), dtype={{INPUT_DTYPE}})
+
+        self.leaf_size = 0
+        self.n_levels = 0
+        self.n_nodes = 0
+
+        self.euclidean = False
+
+        self.n_trims = 0
+        self.n_leaves = 0
+        self.n_splits = 0
+        self.n_calls = 0
+
+    def __init__(self, data,
+                 leaf_size=40, metric='minkowski', sample_weight=None, **kwargs):
+        # validate data
+        self.data = check_array(data, dtype={{INPUT_DTYPE}}, order='C')
+        if self.data.size == 0:
+            raise ValueError("X is an empty array")
+
+        n_samples = self.data.shape[0]
+        n_features = self.data.shape[1]
+
+        if leaf_size < 1:
+            raise ValueError("leaf_size must be greater than or equal to 1")
+        self.leaf_size = leaf_size
+
+        self.dist_metric = DistanceMetric.get_metric(metric, dtype={{INPUT_DTYPE}}, **kwargs)
+        self.euclidean = (self.dist_metric.__class__.__name__
+                          == 'EuclideanDistance{{name_suffix}}')
+
+        metric = self.dist_metric.__class__.__name__
+        if metric not in VALID_METRICS{{name_suffix}}:
+            raise ValueError('metric {metric} is not valid for '
+                             '{BinaryTree}'.format(metric=metric,
+                                                   **DOC_DICT{{name_suffix}}))
+        self.dist_metric._validate_data(self.data)
+
+        # determine number of levels in the tree, and from this
+        # the number of nodes in the tree.  This results in leaf nodes
+        # with numbers of points between leaf_size and 2 * leaf_size
+        self.n_levels = int(
+            np.log2(fmax(1, (n_samples - 1) / self.leaf_size)) + 1)
+        self.n_nodes = (2 ** self.n_levels) - 1
+
+        # allocate arrays for storage
+        self.idx_array = np.arange(n_samples, dtype=np.intp)
+        self.node_data = np.zeros(self.n_nodes, dtype=NodeData)
+
+        self._update_sample_weight(n_samples, sample_weight)
+
+        # Allocate tree-specific data
+        allocate_data{{name_suffix}}(self, self.n_nodes, n_features)
+        self._recursive_build(
+            node_data=self.node_data.base,
+            i_node=0,
+            idx_start=0,
+            idx_end=n_samples
+        )
+
+    def _update_sample_weight(self, n_samples, sample_weight):
+        if sample_weight is not None:
+            self.sample_weight = np.asarray(
+                sample_weight, dtype={{INPUT_DTYPE}}, order='C')
+            self.sum_weight = np.sum(self.sample_weight)
+        else:
+            self.sample_weight = None
+            self.sum_weight = n_samples
+
+    def __reduce__(self):
+        """
+        reduce method used for pickling
+        """
+        return (newObj, (type(self),), self.__getstate__())
+
+    def __getstate__(self):
+        """
+        get state for pickling
+        """
+        if self.sample_weight is not None:
+            # pass the numpy array
+            sample_weight = self.sample_weight.base
+        else:
+            # pass None to avoid confusion with the empty placeholder
+            # of size 1 from __cinit__
+            sample_weight = None
+        return (self.data.base,
+                self.idx_array.base,
+                self.node_data.base,
+                self.node_bounds.base,
+                int(self.leaf_size),
+                int(self.n_levels),
+                int(self.n_nodes),
+                int(self.n_trims),
+                int(self.n_leaves),
+                int(self.n_splits),
+                int(self.n_calls),
+                self.dist_metric,
+                sample_weight)
+
+    def __setstate__(self, state):
+        """
+        set state for pickling
+        """
+        self.data = state[0]
+        self.idx_array = state[1]
+        self.node_data = state[2]
+        self.node_bounds = state[3]
+        self.leaf_size = state[4]
+        self.n_levels = state[5]
+        self.n_nodes = state[6]
+        self.n_trims = state[7]
+        self.n_leaves = state[8]
+        self.n_splits = state[9]
+        self.n_calls = state[10]
+        self.dist_metric = state[11]
+        sample_weight = state[12]
+
+        self.euclidean = (self.dist_metric.__class__.__name__
+                          == 'EuclideanDistance{{name_suffix}}')
+        n_samples = self.data.shape[0]
+        self._update_sample_weight(n_samples, sample_weight)
+
+    def get_tree_stats(self):
+        """
+        get_tree_stats()
+
+        Get tree stats.
+
+        Returns
+        -------
+        tree_stats: tuple of int
+            (number of trims, number of leaves, number of splits)
+        """
+        return (self.n_trims, self.n_leaves, self.n_splits)
+
+    def reset_n_calls(self):
+        """
+        reset_n_calls()
+
+        Reset number of calls to 0.
+        """
+        self.n_calls = 0
+
+    def get_n_calls(self):
+        """
+        get_n_calls()
+
+        Get number of calls.
+
+        Returns
+        -------
+        n_calls: int
+            number of distance computation calls
+        """
+        return self.n_calls
+
+    def get_arrays(self):
+        """
+        get_arrays()
+
+        Get data and node arrays.
+
+        Returns
+        -------
+        arrays: tuple of array
+            Arrays for storing tree data, index, node data and node bounds.
+        """
+        return (
+            self.data.base,
+            self.idx_array.base,
+            self.node_data.base,
+            self.node_bounds.base,
+        )
+
+    cdef inline float64_t dist(self, const {{INPUT_DTYPE_t}}* x1, const {{INPUT_DTYPE_t}}* x2,
+                               intp_t size) except -1 nogil:
+        """Compute the distance between arrays x1 and x2"""
+        self.n_calls += 1
+        if self.euclidean:
+            return euclidean_dist{{name_suffix}}(x1, x2, size)
+        else:
+            return self.dist_metric.dist(x1, x2, size)
+
+    cdef inline float64_t rdist(self, const {{INPUT_DTYPE_t}}* x1, const {{INPUT_DTYPE_t}}* x2,
+                                intp_t size) except -1 nogil:
+        """Compute the reduced distance between arrays x1 and x2.
+
+        The reduced distance, defined for some metrics, is a quantity which
+        is more efficient to compute than the distance, but preserves the
+        relative rankings of the true distance.  For example, the reduced
+        distance for the Euclidean metric is the squared-euclidean distance.
+        """
+        self.n_calls += 1
+        if self.euclidean:
+            return euclidean_rdist{{name_suffix}}(x1, x2, size)
+        else:
+            return self.dist_metric.rdist(x1, x2, size)
+
+    cdef int _recursive_build(self, NodeData_t[::1] node_data, intp_t i_node, intp_t idx_start,
+                              intp_t idx_end) except -1:
+        """Recursively build the tree.
+
+        Parameters
+        ----------
+        i_node : int
+            the node for the current step
+        idx_start, idx_end : int
+            the bounding indices in the idx_array which define the points that
+            belong to this node.
+        """
+        cdef intp_t i_max
+        cdef intp_t n_features = self.data.shape[1]
+        cdef intp_t n_points = idx_end - idx_start
+        cdef intp_t n_mid = n_points / 2
+        cdef intp_t* idx_array = &self.idx_array[idx_start]
+        cdef const {{INPUT_DTYPE_t}}* data = &self.data[0, 0]
+
+        # initialize node data
+        init_node{{name_suffix}}(self, node_data, i_node, idx_start, idx_end)
+
+        if 2 * i_node + 1 >= self.n_nodes:
+            node_data[i_node].is_leaf = True
+            if idx_end - idx_start > 2 * self.leaf_size:
+                # this shouldn't happen if our memory allocation is correct
+                # we'll proactively prevent memory errors, but raise a
+                # warning saying we're doing so.
+                import warnings
+                warnings.warn("Internal: memory layout is flawed: "
+                              "not enough nodes allocated")
+
+        elif idx_end - idx_start < 2:
+            # again, this shouldn't happen if our memory allocation
+            # is correct.  Raise a warning.
+            import warnings
+            warnings.warn("Internal: memory layout is flawed: "
+                          "too many nodes allocated")
+            node_data[i_node].is_leaf = True
+
+        else:
+            # split node and recursively construct child nodes.
+ node_data[i_node].is_leaf = False + i_max = find_node_split_dim(data, idx_array, + n_features, n_points) + partition_node_indices(data, idx_array, i_max, n_mid, + n_features, n_points) + self._recursive_build(node_data, 2 * i_node + 1, + idx_start, idx_start + n_mid) + self._recursive_build(node_data, 2 * i_node + 2, + idx_start + n_mid, idx_end) + + def query(self, X, k=1, return_distance=True, + dualtree=False, breadth_first=False, + sort_results=True): + """ + query(X, k=1, return_distance=True, + dualtree=False, breadth_first=False) + + query the tree for the k nearest neighbors + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + An array of points to query + k : int, default=1 + The number of nearest neighbors to return + return_distance : bool, default=True + if True, return a tuple (d, i) of distances and indices + if False, return array i + dualtree : bool, default=False + if True, use the dual tree formalism for the query: a tree is + built for the query points, and the pair of trees is used to + efficiently search this space. This can lead to better + performance as the number of points grows large. + breadth_first : bool, default=False + if True, then query the nodes in a breadth-first manner. + Otherwise, query the nodes in a depth-first manner. + sort_results : bool, default=True + if True, then distances and indices of each point are sorted + on return, so that the first column contains the closest points. + Otherwise, neighbors are returned in an arbitrary order. + + Returns + ------- + i : if return_distance == False + (d,i) : if return_distance == True + + d : ndarray of shape X.shape[:-1] + (k,), dtype=double + Each entry gives the list of distances to the neighbors of the + corresponding point. + + i : ndarray of shape X.shape[:-1] + (k,), dtype=int + Each entry gives the list of indices of neighbors of the + corresponding point. + """ + # XXX: we should allow X to be a pre-built tree. 
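# ----------------------------------------------------------------------
# Usage sketch, not from the scikit-learn sources: through the public
# BallTree/KDTree classes compiled from this template, the traversal
# strategies documented above are selected by keyword flags, and all
# combinations return the same neighbors:

import numpy as np
from sklearn.neighbors import BallTree

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 3))
tree = BallTree(X, leaf_size=40)

dist, ind = tree.query(X[:5], k=3)                       # single-tree, depth-first
dist_bf, _ = tree.query(X[:5], k=3, breadth_first=True)  # single-tree, breadth-first
dist_dt, _ = tree.query(X[:5], k=3, dualtree=True)       # dual-tree, depth-first
assert np.allclose(dist, dist_bf) and np.allclose(dist, dist_dt)

# query_radius returns variable-length neighbor lists (dtype=object);
# count_only=True returns just the per-point counts
ind_r = tree.query_radius(X[:5], r=1.0)
counts = tree.query_radius(X[:5], r=1.0, count_only=True)
assert all(len(i) == c for i, c in zip(ind_r, counts))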
+ X = check_array(X, dtype={{INPUT_DTYPE}}, order='C') + + if X.shape[X.ndim - 1] != self.data.shape[1]: + raise ValueError("query data dimension must " + "match training data dimension") + + if self.data.shape[0] < k: + raise ValueError("k must be less than or equal " + "to the number of training points") + + # flatten X, and save original shape information + np_Xarr = X.reshape((-1, self.data.shape[1])) + cdef const {{INPUT_DTYPE_t}}[:, ::1] Xarr = np_Xarr + cdef float64_t reduced_dist_LB + cdef intp_t i + cdef const {{INPUT_DTYPE_t}}* pt + + # initialize heap for neighbors + cdef NeighborsHeap{{name_suffix}} heap = NeighborsHeap{{name_suffix}}(Xarr.shape[0], k) + + # node heap for breadth-first queries + cdef NodeHeap nodeheap + if breadth_first: + nodeheap = NodeHeap(self.data.shape[0] // self.leaf_size) + + # bounds is needed for the dual tree algorithm + cdef float64_t[::1] bounds + + self.n_trims = 0 + self.n_leaves = 0 + self.n_splits = 0 + + if dualtree: + other = self.__class__(np_Xarr, metric=self.dist_metric, + leaf_size=self.leaf_size) + if breadth_first: + self._query_dual_breadthfirst(other, heap, nodeheap) + else: + reduced_dist_LB = min_rdist_dual{{name_suffix}}(self, 0, other, 0) + bounds = np.full(other.node_data.shape[0], np.inf) + self._query_dual_depthfirst(0, other, 0, bounds, + heap, reduced_dist_LB) + + else: + pt = &Xarr[0, 0] + if breadth_first: + for i in range(Xarr.shape[0]): + self._query_single_breadthfirst(pt, i, heap, nodeheap) + pt += Xarr.shape[1] + else: + with nogil: + for i in range(Xarr.shape[0]): + reduced_dist_LB = min_rdist{{name_suffix}}(self, 0, pt) + self._query_single_depthfirst(0, pt, i, heap, + reduced_dist_LB) + pt += Xarr.shape[1] + + distances, indices = heap.get_arrays(sort=sort_results) + distances = self.dist_metric.rdist_to_dist(distances) + + # deflatten results + if return_distance: + return (distances.reshape(X.shape[:X.ndim - 1] + (k,)), + indices.reshape(X.shape[:X.ndim - 1] + (k,))) + else: + return indices.reshape(X.shape[:X.ndim - 1] + (k,)) + + def query_radius(self, X, r, int return_distance=False, + int count_only=False, int sort_results=False): + """ + query_radius(X, r, return_distance=False, + count_only=False, sort_results=False) + + query the tree for neighbors within a radius r + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + An array of points to query + r : distance within which neighbors are returned + r can be a single value, or an array of values of shape + x.shape[:-1] if different radii are desired for each point. + return_distance : bool, default=False + if True, return distances to neighbors of each point + if False, return only neighbors + Note that unlike the query() method, setting return_distance=True + here adds to the computation time. Not all distances need to be + calculated explicitly for return_distance=False. Results are + not sorted by default: see ``sort_results`` keyword. + count_only : bool, default=False + if True, return only the count of points within distance r + if False, return the indices of all points within distance r + If return_distance==True, setting count_only=True will + result in an error. + sort_results : bool, default=False + if True, the distances and indices will be sorted before being + returned. If False, the results will not be sorted. If + return_distance == False, setting sort_results = True will + result in an error. 
+
+        Returns
+        -------
+        count : if count_only == True
+        ind : if count_only == False and return_distance == False
+        (ind, dist) : if count_only == False and return_distance == True
+
+        count : ndarray of shape X.shape[:-1], dtype=int
+            Each entry gives the number of neighbors within a distance r of the
+            corresponding point.
+
+        ind : ndarray of shape X.shape[:-1], dtype=object
+            Each element is a numpy integer array listing the indices of
+            neighbors of the corresponding point.  Note that unlike
+            the results of a k-neighbors query, the returned neighbors
+            are not sorted by distance by default.
+
+        dist : ndarray of shape X.shape[:-1], dtype=object
+            Each element is a numpy double array listing the distances
+            corresponding to indices in i.
+        """
+        if count_only and return_distance:
+            raise ValueError("count_only and return_distance "
+                             "cannot both be true")
+
+        if sort_results and not return_distance:
+            raise ValueError("return_distance must be True "
+                             "if sort_results is True")
+
+        cdef intp_t i, count_i = 0
+        cdef intp_t n_features = self.data.shape[1]
+        cdef {{INPUT_DTYPE_t}}[::1] dist_arr_i
+        cdef intp_t[::1] idx_arr_i, counts
+        cdef const {{INPUT_DTYPE_t}}* pt
+        cdef intp_t** indices = NULL
+        cdef {{INPUT_DTYPE_t}}** distances = NULL
+
+        # validate X and prepare for query
+        X = check_array(X, dtype={{INPUT_DTYPE}}, order='C')
+
+        if X.shape[X.ndim - 1] != self.data.shape[1]:
+            raise ValueError("query data dimension must "
+                             "match training data dimension")
+
+        cdef const {{INPUT_DTYPE_t}}[:, ::1] Xarr = X.reshape((-1, self.data.shape[1]))
+
+        # prepare r for query
+        r = np.asarray(r, dtype=np.float64, order='C')
+        r = np.atleast_1d(r)
+        if r.shape == (1,):
+            r = np.full(X.shape[:X.ndim - 1], r[0], dtype=np.float64)
+        else:
+            if r.shape != X.shape[:X.ndim - 1]:
+                raise ValueError("r must be broadcastable to X.shape")
+
+        rarr_np = r.reshape(-1)  # store explicitly to keep in scope
+        cdef float64_t[::1] rarr = rarr_np
+
+        if not count_only:
+            indices = <intp_t**>calloc(Xarr.shape[0], sizeof(intp_t*))
+            if indices == NULL:
+                raise MemoryError()
+            if return_distance:
+                distances = <{{INPUT_DTYPE_t}}**>calloc(Xarr.shape[0], sizeof({{INPUT_DTYPE_t}}*))
+                if distances == NULL:
+                    free(indices)
+                    raise MemoryError()
+
+        np_idx_arr = np.zeros(self.data.shape[0], dtype=np.intp)
+        idx_arr_i = np_idx_arr
+
+        np_dist_arr = np.zeros(self.data.shape[0], dtype={{INPUT_DTYPE}})
+        dist_arr_i = np_dist_arr
+
+        counts_arr = np.zeros(Xarr.shape[0], dtype=np.intp)
+        counts = counts_arr
+
+        pt = &Xarr[0, 0]
+        memory_error = False
+        with nogil:
+            for i in range(Xarr.shape[0]):
+                counts[i] = self._query_radius_single(0, pt, rarr[i],
+                                                      &idx_arr_i[0],
+                                                      &dist_arr_i[0],
+                                                      0, count_only,
+                                                      return_distance)
+                pt += n_features
+
+                if count_only:
+                    continue
+
+                if sort_results:
+                    _simultaneous_sort(&dist_arr_i[0], &idx_arr_i[0],
+                                       counts[i])
+
+                # equivalent to: indices[i] = np_idx_arr[:counts[i]].copy()
+                indices[i] = <intp_t*>malloc(counts[i] * sizeof(intp_t))
+                if indices[i] == NULL:
+                    memory_error = True
+                    break
+                memcpy(indices[i], &idx_arr_i[0], counts[i] * sizeof(intp_t))
+
+                if return_distance:
+                    # equivalent to: distances[i] = np_dist_arr[:counts[i]].copy()
+                    distances[i] = <{{INPUT_DTYPE_t}}*>malloc(counts[i] * sizeof({{INPUT_DTYPE_t}}))
+                    if distances[i] == NULL:
+                        memory_error = True
+                        break
+                    memcpy(distances[i], &dist_arr_i[0], counts[i] * sizeof({{INPUT_DTYPE_t}}))
+
+        try:
+            if memory_error:
+                raise MemoryError()
+
+            if count_only:
+                # deflatten results
+                return counts_arr.reshape(X.shape[:X.ndim - 1])
+            elif return_distance:
+                indices_npy = np.zeros(Xarr.shape[0], dtype='object')
+                distances_npy = np.zeros(Xarr.shape[0], dtype='object')
+                for i in range(Xarr.shape[0]):
+                    # make a new numpy array that wraps the existing data
+                    # TODO: remove the explicit cast to cnp.intp_t* when cython min version >= 3.0
+                    indices_npy[i] = cnp.PyArray_SimpleNewFromData(1, <cnp.intp_t*>&counts[i], cnp.NPY_INTP, indices[i])
+                    # make sure the data will be freed when the numpy array is garbage collected
+                    PyArray_ENABLEFLAGS(indices_npy[i], cnp.NPY_ARRAY_OWNDATA)
+                    # make sure the data is not freed twice
+                    indices[i] = NULL
+
+                    # make a new numpy array that wraps the existing data
+                    # TODO: remove the explicit cast to cnp.intp_t* when cython min version >= 3.0
+                    distances_npy[i] = cnp.PyArray_SimpleNewFromData(1, <cnp.intp_t*>&counts[i], {{NPY_TYPE}}, distances[i])
+                    # make sure the data will be freed when the numpy array is garbage collected
+                    PyArray_ENABLEFLAGS(distances_npy[i], cnp.NPY_ARRAY_OWNDATA)
+                    # make sure the data is not freed twice
+                    distances[i] = NULL
+
+                # deflatten results
+                return (indices_npy.reshape(X.shape[:X.ndim - 1]),
+                        distances_npy.reshape(X.shape[:X.ndim - 1]))
+            else:
+                indices_npy = np.zeros(Xarr.shape[0], dtype='object')
+                for i in range(Xarr.shape[0]):
+                    # make a new numpy array that wraps the existing data
+                    # TODO: remove the explicit cast to cnp.intp_t* when cython min version >= 3.0
+                    indices_npy[i] = cnp.PyArray_SimpleNewFromData(1, <cnp.intp_t*>&counts[i], cnp.NPY_INTP, indices[i])
+                    # make sure the data will be freed when the numpy array is garbage collected
+                    PyArray_ENABLEFLAGS(indices_npy[i], cnp.NPY_ARRAY_OWNDATA)
+                    # make sure the data is not freed twice
+                    indices[i] = NULL
+
+                # deflatten results
+                return indices_npy.reshape(X.shape[:X.ndim - 1])
+        except MemoryError:
+            # free any buffer that is not owned by a numpy array
+            for i in range(Xarr.shape[0]):
+                free(indices[i])
+                if return_distance:
+                    free(distances[i])
+            raise
+        finally:
+            free(indices)
+            free(distances)
+
+    def kernel_density(self, X, h, kernel='gaussian',
+                       atol=0, rtol=1E-8,
+                       breadth_first=True, return_log=False):
+        """
+        kernel_density(X, h, kernel='gaussian', atol=0, rtol=1E-8,
+                       breadth_first=True, return_log=False)
+
+        Compute the kernel density estimate at points X with the given kernel,
+        using the distance metric specified at tree creation.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            An array of points to query.  Last dimension should match dimension
+            of training data.
+        h : float
+            the bandwidth of the kernel
+        kernel : str, default="gaussian"
+            specify the kernel to use.  Options are
+            - 'gaussian'
+            - 'tophat'
+            - 'epanechnikov'
+            - 'exponential'
+            - 'linear'
+            - 'cosine'
+            Default is kernel = 'gaussian'
+        atol : float, default=0
+            Specify the desired absolute tolerance of the result.
+            If the true result is `K_true`, then the returned result `K_ret`
+            satisfies ``abs(K_true - K_ret) < atol + rtol * K_ret``
+            The default is 0 (an exact absolute bound).
+        rtol : float, default=1e-8
+            Specify the desired relative tolerance of the result.
+            If the true result is `K_true`, then the returned result `K_ret`
+            satisfies ``abs(K_true - K_ret) < atol + rtol * K_ret``
+            The default is 1e-8.
+        breadth_first : bool, default=True
+            If True (the default), use a breadth-first search.  If False, use
+            a depth-first search.  Breadth-first is generally faster for
+            compact kernels and/or high tolerances.
+        return_log : bool, default=False
+            Return the logarithm of the result.
This can be more accurate + than returning the result itself for narrow kernels. + + Returns + ------- + density : ndarray of shape X.shape[:-1] + The array of (log)-density evaluations + """ + cdef float64_t h_c = h + cdef float64_t log_atol = log(atol) + cdef float64_t log_rtol = log(rtol) + cdef float64_t log_min_bound, log_max_bound, log_bound_spread + cdef float64_t dist_LB = 0, dist_UB = 0 + + cdef intp_t n_samples = self.data.shape[0] + cdef intp_t n_features = self.data.shape[1] + cdef intp_t i + cdef KernelType kernel_c + + # validate kernel + if kernel == 'gaussian': + kernel_c = GAUSSIAN_KERNEL + elif kernel == 'tophat': + kernel_c = TOPHAT_KERNEL + elif kernel == 'epanechnikov': + kernel_c = EPANECHNIKOV_KERNEL + elif kernel == 'exponential': + kernel_c = EXPONENTIAL_KERNEL + elif kernel == 'linear': + kernel_c = LINEAR_KERNEL + elif kernel == 'cosine': + kernel_c = COSINE_KERNEL + else: + raise ValueError("kernel = '%s' not recognized" % kernel) + + cdef float64_t log_knorm = _log_kernel_norm(h_c, n_features, kernel_c) + + # validate X and prepare for query + X = check_array(X, dtype={{INPUT_DTYPE}}, order='C') + + if X.shape[X.ndim - 1] != n_features: + raise ValueError("query data dimension must " + "match training data dimension") + Xarr_np = X.reshape((-1, n_features)) + cdef const {{INPUT_DTYPE_t}}[:, ::1] Xarr = Xarr_np + + log_density_arr = np.zeros(Xarr.shape[0], dtype={{INPUT_DTYPE}}) + cdef {{INPUT_DTYPE_t}}[::1] log_density = log_density_arr + + cdef const {{INPUT_DTYPE_t}}* pt = &Xarr[0, 0] + + cdef NodeHeap nodeheap + if breadth_first: + nodeheap = NodeHeap(self.data.shape[0] // self.leaf_size) + cdef float64_t[::1] node_log_min_bounds + cdef float64_t[::1] node_bound_widths + # TODO: implement dual tree approach. + # this is difficult because of the need to cache values + # computed between node pairs. + if breadth_first: + node_log_min_bounds_arr = np.full(self.n_nodes, -np.inf) + node_log_min_bounds = node_log_min_bounds_arr + node_bound_widths_arr = np.zeros(self.n_nodes) + node_bound_widths = node_bound_widths_arr + for i in range(Xarr.shape[0]): + log_density[i] = self._kde_single_breadthfirst( + pt, kernel_c, h_c, + log_knorm, log_atol, log_rtol, + nodeheap, + &node_log_min_bounds[0], + &node_bound_widths[0]) + pt += n_features + else: + for i in range(Xarr.shape[0]): + min_max_dist{{name_suffix}}(self, 0, pt, &dist_LB, &dist_UB) + # compute max & min bounds on density within top node + log_min_bound = (log(self.sum_weight) + + compute_log_kernel(dist_UB, + h_c, kernel_c)) + log_max_bound = (log(self.sum_weight) + + compute_log_kernel(dist_LB, + h_c, kernel_c)) + log_bound_spread = logsubexp(log_max_bound, log_min_bound) + self._kde_single_depthfirst(0, pt, kernel_c, h_c, + log_knorm, log_atol, log_rtol, + log_min_bound, + log_bound_spread, + &log_min_bound, + &log_bound_spread) + log_density[i] = logaddexp(log_min_bound, + log_bound_spread - log(2)) + pt += n_features + + # normalize the results + for i in range(log_density.shape[0]): + log_density[i] += log_knorm + + log_density_arr = log_density_arr.reshape(X.shape[:X.ndim - 1]) + + if return_log: + return log_density_arr + else: + return np.exp(log_density_arr) + + def two_point_correlation(self, X, r, dualtree=False): + """ + two_point_correlation(X, r, dualtree=False) + + Compute the two-point correlation function + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + An array of points to query. Last dimension should match dimension + of training data. 
+ r : array-like + A one-dimensional array of distances + dualtree : bool, default=False + If True, use a dualtree algorithm. Otherwise, use a single-tree + algorithm. Dual tree algorithms can have better scaling for + large N. + + Returns + ------- + counts : ndarray + counts[i] contains the number of pairs of points with distance + less than or equal to r[i] + """ + cdef intp_t n_features = self.data.shape[1] + cdef intp_t i + + # validate X and prepare for query + X = check_array(X, dtype={{INPUT_DTYPE}}, order='C') + + if X.shape[X.ndim - 1] != self.data.shape[1]: + raise ValueError("query data dimension must " + "match training data dimension") + + np_Xarr = X.reshape((-1, self.data.shape[1])) + cdef {{INPUT_DTYPE_t}}[:, ::1] Xarr = np_Xarr + + # prepare r for query + r = np.asarray(r, dtype=np.float64, order='C') + r = np.atleast_1d(r) + if r.ndim != 1: + raise ValueError("r must be a 1-dimensional array") + i_rsort = np.argsort(r) + rarr_np = r[i_rsort] # needed to keep memory in scope + cdef float64_t[::1] rarr = rarr_np + + # create array to hold counts + count = np.zeros(r.shape[0], dtype=np.intp) + cdef intp_t[::1] carr = count + + cdef const {{INPUT_DTYPE_t}}* pt = &Xarr[0, 0] + + if dualtree: + other = self.__class__(Xarr, metric=self.dist_metric, + leaf_size=self.leaf_size) + self._two_point_dual(0, other, 0, &rarr[0], &carr[0], + 0, rarr.shape[0]) + else: + for i in range(Xarr.shape[0]): + self._two_point_single(0, pt, &rarr[0], &carr[0], + 0, rarr.shape[0]) + pt += n_features + + return count + + cdef int _query_single_depthfirst( + self, + intp_t i_node, + const {{INPUT_DTYPE_t}}* pt, + intp_t i_pt, + NeighborsHeap{{name_suffix}} heap, + float64_t reduced_dist_LB, + ) except -1 nogil: + """Recursive Single-tree k-neighbors query, depth-first approach""" + cdef NodeData_t node_info = self.node_data[i_node] + + cdef float64_t dist_pt, reduced_dist_LB_1, reduced_dist_LB_2 + cdef intp_t i, i1, i2 + + cdef const {{INPUT_DTYPE_t}}* data = &self.data[0, 0] + + # ------------------------------------------------------------ + # Case 1: query point is outside node radius: + # trim it from the query + if reduced_dist_LB > heap.largest(i_pt): + self.n_trims += 1 + + # ------------------------------------------------------------ + # Case 2: this is a leaf node. Update set of nearby points + elif node_info.is_leaf: + self.n_leaves += 1 + for i in range(node_info.idx_start, node_info.idx_end): + dist_pt = self.rdist(pt, + &self.data[self.idx_array[i], 0], + self.data.shape[1]) + heap._push(i_pt, dist_pt, self.idx_array[i]) + + # ------------------------------------------------------------ + # Case 3: Node is not a leaf. 
Recursively query subnodes + # starting with the closest + else: + self.n_splits += 1 + i1 = 2 * i_node + 1 + i2 = i1 + 1 + reduced_dist_LB_1 = min_rdist{{name_suffix}}(self, i1, pt) + reduced_dist_LB_2 = min_rdist{{name_suffix}}(self, i2, pt) + + # recursively query subnodes + if reduced_dist_LB_1 <= reduced_dist_LB_2: + self._query_single_depthfirst(i1, pt, i_pt, heap, + reduced_dist_LB_1) + self._query_single_depthfirst(i2, pt, i_pt, heap, + reduced_dist_LB_2) + else: + self._query_single_depthfirst(i2, pt, i_pt, heap, + reduced_dist_LB_2) + self._query_single_depthfirst(i1, pt, i_pt, heap, + reduced_dist_LB_1) + return 0 + + cdef int _query_single_breadthfirst( + self, + const {{INPUT_DTYPE_t}}* pt, + intp_t i_pt, + NeighborsHeap{{name_suffix}} heap, + NodeHeap nodeheap, + ) except -1: + """Non-recursive single-tree k-neighbors query, breadth-first search""" + cdef intp_t i, i_node + cdef float64_t dist_pt, reduced_dist_LB + cdef const NodeData_t* node_data = &self.node_data[0] + cdef const {{INPUT_DTYPE_t}}* data = &self.data[0, 0] + + # Set up the node heap and push the head node onto it + cdef NodeHeapData_t nodeheap_item + nodeheap_item.val = min_rdist{{name_suffix}}(self, 0, pt) + nodeheap_item.i1 = 0 + nodeheap.push(nodeheap_item) + + while nodeheap.n > 0: + nodeheap_item = nodeheap.pop() + reduced_dist_LB = nodeheap_item.val + i_node = nodeheap_item.i1 + node_info = node_data[i_node] + + # ------------------------------------------------------------ + # Case 1: query point is outside node radius: + # trim it from the query + if reduced_dist_LB > heap.largest(i_pt): + self.n_trims += 1 + + # ------------------------------------------------------------ + # Case 2: this is a leaf node. Update set of nearby points + elif node_data[i_node].is_leaf: + self.n_leaves += 1 + for i in range(node_data[i_node].idx_start, + node_data[i_node].idx_end): + dist_pt = self.rdist(pt, + &self.data[self.idx_array[i], 0], + self.data.shape[1]) + heap._push(i_pt, dist_pt, self.idx_array[i]) + + # ------------------------------------------------------------ + # Case 3: Node is not a leaf. Add subnodes to the node heap + else: + self.n_splits += 1 + for i in range(2 * i_node + 1, 2 * i_node + 3): + nodeheap_item.i1 = i + nodeheap_item.val = min_rdist{{name_suffix}}(self, i, pt) + nodeheap.push(nodeheap_item) + return 0 + + cdef int _query_dual_depthfirst( + self, + intp_t i_node1, + BinaryTree{{name_suffix}} other, + intp_t i_node2, + float64_t[::1] bounds, + NeighborsHeap{{name_suffix}} heap, + float64_t reduced_dist_LB, + ) except -1: + """Recursive dual-tree k-neighbors query, depth-first""" + # note that the array `bounds` is maintained such that + # bounds[i] is the largest distance among any of the + # current neighbors in node i of the other tree. 
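# ----------------------------------------------------------------------
# Illustrative sketch, not from the scikit-learn sources: the `bounds`
# maintenance described in the comment above includes walking up the
# tree after a leaf pair is processed, shrinking each ancestor's bound
# to the max of its children's bounds. The same loop in pure Python,
# over a hypothetical heap-ordered bounds list:

def tighten_ancestor_bounds(bounds, i_node):
    # mirror of the "update bounds up the tree" loop: a parent's bound
    # can shrink to the max of its two children's bounds
    while i_node > 0:
        parent = (i_node - 1) // 2
        bound_max = max(bounds[2 * parent + 1], bounds[2 * parent + 2])
        if bound_max < bounds[parent]:
            bounds[parent] = bound_max
            i_node = parent
        else:
            break

bounds = [9.0, 5.0, 7.0, 2.0, 3.0, 6.0, 6.5]  # 7-node complete tree
tighten_ancestor_bounds(bounds, 3)  # node 3's bound just dropped to 2.0
assert bounds[1] == 3.0 and bounds[0] == 7.0  # ancestors tightened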
+ cdef NodeData_t node_info1 = self.node_data[i_node1] + cdef NodeData_t node_info2 = other.node_data[i_node2] + + cdef const {{INPUT_DTYPE_t}}* data1 = &self.data[0, 0] + cdef const {{INPUT_DTYPE_t}}* data2 = &other.data[0, 0] + cdef intp_t n_features = self.data.shape[1] + + cdef float64_t bound_max, dist_pt, reduced_dist_LB1, reduced_dist_LB2 + cdef intp_t i1, i2, i_pt, i_parent + + # ------------------------------------------------------------ + # Case 1: nodes are further apart than the current bound: + # trim both from the query + if reduced_dist_LB > bounds[i_node2]: + pass + + # ------------------------------------------------------------ + # Case 2: both nodes are leaves: + # do a brute-force search comparing all pairs + elif node_info1.is_leaf and node_info2.is_leaf: + bounds[i_node2] = 0 + + for i2 in range(node_info2.idx_start, node_info2.idx_end): + i_pt = other.idx_array[i2] + + if heap.largest(i_pt) <= reduced_dist_LB: + continue + + for i1 in range(node_info1.idx_start, node_info1.idx_end): + dist_pt = self.rdist( + data1 + n_features * self.idx_array[i1], + data2 + n_features * i_pt, + n_features) + heap._push(i_pt, dist_pt, self.idx_array[i1]) + + # keep track of node bound + bounds[i_node2] = fmax(bounds[i_node2], + heap.largest(i_pt)) + + # update bounds up the tree + while i_node2 > 0: + i_parent = (i_node2 - 1) // 2 + bound_max = fmax(bounds[2 * i_parent + 1], + bounds[2 * i_parent + 2]) + if bound_max < bounds[i_parent]: + bounds[i_parent] = bound_max + i_node2 = i_parent + else: + break + + # ------------------------------------------------------------ + # Case 3a: node 1 is a leaf or is smaller: split node 2 and + # recursively query, starting with the nearest subnode + elif node_info1.is_leaf or (not node_info2.is_leaf + and node_info2.radius > node_info1.radius): + reduced_dist_LB1 = min_rdist_dual{{name_suffix}}(self, i_node1, + other, 2 * i_node2 + 1) + reduced_dist_LB2 = min_rdist_dual{{name_suffix}}(self, i_node1, + other, 2 * i_node2 + 2) + + if reduced_dist_LB1 < reduced_dist_LB2: + self._query_dual_depthfirst(i_node1, other, 2 * i_node2 + 1, + bounds, heap, reduced_dist_LB1) + self._query_dual_depthfirst(i_node1, other, 2 * i_node2 + 2, + bounds, heap, reduced_dist_LB2) + else: + self._query_dual_depthfirst(i_node1, other, 2 * i_node2 + 2, + bounds, heap, reduced_dist_LB2) + self._query_dual_depthfirst(i_node1, other, 2 * i_node2 + 1, + bounds, heap, reduced_dist_LB1) + + # ------------------------------------------------------------ + # Case 3b: node 2 is a leaf or is smaller: split node 1 and + # recursively query, starting with the nearest subnode + else: + reduced_dist_LB1 = min_rdist_dual{{name_suffix}}(self, 2 * i_node1 + 1, + other, i_node2) + reduced_dist_LB2 = min_rdist_dual{{name_suffix}}(self, 2 * i_node1 + 2, + other, i_node2) + + if reduced_dist_LB1 < reduced_dist_LB2: + self._query_dual_depthfirst(2 * i_node1 + 1, other, i_node2, + bounds, heap, reduced_dist_LB1) + self._query_dual_depthfirst(2 * i_node1 + 2, other, i_node2, + bounds, heap, reduced_dist_LB2) + else: + self._query_dual_depthfirst(2 * i_node1 + 2, other, i_node2, + bounds, heap, reduced_dist_LB2) + self._query_dual_depthfirst(2 * i_node1 + 1, other, i_node2, + bounds, heap, reduced_dist_LB1) + return 0 + + cdef int _query_dual_breadthfirst( + self, + BinaryTree{{name_suffix}} other, + NeighborsHeap{{name_suffix}} heap, + NodeHeap nodeheap, + ) except -1: + """Non-recursive dual-tree k-neighbors query, breadth-first""" + cdef intp_t i, i1, i2, i_node1, i_node2, i_pt + cdef 
float64_t dist_pt, reduced_dist_LB + cdef float64_t[::1] bounds = np.full(other.node_data.shape[0], np.inf) + cdef const NodeData_t* node_data1 = &self.node_data[0] + cdef const NodeData_t* node_data2 = &other.node_data[0] + cdef NodeData_t node_info1, node_info2 + cdef const {{INPUT_DTYPE_t}}* data1 = &self.data[0, 0] + cdef const {{INPUT_DTYPE_t}}* data2 = &other.data[0, 0] + cdef intp_t n_features = self.data.shape[1] + + # Set up the node heap and push the head nodes onto it + cdef NodeHeapData_t nodeheap_item + nodeheap_item.val = min_rdist_dual{{name_suffix}}(self, 0, other, 0) + nodeheap_item.i1 = 0 + nodeheap_item.i2 = 0 + nodeheap.push(nodeheap_item) + + while nodeheap.n > 0: + nodeheap_item = nodeheap.pop() + reduced_dist_LB = nodeheap_item.val + i_node1 = nodeheap_item.i1 + i_node2 = nodeheap_item.i2 + + node_info1 = node_data1[i_node1] + node_info2 = node_data2[i_node2] + + # ------------------------------------------------------------ + # Case 1: nodes are further apart than the current bound: + # trim both from the query + if reduced_dist_LB > bounds[i_node2]: + pass + + # ------------------------------------------------------------ + # Case 2: both nodes are leaves: + # do a brute-force search comparing all pairs + elif node_info1.is_leaf and node_info2.is_leaf: + bounds[i_node2] = -1 + + for i2 in range(node_info2.idx_start, node_info2.idx_end): + i_pt = other.idx_array[i2] + + if heap.largest(i_pt) <= reduced_dist_LB: + continue + + for i1 in range(node_info1.idx_start, node_info1.idx_end): + dist_pt = self.rdist( + data1 + n_features * self.idx_array[i1], + data2 + n_features * i_pt, + n_features) + heap._push(i_pt, dist_pt, self.idx_array[i1]) + + # keep track of node bound + bounds[i_node2] = fmax(bounds[i_node2], + heap.largest(i_pt)) + + # ------------------------------------------------------------ + # Case 3a: node 1 is a leaf or is smaller: split node 2 and + # recursively query, starting with the nearest subnode + elif node_info1.is_leaf or (not node_info2.is_leaf + and (node_info2.radius + > node_info1.radius)): + nodeheap_item.i1 = i_node1 + for i2 in range(2 * i_node2 + 1, 2 * i_node2 + 3): + nodeheap_item.i2 = i2 + nodeheap_item.val = min_rdist_dual{{name_suffix}}(self, i_node1, + other, i2) + nodeheap.push(nodeheap_item) + + # ------------------------------------------------------------ + # Case 3b: node 2 is a leaf or is smaller: split node 1 and + # recursively query, starting with the nearest subnode + else: + nodeheap_item.i2 = i_node2 + for i1 in range(2 * i_node1 + 1, 2 * i_node1 + 3): + nodeheap_item.i1 = i1 + nodeheap_item.val = min_rdist_dual{{name_suffix}}(self, i1, + other, i_node2) + nodeheap.push(nodeheap_item) + return 0 + + cdef intp_t _query_radius_single( + self, + intp_t i_node, + const {{INPUT_DTYPE_t}}* pt, + float64_t r, + intp_t* indices, + {{INPUT_DTYPE_t}}* distances, + intp_t count, + int count_only, + int return_distance, + ) noexcept nogil: + """recursive single-tree radius query, depth-first""" + cdef const {{INPUT_DTYPE_t}}* data = &self.data[0, 0] + cdef intp_t* idx_array = &self.idx_array[0] + cdef intp_t n_features = self.data.shape[1] + cdef NodeData_t node_info = self.node_data[i_node] + + cdef intp_t i + cdef float64_t reduced_r + + cdef float64_t dist_pt, dist_LB = 0, dist_UB = 0 + min_max_dist{{name_suffix}}(self, i_node, pt, &dist_LB, &dist_UB) + + # ------------------------------------------------------------ + # Case 1: all node points are outside distance r. + # prune this branch. 
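# ----------------------------------------------------------------------
# Illustrative sketch, not from the scikit-learn sources: the four cases
# of the radius query below, rebuilt on a toy ball-tree node (centroid
# plus covering radius, a crude first-coordinate split) so the pruning
# logic is runnable end to end:

import numpy as np

class ToyNode:
    def __init__(self, pts):
        self.pts = pts
        self.centroid = pts.mean(axis=0)
        self.radius = np.linalg.norm(pts - self.centroid, axis=1).max()
        self.is_leaf = len(pts) <= 2
        if not self.is_leaf:
            order = np.argsort(pts[:, 0])   # crude split for the demo
            mid = len(pts) // 2
            self.left = ToyNode(pts[order[:mid]])
            self.right = ToyNode(pts[order[mid:]])

def radius_count(node, pt, r):
    d = np.linalg.norm(node.centroid - pt)
    dist_LB, dist_UB = max(0.0, d - node.radius), d + node.radius
    if dist_LB > r:                  # Case 1: fully outside -> prune
        return 0
    if dist_UB <= r:                 # Case 2: fully inside -> count all
        return len(node.pts)
    if node.is_leaf:                 # Case 3: leaf -> test points one by one
        return int((np.linalg.norm(node.pts - pt, axis=1) <= r).sum())
    return (radius_count(node.left, pt, r)     # Case 4: recurse
            + radius_count(node.right, pt, r))

rng = np.random.default_rng(1)
pts = rng.standard_normal((64, 2))
q = np.zeros(2)
assert radius_count(ToyNode(pts), q, 1.0) == int(
    (np.linalg.norm(pts, axis=1) <= 1.0).sum())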
+ if dist_LB > r: + pass + + # ------------------------------------------------------------ + # Case 2: all node points are within distance r + # add all points to neighbors + elif dist_UB <= r: + if count_only: + count += (node_info.idx_end - node_info.idx_start) + else: + for i in range(node_info.idx_start, node_info.idx_end): + if (count < 0) or (count >= self.data.shape[0]): + return -1 + indices[count] = idx_array[i] + if return_distance: + distances[count] = self.dist(pt, (data + n_features + * idx_array[i]), + n_features) + count += 1 + + # ------------------------------------------------------------ + # Case 3: this is a leaf node. Go through all points to + # determine if they fall within radius + elif node_info.is_leaf: + reduced_r = self.dist_metric._dist_to_rdist(r) + + for i in range(node_info.idx_start, node_info.idx_end): + dist_pt = self.rdist(pt, (data + n_features * idx_array[i]), + n_features) + if dist_pt <= reduced_r: + if (count < 0) or (count >= self.data.shape[0]): + return -1 + if count_only: + pass + else: + indices[count] = idx_array[i] + if return_distance: + distances[count] =\ + self.dist_metric._rdist_to_dist(dist_pt) + count += 1 + + # ------------------------------------------------------------ + # Case 4: Node is not a leaf. Recursively query subnodes + else: + count = self._query_radius_single(2 * i_node + 1, pt, r, + indices, distances, count, + count_only, return_distance) + count = self._query_radius_single(2 * i_node + 2, pt, r, + indices, distances, count, + count_only, return_distance) + + return count + + cdef float64_t _kde_single_breadthfirst( + self, const {{INPUT_DTYPE_t}}* pt, + KernelType kernel, + float64_t h, + float64_t log_knorm, + float64_t log_atol, + float64_t log_rtol, + NodeHeap nodeheap, + float64_t* node_log_min_bounds, + float64_t* node_log_bound_spreads, + ): + """non-recursive single-tree kernel density estimation""" + # For the given point, node_log_min_bounds and node_log_bound_spreads + # will encode the current bounds on the density between the point + # and the associated node. + # The variables global_log_min_bound and global_log_bound_spread + # keep track of the global bounds on density. The procedure here is + # to split nodes, updating these bounds, until the bounds are within + # atol & rtol. 
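# ----------------------------------------------------------------------
# Illustrative sketch, not from the scikit-learn sources: the KDE bounds
# below live entirely in log space. logaddexp comes from NumPy, and
# logsubexp (log(exp(a) - exp(b)) for a >= b) is a small internal helper;
# a NumPy analogue of one bound update, where a node's coarse bound is
# retracted and replaced by an exactly computed leaf contribution:

import numpy as np

def logsubexp(a, b):
    # stable log(exp(a) - exp(b)); requires a >= b
    return a + np.log1p(-np.exp(b - a))

global_log_min = np.log(10.0)     # current global lower bound
node_log_min = np.log(4.0)        # the node's share of that bound
exact_node_log_sum = np.log(5.5)  # leaf scan produced the true value

updated = np.logaddexp(logsubexp(global_log_min, node_log_min),
                       exact_node_log_sum)
assert np.isclose(np.exp(updated), 10.0 - 4.0 + 5.5)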
+ cdef intp_t i, i1, i2, i_node + cdef float64_t N1, N2 + cdef float64_t global_log_min_bound, global_log_bound_spread + cdef float64_t global_log_max_bound + + cdef const {{INPUT_DTYPE_t}}* data = &self.data[0, 0] + cdef bint with_sample_weight = self.sample_weight is not None + cdef const {{INPUT_DTYPE_t}}* sample_weight + if with_sample_weight: + sample_weight = &self.sample_weight[0] + cdef intp_t* idx_array = &self.idx_array[0] + cdef const NodeData_t* node_data = &self.node_data[0] + cdef float64_t N + cdef float64_t log_weight + if with_sample_weight: + N = self.sum_weight + else: + N = self.data.shape[0] + cdef intp_t n_features = self.data.shape[1] + + cdef NodeData_t node_info + cdef float64_t dist_pt, log_density + cdef float64_t dist_LB_1 = 0, dist_LB_2 = 0 + cdef float64_t dist_UB_1 = 0, dist_UB_2 = 0 + + cdef float64_t dist_UB, dist_LB + + # push the top node to the heap + cdef NodeHeapData_t nodeheap_item + nodeheap_item.val = min_dist{{name_suffix}}(self, 0, pt) + nodeheap_item.i1 = 0 + nodeheap.push(nodeheap_item) + + global_log_min_bound = log(N) + compute_log_kernel( + max_dist{{name_suffix}}(self, 0, pt), h, kernel + ) + global_log_max_bound = log(N) + compute_log_kernel(nodeheap_item.val, + h, kernel) + global_log_bound_spread = logsubexp(global_log_max_bound, + global_log_min_bound) + + node_log_min_bounds[0] = global_log_min_bound + node_log_bound_spreads[0] = global_log_bound_spread + + while nodeheap.n > 0: + nodeheap_item = nodeheap.pop() + i_node = nodeheap_item.i1 + + node_info = node_data[i_node] + if with_sample_weight: + N1 = _total_node_weight(node_data, sample_weight, + idx_array, i_node) + else: + N1 = node_info.idx_end - node_info.idx_start + + # ------------------------------------------------------------ + # Case 1: local bounds are equal to within per-point tolerance. + if (log_knorm + node_log_bound_spreads[i_node] - log(N1) + log(N) + <= logaddexp(log_atol, (log_rtol + log_knorm + + node_log_min_bounds[i_node]))): + pass + + # ------------------------------------------------------------ + # Case 2: global bounds are within rtol & atol. + elif (log_knorm + global_log_bound_spread + <= logaddexp(log_atol, + log_rtol + log_knorm + global_log_min_bound)): + break + + # ------------------------------------------------------------ + # Case 3: node is a leaf. Count contributions from all points + elif node_info.is_leaf: + global_log_min_bound =\ + logsubexp(global_log_min_bound, + node_log_min_bounds[i_node]) + global_log_bound_spread =\ + logsubexp(global_log_bound_spread, + node_log_bound_spreads[i_node]) + for i in range(node_info.idx_start, node_info.idx_end): + dist_pt = self.dist(pt, data + n_features * idx_array[i], + n_features) + log_density = compute_log_kernel(dist_pt, h, kernel) + if with_sample_weight: + log_weight = np.log(sample_weight[idx_array[i]]) + else: + log_weight = 0. 
+ global_log_min_bound = logaddexp(global_log_min_bound, + log_density + log_weight) + + # ------------------------------------------------------------ + # Case 4: split node and query subnodes + else: + i1 = 2 * i_node + 1 + i2 = 2 * i_node + 2 + + if with_sample_weight: + N1 = _total_node_weight(node_data, sample_weight, + idx_array, i1) + N2 = _total_node_weight(node_data, sample_weight, + idx_array, i2) + else: + N1 = node_data[i1].idx_end - node_data[i1].idx_start + N2 = node_data[i2].idx_end - node_data[i2].idx_start + + min_max_dist{{name_suffix}}(self, i1, pt, &dist_LB_1, &dist_UB_1) + min_max_dist{{name_suffix}}(self, i2, pt, &dist_LB_2, &dist_UB_2) + + node_log_min_bounds[i1] = (log(N1) + + compute_log_kernel(dist_UB_1, + h, kernel)) + node_log_bound_spreads[i1] = (log(N1) + + compute_log_kernel(dist_LB_1, + h, kernel)) + + node_log_min_bounds[i2] = (log(N2) + + compute_log_kernel(dist_UB_2, + h, kernel)) + node_log_bound_spreads[i2] = (log(N2) + + compute_log_kernel(dist_LB_2, + h, kernel)) + + global_log_min_bound = logsubexp(global_log_min_bound, + node_log_min_bounds[i_node]) + global_log_min_bound = logaddexp(global_log_min_bound, + node_log_min_bounds[i1]) + global_log_min_bound = logaddexp(global_log_min_bound, + node_log_min_bounds[i2]) + + global_log_bound_spread =\ + logsubexp(global_log_bound_spread, + node_log_bound_spreads[i_node]) + global_log_bound_spread = logaddexp(global_log_bound_spread, + node_log_bound_spreads[i1]) + global_log_bound_spread = logaddexp(global_log_bound_spread, + node_log_bound_spreads[i2]) + + # TODO: rank by the spread rather than the distance? + nodeheap_item.val = dist_LB_1 + nodeheap_item.i1 = i1 + nodeheap.push(nodeheap_item) + + nodeheap_item.val = dist_LB_2 + nodeheap_item.i1 = i2 + nodeheap.push(nodeheap_item) + + nodeheap.clear() + return logaddexp(global_log_min_bound, + global_log_bound_spread - log(2)) + + cdef int _kde_single_depthfirst( + self, + intp_t i_node, + const {{INPUT_DTYPE_t}}* pt, + KernelType kernel, + float64_t h, + float64_t log_knorm, + float64_t log_atol, + float64_t log_rtol, + float64_t local_log_min_bound, + float64_t local_log_bound_spread, + float64_t* global_log_min_bound, + float64_t* global_log_bound_spread, + ) except -1: + """recursive single-tree kernel density estimate, depth-first""" + # For the given point, local_min_bound and local_max_bound give the + # minimum and maximum density for the current node, while + # global_min_bound and global_max_bound give the minimum and maximum + # density over the entire tree. We recurse down until global_min_bound + # and global_max_bound are within rtol and atol. 
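# ----------------------------------------------------------------------
# Usage sketch, not from the scikit-learn sources: the public entry point
# for these KDE traversals. With atol=0 and a tight rtol the tree result
# should match a brute-force kernel sum (the tree returns the sum of
# normalized kernels, not the mean):

import numpy as np
from sklearn.neighbors import BallTree

rng = np.random.default_rng(0)
X = rng.standard_normal((500, 2))
tree = BallTree(X, leaf_size=40)

h = 0.5
density = tree.kernel_density(X[:3], h=h, kernel='gaussian', rtol=1e-10)

# brute force: gaussian norm (2*pi*h^2)^(-d/2) times the kernel sum
d2 = ((X[:3, None, :] - X[None, :, :]) ** 2).sum(-1)
norm = (2 * np.pi * h ** 2) ** (-X.shape[1] / 2)
brute = norm * np.exp(-d2 / (2 * h ** 2)).sum(-1)
assert np.allclose(density, brute, rtol=1e-6)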
+ cdef intp_t i, i1, i2, iw, start, end + cdef float64_t N1, N2 + + cdef const {{INPUT_DTYPE_t}}* data = &self.data[0, 0] + cdef const NodeData_t* node_data = &self.node_data[0] + cdef bint with_sample_weight = self.sample_weight is not None + cdef const {{INPUT_DTYPE_t}}* sample_weight + cdef float64_t log_weight + if with_sample_weight: + sample_weight = &self.sample_weight[0] + cdef intp_t* idx_array = &self.idx_array[0] + cdef intp_t n_features = self.data.shape[1] + + cdef NodeData_t node_info = self.node_data[i_node] + cdef float64_t dist_pt, log_dens_contribution + + cdef float64_t child1_log_min_bound, child2_log_min_bound + cdef float64_t child1_log_bound_spread, child2_log_bound_spread + cdef float64_t dist_UB = 0, dist_LB = 0 + + if with_sample_weight: + N1 = _total_node_weight(node_data, sample_weight, + idx_array, i_node) + N2 = self.sum_weight + else: + N1 = (node_info.idx_end - node_info.idx_start) + N2 = self.data.shape[0] + + # ------------------------------------------------------------ + # Case 1: local bounds are equal to within errors. Return + if ( + log_knorm + local_log_bound_spread - log(N1) + log(N2) + <= logaddexp(log_atol, (log_rtol + log_knorm + local_log_min_bound)) + ): + pass + + # ------------------------------------------------------------ + # Case 2: global bounds are within rtol & atol. Return + elif ( + log_knorm + global_log_bound_spread[0] + <= logaddexp(log_atol, (log_rtol + log_knorm + global_log_min_bound[0])) + ): + pass + + # ------------------------------------------------------------ + # Case 3: node is a leaf. Count contributions from all points + elif node_info.is_leaf: + global_log_min_bound[0] = logsubexp(global_log_min_bound[0], + local_log_min_bound) + global_log_bound_spread[0] = logsubexp(global_log_bound_spread[0], + local_log_bound_spread) + for i in range(node_info.idx_start, node_info.idx_end): + dist_pt = self.dist(pt, (data + n_features * idx_array[i]), + n_features) + log_dens_contribution = compute_log_kernel(dist_pt, h, kernel) + if with_sample_weight: + log_weight = np.log(sample_weight[idx_array[i]]) + else: + log_weight = 0. 
+ global_log_min_bound[0] = logaddexp(global_log_min_bound[0], + (log_dens_contribution + + log_weight)) + + # ------------------------------------------------------------ + # Case 4: split node and query subnodes + else: + i1 = 2 * i_node + 1 + i2 = 2 * i_node + 2 + + if with_sample_weight: + N1 = _total_node_weight(node_data, sample_weight, + idx_array, i1) + N2 = _total_node_weight(node_data, sample_weight, + idx_array, i2) + else: + N1 = (self.node_data[i1].idx_end - self.node_data[i1].idx_start) + N2 = (self.node_data[i2].idx_end - self.node_data[i2].idx_start) + + min_max_dist{{name_suffix}}(self, i1, pt, &dist_LB, &dist_UB) + child1_log_min_bound = log(N1) + compute_log_kernel(dist_UB, h, + kernel) + child1_log_bound_spread = logsubexp(log(N1) + + compute_log_kernel(dist_LB, h, + kernel), + child1_log_min_bound) + + min_max_dist{{name_suffix}}(self, i2, pt, &dist_LB, &dist_UB) + child2_log_min_bound = log(N2) + compute_log_kernel(dist_UB, h, + kernel) + child2_log_bound_spread = logsubexp(log(N2) + + compute_log_kernel(dist_LB, h, + kernel), + child2_log_min_bound) + + global_log_min_bound[0] = logsubexp(global_log_min_bound[0], + local_log_min_bound) + global_log_min_bound[0] = logaddexp(global_log_min_bound[0], + child1_log_min_bound) + global_log_min_bound[0] = logaddexp(global_log_min_bound[0], + child2_log_min_bound) + + global_log_bound_spread[0] = logsubexp(global_log_bound_spread[0], + local_log_bound_spread) + global_log_bound_spread[0] = logaddexp(global_log_bound_spread[0], + child1_log_bound_spread) + global_log_bound_spread[0] = logaddexp(global_log_bound_spread[0], + child2_log_bound_spread) + + self._kde_single_depthfirst(i1, pt, kernel, h, log_knorm, + log_atol, log_rtol, + child1_log_min_bound, + child1_log_bound_spread, + global_log_min_bound, + global_log_bound_spread) + self._kde_single_depthfirst(i2, pt, kernel, h, log_knorm, + log_atol, log_rtol, + child2_log_min_bound, + child2_log_bound_spread, + global_log_min_bound, + global_log_bound_spread) + return 0 + + cdef int _two_point_single( + self, + intp_t i_node, + const {{INPUT_DTYPE_t}}* pt, + float64_t* r, + intp_t* count, + intp_t i_min, + intp_t i_max, + ) except -1: + """recursive single-tree two-point correlation function query""" + cdef const {{INPUT_DTYPE_t}}* data = &self.data[0, 0] + cdef intp_t* idx_array = &self.idx_array[0] + cdef intp_t n_features = self.data.shape[1] + cdef NodeData_t node_info = self.node_data[i_node] + + cdef intp_t i, j, Npts + cdef float64_t reduced_r + + cdef float64_t dist_pt, dist_LB = 0, dist_UB = 0 + min_max_dist{{name_suffix}}(self, i_node, pt, &dist_LB, &dist_UB) + + # ------------------------------------------------------------ + # Go through bounds and check for cuts + while i_min < i_max: + if dist_LB > r[i_min]: + i_min += 1 + else: + break + + while i_max > i_min: + Npts = (node_info.idx_end - node_info.idx_start) + if dist_UB <= r[i_max - 1]: + count[i_max - 1] += Npts + i_max -= 1 + else: + break + + if i_min < i_max: + # If node is a leaf, go through all points + if node_info.is_leaf: + for i in range(node_info.idx_start, node_info.idx_end): + dist_pt = self.dist(pt, (data + n_features * idx_array[i]), + n_features) + j = i_max - 1 + while (j >= i_min) and (dist_pt <= r[j]): + count[j] += 1 + j -= 1 + + else: + self._two_point_single(2 * i_node + 1, pt, r, + count, i_min, i_max) + self._two_point_single(2 * i_node + 2, pt, r, + count, i_min, i_max) + return 0 + + cdef int _two_point_dual( + self, + intp_t i_node1, + BinaryTree{{name_suffix}} other, + intp_t 
i_node2, + float64_t* r, + intp_t* count, + intp_t i_min, + intp_t i_max, + ) except -1: + """recursive dual-tree two-point correlation function query""" + cdef const {{INPUT_DTYPE_t}}* data1 = &self.data[0, 0] + cdef const {{INPUT_DTYPE_t}}* data2 = &other.data[0, 0] + cdef intp_t* idx_array1 = &self.idx_array[0] + cdef intp_t* idx_array2 = &other.idx_array[0] + cdef NodeData_t node_info1 = self.node_data[i_node1] + cdef NodeData_t node_info2 = other.node_data[i_node2] + + cdef intp_t n_features = self.data.shape[1] + + cdef intp_t i1, i2, j, Npts + cdef float64_t reduced_r + + cdef float64_t dist_pt, dist_LB = 0, dist_UB = 0 + dist_LB = min_dist_dual{{name_suffix}}(self, i_node1, other, i_node2) + dist_UB = max_dist_dual{{name_suffix}}(self, i_node1, other, i_node2) + + # ------------------------------------------------------------ + # Go through bounds and check for cuts + while i_min < i_max: + if dist_LB > r[i_min]: + i_min += 1 + else: + break + + while i_max > i_min: + Npts = ((node_info1.idx_end - node_info1.idx_start) + * (node_info2.idx_end - node_info2.idx_start)) + if dist_UB <= r[i_max - 1]: + count[i_max - 1] += Npts + i_max -= 1 + else: + break + + if i_min < i_max: + if node_info1.is_leaf and node_info2.is_leaf: + # If both nodes are leaves, go through all points + for i1 in range(node_info1.idx_start, node_info1.idx_end): + for i2 in range(node_info2.idx_start, node_info2.idx_end): + dist_pt = self.dist((data1 + n_features + * idx_array1[i1]), + (data2 + n_features + * idx_array2[i2]), + n_features) + j = i_max - 1 + while (j >= i_min) and (dist_pt <= r[j]): + count[j] += 1 + j -= 1 + + elif node_info1.is_leaf: + # If only one is a leaf, split the other + for i2 in range(2 * i_node2 + 1, 2 * i_node2 + 3): + self._two_point_dual(i_node1, other, i2, + r, count, i_min, i_max) + + elif node_info2.is_leaf: + for i1 in range(2 * i_node1 + 1, 2 * i_node1 + 3): + self._two_point_dual(i1, other, i_node2, + r, count, i_min, i_max) + + else: + # neither is a leaf: split & query both + for i1 in range(2 * i_node1 + 1, 2 * i_node1 + 3): + for i2 in range(2 * i_node2 + 1, 2 * i_node2 + 3): + self._two_point_dual(i1, other, i2, + r, count, i_min, i_max) + return 0 + +{{endfor}} + +###################################################################### +# Python functions for benchmarking and testing C implementations + +def simultaneous_sort(float64_t[:, ::1] distances, intp_t[:, ::1] indices): + """In-place simultaneous sort the given row of the arrays + + This python wrapper exists primarily to enable unit testing + of the _simultaneous_sort C routine. 
+ """ + assert distances.shape[0] == indices.shape[0] + assert distances.shape[1] == indices.shape[1] + cdef intp_t row + for row in range(distances.shape[0]): + _simultaneous_sort(&distances[row, 0], + &indices[row, 0], + distances.shape[1]) + + +def nodeheap_sort(float64_t[::1] vals): + """In-place reverse sort of vals using NodeHeap""" + cdef intp_t[::1] indices = np.zeros(vals.shape[0], dtype=np.intp) + cdef float64_t[::1] vals_sorted = np.zeros_like(vals) + + # use initial size 0 to check corner case + cdef NodeHeap heap = NodeHeap(0) + cdef NodeHeapData_t data + cdef intp_t i + for i in range(vals.shape[0]): + data.val = vals[i] + data.i1 = i + data.i2 = i + 1 + heap.push(data) + + for i in range(vals.shape[0]): + data = heap.pop() + vals_sorted[i] = data.val + indices[i] = data.i1 + + return np.asarray(vals_sorted), np.asarray(indices) + + +cdef inline float64_t _total_node_weight( + const NodeData_t* node_data, + const floating* sample_weight, + const intp_t* idx_array, + intp_t i_node, +): + cdef intp_t i + cdef float64_t N = 0.0 + for i in range(node_data[i_node].idx_start, node_data[i_node].idx_end): + N += sample_weight[idx_array[i]] + return N diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_graph.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..ad4afc0a81a668df3233f533aca3f5e8051a6664 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_graph.py @@ -0,0 +1,704 @@ +"""Nearest Neighbors graph functions""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import itertools + +from ..base import ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context +from ..utils._param_validation import ( + Integral, + Interval, + Real, + StrOptions, + validate_params, +) +from ..utils.validation import check_is_fitted +from ._base import VALID_METRICS, KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin +from ._unsupervised import NearestNeighbors + + +def _check_params(X, metric, p, metric_params): + """Check the validity of the input parameters""" + params = zip(["metric", "p", "metric_params"], [metric, p, metric_params]) + est_params = X.get_params() + for param_name, func_param in params: + if func_param != est_params[param_name]: + raise ValueError( + "Got %s for %s, while the estimator has %s for the same parameter." + % (func_param, param_name, est_params[param_name]) + ) + + +def _query_include_self(X, include_self, mode): + """Return the query based on include_self param""" + if include_self == "auto": + include_self = mode == "connectivity" + + # it does not include each sample as its own neighbors + if not include_self: + X = None + + return X + + +@validate_params( + { + "X": ["array-like", "sparse matrix", KNeighborsMixin], + "n_neighbors": [Interval(Integral, 1, None, closed="left")], + "mode": [StrOptions({"connectivity", "distance"})], + "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric_params": [dict, None], + "include_self": ["boolean", StrOptions({"auto"})], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def kneighbors_graph( + X, + n_neighbors, + *, + mode="connectivity", + metric="minkowski", + p=2, + metric_params=None, + include_self=False, + n_jobs=None, +): + """Compute the (weighted) graph of k-Neighbors for points in X. 
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Sample data. + + n_neighbors : int + Number of neighbors for each sample. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + metric : str, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is equivalent + to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. + For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected + to be positive. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + include_self : bool or 'auto', default=False + Whether or not to mark each sample as the first nearest neighbor to + itself. If 'auto', then True is used for mode='connectivity' and False + for mode='distance'. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + A : sparse matrix of shape (n_samples, n_samples) + Graph where A[i, j] is assigned the weight of edge that + connects i to j. The matrix is of CSR format. + + See Also + -------- + radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X. + + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import kneighbors_graph + >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 1.], + [1., 0., 1.]]) + """ + if not isinstance(X, KNeighborsMixin): + X = NearestNeighbors( + n_neighbors=n_neighbors, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ).fit(X) + else: + _check_params(X, metric, p, metric_params) + + query = _query_include_self(X._fit_X, include_self, mode) + return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode) + + +@validate_params( + { + "X": ["array-like", "sparse matrix", RadiusNeighborsMixin], + "radius": [Interval(Real, 0, None, closed="both")], + "mode": [StrOptions({"connectivity", "distance"})], + "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable], + "p": [Interval(Real, 0, None, closed="right"), None], + "metric_params": [dict, None], + "include_self": ["boolean", StrOptions({"auto"})], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def radius_neighbors_graph( + X, + radius, + *, + mode="connectivity", + metric="minkowski", + p=2, + metric_params=None, + include_self=False, + n_jobs=None, +): + """Compute the (weighted) graph of Neighbors for points in X. + + Neighborhoods are restricted the points at a distance lower than + radius. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Sample data. 
+ + radius : float + Radius of neighborhoods. + + mode : {'connectivity', 'distance'}, default='connectivity' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + metric : str, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + p : float, default=2 + Power parameter for the Minkowski metric. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + include_self : bool or 'auto', default=False + Whether or not to mark each sample as the first nearest neighbor to + itself. If 'auto', then True is used for mode='connectivity' and False + for mode='distance'. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Returns + ------- + A : sparse matrix of shape (n_samples, n_samples) + Graph where A[i, j] is assigned the weight of edge that connects + i to j. The matrix is of CSR format. + + See Also + -------- + kneighbors_graph: Compute the weighted graph of k-neighbors for points in X. + + Examples + -------- + >>> X = [[0], [3], [1]] + >>> from sklearn.neighbors import radius_neighbors_graph + >>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', + ... include_self=True) + >>> A.toarray() + array([[1., 0., 1.], + [0., 1., 0.], + [1., 0., 1.]]) + """ + if not isinstance(X, RadiusNeighborsMixin): + X = NearestNeighbors( + radius=radius, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ).fit(X) + else: + _check_params(X, metric, p, metric_params) + + query = _query_include_self(X._fit_X, include_self, mode) + return X.radius_neighbors_graph(query, radius, mode) + + +class KNeighborsTransformer( + ClassNamePrefixFeaturesOutMixin, KNeighborsMixin, TransformerMixin, NeighborsBase +): + """Transform X into a (weighted) graph of k nearest neighbors. + + The transformed data is a sparse graph as returned by kneighbors_graph. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + mode : {'distance', 'connectivity'}, default='distance' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. + + n_neighbors : int, default=5 + Number of neighbors for each sample in the transformed sparse graph. + For compatibility reasons, as each sample is considered as its own + neighbor, one extra neighbor will be computed when mode == 'distance'. + In this case, the sparse graph contains (n_neighbors + 1) neighbors. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. 
+ - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + Distance matrices are not supported. + + p : float, default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + This parameter is expected to be positive. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + If ``-1``, then the number of jobs is set to the number of CPU cores. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric used. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + kneighbors_graph : Compute the weighted graph of k-neighbors for + points in X. + RadiusNeighborsTransformer : Transform X into a weighted graph of + neighbors nearer than a radius. + + Notes + ----- + For an example of using :class:`~sklearn.neighbors.KNeighborsTransformer` + in combination with :class:`~sklearn.manifold.TSNE` see + :ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`. 
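+ + A pipeline sketch along those lines (illustrative only; the hyperparameter values are placeholders, and `init='random'` is required by :class:`~sklearn.manifold.TSNE` when `metric='precomputed'`): + + >>> from sklearn.manifold import TSNE + >>> from sklearn.neighbors import KNeighborsTransformer + >>> from sklearn.pipeline import make_pipeline + >>> est = make_pipeline( + ... KNeighborsTransformer(n_neighbors=10, mode='distance'), + ... TSNE(metric='precomputed', init='random'))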
+ + Examples + -------- + >>> from sklearn.datasets import load_wine + >>> from sklearn.neighbors import KNeighborsTransformer + >>> X, _ = load_wine(return_X_y=True) + >>> X.shape + (178, 13) + >>> transformer = KNeighborsTransformer(n_neighbors=5, mode='distance') + >>> X_dist_graph = transformer.fit_transform(X) + >>> X_dist_graph.shape + (178, 178) + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "mode": [StrOptions({"distance", "connectivity"})], + } + _parameter_constraints.pop("radius") + + def __init__( + self, + *, + mode="distance", + n_neighbors=5, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + super(KNeighborsTransformer, self).__init__( + n_neighbors=n_neighbors, + radius=None, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.mode = mode + + @_fit_context( + # KNeighborsTransformer.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the k-nearest neighbors transformer from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : KNeighborsTransformer + The fitted k-nearest neighbors transformer. + """ + self._fit(X) + self._n_features_out = self.n_samples_fit_ + return self + + def transform(self, X): + """Compute the (weighted) graph of Neighbors for points in X. + + Parameters + ---------- + X : array-like of shape (n_samples_transform, n_features) + Sample data. + + Returns + ------- + Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + check_is_fitted(self) + add_one = self.mode == "distance" + return self.kneighbors_graph( + X, mode=self.mode, n_neighbors=self.n_neighbors + add_one + ) + + def fit_transform(self, X, y=None): + """Fit to data, then transform it. + + Fits transformer to X and y with optional parameters fit_params + and returns a transformed version of X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + Xt : sparse matrix of shape (n_samples, n_samples) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + return self.fit(X).transform(X) + + +class RadiusNeighborsTransformer( + ClassNamePrefixFeaturesOutMixin, + RadiusNeighborsMixin, + TransformerMixin, + NeighborsBase, +): + """Transform X into a (weighted) graph of neighbors nearer than a radius. + + The transformed data is a sparse graph as returned by + `radius_neighbors_graph`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + mode : {'distance', 'connectivity'}, default='distance' + Type of returned matrix: 'connectivity' will return the connectivity + matrix with ones and zeros, and 'distance' will return the distances + between neighbors according to the given metric. 
+ + radius : float, default=1.0 + Radius of neighborhood in the transformed sparse graph. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + Distance matrices are not supported. + + p : float, default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + This parameter is expected to be positive. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + If ``-1``, then the number of jobs is set to the number of CPU cores. + + Attributes + ---------- + effective_metric_ : str or callable + The distance metric used. It will be same as the `metric` parameter + or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to + 'minkowski' and `p` parameter set to 2. + + effective_metric_params_ : dict + Additional keyword arguments for the metric function. For most metrics + will be same with `metric_params` parameter, but may also contain the + `p` parameter value if the `effective_metric_` attribute is set to + 'minkowski'. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + kneighbors_graph : Compute the weighted graph of k-neighbors for + points in X. + KNeighborsTransformer : Transform X into a weighted graph of k + nearest neighbors. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import load_wine + >>> from sklearn.cluster import DBSCAN + >>> from sklearn.neighbors import RadiusNeighborsTransformer + >>> from sklearn.pipeline import make_pipeline + >>> X, _ = load_wine(return_X_y=True) + >>> estimator = make_pipeline( + ... RadiusNeighborsTransformer(radius=42.0, mode='distance'), + ... 
DBSCAN(eps=25.0, metric='precomputed')) + >>> X_clustered = estimator.fit_predict(X) + >>> clusters, counts = np.unique(X_clustered, return_counts=True) + >>> print(counts) + [ 29 15 111 11 12] + """ + + _parameter_constraints: dict = { + **NeighborsBase._parameter_constraints, + "mode": [StrOptions({"distance", "connectivity"})], + } + _parameter_constraints.pop("n_neighbors") + + def __init__( + self, + *, + mode="distance", + radius=1.0, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + super(RadiusNeighborsTransformer, self).__init__( + n_neighbors=None, + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + self.mode = mode + + @_fit_context( + # RadiusNeighborsTransformer.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the radius neighbors transformer from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : RadiusNeighborsTransformer + The fitted radius neighbors transformer. + """ + self._fit(X) + self._n_features_out = self.n_samples_fit_ + return self + + def transform(self, X): + """Compute the (weighted) graph of Neighbors for points in X. + + Parameters + ---------- + X : array-like of shape (n_samples_transform, n_features) + Sample data. + + Returns + ------- + Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. + """ + check_is_fitted(self) + return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True) + + def fit_transform(self, X, y=None): + """Fit to data, then transform it. + + Fits transformer to X and y with optional parameters fit_params + and returns a transformed version of X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training set. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + Xt : sparse matrix of shape (n_samples, n_samples) + Xt[i, j] is assigned the weight of edge that connects i to j. + Only the neighbors have an explicit value. + The diagonal is always explicit. + The matrix is of CSR format. 
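+ + A short sketch (illustrative only; the output is reasoned from the definitions above, using the default mode='distance', under which points farther apart than `radius` simply get no explicit entry): + + >>> from sklearn.neighbors import RadiusNeighborsTransformer + >>> Xt = RadiusNeighborsTransformer(radius=1.5).fit_transform([[0], [3], [1]]) + >>> Xt.toarray() + array([[0., 0., 1.], + [0., 0., 0.], + [1., 0., 0.]])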
+ """ + return self.fit(X).transform(X) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..80068ed8d5699c7fbde7c7234b26a30ef3ebda29 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.cpython-310-x86_64-linux-gnu.so differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pyx b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pyx new file mode 100644 index 0000000000000000000000000000000000000000..111353c49a22becb74cf2d3d609241d208784508 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_partition_nodes.pyx @@ -0,0 +1,122 @@ +# BinaryTrees rely on partial sorts to partition their nodes during their +# initialisation. +# +# The C++ std library exposes nth_element, an efficient partial sort for this +# situation which has a linear time complexity as well as the best performances. +# +# To use std::algorithm::nth_element, a few fixture are defined using Cython: +# - partition_node_indices, a Cython function used in BinaryTrees, that calls +# - partition_node_indices_inner, a C++ function that wraps nth_element and uses +# - an IndexComparator to state how to compare KDTrees' indices +# +# IndexComparator has been defined so that partial sorts are stable with +# respect to the nodes initial indices. +# +# See for reference: +# - https://en.cppreference.com/w/cpp/algorithm/nth_element. +# - https://github.com/scikit-learn/scikit-learn/pull/11103 +# - https://github.com/scikit-learn/scikit-learn/pull/19473 +from cython cimport floating + + +cdef extern from *: + """ + #include + + template + class IndexComparator { + private: + const D *data; + I split_dim, n_features; + public: + IndexComparator(const D *data, const I &split_dim, const I &n_features): + data(data), split_dim(split_dim), n_features(n_features) {} + + bool operator()(const I &a, const I &b) const { + D a_value = data[a * n_features + split_dim]; + D b_value = data[b * n_features + split_dim]; + return a_value == b_value ? a < b : a_value < b_value; + } + }; + + template + void partition_node_indices_inner( + const D *data, + I *node_indices, + const I &split_dim, + const I &split_index, + const I &n_features, + const I &n_points) { + IndexComparator index_comparator(data, split_dim, n_features); + std::nth_element( + node_indices, + node_indices + split_index, + node_indices + n_points, + index_comparator); + } + """ + void partition_node_indices_inner[D, I]( + const D *data, + I *node_indices, + I split_dim, + I split_index, + I n_features, + I n_points) except + + + +cdef int partition_node_indices( + const floating *data, + intp_t *node_indices, + intp_t split_dim, + intp_t split_index, + intp_t n_features, + intp_t n_points) except -1: + """Partition points in the node into two equal-sized groups. + + Upon return, the values in node_indices will be rearranged such that + (assuming numpy-style indexing): + + data[node_indices[0:split_index], split_dim] + <= data[node_indices[split_index], split_dim] + + and + + data[node_indices[split_index], split_dim] + <= data[node_indices[split_index:n_points], split_dim] + + The algorithm is essentially a partial in-place quicksort around a + set pivot. 
+ + Parameters + ---------- + data : double pointer + Pointer to a 2D array of the training data, of shape [N, n_features]. + N must be greater than any of the values in node_indices. + node_indices : int pointer + Pointer to a 1D array of length n_points. This lists the indices of + each of the points within the current node. This will be modified + in-place. + split_dim : int + the dimension on which to split. This will usually be computed via + the routine ``find_node_split_dim``. + split_index : int + the index within node_indices around which to split the points. + n_features : int + the number of features (i.e. columns) in the 2D array pointed to by data. + n_points : int + the length of node_indices. This is also the number of points in + the original dataset. + Returns + ------- + status : int + integer exit status. On return, the contents of node_indices are + modified as noted above. + """ + partition_node_indices_inner( + data, + node_indices, + split_dim, + split_index, + n_features, + n_points) + return 0 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9ed033e747314ef4b2f7599c99da85be6dbce73e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_quad_tree.pxd @@ -0,0 +1,92 @@ +# Author: Thomas Moreau +# Author: Olivier Grisel + +# See quad_tree.pyx for details. + +cimport numpy as cnp +from ..utils._typedefs cimport float32_t, intp_t + +# This is effectively an ifdef statement in Cython. +# It allows us to write printf debugging lines +# and remove them at compile time. +cdef enum: + DEBUGFLAG = 0 + +cdef float EPSILON = 1e-6 + +# XXX: Careful not to change the order of the arguments. It is important to +# have is_leaf and max_width consecutive, as it avoids padding by +# the compiler and keeps the size coherent for both C and numpy data structures. +cdef struct Cell: + # Base storage structure for cells in a QuadTree object + + # Tree structure + intp_t parent # Parent cell of this cell + intp_t[8] children # Array pointing to children of this cell + + # Cell description + intp_t cell_id # Id of the cell in the cells array in the Tree + intp_t point_index # Index of the point at this cell (only defined + # # in a non-empty leaf) + bint is_leaf # Does this cell have children? + float32_t squared_max_width # Squared value of the maximum width w + intp_t depth # Depth of the cell in the tree + intp_t cumulative_size # Number of points included in the subtree with + # # this cell as a root. + + # Internal constants + float32_t[3] center # Store the center for quick split of cells + float32_t[3] barycenter # Keep track of the center of mass of the cell + + # Cell boundaries + float32_t[3] min_bounds # Inferior boundaries of this cell (inclusive) + float32_t[3] max_bounds # Superior boundaries of this cell (exclusive) + + +cdef class _QuadTree: + # The QuadTree object is a quad tree structure constructed by recursively + # inserting points in the tree and splitting cells in 4 so that each + # leaf cell contains at most one point. + # This structure also handles 3D data, inserted in trees with 8 children + # for each node. + + # Parameters of the tree + cdef public int n_dimensions # Number of dimensions in X + cdef public int verbose # Verbosity of the output + cdef intp_t n_cells_per_cell # Number of children per node.
(2 ** n_dimension) + + # Tree inner structure + cdef public intp_t max_depth # Max depth of the tree + cdef public intp_t cell_count # Counter for node IDs + cdef public intp_t capacity # Capacity of tree, in terms of nodes + cdef public intp_t n_points # Total number of points + cdef Cell* cells # Array of nodes + + # Point insertion methods + cdef int insert_point(self, float32_t[3] point, intp_t point_index, + intp_t cell_id=*) except -1 nogil + cdef intp_t _insert_point_in_new_child(self, float32_t[3] point, Cell* cell, + intp_t point_index, intp_t size=* + ) noexcept nogil + cdef intp_t _select_child(self, float32_t[3] point, Cell* cell) noexcept nogil + cdef bint _is_duplicate(self, float32_t[3] point1, float32_t[3] point2) noexcept nogil + + # Create a summary of the Tree compare to a query point + cdef long summarize(self, float32_t[3] point, float32_t* results, + float squared_theta=*, intp_t cell_id=*, long idx=* + ) noexcept nogil + + # Internal cell initialization methods + cdef void _init_cell(self, Cell* cell, intp_t parent, intp_t depth) noexcept nogil + cdef void _init_root(self, float32_t[3] min_bounds, float32_t[3] max_bounds + ) noexcept nogil + + # Private methods + cdef int _check_point_in_cell(self, float32_t[3] point, Cell* cell + ) except -1 nogil + + # Private array manipulation to manage the ``cells`` array + cdef int _resize(self, intp_t capacity) except -1 nogil + cdef int _resize_c(self, intp_t capacity=*) except -1 nogil + cdef int _get_cell(self, float32_t[3] point, intp_t cell_id=*) except -1 nogil + cdef Cell[:] _get_cell_ndarray(self) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py new file mode 100644 index 0000000000000000000000000000000000000000..8888fe18483c6ae5f7008d78b0d6ff97d096a419 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/_unsupervised.py @@ -0,0 +1,179 @@ +"""Unsupervised nearest neighbors learner""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from ..base import _fit_context +from ._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin + + +class NearestNeighbors(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase): + """Unsupervised learner for implementing neighbor searches. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.9 + + Parameters + ---------- + n_neighbors : int, default=5 + Number of neighbors to use by default for :meth:`kneighbors` queries. + + radius : float, default=1.0 + Range of parameter space to use by default for :meth:`radius_neighbors` + queries. + + algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' + Algorithm used to compute the nearest neighbors: + + - 'ball_tree' will use :class:`BallTree` + - 'kd_tree' will use :class:`KDTree` + - 'brute' will use a brute-force search. + - 'auto' will attempt to decide the most appropriate algorithm + based on the values passed to :meth:`fit` method. + + Note: fitting on sparse input will override the setting of + this parameter, using brute force. + + leaf_size : int, default=30 + Leaf size passed to BallTree or KDTree. This can affect the + speed of the construction and query, as well as the memory + required to store the tree. The optimal value depends on the + nature of the problem. + + metric : str or callable, default='minkowski' + Metric to use for distance computation. 
Default is "minkowski", which + results in the standard Euclidean distance when p = 2. See the + documentation of `scipy.spatial.distance + `_ and + the metrics listed in + :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric + values. + + If metric is "precomputed", X is assumed to be a distance matrix and + must be square during fit. X may be a :term:`sparse graph`, in which + case only "nonzero" elements may be considered neighbors. + + If metric is a callable function, it takes two arrays representing 1D + vectors as inputs and must return one value indicating the distance + between those vectors. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + p : float (positive), default=2 + Parameter for the Minkowski metric from + sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is + equivalent to using manhattan_distance (l1), and euclidean_distance + (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. + + metric_params : dict, default=None + Additional keyword arguments for the metric function. + + n_jobs : int, default=None + The number of parallel jobs to run for neighbors search. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + effective_metric_ : str + Metric used to compute distances to neighbors. + + effective_metric_params_ : dict + Parameters for the metric used to compute distances to neighbors. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_samples_fit_ : int + Number of samples in the fitted data. + + See Also + -------- + KNeighborsClassifier : Classifier implementing the k-nearest neighbors + vote. + RadiusNeighborsClassifier : Classifier implementing a vote among neighbors + within a given radius. + KNeighborsRegressor : Regression based on k-nearest neighbors. + RadiusNeighborsRegressor : Regression based on neighbors within a fixed + radius. + BallTree : Space partitioning data structure for organizing points in a + multi-dimensional space, used for nearest neighbor search. + + Notes + ----- + See :ref:`Nearest Neighbors ` in the online documentation + for a discussion of the choice of ``algorithm`` and ``leaf_size``. + + https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm + + Examples + -------- + >>> import numpy as np + >>> from sklearn.neighbors import NearestNeighbors + >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]] + >>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4) + >>> neigh.fit(samples) + NearestNeighbors(...) + >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False) + array([[2, 0]]...) + >>> nbrs = neigh.radius_neighbors( + ... [[0, 0, 1.3]], 0.4, return_distance=False + ... 
) + >>> np.asarray(nbrs[0][0]) + array(2) + """ + + def __init__( + self, + *, + n_neighbors=5, + radius=1.0, + algorithm="auto", + leaf_size=30, + metric="minkowski", + p=2, + metric_params=None, + n_jobs=None, + ): + super().__init__( + n_neighbors=n_neighbors, + radius=radius, + algorithm=algorithm, + leaf_size=leaf_size, + metric=metric, + p=p, + metric_params=metric_params, + n_jobs=n_jobs, + ) + + @_fit_context( + # NearestNeighbors.metric is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None): + """Fit the nearest neighbors estimator from the training dataset. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ + (n_samples, n_samples) if metric='precomputed' + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : NearestNeighbors + The fitted nearest neighbors estimator. + """ + return self._fit(X) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/meson.build b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..22f81d597948b3d2f8a186718c434892ebcbc0dc --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/neighbors/meson.build @@ -0,0 +1,56 @@ +_binary_tree_pxi = custom_target( + '_binary_tree_pxi', + output: '_binary_tree.pxi', + input: '_binary_tree.pxi.tp', + command: [py, tempita, '@INPUT@', '-o', '@OUTDIR@'], +) + +# .pyx is generated so this is needed to make Cython compilation work. The pxi +# file is included to avoid "missing dependency paths" with ninja -t missingdeps +neighbors_cython_tree = [ + fs.copyfile('__init__.py'), + fs.copyfile('_partition_nodes.pxd'), + _binary_tree_pxi, +] + +name_list = ['_ball_tree', '_kd_tree'] + +foreach name: name_list + pyx = custom_target( + name + '_pyx', + output: name + '.pyx', + input: name + '.pyx.tp', + command: [py, tempita, '@INPUT@', '-o', '@OUTDIR@'], + # TODO in principle this should go in py.extension_module below. This is a + # temporary work-around for a dependency issue with .pyx.tp files.
For more + # details, see https://github.com/mesonbuild/meson/issues/13212 + depends: [neighbors_cython_tree, utils_cython_tree, metrics_cython_tree], + ) + py.extension_module( + name, + pyx, + dependencies: [np_dep], + cython_args: cython_args, + subdir: 'sklearn/neighbors', + install: true +) +endforeach + +neighbors_extension_metadata = { + '_partition_nodes': + {'sources': ['_partition_nodes.pyx'], + 'override_options': ['cython_language=cpp'], 'dependencies': [np_dep]}, + '_quad_tree': {'sources': ['_quad_tree.pyx'], 'dependencies': [np_dep]}, +} + +foreach ext_name, ext_dict : neighbors_extension_metadata + py.extension_module( + ext_name, + [ext_dict.get('sources'), utils_cython_tree], + dependencies: ext_dict.get('dependencies'), + override_options : ext_dict.get('override_options', []), + cython_args: cython_args, + subdir: 'sklearn/neighbors', + install: true + ) +endforeach diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/pipeline.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..fc5be7e3c51f76ddf0a812ad36e66e731c2b47a3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/pipeline.py @@ -0,0 +1,2188 @@ +"""Utilities to build a composite estimator as a chain of transforms and estimators.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from collections import Counter, defaultdict +from contextlib import contextmanager +from copy import deepcopy +from itertools import chain, islice + +import numpy as np +from scipy import sparse + +from .base import TransformerMixin, _fit_context, clone +from .exceptions import NotFittedError +from .preprocessing import FunctionTransformer +from .utils import Bunch +from .utils._estimator_html_repr import _VisualBlock +from .utils._metadata_requests import METHODS +from .utils._param_validation import HasMethods, Hidden +from .utils._set_output import ( + _get_container_adapter, + _safe_set_output, +) +from .utils._tags import get_tags +from .utils._user_interface import _print_elapsed_time +from .utils.deprecation import _deprecate_Xt_in_inverse_transform +from .utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + get_routing_for_object, + process_routing, +) +from .utils.metaestimators import _BaseComposition, available_if +from .utils.parallel import Parallel, delayed +from .utils.validation import check_is_fitted, check_memory + +__all__ = ["Pipeline", "FeatureUnion", "make_pipeline", "make_union"] + + +@contextmanager +def _raise_or_warn_if_not_fitted(estimator): + """A context manager to make sure a NotFittedError is raised, if a sub-estimator + raises the error. + + Otherwise, we raise a warning if the pipeline is not fitted, with the deprecation. + + TODO(1.8): remove this context manager and replace with check_is_fitted. + """ + try: + yield + except NotFittedError as exc: + raise NotFittedError("Pipeline is not fitted yet.") from exc + + # we only get here if the above didn't raise + try: + check_is_fitted(estimator) + except NotFittedError: + warnings.warn( + "This Pipeline instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using other methods such as transform, " + "predict, etc. This will raise an error in 1.8 instead of the current " + "warning.", + FutureWarning, + ) + + +def _final_estimator_has(attr): + """Check that final_estimator has `attr`. 
+ + Used together with `available_if` in `Pipeline`.""" + + def check(self): + # raise original `AttributeError` if `attr` does not exist + getattr(self._final_estimator, attr) + return True + + return check + + +def _cached_transform( + sub_pipeline, *, cache, param_name, param_value, transform_params +): + """Transform a parameter value using a sub-pipeline and cache the result. + + Parameters + ---------- + sub_pipeline : Pipeline + The sub-pipeline to be used for transformation. + cache : dict + The cache dictionary to store the transformed values. + param_name : str + The name of the parameter to be transformed. + param_value : object + The value of the parameter to be transformed. + transform_params : dict + The metadata to be used for transformation. This is passed to the + `transform` method of the sub-pipeline. + + Returns + ------- + transformed_value : object + The transformed value of the parameter. + """ + if param_name not in cache: + # If the parameter is a tuple, transform each element of the + # tuple. This is needed to support the pattern present in + # `lightgbm` and `xgboost` where users can pass multiple + # validation sets. + if isinstance(param_value, tuple): + cache[param_name] = tuple( + sub_pipeline.transform(element, **transform_params) + for element in param_value + ) + else: + cache[param_name] = sub_pipeline.transform(param_value, **transform_params) + + return cache[param_name] + + +class Pipeline(_BaseComposition): + """ + A sequence of data transformers with an optional final predictor. + + `Pipeline` allows you to sequentially apply a list of transformers to + preprocess the data and, if desired, conclude the sequence with a final + :term:`predictor` for predictive modeling. + + Intermediate steps of the pipeline must be transformers, that is, they + must implement `fit` and `transform` methods. + The final :term:`estimator` only needs to implement `fit`. + The transformers in the pipeline can be cached using the ``memory`` argument. + + The purpose of the pipeline is to assemble several steps that can be + cross-validated together while setting different parameters. For this, it + enables setting parameters of the various steps using their names and the + parameter name separated by a `'__'`, as in the example below. A step's + estimator may be replaced entirely by setting the parameter with its name + to another estimator, or a transformer removed by setting it to + `'passthrough'` or `None`. + + For an example use case of `Pipeline` combined with + :class:`~sklearn.model_selection.GridSearchCV`, refer to + :ref:`sphx_glr_auto_examples_compose_plot_compare_reduction.py`. The + example :ref:`sphx_glr_auto_examples_compose_plot_digits_pipe.py` shows how + to grid search on a pipeline using `'__'` as a separator in the parameter names. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.5 + + Parameters + ---------- + steps : list of tuples + List of (name of step, estimator) tuples that are to be chained in + sequential order. To be compatible with the scikit-learn API, all steps + must define `fit`. All non-last steps must also define `transform`. See + :ref:`Combining Estimators ` for more details. + + transform_input : list of str, default=None + The names of the :term:`metadata` parameters that should be transformed by the + pipeline before passing them to the step consuming them. + + This enables some input arguments to ``fit`` (other than ``X``) + to be transformed by the steps of the pipeline up to the step which requires + them.
Requirement is defined via :ref:`metadata routing `. + For instance, this can be used to pass a validation set through the pipeline. + + You can only set this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.6 + + memory : str or object with the joblib.Memory interface, default=None + Used to cache the fitted transformers of the pipeline. The last step + will never be cached, even if it is a transformer. By default, no + caching is performed. If a string is given, it is the path to the + caching directory. Enabling caching triggers a clone of the transformers + before fitting. Therefore, the transformer instance given to the + pipeline cannot be inspected directly. Use the attribute ``named_steps`` + or ``steps`` to inspect estimators within the pipeline. Caching the + transformers is advantageous when fitting is time consuming. See + :ref:`sphx_glr_auto_examples_neighbors_plot_caching_nearest_neighbors.py` + for an example on how to enable caching. + + verbose : bool, default=False + If True, the time elapsed while fitting each step will be printed as it + is completed. + + Attributes + ---------- + named_steps : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + Read-only attribute to access any step parameter by user given name. + Keys are step names and values are steps parameters. + + classes_ : ndarray of shape (n_classes,) + The classes labels. Only exist if the last step of the pipeline is a + classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying first estimator in `steps` exposes such an attribute + when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if the + underlying estimator exposes such an attribute when fit. + + .. versionadded:: 1.0 + + See Also + -------- + make_pipeline : Convenience function for simplified pipeline construction. + + Examples + -------- + >>> from sklearn.svm import SVC + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.pipeline import Pipeline + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split(X, y, + ... random_state=0) + >>> pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC())]) + >>> # The pipeline can be used as any other estimator + >>> # and avoids leaking the test set into the train set + >>> pipe.fit(X_train, y_train).score(X_test, y_test) + 0.88 + >>> # An estimator's parameter can be set using '__' syntax + >>> pipe.set_params(svc__C=10).fit(X_train, y_train).score(X_test, y_test) + 0.76 + """ + + # BaseEstimator interface + _parameter_constraints: dict = { + "steps": [list, Hidden(tuple)], + "transform_input": [list, None], + "memory": [None, str, HasMethods(["cache"])], + "verbose": ["boolean"], + } + + def __init__(self, steps, *, transform_input=None, memory=None, verbose=False): + self.steps = steps + self.transform_input = transform_input + self.memory = memory + self.verbose = verbose + + def set_output(self, *, transform=None): + """Set the output container when `"transform"` and `"fit_transform"` are called. + + Calling `set_output` will set the output of all estimators in `steps`. 
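+ + For example (a sketch; it assumes pandas is installed, since the "pandas" output container requires it): + + >>> import numpy as np + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> pipe = make_pipeline(StandardScaler()).set_output(transform="pandas") + >>> type(pipe.fit_transform(np.array([[0.0], [1.0]]))).__name__ + 'DataFrame'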
+ + Parameters + ---------- + transform : {"default", "pandas", "polars"}, default=None + Configure output of `transform` and `fit_transform`. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.4 + `"polars"` option was added. + + Returns + ------- + self : estimator instance + Estimator instance. + """ + for _, _, step in self._iter(): + _safe_set_output(step, transform=transform) + return self + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Returns the parameters given in the constructor as well as the + estimators contained within the `steps` of the `Pipeline`. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : mapping of string to any + Parameter names mapped to their values. + """ + return self._get_params("steps", deep=deep) + + def set_params(self, **kwargs): + """Set the parameters of this estimator. + + Valid parameter keys can be listed with ``get_params()``. Note that + you can directly set the parameters of the estimators contained in + `steps`. + + Parameters + ---------- + **kwargs : dict + Parameters of this estimator or parameters of estimators contained + in `steps`. Parameters of the steps may be set using its name and + the parameter name separated by a '__'. + + Returns + ------- + self : object + Pipeline class instance. + """ + self._set_params("steps", **kwargs) + return self + + def _validate_steps(self): + names, estimators = zip(*self.steps) + + # validate names + self._validate_names(names) + + # validate estimators + transformers = estimators[:-1] + estimator = estimators[-1] + + for t in transformers: + if t is None or t == "passthrough": + continue + if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr( + t, "transform" + ): + raise TypeError( + "All intermediate steps should be " + "transformers and implement fit and transform " + "or be the string 'passthrough' " + "'%s' (type %s) doesn't" % (t, type(t)) + ) + + # We allow last estimator to be None as an identity transformation + if ( + estimator is not None + and estimator != "passthrough" + and not hasattr(estimator, "fit") + ): + raise TypeError( + "Last step of Pipeline should implement fit " + "or be the string 'passthrough'. " + "'%s' (type %s) doesn't" % (estimator, type(estimator)) + ) + + def _iter(self, with_final=True, filter_passthrough=True): + """ + Generate (idx, (name, trans)) tuples from self.steps + + When filter_passthrough is True, 'passthrough' and None transformers + are filtered out. + """ + stop = len(self.steps) + if not with_final: + stop -= 1 + + for idx, (name, trans) in enumerate(islice(self.steps, 0, stop)): + if not filter_passthrough: + yield idx, name, trans + elif trans is not None and trans != "passthrough": + yield idx, name, trans + + def __len__(self): + """ + Returns the length of the Pipeline + """ + return len(self.steps) + + def __getitem__(self, ind): + """Returns a sub-pipeline or a single estimator in the pipeline + + Indexing with an integer will return an estimator; using a slice + returns another Pipeline instance which copies a slice of this + Pipeline. This copy is shallow: modifying (or fitting) estimators in + the sub-pipeline will affect the larger pipeline and vice-versa. 
+ However, replacing a value in `step` will not affect a copy. + + See + :ref:`sphx_glr_auto_examples_feature_selection_plot_feature_selection_pipeline.py` + for an example of how to use slicing to inspect part of a pipeline. + """ + if isinstance(ind, slice): + if ind.step not in (1, None): + raise ValueError("Pipeline slicing only supports a step of 1") + return self.__class__( + self.steps[ind], memory=self.memory, verbose=self.verbose + ) + try: + name, est = self.steps[ind] + except TypeError: + # Not an int, try get step by name + return self.named_steps[ind] + return est + + # TODO(1.8): Remove this property + @property + def _estimator_type(self): + """Return the estimator type of the last step in the pipeline.""" + + if not self.steps: + return None + + return self.steps[-1][1]._estimator_type + + @property + def named_steps(self): + """Access the steps by name. + + Read-only attribute to access any step by given name. + Keys are steps names and values are the steps objects.""" + # Use Bunch object to improve autocomplete + return Bunch(**dict(self.steps)) + + @property + def _final_estimator(self): + try: + estimator = self.steps[-1][1] + return "passthrough" if estimator is None else estimator + except (ValueError, AttributeError, TypeError): + # This condition happens when a call to a method is first calling + # `_available_if` and `fit` did not validate `steps` yet. We + # return `None` and an `InvalidParameterError` will be raised + # right after. + return None + + def _log_message(self, step_idx): + if not self.verbose: + return None + name, _ = self.steps[step_idx] + + return "(step %d of %d) Processing %s" % (step_idx + 1, len(self.steps), name) + + def _check_method_params(self, method, props, **kwargs): + if _routing_enabled(): + routed_params = process_routing(self, method, **props, **kwargs) + return routed_params + else: + fit_params_steps = Bunch( + **{ + name: Bunch(**{method: {} for method in METHODS}) + for name, step in self.steps + if step is not None + } + ) + for pname, pval in props.items(): + if "__" not in pname: + raise ValueError( + "Pipeline.fit does not accept the {} parameter. " + "You can pass parameters to specific steps of your " + "pipeline using the stepname__parameter format, e.g. " + "`Pipeline.fit(X, y, logisticregression__sample_weight" + "=sample_weight)`.".format(pname) + ) + step, param = pname.split("__", 1) + fit_params_steps[step]["fit"][param] = pval + # without metadata routing, fit_transform and fit_predict + # get all the same params and pass it to the last fit. + fit_params_steps[step]["fit_transform"][param] = pval + fit_params_steps[step]["fit_predict"][param] = pval + return fit_params_steps + + def _get_metadata_for_step(self, *, step_idx, step_params, all_params): + """Get params (metadata) for step `name`. + + This transforms the metadata up to this step if required, which is + indicated by the `transform_input` parameter. + + If a param in `step_params` is included in the `transform_input` list, + it will be transformed. + + Parameters + ---------- + step_idx : int + Index of the step in the pipeline. + + step_params : dict + Parameters specific to the step. These are routed parameters, e.g. + `routed_params[name]`. If a parameter name here is included in the + `pipeline.transform_input`, then it will be transformed. Note that + these parameters are *after* routing, so the aliases are already + resolved. + + all_params : dict + All parameters passed by the user. 
Here this is used to call + `transform` on the slice of the pipeline itself. + + Returns + ------- + dict + Parameters to be passed to the step. The ones which should be + transformed are transformed. + """ + if ( + self.transform_input is None + or not all_params + or not step_params + or step_idx == 0 + ): + # we only need to process step_params if transform_input is set + # and metadata is given by the user. + return step_params + + sub_pipeline = self[:step_idx] + sub_metadata_routing = get_routing_for_object(sub_pipeline) + # here we get the metadata required by sub_pipeline.transform + transform_params = { + key: value + for key, value in all_params.items() + if key + in sub_metadata_routing.consumes( + method="transform", params=all_params.keys() + ) + } + transformed_params = dict() # this is to be returned + transformed_cache = dict() # used to transform each param once + # `step_params` is the output of `process_routing`, so it has a dict for each + # method (e.g. fit, transform, predict), which are the args to be passed to + # those methods. We need to transform the parameters which are in the + # `transform_input`, before returning these dicts. + for method, method_params in step_params.items(): + transformed_params[method] = Bunch() + for param_name, param_value in method_params.items(): + # An example of `(param_name, param_value)` is + # `('sample_weight', array([0.5, 0.5, ...]))` + if param_name in self.transform_input: + # This parameter now needs to be transformed by the sub_pipeline, to + # this step. We cache these computations to avoid repeating them. + transformed_params[method][param_name] = _cached_transform( + sub_pipeline, + cache=transformed_cache, + param_name=param_name, + param_value=param_value, + transform_params=transform_params, + ) + else: + transformed_params[method][param_name] = param_value + return transformed_params + + # Estimator interface + + def _fit(self, X, y=None, routed_params=None, raw_params=None): + """Fit the pipeline except the last step. + + routed_params is the output of `process_routing` + raw_params is the parameters passed by the user, used when `transform_input` + is set by the user, to transform metadata using a sub-pipeline. + """ + # shallow copy of steps - this should really be steps_ + self.steps = list(self.steps) + self._validate_steps() + # Setup the memory + memory = check_memory(self.memory) + + fit_transform_one_cached = memory.cache(_fit_transform_one) + + for step_idx, name, transformer in self._iter( + with_final=False, filter_passthrough=False + ): + if transformer is None or transformer == "passthrough": + with _print_elapsed_time("Pipeline", self._log_message(step_idx)): + continue + + if hasattr(memory, "location") and memory.location is None: + # we do not clone when caching is disabled to + # preserve backward compatibility + cloned_transformer = transformer + else: + cloned_transformer = clone(transformer) + # Fit or load from cache the current transformer + step_params = self._get_metadata_for_step( + step_idx=step_idx, + step_params=routed_params[name], + all_params=raw_params, + ) + + X, fitted_transformer = fit_transform_one_cached( + cloned_transformer, + X, + y, + weight=None, + message_clsname="Pipeline", + message=self._log_message(step_idx), + params=step_params, + ) + # Replace the transformer of the step with the fitted + # transformer. This is necessary when loading the transformer + # from the cache. 
+ self.steps[step_idx] = (name, fitted_transformer) + return X + + @_fit_context( + # estimators in Pipeline.steps are not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, **params): + """Fit the model. + + Fit all the transformers one after the other and sequentially transform the + data. Finally, fit the transformed data using the final estimator. + + Parameters + ---------- + X : iterable + Training data. Must fulfill input requirements of first step of the + pipeline. + + y : iterable, default=None + Training targets. Must fulfill label requirements for all steps of + the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): Parameters passed to the + ``fit`` method of each step, where each parameter name is prefixed such + that parameter ``p`` for step ``s`` has key ``s__p``. + + - If `enable_metadata_routing=True`: Parameters requested and accepted by + steps. Each step must have requested certain metadata for these parameters + to be forwarded to them. + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True` is set via + :func:`~sklearn.set_config`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + self : object + Pipeline with fitted steps. + """ + if not _routing_enabled() and self.transform_input is not None: + raise ValueError( + "The `transform_input` parameter can only be set if metadata " + "routing is enabled. You can enable metadata routing using " + "`sklearn.set_config(enable_metadata_routing=True)`." + ) + + routed_params = self._check_method_params(method="fit", props=params) + Xt = self._fit(X, y, routed_params, raw_params=params) + with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): + if self._final_estimator != "passthrough": + last_step_params = self._get_metadata_for_step( + step_idx=len(self) - 1, + step_params=routed_params[self.steps[-1][0]], + all_params=params, + ) + self._final_estimator.fit(Xt, y, **last_step_params["fit"]) + + return self + + def _can_fit_transform(self): + return ( + self._final_estimator == "passthrough" + or hasattr(self._final_estimator, "transform") + or hasattr(self._final_estimator, "fit_transform") + ) + + @available_if(_can_fit_transform) + @_fit_context( + # estimators in Pipeline.steps are not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None, **params): + """Fit the model and transform with the final estimator. + + Fit all the transformers one after the other and sequentially transform + the data. Only valid if the final estimator either implements + `fit_transform` or `fit` and `transform`. + + Parameters + ---------- + X : iterable + Training data. Must fulfill input requirements of first step of the + pipeline. + + y : iterable, default=None + Training targets. Must fulfill label requirements for all steps of + the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): Parameters passed to the + ``fit`` method of each step, where each parameter name is prefixed such + that parameter ``p`` for step ``s`` has key ``s__p``. + + - If `enable_metadata_routing=True`: Parameters requested and accepted by + steps. Each step must have requested certain metadata for these parameters + to be forwarded to them. + + .. 
versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_transformed_features) + Transformed samples. + """ + routed_params = self._check_method_params(method="fit_transform", props=params) + Xt = self._fit(X, y, routed_params) + + last_step = self._final_estimator + with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): + if last_step == "passthrough": + return Xt + last_step_params = self._get_metadata_for_step( + step_idx=len(self) - 1, + step_params=routed_params[self.steps[-1][0]], + all_params=params, + ) + if hasattr(last_step, "fit_transform"): + return last_step.fit_transform( + Xt, y, **last_step_params["fit_transform"] + ) + else: + return last_step.fit(Xt, y, **last_step_params["fit"]).transform( + Xt, **last_step_params["transform"] + ) + + @available_if(_final_estimator_has("predict")) + def predict(self, X, **params): + """Transform the data, and apply `predict` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls `predict` + method. Only valid if the final estimator implements `predict`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): Parameters to the + ``predict`` called at the end of all transformations in the pipeline. + + - If `enable_metadata_routing=True`: Parameters requested and accepted by + steps. Each step must have requested certain metadata for these parameters + to be forwarded to them. + + .. versionadded:: 0.20 + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True` is set via + :func:`~sklearn.set_config`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Note that while this may be used to return uncertainties from some + models with ``return_std`` or ``return_cov``, uncertainties that are + generated by the transformations in the pipeline are not propagated + to the final estimator. + + Returns + ------- + y_pred : ndarray + Result of calling `predict` on the final estimator. + """ + # TODO(1.8): Remove the context manager and use check_is_fitted(self) + with _raise_or_warn_if_not_fitted(self): + Xt = X + + if not _routing_enabled(): + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt) + return self.steps[-1][1].predict(Xt, **params) + + # metadata routing enabled + routed_params = process_routing(self, "predict", **params) + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt, **routed_params[name].transform) + return self.steps[-1][1].predict( + Xt, **routed_params[self.steps[-1][0]].predict + ) + + @available_if(_final_estimator_has("fit_predict")) + @_fit_context( + # estimators in Pipeline.steps are not validated yet + prefer_skip_nested_validation=False + ) + def fit_predict(self, X, y=None, **params): + """Transform the data, and apply `fit_predict` with the final estimator. + + Call `fit_transform` of each transformer in the pipeline. 
The + transformed data are finally passed to the final estimator that calls + `fit_predict` method. Only valid if the final estimator implements + `fit_predict`. + + Parameters + ---------- + X : iterable + Training data. Must fulfill input requirements of first step of + the pipeline. + + y : iterable, default=None + Training targets. Must fulfill label requirements for all steps + of the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): Parameters to the + ``predict`` called at the end of all transformations in the pipeline. + + - If `enable_metadata_routing=True`: Parameters requested and accepted by + steps. Each step must have requested certain metadata for these parameters + to be forwarded to them. + + .. versionadded:: 0.20 + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Note that while this may be used to return uncertainties from some + models with ``return_std`` or ``return_cov``, uncertainties that are + generated by the transformations in the pipeline are not propagated + to the final estimator. + + Returns + ------- + y_pred : ndarray + Result of calling `fit_predict` on the final estimator. + """ + routed_params = self._check_method_params(method="fit_predict", props=params) + Xt = self._fit(X, y, routed_params) + + params_last_step = routed_params[self.steps[-1][0]] + with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): + y_pred = self.steps[-1][1].fit_predict( + Xt, y, **params_last_step.get("fit_predict", {}) + ) + return y_pred + + @available_if(_final_estimator_has("predict_proba")) + def predict_proba(self, X, **params): + """Transform the data, and apply `predict_proba` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `predict_proba` method. Only valid if the final estimator implements + `predict_proba`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): Parameters to the + `predict_proba` called at the end of all transformations in the pipeline. + + - If `enable_metadata_routing=True`: Parameters requested and accepted by + steps. Each step must have requested certain metadata for these parameters + to be forwarded to them. + + .. versionadded:: 0.20 + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + y_proba : ndarray of shape (n_samples, n_classes) + Result of calling `predict_proba` on the final estimator. 
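As a concrete illustration of the chaining described above (a minimal sketch, not part of the upstream source; it assumes only standard scikit-learn estimators), `predict_proba` on a pipeline is equivalent to applying each fitted transformer's `transform` and then calling `predict_proba` on the final step:

```python
# Sketch: Pipeline.predict_proba chains the transformers' transform calls
# into the final classifier's predict_proba.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_classification(random_state=0)
pipe = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)

proba = pipe.predict_proba(X[:3])                          # shape (3, n_classes)
manual = pipe[-1].predict_proba(pipe[0].transform(X[:3]))  # same computation
assert (proba == manual).all()
```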
+ """ + # TODO(1.8): Remove the context manager and use check_is_fitted(self) + with _raise_or_warn_if_not_fitted(self): + Xt = X + + if not _routing_enabled(): + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt) + return self.steps[-1][1].predict_proba(Xt, **params) + + # metadata routing enabled + routed_params = process_routing(self, "predict_proba", **params) + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt, **routed_params[name].transform) + return self.steps[-1][1].predict_proba( + Xt, **routed_params[self.steps[-1][0]].predict_proba + ) + + @available_if(_final_estimator_has("decision_function")) + def decision_function(self, X, **params): + """Transform the data, and apply `decision_function` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `decision_function` method. Only valid if the final estimator + implements `decision_function`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + **params : dict of string -> object + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + y_score : ndarray of shape (n_samples, n_classes) + Result of calling `decision_function` on the final estimator. + """ + # TODO(1.8): Remove the context manager and use check_is_fitted(self) + with _raise_or_warn_if_not_fitted(self): + _raise_for_params(params, self, "decision_function") + + # not branching here since params is only available if + # enable_metadata_routing=True + routed_params = process_routing(self, "decision_function", **params) + + Xt = X + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform( + Xt, **routed_params.get(name, {}).get("transform", {}) + ) + return self.steps[-1][1].decision_function( + Xt, + **routed_params.get(self.steps[-1][0], {}).get("decision_function", {}), + ) + + @available_if(_final_estimator_has("score_samples")) + def score_samples(self, X): + """Transform the data, and apply `score_samples` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `score_samples` method. Only valid if the final estimator implements + `score_samples`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + Returns + ------- + y_score : ndarray of shape (n_samples,) + Result of calling `score_samples` on the final estimator. + """ + # TODO(1.8): Remove the context manager and use check_is_fitted(self) + with _raise_or_warn_if_not_fitted(self): + Xt = X + for _, _, transformer in self._iter(with_final=False): + Xt = transformer.transform(Xt) + return self.steps[-1][1].score_samples(Xt) + + @available_if(_final_estimator_has("predict_log_proba")) + def predict_log_proba(self, X, **params): + """Transform the data, and apply `predict_log_proba` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `predict_log_proba` method. 
Only valid if the final estimator + implements `predict_log_proba`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + **params : dict of str -> object + - If `enable_metadata_routing=False` (default): Parameters to the + `predict_log_proba` called at the end of all transformations in the + pipeline. + + - If `enable_metadata_routing=True`: Parameters requested and accepted by + steps. Each step must have requested certain metadata for these parameters + to be forwarded to them. + + .. versionadded:: 0.20 + + .. versionchanged:: 1.4 + Parameters are now passed to the ``transform`` method of the + intermediate steps as well, if requested, and if + `enable_metadata_routing=True`. + + See :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + y_log_proba : ndarray of shape (n_samples, n_classes) + Result of calling `predict_log_proba` on the final estimator. + """ + # TODO(1.8): Remove the context manager and use check_is_fitted(self) + with _raise_or_warn_if_not_fitted(self): + Xt = X + + if not _routing_enabled(): + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt) + return self.steps[-1][1].predict_log_proba(Xt, **params) + + # metadata routing enabled + routed_params = process_routing(self, "predict_log_proba", **params) + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt, **routed_params[name].transform) + return self.steps[-1][1].predict_log_proba( + Xt, **routed_params[self.steps[-1][0]].predict_log_proba + ) + + def _can_transform(self): + return self._final_estimator == "passthrough" or hasattr( + self._final_estimator, "transform" + ) + + @available_if(_can_transform) + def transform(self, X, **params): + """Transform the data, and apply `transform` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `transform` method. Only valid if the final estimator + implements `transform`. + + This also works where final estimator is `None` in which case all prior + transformations are applied. + + Parameters + ---------- + X : iterable + Data to transform. Must fulfill input requirements of first step + of the pipeline. + + **params : dict of str -> object + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_transformed_features) + Transformed data. + """ + # TODO(1.8): Remove the context manager and use check_is_fitted(self) + with _raise_or_warn_if_not_fitted(self): + _raise_for_params(params, self, "transform") + + # not branching here since params is only available if + # enable_metadata_routing=True + routed_params = process_routing(self, "transform", **params) + Xt = X + for _, name, transform in self._iter(): + Xt = transform.transform(Xt, **routed_params[name].transform) + return Xt + + def _can_inverse_transform(self): + return all(hasattr(t, "inverse_transform") for _, _, t in self._iter()) + + @available_if(_can_inverse_transform) + def inverse_transform(self, X=None, *, Xt=None, **params): + """Apply `inverse_transform` for each step in a reverse order. 
+ + All estimators in the pipeline must support `inverse_transform`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_transformed_features) + Data samples, where ``n_samples`` is the number of samples and + ``n_features`` is the number of features. Must fulfill + input requirements of last step of pipeline's + ``inverse_transform`` method. + + Xt : array-like of shape (n_samples, n_transformed_features) + Data samples, where ``n_samples`` is the number of samples and + ``n_features`` is the number of features. Must fulfill + input requirements of last step of pipeline's + ``inverse_transform`` method. + + .. deprecated:: 1.5 + `Xt` was deprecated in 1.5 and will be removed in 1.7. Use `X` instead. + + **params : dict of str -> object + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + Xt : ndarray of shape (n_samples, n_features) + Inverse transformed data, that is, data in the original feature + space. + """ + # TODO(1.8): Remove the context manager and use check_is_fitted(self) + with _raise_or_warn_if_not_fitted(self): + _raise_for_params(params, self, "inverse_transform") + + X = _deprecate_Xt_in_inverse_transform(X, Xt) + + # we don't have to branch here, since params is only non-empty if + # enable_metadata_routing=True. + routed_params = process_routing(self, "inverse_transform", **params) + reverse_iter = reversed(list(self._iter())) + for _, name, transform in reverse_iter: + X = transform.inverse_transform( + X, **routed_params[name].inverse_transform + ) + return X + + @available_if(_final_estimator_has("score")) + def score(self, X, y=None, sample_weight=None, **params): + """Transform the data, and apply `score` with the final estimator. + + Call `transform` of each transformer in the pipeline. The transformed + data are finally passed to the final estimator that calls + `score` method. Only valid if the final estimator implements `score`. + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements of first step + of the pipeline. + + y : iterable, default=None + Targets used for scoring. Must fulfill label requirements for all + steps of the pipeline. + + sample_weight : array-like, default=None + If not None, this argument is passed as ``sample_weight`` keyword + argument to the ``score`` method of the final estimator. + + **params : dict of str -> object + Parameters requested and accepted by steps. Each step must have + requested certain metadata for these parameters to be forwarded to + them. + + .. versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + score : float + Result of calling `score` on the final estimator. + """ + # TODO(1.8): Remove the context manager and use check_is_fitted(self) + with _raise_or_warn_if_not_fitted(self): + Xt = X + if not _routing_enabled(): + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt) + score_params = {} + if sample_weight is not None: + score_params["sample_weight"] = sample_weight + return self.steps[-1][1].score(Xt, y, **score_params) + + # metadata routing is enabled. 
+ routed_params = process_routing( + self, "score", sample_weight=sample_weight, **params + ) + + Xt = X + for _, name, transform in self._iter(with_final=False): + Xt = transform.transform(Xt, **routed_params[name].transform) + return self.steps[-1][1].score( + Xt, y, **routed_params[self.steps[-1][0]].score + ) + + @property + def classes_(self): + """The classes labels. Only exist if the last step is a classifier.""" + return self.steps[-1][1].classes_ + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + + if not self.steps: + return tags + + try: + if self.steps[0][1] is not None and self.steps[0][1] != "passthrough": + tags.input_tags.pairwise = get_tags( + self.steps[0][1] + ).input_tags.pairwise + # WARNING: the sparse tag can be incorrect. + # Some Pipelines accepting sparse data are wrongly tagged sparse=False. + # For example Pipeline([PCA(), estimator]) accepts sparse data + # even if the estimator doesn't as PCA outputs a dense array. + tags.input_tags.sparse = all( + get_tags(step).input_tags.sparse + for name, step in self.steps + if step != "passthrough" + ) + except (ValueError, AttributeError, TypeError): + # This happens when the `steps` is not a list of (name, estimator) + # tuples and `fit` is not called yet to validate the steps. + pass + + try: + if self.steps[-1][1] is not None and self.steps[-1][1] != "passthrough": + last_step_tags = get_tags(self.steps[-1][1]) + tags.estimator_type = last_step_tags.estimator_type + tags.target_tags.multi_output = last_step_tags.target_tags.multi_output + tags.classifier_tags = deepcopy(last_step_tags.classifier_tags) + tags.regressor_tags = deepcopy(last_step_tags.regressor_tags) + tags.transformer_tags = deepcopy(last_step_tags.transformer_tags) + except (ValueError, AttributeError, TypeError): + # This happens when the `steps` is not a list of (name, estimator) + # tuples and `fit` is not called yet to validate the steps. + pass + + return tags + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Transform input features using the pipeline. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + feature_names_out = input_features + for _, name, transform in self._iter(): + if not hasattr(transform, "get_feature_names_out"): + raise AttributeError( + "Estimator {} does not provide get_feature_names_out. " + "Did you mean to call pipeline[:-1].get_feature_names_out" + "()?".format(name) + ) + feature_names_out = transform.get_feature_names_out(feature_names_out) + return feature_names_out + + @property + def n_features_in_(self): + """Number of features seen during first step `fit` method.""" + # delegate to first step (which will call check_is_fitted) + return self.steps[0][1].n_features_in_ + + @property + def feature_names_in_(self): + """Names of features seen during first step `fit` method.""" + # delegate to first step (which will call check_is_fitted) + return self.steps[0][1].feature_names_in_ + + def __sklearn_is_fitted__(self): + """Indicate whether pipeline has been fit. + + This is done by checking whether the last non-`passthrough` step of the + pipeline is fitted. + + An empty pipeline is considered fitted. 
+ """ + + # First find the last step that is not 'passthrough' + last_step = None + for _, estimator in reversed(self.steps): + if estimator != "passthrough": + last_step = estimator + break + + if last_step is None: + # All steps are 'passthrough', so the pipeline is considered fitted + return True + + try: + # check if the last step of the pipeline is fitted + # we only check the last step since if the last step is fit, it + # means the previous steps should also be fit. This is faster than + # checking if every step of the pipeline is fit. + check_is_fitted(last_step) + return True + except NotFittedError: + return False + + def _sk_visual_block_(self): + _, estimators = zip(*self.steps) + + def _get_name(name, est): + if est is None or est == "passthrough": + return f"{name}: passthrough" + # Is an estimator + return f"{name}: {est.__class__.__name__}" + + names = [_get_name(name, est) for name, est in self.steps] + name_details = [str(est) for est in estimators] + return _VisualBlock( + "serial", + estimators, + names=names, + name_details=name_details, + dash_wrapped=False, + ) + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__) + + # first we add all steps except the last one + for _, name, trans in self._iter(with_final=False, filter_passthrough=True): + method_mapping = MethodMapping() + # fit, fit_predict, and fit_transform call fit_transform if it + # exists, or else fit and transform + if hasattr(trans, "fit_transform"): + ( + method_mapping.add(caller="fit", callee="fit_transform") + .add(caller="fit_transform", callee="fit_transform") + .add(caller="fit_predict", callee="fit_transform") + ) + else: + ( + method_mapping.add(caller="fit", callee="fit") + .add(caller="fit", callee="transform") + .add(caller="fit_transform", callee="fit") + .add(caller="fit_transform", callee="transform") + .add(caller="fit_predict", callee="fit") + .add(caller="fit_predict", callee="transform") + ) + + ( + method_mapping.add(caller="predict", callee="transform") + .add(caller="predict", callee="transform") + .add(caller="predict_proba", callee="transform") + .add(caller="decision_function", callee="transform") + .add(caller="predict_log_proba", callee="transform") + .add(caller="transform", callee="transform") + .add(caller="inverse_transform", callee="inverse_transform") + .add(caller="score", callee="transform") + ) + + router.add(method_mapping=method_mapping, **{name: trans}) + + final_name, final_est = self.steps[-1] + if final_est is None or final_est == "passthrough": + return router + + # then we add the last step + method_mapping = MethodMapping() + if hasattr(final_est, "fit_transform"): + method_mapping.add(caller="fit_transform", callee="fit_transform") + else: + method_mapping.add(caller="fit", callee="fit").add( + caller="fit", callee="transform" + ) + ( + method_mapping.add(caller="fit", callee="fit") + .add(caller="predict", callee="predict") + .add(caller="fit_predict", callee="fit_predict") + .add(caller="predict_proba", callee="predict_proba") + .add(caller="decision_function", callee="decision_function") + .add(caller="predict_log_proba", callee="predict_log_proba") + .add(caller="transform", callee="transform") + .add(caller="inverse_transform", 
callee="inverse_transform") + .add(caller="score", callee="score") + ) + + router.add(method_mapping=method_mapping, **{final_name: final_est}) + return router + + +def _name_estimators(estimators): + """Generate names for estimators.""" + + names = [ + estimator if isinstance(estimator, str) else type(estimator).__name__.lower() + for estimator in estimators + ] + namecount = defaultdict(int) + for est, name in zip(estimators, names): + namecount[name] += 1 + + for k, v in list(namecount.items()): + if v == 1: + del namecount[k] + + for i in reversed(range(len(estimators))): + name = names[i] + if name in namecount: + names[i] += "-%d" % namecount[name] + namecount[name] -= 1 + + return list(zip(names, estimators)) + + +def make_pipeline(*steps, memory=None, transform_input=None, verbose=False): + """Construct a :class:`Pipeline` from the given estimators. + + This is a shorthand for the :class:`Pipeline` constructor; it does not + require, and does not permit, naming the estimators. Instead, their names + will be set to the lowercase of their types automatically. + + Parameters + ---------- + *steps : list of Estimator objects + List of the scikit-learn estimators that are chained together. + + memory : str or object with the joblib.Memory interface, default=None + Used to cache the fitted transformers of the pipeline. The last step + will never be cached, even if it is a transformer. By default, no + caching is performed. If a string is given, it is the path to the + caching directory. Enabling caching triggers a clone of the transformers + before fitting. Therefore, the transformer instance given to the + pipeline cannot be inspected directly. Use the attribute ``named_steps`` + or ``steps`` to inspect estimators within the pipeline. Caching the + transformers is advantageous when fitting is time consuming. + + transform_input : list of str, default=None + This enables transforming some input arguments to ``fit`` (other than ``X``) + to be transformed by the steps of the pipeline up to the step which requires + them. Requirement is defined via :ref:`metadata routing `. + This can be used to pass a validation set through the pipeline for instance. + + You can only set this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.6 + + verbose : bool, default=False + If True, the time elapsed while fitting each step will be printed as it + is completed. + + Returns + ------- + p : Pipeline + Returns a scikit-learn :class:`Pipeline` object. + + See Also + -------- + Pipeline : Class for creating a pipeline of transforms with a final + estimator. + + Examples + -------- + >>> from sklearn.naive_bayes import GaussianNB + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.pipeline import make_pipeline + >>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('gaussiannb', GaussianNB())]) + """ + return Pipeline( + _name_estimators(steps), + transform_input=transform_input, + memory=memory, + verbose=verbose, + ) + + +def _transform_one(transformer, X, y, weight, params=None): + """Call transform and apply weight to output. + + Parameters + ---------- + transformer : estimator + Estimator to be used for transformation. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data to be transformed. + + y : ndarray of shape (n_samples,) + Ignored. 
+ + weight : float + Weight to be applied to the output of the transformation. + + params : dict + Parameters to be passed to the transformer's ``transform`` method. + + This should be of the form ``process_routing()["step_name"]``. + """ + res = transformer.transform(X, **params.transform) + # if we have a weight for this transformer, multiply output + if weight is None: + return res + return res * weight + + +def _fit_transform_one( + transformer, X, y, weight, message_clsname="", message=None, params=None +): + """ + Fits ``transformer`` to ``X`` and ``y``. The transformed result is returned + with the fitted transformer. If ``weight`` is not ``None``, the result will + be multiplied by ``weight``. + + ``params`` needs to be of the form ``process_routing()["step_name"]``. + """ + params = params or {} + with _print_elapsed_time(message_clsname, message): + if hasattr(transformer, "fit_transform"): + res = transformer.fit_transform(X, y, **params.get("fit_transform", {})) + else: + res = transformer.fit(X, y, **params.get("fit", {})).transform( + X, **params.get("transform", {}) + ) + + if weight is None: + return res, transformer + return res * weight, transformer + + +def _fit_one(transformer, X, y, weight, message_clsname="", message=None, params=None): + """ + Fits ``transformer`` to ``X`` and ``y``. + """ + with _print_elapsed_time(message_clsname, message): + return transformer.fit(X, y, **params["fit"]) + + +class FeatureUnion(TransformerMixin, _BaseComposition): + """Concatenates results of multiple transformer objects. + + This estimator applies a list of transformer objects in parallel to the + input data, then concatenates the results. This is useful to combine + several feature extraction mechanisms into a single transformer. + + Parameters of the transformers may be set using its name and the parameter + name separated by a '__'. A transformer may be replaced entirely by + setting the parameter with its name to another transformer, removed by + setting to 'drop' or disabled by setting to 'passthrough' (features are + passed without transformation). + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + transformer_list : list of (str, transformer) tuples + List of transformer objects to be applied to the data. The first + half of each tuple is the name of the transformer. The transformer can + be 'drop' for it to be ignored or can be 'passthrough' for features to + be passed unchanged. + + .. versionadded:: 1.1 + Added the option `"passthrough"`. + + .. versionchanged:: 0.22 + Deprecated `None` as a transformer in favor of 'drop'. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + transformer_weights : dict, default=None + Multiplicative weights for features per transformer. + Keys are transformer names, values the weights. + Raises ValueError if key not present in ``transformer_list``. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + verbose_feature_names_out : bool, default=True + If True, :meth:`get_feature_names_out` will prefix all feature names + with the name of the transformer that generated that feature. 
+ If False, :meth:`get_feature_names_out` will not prefix any feature + names and will error if feature names are not unique. + + .. versionadded:: 1.5 + + Attributes + ---------- + named_transformers : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + Read-only attribute to access any transformer parameter by user + given name. Keys are transformer names and values are + transformer parameters. + + .. versionadded:: 1.2 + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying first transformer in `transformer_list` exposes such an + attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when + `X` has feature names that are all strings. + + .. versionadded:: 1.3 + + See Also + -------- + make_union : Convenience function for simplified feature union + construction. + + Examples + -------- + >>> from sklearn.pipeline import FeatureUnion + >>> from sklearn.decomposition import PCA, TruncatedSVD + >>> union = FeatureUnion([("pca", PCA(n_components=1)), + ... ("svd", TruncatedSVD(n_components=2))]) + >>> X = [[0., 1., 3], [2., 2., 5]] + >>> union.fit_transform(X) + array([[-1.5 , 3.0..., -0.8...], + [ 1.5 , 5.7..., 0.4...]]) + >>> # An estimator's parameter can be set using '__' syntax + >>> union.set_params(svd__n_components=1).fit_transform(X) + array([[-1.5 , 3.0...], + [ 1.5 , 5.7...]]) + + For a more detailed example of usage, see + :ref:`sphx_glr_auto_examples_compose_plot_feature_union.py`. + """ + + def __init__( + self, + transformer_list, + *, + n_jobs=None, + transformer_weights=None, + verbose=False, + verbose_feature_names_out=True, + ): + self.transformer_list = transformer_list + self.n_jobs = n_jobs + self.transformer_weights = transformer_weights + self.verbose = verbose + self.verbose_feature_names_out = verbose_feature_names_out + + def set_output(self, *, transform=None): + """Set the output container when `"transform"` and `"fit_transform"` are called. + + `set_output` will set the output of all estimators in `transformer_list`. + + Parameters + ---------- + transform : {"default", "pandas", "polars"}, default=None + Configure output of `transform` and `fit_transform`. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + Returns + ------- + self : estimator instance + Estimator instance. + """ + super().set_output(transform=transform) + for _, step, _ in self._iter(): + _safe_set_output(step, transform=transform) + return self + + @property + def named_transformers(self): + # Use Bunch object to improve autocomplete + return Bunch(**dict(self.transformer_list)) + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Returns the parameters given in the constructor as well as the + estimators contained within the `transformer_list` of the + `FeatureUnion`. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : mapping of string to any + Parameter names mapped to their values. + """ + return self._get_params("transformer_list", deep=deep) + + def set_params(self, **kwargs): + """Set the parameters of this estimator. + + Valid parameter keys can be listed with ``get_params()``. 
Note that + you can directly set the parameters of the estimators contained in + `transformer_list`. + + Parameters + ---------- + **kwargs : dict + Parameters of this estimator or parameters of estimators contained + in `transformer_list`. Parameters of the transformers may be set + using its name and the parameter name separated by a '__'. + + Returns + ------- + self : object + FeatureUnion class instance. + """ + self._set_params("transformer_list", **kwargs) + return self + + def _validate_transformers(self): + names, transformers = zip(*self.transformer_list) + + # validate names + self._validate_names(names) + + # validate estimators + for t in transformers: + if t in ("drop", "passthrough"): + continue + if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr( + t, "transform" + ): + raise TypeError( + "All estimators should implement fit and " + "transform. '%s' (type %s) doesn't" % (t, type(t)) + ) + + def _validate_transformer_weights(self): + if not self.transformer_weights: + return + + transformer_names = set(name for name, _ in self.transformer_list) + for name in self.transformer_weights: + if name not in transformer_names: + raise ValueError( + f'Attempting to weight transformer "{name}", ' + "but it is not present in transformer_list." + ) + + def _iter(self): + """ + Generate (name, trans, weight) tuples excluding None and + 'drop' transformers. + """ + + get_weight = (self.transformer_weights or {}).get + + for name, trans in self.transformer_list: + if trans == "drop": + continue + if trans == "passthrough": + trans = FunctionTransformer(feature_names_out="one-to-one") + yield (name, trans, get_weight(name)) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + # List of tuples (name, feature_names_out) + transformer_with_feature_names_out = [] + for name, trans, _ in self._iter(): + if not hasattr(trans, "get_feature_names_out"): + raise AttributeError( + "Transformer %s (type %s) does not provide get_feature_names_out." + % (str(name), type(trans).__name__) + ) + feature_names_out = trans.get_feature_names_out(input_features) + transformer_with_feature_names_out.append((name, feature_names_out)) + + return self._add_prefix_for_feature_names_out( + transformer_with_feature_names_out + ) + + def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out): + """Add prefix for feature names out that includes the transformer names. + + Parameters + ---------- + transformer_with_feature_names_out : list of tuples of (str, array-like of str) + The tuple consists of the transformer's name and its feature names out. + + Returns + ------- + feature_names_out : ndarray of shape (n_features,), dtype=str + Transformed feature names.
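To make the prefixing rule concrete, here is a short sketch (mirroring the class docstring's example, not part of the upstream source): with the default `verbose_feature_names_out=True`, every output name is prefixed with the generating transformer's name:

```python
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.pipeline import FeatureUnion

X = [[0.0, 1.0, 3.0], [2.0, 2.0, 5.0]]
union = FeatureUnion([("pca", PCA(n_components=1)),
                      ("svd", TruncatedSVD(n_components=2))]).fit(X)
print(union.get_feature_names_out())
# ['pca__pca0' 'svd__truncatedsvd0' 'svd__truncatedsvd1']
```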
+ """ + if self.verbose_feature_names_out: + # Prefix the feature names out with the transformers name + names = list( + chain.from_iterable( + (f"{name}__{i}" for i in feature_names_out) + for name, feature_names_out in transformer_with_feature_names_out + ) + ) + return np.asarray(names, dtype=object) + + # verbose_feature_names_out is False + # Check that names are all unique without a prefix + feature_names_count = Counter( + chain.from_iterable(s for _, s in transformer_with_feature_names_out) + ) + top_6_overlap = [ + name for name, count in feature_names_count.most_common(6) if count > 1 + ] + top_6_overlap.sort() + if top_6_overlap: + if len(top_6_overlap) == 6: + # There are more than 5 overlapping names, we only show the 5 + # of the feature names + names_repr = str(top_6_overlap[:5])[:-1] + ", ...]" + else: + names_repr = str(top_6_overlap) + raise ValueError( + f"Output feature names: {names_repr} are not unique. Please set " + "verbose_feature_names_out=True to add prefixes to feature names" + ) + + return np.concatenate( + [name for _, name in transformer_with_feature_names_out], + ) + + def fit(self, X, y=None, **fit_params): + """Fit all transformers using X. + + Parameters + ---------- + X : iterable or array-like, depending on transformers + Input data, used to fit transformers. + + y : array-like of shape (n_samples, n_outputs), default=None + Targets for supervised learning. + + **fit_params : dict, default=None + - If `enable_metadata_routing=False` (default): + Parameters directly passed to the `fit` methods of the + sub-transformers. + + - If `enable_metadata_routing=True`: + Parameters safely routed to the `fit` methods of the + sub-transformers. See :ref:`Metadata Routing User Guide + ` for more details. + + .. versionchanged:: 1.5 + `**fit_params` can be routed via metadata routing API. + + Returns + ------- + self : object + FeatureUnion class instance. + """ + if _routing_enabled(): + routed_params = process_routing(self, "fit", **fit_params) + else: + # TODO(SLEP6): remove when metadata routing cannot be disabled. + routed_params = Bunch() + for name, _ in self.transformer_list: + routed_params[name] = Bunch(fit={}) + routed_params[name].fit = fit_params + + transformers = self._parallel_func(X, y, _fit_one, routed_params) + + if not transformers: + # All transformers are None + return self + + self._update_transformer_list(transformers) + return self + + def fit_transform(self, X, y=None, **params): + """Fit all transformers, transform the data and concatenate results. + + Parameters + ---------- + X : iterable or array-like, depending on transformers + Input data to be transformed. + + y : array-like of shape (n_samples, n_outputs), default=None + Targets for supervised learning. + + **params : dict, default=None + - If `enable_metadata_routing=False` (default): + Parameters directly passed to the `fit` methods of the + sub-transformers. + + - If `enable_metadata_routing=True`: + Parameters safely routed to the `fit` methods of the + sub-transformers. See :ref:`Metadata Routing User Guide + ` for more details. + + .. versionchanged:: 1.5 + `**params` can now be routed via metadata routing API. + + Returns + ------- + X_t : array-like or sparse matrix of \ + shape (n_samples, sum_n_components) + The `hstack` of results of transformers. `sum_n_components` is the + sum of `n_components` (output dimension) over transformers. 
+ """ + if _routing_enabled(): + routed_params = process_routing(self, "fit_transform", **params) + else: + # TODO(SLEP6): remove when metadata routing cannot be disabled. + routed_params = Bunch() + for name, obj in self.transformer_list: + if hasattr(obj, "fit_transform"): + routed_params[name] = Bunch(fit_transform={}) + routed_params[name].fit_transform = params + else: + routed_params[name] = Bunch(fit={}) + routed_params[name] = Bunch(transform={}) + routed_params[name].fit = params + + results = self._parallel_func(X, y, _fit_transform_one, routed_params) + if not results: + # All transformers are None + return np.zeros((X.shape[0], 0)) + + Xs, transformers = zip(*results) + self._update_transformer_list(transformers) + + return self._hstack(Xs) + + def _log_message(self, name, idx, total): + if not self.verbose: + return None + return "(step %d of %d) Processing %s" % (idx, total, name) + + def _parallel_func(self, X, y, func, routed_params): + """Runs func in parallel on X and y""" + self.transformer_list = list(self.transformer_list) + self._validate_transformers() + self._validate_transformer_weights() + transformers = list(self._iter()) + + return Parallel(n_jobs=self.n_jobs)( + delayed(func)( + transformer, + X, + y, + weight, + message_clsname="FeatureUnion", + message=self._log_message(name, idx, len(transformers)), + params=routed_params[name], + ) + for idx, (name, transformer, weight) in enumerate(transformers, 1) + ) + + def transform(self, X, **params): + """Transform X separately by each transformer, concatenate results. + + Parameters + ---------- + X : iterable or array-like, depending on transformers + Input data to be transformed. + + **params : dict, default=None + + Parameters routed to the `transform` method of the sub-transformers via the + metadata routing API. See :ref:`Metadata Routing User Guide + ` for more details. + + .. versionadded:: 1.5 + + Returns + ------- + X_t : array-like or sparse matrix of shape (n_samples, sum_n_components) + The `hstack` of results of transformers. `sum_n_components` is the + sum of `n_components` (output dimension) over transformers. + """ + _raise_for_params(params, self, "transform") + + if _routing_enabled(): + routed_params = process_routing(self, "transform", **params) + else: + # TODO(SLEP6): remove when metadata routing cannot be disabled. 
+ routed_params = Bunch() + for name, _ in self.transformer_list: + routed_params[name] = Bunch(transform={}) + + Xs = Parallel(n_jobs=self.n_jobs)( + delayed(_transform_one)(trans, X, None, weight, params=routed_params[name]) + for name, trans, weight in self._iter() + ) + if not Xs: + # All transformers are None + return np.zeros((X.shape[0], 0)) + + return self._hstack(Xs) + + def _hstack(self, Xs): + adapter = _get_container_adapter("transform", self) + if adapter and all(adapter.is_supported_container(X) for X in Xs): + return adapter.hstack(Xs) + + if any(sparse.issparse(f) for f in Xs): + Xs = sparse.hstack(Xs).tocsr() + else: + Xs = np.hstack(Xs) + return Xs + + def _update_transformer_list(self, transformers): + transformers = iter(transformers) + self.transformer_list[:] = [ + (name, old if old == "drop" else next(transformers)) + for name, old in self.transformer_list + ] + + @property + def n_features_in_(self): + """Number of features seen during :term:`fit`.""" + + # X is passed to all transformers so we just delegate to the first one + return self.transformer_list[0][1].n_features_in_ + + @property + def feature_names_in_(self): + """Names of features seen during :term:`fit`.""" + # X is passed to all transformers -- delegate to the first one + return self.transformer_list[0][1].feature_names_in_ + + def __sklearn_is_fitted__(self): + # Delegate whether feature union was fitted + for _, transformer, _ in self._iter(): + check_is_fitted(transformer) + return True + + def _sk_visual_block_(self): + names, transformers = zip(*self.transformer_list) + return _VisualBlock("parallel", transformers, names=names) + + def __getitem__(self, name): + """Return transformer with name.""" + if not isinstance(name, str): + raise KeyError("Only string keys are supported") + return self.named_transformers[name] + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.5 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__) + + for name, transformer in self.transformer_list: + router.add( + **{name: transformer}, + method_mapping=MethodMapping() + .add(caller="fit", callee="fit") + .add(caller="fit_transform", callee="fit_transform") + .add(caller="fit_transform", callee="fit") + .add(caller="fit_transform", callee="transform") + .add(caller="transform", callee="transform"), + ) + + return router + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + try: + tags.input_tags.sparse = all( + get_tags(trans).input_tags.sparse + for name, trans in self.transformer_list + if trans not in {"passthrough", "drop"} + ) + except Exception: + # If `transformer_list` does not comply with our API (list of tuples) + # then it will fail. In this case, we assume that `sparse` is False + # but the parameter validation will raise an error during `fit`. + pass # pragma: no cover + return tags + + +def make_union(*transformers, n_jobs=None, verbose=False): + """Construct a :class:`FeatureUnion` from the given transformers. + + This is a shorthand for the :class:`FeatureUnion` constructor; it does not + require, and does not permit, naming the transformers. Instead, they will + be given names automatically based on their types. It also does not allow + weighting. 
+ + Parameters + ---------- + *transformers : list of estimators + One or more estimators. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + Returns + ------- + f : FeatureUnion + A :class:`FeatureUnion` object for concatenating the results of multiple + transformer objects. + + See Also + -------- + FeatureUnion : Class for concatenating the results of multiple transformer + objects. + + Examples + -------- + >>> from sklearn.decomposition import PCA, TruncatedSVD + >>> from sklearn.pipeline import make_union + >>> make_union(PCA(), TruncatedSVD()) + FeatureUnion(transformer_list=[('pca', PCA()), + ('truncatedsvd', TruncatedSVD())]) + """ + return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs, verbose=verbose) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/random_projection.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/random_projection.py new file mode 100644 index 0000000000000000000000000000000000000000..74741585f77611b2b9ba8e02928ec289d9e00924 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/random_projection.py @@ -0,0 +1,824 @@ +"""Random projection transformers. + +Random projections are a simple and computationally efficient way to +reduce the dimensionality of the data by trading a controlled amount +of accuracy (as additional variance) for faster processing times and +smaller model sizes. + +The dimensions and distribution of random projections matrices are +controlled so as to preserve the pairwise distances between any two +samples of the dataset. + +The main theoretical result behind the efficiency of random projection is the +`Johnson-Lindenstrauss lemma (quoting Wikipedia) +`_: + + In mathematics, the Johnson-Lindenstrauss lemma is a result + concerning low-distortion embeddings of points from high-dimensional + into low-dimensional Euclidean space. The lemma states that a small set + of points in a high-dimensional space can be embedded into a space of + much lower dimension in such a way that distances between the points are + nearly preserved. The map used for the embedding is at least Lipschitz, + and can even be taken to be an orthogonal projection. 
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp +from scipy import linalg + +from .base import ( + BaseEstimator, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from .exceptions import DataDimensionalityWarning +from .utils import check_random_state +from .utils._param_validation import Interval, StrOptions, validate_params +from .utils.extmath import safe_sparse_dot +from .utils.random import sample_without_replacement +from .utils.validation import check_array, check_is_fitted, validate_data + +__all__ = [ + "SparseRandomProjection", + "GaussianRandomProjection", + "johnson_lindenstrauss_min_dim", +] + + +@validate_params( + { + "n_samples": ["array-like", Interval(Real, 1, None, closed="left")], + "eps": ["array-like", Interval(Real, 0, 1, closed="neither")], + }, + prefer_skip_nested_validation=True, +) +def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1): + """Find a 'safe' number of components to randomly project to. + + The distortion introduced by a random projection `p` only changes the + distance between two points by a factor (1 +- eps) in a euclidean space + with good probability. The projection `p` is an eps-embedding as defined + by: + + .. code-block:: text + + (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2 + + Where u and v are any rows taken from a dataset of shape (n_samples, + n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian + N(0, 1) matrix of shape (n_components, n_features) (or a sparse + Achlioptas matrix). + + The minimum number of components to guarantee the eps-embedding is + given by: + + .. code-block:: text + + n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3) + + Note that the number of dimensions is independent of the original + number of features but instead depends on the size of the dataset: + the larger the dataset, the higher is the minimal dimensionality of + an eps-embedding. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_samples : int or array-like of int + Number of samples that should be an integer greater than 0. If an array + is given, it will compute a safe number of components array-wise. + + eps : float or array-like of shape (n_components,), dtype=float, \ + default=0.1 + Maximum distortion rate in the range (0, 1) as defined by the + Johnson-Lindenstrauss lemma. If an array is given, it will compute a + safe number of components array-wise. + + Returns + ------- + n_components : int or ndarray of int + The minimal number of components to guarantee with good probability + an eps-embedding with n_samples. + + References + ---------- + + .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma + + .. [2] `Sanjoy Dasgupta and Anupam Gupta, 1999, + "An elementary proof of the Johnson-Lindenstrauss Lemma." 
+ `_ + + Examples + -------- + >>> from sklearn.random_projection import johnson_lindenstrauss_min_dim + >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5) + np.int64(663) + + >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01]) + array([ 663, 11841, 1112658]) + + >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1) + array([ 7894, 9868, 11841]) + """ + eps = np.asarray(eps) + n_samples = np.asarray(n_samples) + + if np.any(eps <= 0.0) or np.any(eps >= 1): + raise ValueError("The JL bound is defined for eps in ]0, 1[, got %r" % eps) + + if np.any(n_samples <= 0): + raise ValueError( + "The JL bound is defined for n_samples greater than zero, got %r" + % n_samples + ) + + denominator = (eps**2 / 2) - (eps**3 / 3) + return (4 * np.log(n_samples) / denominator).astype(np.int64) + + +def _check_density(density, n_features): + """Factorize density check according to Li et al.""" + if density == "auto": + density = 1 / np.sqrt(n_features) + + elif density <= 0 or density > 1: + raise ValueError("Expected density in range ]0, 1], got: %r" % density) + return density + + +def _check_input_size(n_components, n_features): + """Factorize argument checking for random matrix generation.""" + if n_components <= 0: + raise ValueError( + "n_components must be strictly positive, got %d" % n_components + ) + if n_features <= 0: + raise ValueError("n_features must be strictly positive, got %d" % n_features) + + +def _gaussian_random_matrix(n_components, n_features, random_state=None): + """Generate a dense Gaussian random matrix. + + The components of the random matrix are drawn from + + N(0, 1.0 / n_components). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, + Dimensionality of the target projection space. + + n_features : int, + Dimensionality of the original source space. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generator used to generate the matrix + at fit time. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + components : ndarray of shape (n_components, n_features) + The generated Gaussian random matrix. + + See Also + -------- + GaussianRandomProjection + """ + _check_input_size(n_components, n_features) + rng = check_random_state(random_state) + components = rng.normal( + loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features) + ) + return components + + +def _sparse_random_matrix(n_components, n_features, density="auto", random_state=None): + """Generalized Achlioptas random sparse matrix for random projection. + + Setting density to 1 / 3 will yield the original matrix by Dimitris + Achlioptas while setting a lower value will yield the generalization + by Ping Li et al. + + If we note :math:`s = 1 / density`, the components of the random matrix are + drawn from: + + - -sqrt(s) / sqrt(n_components) with probability 1 / 2s + - 0 with probability 1 - 1 / s + - +sqrt(s) / sqrt(n_components) with probability 1 / 2s + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, + Dimensionality of the target projection space. + + n_features : int, + Dimensionality of the original source space. + + density : float or 'auto', default='auto' + Ratio of non-zero component in the random projection matrix in the + range `(0, 1]` + + If density = 'auto', the value is set to the minimum density + as recommended by Ping Li et al.: 1 / sqrt(n_features). 
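For intuition, a sketch of the rule just stated (not upstream code): with `density='auto'` and `s = 1 / density`, each matrix entry is non-zero with probability `1 / s`, split evenly between the two signs.

```python
import numpy as np

n_features = 10_000
density = 1 / np.sqrt(n_features)        # 'auto' rule -> 0.01
s = 1 / density                          # 100
print(density, 1 / (2 * s), 1 - 1 / s)
# non-zero ratio 0.01; +/- sqrt(s)/sqrt(n_components) each w.p. 0.005;
# zero w.p. 0.99
```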
+ + Use density = 1 / 3.0 if you want to reproduce the results from + Achlioptas, 2001. + + random_state : int, RandomState instance or None, default=None + Controls the pseudo random number generator used to generate the matrix + at fit time. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + components : {ndarray, sparse matrix} of shape (n_components, n_features) + The generated Gaussian random matrix. Sparse matrix will be of CSR + format. + + See Also + -------- + SparseRandomProjection + + References + ---------- + + .. [1] Ping Li, T. Hastie and K. W. Church, 2006, + "Very Sparse Random Projections". + https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf + + .. [2] D. Achlioptas, 2001, "Database-friendly random projections", + https://cgi.di.uoa.gr/~optas/papers/jl.pdf + + """ + _check_input_size(n_components, n_features) + density = _check_density(density, n_features) + rng = check_random_state(random_state) + + if density == 1: + # skip index generation if totally dense + components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1 + return 1 / np.sqrt(n_components) * components + + else: + # Generate location of non zero elements + indices = [] + offset = 0 + indptr = [offset] + for _ in range(n_components): + # find the indices of the non-zero components for row i + n_nonzero_i = rng.binomial(n_features, density) + indices_i = sample_without_replacement( + n_features, n_nonzero_i, random_state=rng + ) + indices.append(indices_i) + offset += n_nonzero_i + indptr.append(offset) + + indices = np.concatenate(indices) + + # Among non zero components the probability of the sign is 50%/50% + data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1 + + # build the CSR structure by concatenating the rows + components = sp.csr_matrix( + (data, indices, indptr), shape=(n_components, n_features) + ) + + return np.sqrt(1 / density) / np.sqrt(n_components) * components + + +class BaseRandomProjection( + ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta +): + """Base class for random projections. + + Warning: This class should not be used directly. + Use derived classes instead. + """ + + _parameter_constraints: dict = { + "n_components": [ + Interval(Integral, 1, None, closed="left"), + StrOptions({"auto"}), + ], + "eps": [Interval(Real, 0, None, closed="neither")], + "compute_inverse_components": ["boolean"], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + n_components="auto", + *, + eps=0.1, + compute_inverse_components=False, + random_state=None, + ): + self.n_components = n_components + self.eps = eps + self.compute_inverse_components = compute_inverse_components + self.random_state = random_state + + @abstractmethod + def _make_random_matrix(self, n_components, n_features): + """Generate the random projection matrix. + + Parameters + ---------- + n_components : int, + Dimensionality of the target projection space. + + n_features : int, + Dimensionality of the original source space. + + Returns + ------- + components : {ndarray, sparse matrix} of shape (n_components, n_features) + The generated random matrix. Sparse matrix will be of CSR format. 
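An empirical sketch of these properties (it imports the module-private helper defined above, so this is illustrative only and subject to change):

```python
import scipy.sparse as sp

from sklearn.random_projection import _sparse_random_matrix  # private helper

M = _sparse_random_matrix(50, 100, density="auto", random_state=0)
assert sp.issparse(M) and M.format == "csr"
print(M.nnz / (50 * 100))  # empirical density, close to 1/sqrt(100) = 0.1
```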
+ + """ + + def _compute_inverse_components(self): + """Compute the pseudo-inverse of the (densified) components.""" + components = self.components_ + if sp.issparse(components): + components = components.toarray() + return linalg.pinv(components, check_finite=False) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Generate a sparse random projection matrix. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training set: only the shape is used to find optimal random + matrix dimensions based on the theory referenced in the + afore mentioned papers. + + y : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + self : object + BaseRandomProjection class instance. + """ + X = validate_data( + self, X, accept_sparse=["csr", "csc"], dtype=[np.float64, np.float32] + ) + + n_samples, n_features = X.shape + + if self.n_components == "auto": + self.n_components_ = johnson_lindenstrauss_min_dim( + n_samples=n_samples, eps=self.eps + ) + + if self.n_components_ <= 0: + raise ValueError( + "eps=%f and n_samples=%d lead to a target dimension of " + "%d which is invalid" % (self.eps, n_samples, self.n_components_) + ) + + elif self.n_components_ > n_features: + raise ValueError( + "eps=%f and n_samples=%d lead to a target dimension of " + "%d which is larger than the original space with " + "n_features=%d" + % (self.eps, n_samples, self.n_components_, n_features) + ) + else: + if self.n_components > n_features: + warnings.warn( + "The number of components is higher than the number of" + " features: n_features < n_components (%s < %s)." + "The dimensionality of the problem will not be reduced." + % (n_features, self.n_components), + DataDimensionalityWarning, + ) + + self.n_components_ = self.n_components + + # Generate a projection matrix of size [n_components, n_features] + self.components_ = self._make_random_matrix( + self.n_components_, n_features + ).astype(X.dtype, copy=False) + + if self.compute_inverse_components: + self.inverse_components_ = self._compute_inverse_components() + + # Required by ClassNamePrefixFeaturesOutMixin.get_feature_names_out. + self._n_features_out = self.n_components + + return self + + def inverse_transform(self, X): + """Project data back to its original space. + + Returns an array X_original whose transform would be X. Note that even + if X is sparse, X_original is dense: this may use a lot of RAM. + + If `compute_inverse_components` is False, the inverse of the components is + computed during each call to `inverse_transform` which can be costly. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_components) + Data to be transformed back. + + Returns + ------- + X_original : ndarray of shape (n_samples, n_features) + Reconstructed data. + """ + check_is_fitted(self) + + X = check_array(X, dtype=[np.float64, np.float32], accept_sparse=("csr", "csc")) + + if self.compute_inverse_components: + return X @ self.inverse_components_.T + + inverse_components = self._compute_inverse_components() + return X @ inverse_components.T + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.transformer_tags.preserves_dtype = ["float64", "float32"] + tags.input_tags.sparse = True + return tags + + +class GaussianRandomProjection(BaseRandomProjection): + """Reduce dimensionality through Gaussian random projection. + + The components of the random matrix are drawn from N(0, 1 / n_components). 
+
+
+class GaussianRandomProjection(BaseRandomProjection):
+    """Reduce dimensionality through Gaussian random projection.
+
+    The components of the random matrix are drawn from N(0, 1 / n_components).
+
+    Read more in the :ref:`User Guide <gaussian_random_matrix>`.
+
+    .. versionadded:: 0.13
+
+    Parameters
+    ----------
+    n_components : int or 'auto', default='auto'
+        Dimensionality of the target projection space.
+
+        n_components can be automatically adjusted according to the
+        number of samples in the dataset and the bound given by the
+        Johnson-Lindenstrauss lemma. In that case the quality of the
+        embedding is controlled by the ``eps`` parameter.
+
+        Note that the Johnson-Lindenstrauss lemma can yield very
+        conservative estimates of the required number of components, as
+        it makes no assumption on the structure of the dataset.
+
+    eps : float, default=0.1
+        Parameter to control the quality of the embedding according to
+        the Johnson-Lindenstrauss lemma when `n_components` is set to
+        'auto'. The value should be strictly positive.
+
+        Smaller values lead to a better embedding and a higher number of
+        dimensions (n_components) in the target projection space.
+
+    compute_inverse_components : bool, default=False
+        Learn the inverse transform by computing the pseudo-inverse of the
+        components during fit. Note that computing the pseudo-inverse does not
+        scale well to large matrices.
+
+    random_state : int, RandomState instance or None, default=None
+        Controls the pseudo random number generator used to generate the
+        projection matrix at fit time.
+        Pass an int for reproducible output across multiple function calls.
+        See :term:`Glossary <random_state>`.
+
+    Attributes
+    ----------
+    n_components_ : int
+        Concrete number of components computed when n_components="auto".
+
+    components_ : ndarray of shape (n_components, n_features)
+        Random matrix used for the projection.
+
+    inverse_components_ : ndarray of shape (n_features, n_components)
+        Pseudo-inverse of the components, only computed if
+        `compute_inverse_components` is True.
+
+        .. versionadded:: 1.1
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    SparseRandomProjection : Reduce dimensionality through sparse
+        random projection.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.random_projection import GaussianRandomProjection
+    >>> rng = np.random.RandomState(42)
+    >>> X = rng.rand(25, 3000)
+    >>> transformer = GaussianRandomProjection(random_state=rng)
+    >>> X_new = transformer.fit_transform(X)
+    >>> X_new.shape
+    (25, 2759)
+    """
+
+    def __init__(
+        self,
+        n_components="auto",
+        *,
+        eps=0.1,
+        compute_inverse_components=False,
+        random_state=None,
+    ):
+        super().__init__(
+            n_components=n_components,
+            eps=eps,
+            compute_inverse_components=compute_inverse_components,
+            random_state=random_state,
+        )
+
+    def _make_random_matrix(self, n_components, n_features):
+        """Generate the random projection matrix.
+
+        Parameters
+        ----------
+        n_components : int
+            Dimensionality of the target projection space.
+
+        n_features : int
+            Dimensionality of the original source space.
+
+        Returns
+        -------
+        components : ndarray of shape (n_components, n_features)
+            The generated random matrix.
+        """
+        random_state = check_random_state(self.random_state)
+        return _gaussian_random_matrix(
+            n_components, n_features, random_state=random_state
+        )
+
+    def transform(self, X):
+        """Project the data by using matrix product with the random matrix.
+
+        Parameters
+        ----------
+        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
+            The input data to project into a smaller dimensional space.
+
+        Returns
+        -------
+        X_new : ndarray of shape (n_samples, n_components)
+            Projected array.
+        """
+        check_is_fitted(self)
+        X = validate_data(
+            self,
+            X,
+            accept_sparse=["csr", "csc"],
+            reset=False,
+            dtype=[np.float64, np.float32],
+        )
+
+        return X @ self.components_.T
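A short editorial sketch (not part of the patch, with arbitrary sizes) of the `compute_inverse_components` / `inverse_transform` behaviour documented above; the attribute shapes follow directly from the docstring:

.. code:: python

    import numpy as np
    from sklearn.random_projection import GaussianRandomProjection

    rng = np.random.RandomState(42)
    X = rng.rand(50, 1000)

    # Precompute the pseudo-inverse at fit time so that repeated calls to
    # inverse_transform stay cheap.
    transformer = GaussianRandomProjection(
        n_components=100, compute_inverse_components=True, random_state=42
    )
    X_new = transformer.fit_transform(X)           # shape (50, 100)
    X_back = transformer.inverse_transform(X_new)  # dense, shape (50, 1000)
    print(transformer.inverse_components_.shape)   # (1000, 100)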
+
+
+class SparseRandomProjection(BaseRandomProjection):
+    """Reduce dimensionality through sparse random projection.
+
+    A sparse random matrix is an alternative to a dense random
+    projection matrix that guarantees similar embedding quality while being
+    much more memory efficient and allowing faster computation of the
+    projected data.
+
+    If we note `s = 1 / density`, the components of the random matrix are
+    drawn from:
+
+    .. code-block:: text
+
+      -sqrt(s) / sqrt(n_components)   with probability 1 / 2s
+       0                              with probability 1 - 1 / s
+      +sqrt(s) / sqrt(n_components)   with probability 1 / 2s
+
+    Read more in the :ref:`User Guide <sparse_random_matrix>`.
+
+    .. versionadded:: 0.13
+
+    Parameters
+    ----------
+    n_components : int or 'auto', default='auto'
+        Dimensionality of the target projection space.
+
+        n_components can be automatically adjusted according to the
+        number of samples in the dataset and the bound given by the
+        Johnson-Lindenstrauss lemma. In that case the quality of the
+        embedding is controlled by the ``eps`` parameter.
+
+        Note that the Johnson-Lindenstrauss lemma can yield very
+        conservative estimates of the required number of components, as
+        it makes no assumption on the structure of the dataset.
+
+    density : float or 'auto', default='auto'
+        Ratio in the range (0, 1] of non-zero components in the random
+        projection matrix.
+
+        If density = 'auto', the value is set to the minimum density
+        as recommended by Ping Li et al.: 1 / sqrt(n_features).
+
+        Use density = 1 / 3.0 if you want to reproduce the results from
+        Achlioptas, 2001.
+
+    eps : float, default=0.1
+        Parameter to control the quality of the embedding according to
+        the Johnson-Lindenstrauss lemma when n_components is set to
+        'auto'. This value should be strictly positive.
+
+        Smaller values lead to a better embedding and a higher number of
+        dimensions (n_components) in the target projection space.
+
+    dense_output : bool, default=False
+        If True, ensure that the output of the random projection is a
+        dense numpy array even if the input and random projection matrix
+        are both sparse. In practice, if the number of components is
+        small the number of zero components in the projected data will
+        be very small and it will be more CPU and memory efficient to
+        use a dense representation.
+
+        If False, the projected data uses a sparse representation if
+        the input is sparse.
+
+    compute_inverse_components : bool, default=False
+        Learn the inverse transform by computing the pseudo-inverse of the
+        components during fit. Note that the pseudo-inverse is always a dense
+        array, even if the training data was sparse. This means that it might be
+        necessary to call `inverse_transform` on a small batch of samples at a
+        time to avoid exhausting the available memory on the host. Moreover,
+        computing the pseudo-inverse does not scale well to large matrices.
+
+    random_state : int, RandomState instance or None, default=None
+        Controls the pseudo random number generator used to generate the
+        projection matrix at fit time.
+        Pass an int for reproducible output across multiple function calls.
+        See :term:`Glossary <random_state>`.
+
+    Attributes
+    ----------
+    n_components_ : int
+        Concrete number of components computed when n_components="auto".
+
+    components_ : sparse matrix of shape (n_components, n_features)
+        Random matrix used for the projection. Sparse matrix will be of CSR
+        format.
+
+    inverse_components_ : ndarray of shape (n_features, n_components)
+        Pseudo-inverse of the components, only computed if
+        `compute_inverse_components` is True.
+
+        .. versionadded:: 1.1
+
+    density_ : float in range 0.0 - 1.0
+        Concrete density computed when density = "auto".
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+        .. versionadded:: 0.24
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X`
+        has feature names that are all strings.
+
+        .. versionadded:: 1.0
+
+    See Also
+    --------
+    GaussianRandomProjection : Reduce dimensionality through Gaussian
+        random projection.
+
+    References
+    ----------
+
+    .. [1] Ping Li, T. Hastie and K. W. Church, 2006,
+           "Very Sparse Random Projections".
+           https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
+
+    .. [2] D. Achlioptas, 2001, "Database-friendly random projections",
+           https://cgi.di.uoa.gr/~optas/papers/jl.pdf
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearn.random_projection import SparseRandomProjection
+    >>> rng = np.random.RandomState(42)
+    >>> X = rng.rand(25, 3000)
+    >>> transformer = SparseRandomProjection(random_state=rng)
+    >>> X_new = transformer.fit_transform(X)
+    >>> X_new.shape
+    (25, 2759)
+    >>> # very few components are non-zero
+    >>> np.mean(transformer.components_ != 0)
+    np.float64(0.0182...)
+    """
+
+    _parameter_constraints: dict = {
+        **BaseRandomProjection._parameter_constraints,
+        "density": [Interval(Real, 0.0, 1.0, closed="right"), StrOptions({"auto"})],
+        "dense_output": ["boolean"],
+    }
+
+    def __init__(
+        self,
+        n_components="auto",
+        *,
+        density="auto",
+        eps=0.1,
+        dense_output=False,
+        compute_inverse_components=False,
+        random_state=None,
+    ):
+        super().__init__(
+            n_components=n_components,
+            eps=eps,
+            compute_inverse_components=compute_inverse_components,
+            random_state=random_state,
+        )
+
+        self.dense_output = dense_output
+        self.density = density
+
+    def _make_random_matrix(self, n_components, n_features):
+        """Generate the random projection matrix.
+
+        Parameters
+        ----------
+        n_components : int
+            Dimensionality of the target projection space.
+
+        n_features : int
+            Dimensionality of the original source space.
+
+        Returns
+        -------
+        components : sparse matrix of shape (n_components, n_features)
+            The generated random matrix in CSR format.
+
+        """
+        random_state = check_random_state(self.random_state)
+        self.density_ = _check_density(self.density, n_features)
+        return _sparse_random_matrix(
+            n_components, n_features, density=self.density_, random_state=random_state
+        )
+
+    def transform(self, X):
+        """Project the data by using matrix product with the random matrix.
+
+        Parameters
+        ----------
+        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
+            The input data to project into a smaller dimensional space.
+
+        Returns
+        -------
+        X_new : {ndarray, sparse matrix} of shape (n_samples, n_components)
+            Projected array. It is a sparse matrix only when the input is
+            sparse and `dense_output = False`.
+        """
+        check_is_fitted(self)
+        X = validate_data(
+            self,
+            X,
+            accept_sparse=["csr", "csc"],
+            reset=False,
+            dtype=[np.float64, np.float32],
+        )
+
+        return safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)
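A short usage sketch for the sparse variant (editorial example, not part of the patch), showing the automatic density and the `dense_output` switch; it assumes SciPy's `scipy.sparse.random` helper to build a sparse input, and the printed values are approximate:

.. code:: python

    import numpy as np
    import scipy.sparse as sp
    from sklearn.random_projection import SparseRandomProjection

    rng = np.random.RandomState(42)
    X = sp.random(25, 3000, density=0.01, format="csr", random_state=rng)

    transformer = SparseRandomProjection(n_components=500, random_state=42)
    X_new = transformer.fit_transform(X)
    print(transformer.density_)  # auto density: 1 / sqrt(3000), approx. 0.018
    print(sp.issparse(X_new))    # True: sparse input and dense_output=False

    dense = SparseRandomProjection(
        n_components=500, dense_output=True, random_state=42
    ).fit_transform(X)
    print(sp.issparse(dense))    # False: a dense ndarray is returned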
+ """ + check_is_fitted(self) + X = validate_data( + self, + X, + accept_sparse=["csr", "csc"], + reset=False, + dtype=[np.float64, np.float32], + ) + + return safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output) diff --git a/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/LICENSE b/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5d61ece22a75a759aed8e52af280eca28d35d6bf --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) Aymeric Augustin and contributors + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/METADATA b/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..3c5800c4371520900d097e578a711ad51acd1de2
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/METADATA
@@ -0,0 +1,174 @@
+Metadata-Version: 2.1
+Name: websockets
+Version: 12.0
+Summary: An implementation of the WebSocket Protocol (RFC 6455 & 7692)
+Author-email: Aymeric Augustin
+License: BSD-3-Clause
+Project-URL: Homepage, https://github.com/python-websockets/websockets
+Project-URL: Changelog, https://websockets.readthedocs.io/en/stable/project/changelog.html
+Project-URL: Documentation, https://websockets.readthedocs.io/
+Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-websockets?utm_source=pypi-websockets&utm_medium=referral&utm_campaign=readme
+Project-URL: Tracker, https://github.com/python-websockets/websockets/issues
+Keywords: WebSocket
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Python: >=3.8
+License-File: LICENSE
+
+.. image:: logo/horizontal.svg
+   :width: 480px
+   :alt: websockets
+
+|licence| |version| |pyversions| |tests| |docs| |openssf|
+
+.. |licence| image:: https://img.shields.io/pypi/l/websockets.svg
+   :target: https://pypi.python.org/pypi/websockets
+
+.. |version| image:: https://img.shields.io/pypi/v/websockets.svg
+   :target: https://pypi.python.org/pypi/websockets
+
+.. |pyversions| image:: https://img.shields.io/pypi/pyversions/websockets.svg
+   :target: https://pypi.python.org/pypi/websockets
+
+.. |tests| image:: https://img.shields.io/github/checks-status/python-websockets/websockets/main?label=tests
+   :target: https://github.com/python-websockets/websockets/actions/workflows/tests.yml
+
+.. |docs| image:: https://img.shields.io/readthedocs/websockets.svg
+   :target: https://websockets.readthedocs.io/
+
+.. |openssf| image:: https://bestpractices.coreinfrastructure.org/projects/6475/badge
+   :target: https://bestpractices.coreinfrastructure.org/projects/6475
+
+What is ``websockets``?
+-----------------------
+
+websockets is a library for building WebSocket_ servers and clients in Python
+with a focus on correctness, simplicity, robustness, and performance.
+
+.. _WebSocket: https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API
+
+Built on top of ``asyncio``, Python's standard asynchronous I/O framework, the
+default implementation provides an elegant coroutine-based API.
+
+An implementation on top of ``threading`` and a Sans-I/O implementation are also
+available.
+
+`Documentation is available on Read the Docs. <https://websockets.readthedocs.io/>`_
+
+.. copy-pasted because GitHub doesn't support the include directive
+
+Here's an echo server with the ``asyncio`` API:
+
+.. code:: python
+
+    #!/usr/bin/env python
+
+    import asyncio
+    from websockets.server import serve
+
+    async def echo(websocket):
+        async for message in websocket:
+            await websocket.send(message)
+
+    async def main():
+        async with serve(echo, "localhost", 8765):
+            await asyncio.Future()  # run forever
+
+    asyncio.run(main())
+
+Here's how a client sends and receives messages with the ``threading`` API:
+
+.. code:: python
+
+    #!/usr/bin/env python
+
+    from websockets.sync.client import connect
+
+    def hello():
+        with connect("ws://localhost:8765") as websocket:
+            websocket.send("Hello world!")
+            message = websocket.recv()
+            print(f"Received: {message}")
+
+    hello()
+
+
+Does that look good?
+
+`Get started with the tutorial! `_
+
+Why should I use ``websockets``?
+--------------------------------
+
+The development of ``websockets`` is shaped by four principles:
+
+1. **Correctness**: ``websockets`` is heavily tested for compliance with
+   :rfc:`6455`. Continuous integration fails under 100% branch coverage.
+
+2. **Simplicity**: all you need to understand is ``msg = await ws.recv()`` and
+   ``await ws.send(msg)``. ``websockets`` takes care of managing connections
+   so you can focus on your application.
+
+3. **Robustness**: ``websockets`` is built for production. For example, it was
+   the only library to `handle backpressure correctly`_ before the issue
+   became widely known in the Python community.
+
+4. **Performance**: memory usage is optimized and configurable. A C extension
+   accelerates expensive operations. It's pre-compiled for Linux, macOS and
+   Windows and packaged in the wheel format for each system and Python version.
+
+Documentation is a first class concern in the project. Head over to `Read the
+Docs`_ and see for yourself.
+
+.. _Read the Docs: https://websockets.readthedocs.io/
+.. _handle backpressure correctly: https://vorpus.org/blog/some-thoughts-on-asynchronous-api-design-in-a-post-asyncawait-world/#websocket-servers
+
+Why shouldn't I use ``websockets``?
+-----------------------------------
+
+* If you prefer callbacks over coroutines: ``websockets`` was created to
+  provide the best coroutine-based API to manage WebSocket connections in
+  Python. Pick another library for a callback-based API.
+
+* If you're looking for a mixed HTTP / WebSocket library: ``websockets`` aims
+  at being an excellent implementation of :rfc:`6455`: The WebSocket Protocol
+  and :rfc:`7692`: Compression Extensions for WebSocket. Its support for HTTP
+  is minimal — just enough for an HTTP health check.
+
+  If you want to do both in the same server, look at HTTP frameworks that
+  build on top of ``websockets`` to support WebSocket connections, like
+  Sanic_.
+
+.. _Sanic: https://sanicframework.org/en/
+
+What else?
+----------
+
+Bug reports, patches and suggestions are welcome!
+
+To report a security vulnerability, please use the `Tidelift security
+contact`_. Tidelift will coordinate the fix and disclosure.
+
+.. _Tidelift security contact: https://tidelift.com/security
+
+For anything else, please open an issue_ or send a `pull request`_.
+
+.. _issue: https://github.com/python-websockets/websockets/issues/new
+.. _pull request: https://github.com/python-websockets/websockets/compare/
+
+Participants must uphold the `Contributor Covenant code of conduct`_.
+
+.. _Contributor Covenant code of conduct: https://github.com/python-websockets/websockets/blob/main/CODE_OF_CONDUCT.md
+
+``websockets`` is released under the `BSD license`_.
+
+.. _BSD license: https://github.com/python-websockets/websockets/blob/main/LICENSE
diff --git a/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/RECORD b/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..81cd3cd94722b0e284c1ede2cf4b4d20ef3a6620
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/RECORD
@@ -0,0 +1,86 @@
+websockets-12.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+websockets-12.0.dist-info/LICENSE,sha256=PWoMBQ2L7FL6utUC5F-yW9ArytvXDeo01Ee2oP9Obag,1514
+websockets-12.0.dist-info/METADATA,sha256=AxtN_yngdOy_wohEsaZLR4wyOo2SrG0R3a4ASgFWk0U,6612
+websockets-12.0.dist-info/RECORD,,
+websockets-12.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+websockets-12.0.dist-info/WHEEL,sha256=y1AKLertNPgulRu7mWGJBCw1hZvgiqIPyqpgggUFceU,225
+websockets-12.0.dist-info/top_level.txt,sha256=CMpdKklxKsvZgCgyltxUWOHibZXZ1uYIVpca9xsQ8Hk,11
+websockets/__init__.py,sha256=Y4tu2IwlZ71xBCVGRaovGDBVdKH9dabTFyHMF3g-DgE,5658
+websockets/__main__.py,sha256=8Dtga-XePHQ4jqgMMuXHT8XRH_hSvs8bEZ7-v49vTKg,4744
+websockets/__pycache__/__init__.cpython-310.pyc,,
+websockets/__pycache__/__main__.cpython-310.pyc,,
+websockets/__pycache__/auth.cpython-310.pyc,,
+websockets/__pycache__/client.cpython-310.pyc,,
+websockets/__pycache__/connection.cpython-310.pyc,,
+websockets/__pycache__/datastructures.cpython-310.pyc,,
+websockets/__pycache__/exceptions.cpython-310.pyc,,
+websockets/__pycache__/frames.cpython-310.pyc,,
+websockets/__pycache__/headers.cpython-310.pyc,,
+websockets/__pycache__/http.cpython-310.pyc,,
+websockets/__pycache__/http11.cpython-310.pyc,,
+websockets/__pycache__/imports.cpython-310.pyc,,
+websockets/__pycache__/protocol.cpython-310.pyc,,
+websockets/__pycache__/server.cpython-310.pyc,,
+websockets/__pycache__/streams.cpython-310.pyc,,
+websockets/__pycache__/typing.cpython-310.pyc,,
+websockets/__pycache__/uri.cpython-310.pyc,,
+websockets/__pycache__/utils.cpython-310.pyc,,
+websockets/__pycache__/version.cpython-310.pyc,,
+websockets/auth.py,sha256=pCeunT3V2AdwRt_Tpq9TrkdGY7qUlDHIEqeggj5yQFk,262
+websockets/client.py,sha256=lIa508JN6xCQdzX3xfL3D6x8LpWyhct29Z64ScX1muQ,12562
+websockets/connection.py,sha256=UivBmLaKmEOGpL1bU8uwh1PXIqMFiOUTVRi_gM7w5Rg,333
+websockets/datastructures.py,sha256=6OgolFXa_rj9OOEtRnO2rXXzEB2A8aTd9GaTL1Yp66Q,5582
+websockets/exceptions.py,sha256=DYWg9XEZ47WmWWC26117rfF5zes6zpBs-N4rZXkcn50,10296
+websockets/extensions/__init__.py,sha256=QkZsxaJVllVSp1uhdD5uPGibdbx_091GrVVfS5LXcpw,98
+websockets/extensions/__pycache__/__init__.cpython-310.pyc,,
+websockets/extensions/__pycache__/base.cpython-310.pyc,,
+websockets/extensions/__pycache__/permessage_deflate.cpython-310.pyc,,
+websockets/extensions/base.py,sha256=5shEU7lqmsLC7-y3OCWih1VdS_wOImmZwuAaEKl9kDU,3271
+websockets/extensions/permessage_deflate.py,sha256=bPFOAyTUDU7IIJyCGnWfr5yZF_J8dhCwJWt7jWuYM6c,24782
+websockets/frames.py,sha256=wEmgahW6XwtGucEH4y7S93NWR3elfBvcQSNT3mzPOQw,13700
+websockets/headers.py,sha256=RYryH2zqB_2Y02BTF2KsQFfYxAM6-Kh-A3Dv_32opAA,16120
+websockets/http.py,sha256=T-c3LhgQHcdjKj2AJPzEJ6y_XK8tARDy_XjtmWqO8CM,897
+websockets/http11.py,sha256=QcZ7u-UYbO98xQXrUz43qgaBXk-AyoQBHJBR0J9qYRE,12565
+websockets/imports.py,sha256=SXXs0glid-UHcwla5yjR72DIbGeUTrS9VFagPvPvRNY,2790
+websockets/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+websockets/legacy/__pycache__/__init__.cpython-310.pyc,,
+websockets/legacy/__pycache__/async_timeout.cpython-310.pyc,,
+websockets/legacy/__pycache__/auth.cpython-310.pyc,,
+websockets/legacy/__pycache__/client.cpython-310.pyc,,
+websockets/legacy/__pycache__/compatibility.cpython-310.pyc,,
+websockets/legacy/__pycache__/framing.cpython-310.pyc,,
+websockets/legacy/__pycache__/handshake.cpython-310.pyc,,
+websockets/legacy/__pycache__/http.cpython-310.pyc,,
+websockets/legacy/__pycache__/protocol.cpython-310.pyc,,
+websockets/legacy/__pycache__/server.cpython-310.pyc,,
+websockets/legacy/async_timeout.py,sha256=nHW_nJYnxtuprwPduZMTl789KAymwmv0ukLbzm2Z8yU,8540
+websockets/legacy/auth.py,sha256=WP68nZ1KAS0YCfNRyYG2M6LrNmT6xa430YnAjoeAP3g,6287
+websockets/legacy/client.py,sha256=-tProFDSLOxIGhjo44klpFUKsrpYCpe6NJrk2qDzd-0,26123
+websockets/legacy/compatibility.py,sha256=zgisuAUM4D5zKnyocY4JoNxhKjslodjOjGxu0QCbX1I,260
+websockets/legacy/framing.py,sha256=M4J6ZPRK-zNqY_UgPQ4Qppc4R64aSMftO7FR_0VpG-Q,4998
+websockets/legacy/handshake.py,sha256=RggPKl-w8oFJZQYZR0IdIOTrsz040pYp0Gu4L_D7_4U,5479
+websockets/legacy/http.py,sha256=qmrM7pa0kuuJIroMVahBAH8_ZVqkD91YhwVux_xpfeI,6938
+websockets/legacy/protocol.py,sha256=5H5uKIF7DbFgVCrwMd330GUH0ZWonihoi6QP27EN0Ww,63343
+websockets/legacy/server.py,sha256=DwapgRDPgcCeZrC-2a1BxoNModWC0xbEk6ESLNcpReI,44737
+websockets/protocol.py,sha256=bs5OH2UKrF1t7BQCpzU1DCzCJAHVNyLRzfnh8tcdbS0,24002
+websockets/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+websockets/server.py,sha256=hJB3yl-QO2LLhPhyo_ahAI_dhJoxy9AB6S9vsyrpzuY,21136
+websockets/speedups.c,sha256=ghPq-NF35VLVNkMv0uFDIruNpVISyW-qvoZgPpE65qw,5834
+websockets/speedups.cpython-310-x86_64-linux-gnu.so,sha256=K8gFRfHuId7PWwJRKhIGrHto64Ps-javBFVvv_cpEfc,33736
+websockets/streams.py,sha256=8nv62HYyS74t_JSWGie4SoYAz8-jMcQacaHnD0RkK90,4038
+websockets/sync/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+websockets/sync/__pycache__/__init__.cpython-310.pyc,,
+websockets/sync/__pycache__/client.cpython-310.pyc,,
+websockets/sync/__pycache__/connection.cpython-310.pyc,,
+websockets/sync/__pycache__/messages.cpython-310.pyc,,
+websockets/sync/__pycache__/server.cpython-310.pyc,,
+websockets/sync/__pycache__/utils.cpython-310.pyc,,
+websockets/sync/client.py,sha256=-9we3AHtE25pcT6EHGQ0oIyGzfYs18AzLpyDn4RLi94,11265
+websockets/sync/connection.py,sha256=XBdZgDyZKyW3Bm_N5hhiT8uQvtj9lrB0qDVA4BP5wfA,29578
+websockets/sync/messages.py,sha256=pTcWhwD-uwA0l4a26_xgPHgP8pjRYk5xrX5Vhq-JuCo,9484
+websockets/sync/server.py,sha256=081Gzsneal-pFTvo5d7iPljUlQjV05ZNp4yVM57tfw0,18674
+websockets/sync/utils.py,sha256=yUDxjeM4yVeXOZ_Go4ajgTUDOy-0rEWkjcR_RZDqcYY,1151
+websockets/typing.py,sha256=naXTu4ToUF2k43XZhbW9Tt519yt876hVw66CjxQihII,1527
+websockets/uri.py,sha256=oymYUo7bX8LofYzXpT3UqTZfkCt2y4s680Xr-qw88qk,3215
+websockets/utils.py,sha256=QBhgbXn9ZvvLEzj-X8-zSHWVMkUqc6Wm-_HBjga5RNM,1150
+websockets/version.py,sha256=XFPgNJrcyqeM0MPa7rBzmzplATL8PdK3-pv6hHkyBAw,2747
diff --git a/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/REQUESTED b/evalkit_tf437/lib/python3.10/site-packages/websockets-12.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391