Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- infer_4_37_2/lib/python3.10/site-packages/_yaml/__init__.py +33 -0
- infer_4_37_2/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/__init__.py +76 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/from_thread.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/lowlevel.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/pytest_plugin.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/to_thread.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/__init__.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/__pycache__/_trio.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/_asyncio.py +2851 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/_trio.py +1334 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_core/__init__.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_core/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_core/__pycache__/_eventloop.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_core/__pycache__/_resources.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_fileio.py +674 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_resources.py +18 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_signals.py +27 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_subprocesses.py +196 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_testing.py +78 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/abc/__init__.py +55 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/abc/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/abc/__pycache__/_eventloop.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/abc/__pycache__/_tasks.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/abc/_streams.py +203 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/abc/_subprocesses.py +79 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/from_thread.py +527 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/lowlevel.py +161 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/py.typed +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/pytest_plugin.py +191 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/streams/__init__.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/streams/buffered.py +119 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/streams/file.py +148 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/streams/memory.py +317 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/streams/stapled.py +141 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/streams/text.py +147 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/streams/tls.py +337 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/to_process.py +258 -0
- infer_4_37_2/lib/python3.10/site-packages/anyio/to_thread.py +69 -0
- infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER +1 -0
- infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/METADATA +246 -0
- infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/RECORD +56 -0
- infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/REQUESTED +0 -0
- infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/WHEEL +4 -0
- infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE +21 -0
- infer_4_37_2/lib/python3.10/site-packages/click/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/click/__pycache__/decorators.cpython-310.pyc +0 -0
infer_4_37_2/lib/python3.10/site-packages/_yaml/__init__.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# This is a stub package designed to roughly emulate the _yaml
# extension module, which previously existed as a standalone module
# and has been moved into the `yaml` package namespace.
# It does not perfectly mimic its old counterpart, but should get
# close enough for anyone who's relying on it even when they shouldn't.
import yaml

# in some circumstances, the yaml module we imported may be from a different version, so we need
# to tread carefully when poking at it here (it may not have the attributes we expect)
if not getattr(yaml, '__with_libyaml__', False):
    from sys import version_info

    # ModuleNotFoundError only exists on Python >= 3.6; fall back to its
    # parent class ImportError on older interpreters.
    exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
    raise exc("No module named '_yaml'")
else:
    from yaml._yaml import *
    import warnings
    warnings.warn(
        'The _yaml extension module is now located at yaml._yaml'
        ' and its location is subject to change.  To use the'
        ' LibYAML-based parser and emitter, import from `yaml`:'
        ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
        DeprecationWarning
    )
    del warnings
    # Don't `del yaml` here because yaml is actually an existing
    # namespace member of _yaml.

__name__ = '_yaml'
# If the module is top-level (i.e. not a part of any specific package)
# then the attribute should be set to ''.
# https://docs.python.org/3.8/library/types.html
__package__ = ''
|
infer_4_37_2/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (725 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/__init__.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from ._core._eventloop import current_time as current_time
|
| 4 |
+
from ._core._eventloop import get_all_backends as get_all_backends
|
| 5 |
+
from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
|
| 6 |
+
from ._core._eventloop import run as run
|
| 7 |
+
from ._core._eventloop import sleep as sleep
|
| 8 |
+
from ._core._eventloop import sleep_forever as sleep_forever
|
| 9 |
+
from ._core._eventloop import sleep_until as sleep_until
|
| 10 |
+
from ._core._exceptions import BrokenResourceError as BrokenResourceError
|
| 11 |
+
from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
|
| 12 |
+
from ._core._exceptions import BusyResourceError as BusyResourceError
|
| 13 |
+
from ._core._exceptions import ClosedResourceError as ClosedResourceError
|
| 14 |
+
from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
|
| 15 |
+
from ._core._exceptions import EndOfStream as EndOfStream
|
| 16 |
+
from ._core._exceptions import IncompleteRead as IncompleteRead
|
| 17 |
+
from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
|
| 18 |
+
from ._core._exceptions import WouldBlock as WouldBlock
|
| 19 |
+
from ._core._fileio import AsyncFile as AsyncFile
|
| 20 |
+
from ._core._fileio import Path as Path
|
| 21 |
+
from ._core._fileio import open_file as open_file
|
| 22 |
+
from ._core._fileio import wrap_file as wrap_file
|
| 23 |
+
from ._core._resources import aclose_forcefully as aclose_forcefully
|
| 24 |
+
from ._core._signals import open_signal_receiver as open_signal_receiver
|
| 25 |
+
from ._core._sockets import connect_tcp as connect_tcp
|
| 26 |
+
from ._core._sockets import connect_unix as connect_unix
|
| 27 |
+
from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
|
| 28 |
+
from ._core._sockets import (
|
| 29 |
+
create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
|
| 30 |
+
)
|
| 31 |
+
from ._core._sockets import create_tcp_listener as create_tcp_listener
|
| 32 |
+
from ._core._sockets import create_udp_socket as create_udp_socket
|
| 33 |
+
from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
|
| 34 |
+
from ._core._sockets import create_unix_listener as create_unix_listener
|
| 35 |
+
from ._core._sockets import getaddrinfo as getaddrinfo
|
| 36 |
+
from ._core._sockets import getnameinfo as getnameinfo
|
| 37 |
+
from ._core._sockets import wait_readable as wait_readable
|
| 38 |
+
from ._core._sockets import wait_socket_readable as wait_socket_readable
|
| 39 |
+
from ._core._sockets import wait_socket_writable as wait_socket_writable
|
| 40 |
+
from ._core._sockets import wait_writable as wait_writable
|
| 41 |
+
from ._core._streams import create_memory_object_stream as create_memory_object_stream
|
| 42 |
+
from ._core._subprocesses import open_process as open_process
|
| 43 |
+
from ._core._subprocesses import run_process as run_process
|
| 44 |
+
from ._core._synchronization import CapacityLimiter as CapacityLimiter
|
| 45 |
+
from ._core._synchronization import (
|
| 46 |
+
CapacityLimiterStatistics as CapacityLimiterStatistics,
|
| 47 |
+
)
|
| 48 |
+
from ._core._synchronization import Condition as Condition
|
| 49 |
+
from ._core._synchronization import ConditionStatistics as ConditionStatistics
|
| 50 |
+
from ._core._synchronization import Event as Event
|
| 51 |
+
from ._core._synchronization import EventStatistics as EventStatistics
|
| 52 |
+
from ._core._synchronization import Lock as Lock
|
| 53 |
+
from ._core._synchronization import LockStatistics as LockStatistics
|
| 54 |
+
from ._core._synchronization import ResourceGuard as ResourceGuard
|
| 55 |
+
from ._core._synchronization import Semaphore as Semaphore
|
| 56 |
+
from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
|
| 57 |
+
from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
|
| 58 |
+
from ._core._tasks import CancelScope as CancelScope
|
| 59 |
+
from ._core._tasks import create_task_group as create_task_group
|
| 60 |
+
from ._core._tasks import current_effective_deadline as current_effective_deadline
|
| 61 |
+
from ._core._tasks import fail_after as fail_after
|
| 62 |
+
from ._core._tasks import move_on_after as move_on_after
|
| 63 |
+
from ._core._testing import TaskInfo as TaskInfo
|
| 64 |
+
from ._core._testing import get_current_task as get_current_task
|
| 65 |
+
from ._core._testing import get_running_tasks as get_running_tasks
|
| 66 |
+
from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
|
| 67 |
+
from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
|
| 68 |
+
from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
|
| 69 |
+
from ._core._typedattr import typed_attribute as typed_attribute
|
| 70 |
+
|
# Re-export imports so they look like they live directly in this package.
# Iterate over a snapshot of the module namespace; any object whose
# ``__module__`` points into an ``anyio.`` submodule is relabelled as
# belonging to this top-level package.
for __value in tuple(locals().values()):
    if not getattr(__value, "__module__", "").startswith("anyio."):
        continue
    __value.__module__ = __name__

del __value
|
infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.99 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/from_thread.cpython-310.pyc
ADDED
|
Binary file (17.2 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/lowlevel.cpython-310.pyc
ADDED
|
Binary file (5.16 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/pytest_plugin.cpython-310.pyc
ADDED
|
Binary file (5.93 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/__pycache__/to_thread.cpython-310.pyc
ADDED
|
Binary file (2.6 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/__init__.py
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (173 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-310.pyc
ADDED
|
Binary file (78.2 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/__pycache__/_trio.cpython-310.pyc
ADDED
|
Binary file (44.2 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/_asyncio.py
ADDED
|
@@ -0,0 +1,2851 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import array
|
| 4 |
+
import asyncio
|
| 5 |
+
import concurrent.futures
|
| 6 |
+
import math
|
| 7 |
+
import os
|
| 8 |
+
import socket
|
| 9 |
+
import sys
|
| 10 |
+
import threading
|
| 11 |
+
import weakref
|
| 12 |
+
from asyncio import (
|
| 13 |
+
AbstractEventLoop,
|
| 14 |
+
CancelledError,
|
| 15 |
+
all_tasks,
|
| 16 |
+
create_task,
|
| 17 |
+
current_task,
|
| 18 |
+
get_running_loop,
|
| 19 |
+
sleep,
|
| 20 |
+
)
|
| 21 |
+
from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined]
|
| 22 |
+
from collections import OrderedDict, deque
|
| 23 |
+
from collections.abc import (
|
| 24 |
+
AsyncGenerator,
|
| 25 |
+
AsyncIterator,
|
| 26 |
+
Awaitable,
|
| 27 |
+
Callable,
|
| 28 |
+
Collection,
|
| 29 |
+
Coroutine,
|
| 30 |
+
Iterable,
|
| 31 |
+
Iterator,
|
| 32 |
+
MutableMapping,
|
| 33 |
+
Sequence,
|
| 34 |
+
)
|
| 35 |
+
from concurrent.futures import Future
|
| 36 |
+
from contextlib import AbstractContextManager, suppress
|
| 37 |
+
from contextvars import Context, copy_context
|
| 38 |
+
from dataclasses import dataclass
|
| 39 |
+
from functools import partial, wraps
|
| 40 |
+
from inspect import (
|
| 41 |
+
CORO_RUNNING,
|
| 42 |
+
CORO_SUSPENDED,
|
| 43 |
+
getcoroutinestate,
|
| 44 |
+
iscoroutine,
|
| 45 |
+
)
|
| 46 |
+
from io import IOBase
|
| 47 |
+
from os import PathLike
|
| 48 |
+
from queue import Queue
|
| 49 |
+
from signal import Signals
|
| 50 |
+
from socket import AddressFamily, SocketKind
|
| 51 |
+
from threading import Thread
|
| 52 |
+
from types import TracebackType
|
| 53 |
+
from typing import (
|
| 54 |
+
IO,
|
| 55 |
+
TYPE_CHECKING,
|
| 56 |
+
Any,
|
| 57 |
+
Optional,
|
| 58 |
+
TypeVar,
|
| 59 |
+
cast,
|
| 60 |
+
)
|
| 61 |
+
from weakref import WeakKeyDictionary
|
| 62 |
+
|
| 63 |
+
import sniffio
|
| 64 |
+
|
| 65 |
+
from .. import (
|
| 66 |
+
CapacityLimiterStatistics,
|
| 67 |
+
EventStatistics,
|
| 68 |
+
LockStatistics,
|
| 69 |
+
TaskInfo,
|
| 70 |
+
abc,
|
| 71 |
+
)
|
| 72 |
+
from .._core._eventloop import claim_worker_thread, threadlocals
|
| 73 |
+
from .._core._exceptions import (
|
| 74 |
+
BrokenResourceError,
|
| 75 |
+
BusyResourceError,
|
| 76 |
+
ClosedResourceError,
|
| 77 |
+
EndOfStream,
|
| 78 |
+
WouldBlock,
|
| 79 |
+
iterate_exceptions,
|
| 80 |
+
)
|
| 81 |
+
from .._core._sockets import convert_ipv6_sockaddr
|
| 82 |
+
from .._core._streams import create_memory_object_stream
|
| 83 |
+
from .._core._synchronization import (
|
| 84 |
+
CapacityLimiter as BaseCapacityLimiter,
|
| 85 |
+
)
|
| 86 |
+
from .._core._synchronization import Event as BaseEvent
|
| 87 |
+
from .._core._synchronization import Lock as BaseLock
|
| 88 |
+
from .._core._synchronization import (
|
| 89 |
+
ResourceGuard,
|
| 90 |
+
SemaphoreStatistics,
|
| 91 |
+
)
|
| 92 |
+
from .._core._synchronization import Semaphore as BaseSemaphore
|
| 93 |
+
from .._core._tasks import CancelScope as BaseCancelScope
|
| 94 |
+
from ..abc import (
|
| 95 |
+
AsyncBackend,
|
| 96 |
+
IPSockAddrType,
|
| 97 |
+
SocketListener,
|
| 98 |
+
UDPPacketType,
|
| 99 |
+
UNIXDatagramPacketType,
|
| 100 |
+
)
|
| 101 |
+
from ..abc._eventloop import StrOrBytesPath
|
| 102 |
+
from ..lowlevel import RunVar
|
| 103 |
+
from ..streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
|
| 104 |
+
|
| 105 |
+
if TYPE_CHECKING:
|
| 106 |
+
from _typeshed import FileDescriptorLike
|
| 107 |
+
else:
|
| 108 |
+
FileDescriptorLike = object
|
| 109 |
+
|
| 110 |
+
if sys.version_info >= (3, 10):
|
| 111 |
+
from typing import ParamSpec
|
| 112 |
+
else:
|
| 113 |
+
from typing_extensions import ParamSpec
|
| 114 |
+
|
| 115 |
+
if sys.version_info >= (3, 11):
|
| 116 |
+
from asyncio import Runner
|
| 117 |
+
from typing import TypeVarTuple, Unpack
|
| 118 |
+
else:
|
| 119 |
+
import contextvars
|
| 120 |
+
import enum
|
| 121 |
+
import signal
|
| 122 |
+
from asyncio import coroutines, events, exceptions, tasks
|
| 123 |
+
|
| 124 |
+
from exceptiongroup import BaseExceptionGroup
|
| 125 |
+
from typing_extensions import TypeVarTuple, Unpack
|
| 126 |
+
|
| 127 |
+
class _State(enum.Enum):
|
| 128 |
+
CREATED = "created"
|
| 129 |
+
INITIALIZED = "initialized"
|
| 130 |
+
CLOSED = "closed"
|
| 131 |
+
|
| 132 |
+
class Runner:
|
| 133 |
+
# Copied from CPython 3.11
|
| 134 |
+
def __init__(
|
| 135 |
+
self,
|
| 136 |
+
*,
|
| 137 |
+
debug: bool | None = None,
|
| 138 |
+
loop_factory: Callable[[], AbstractEventLoop] | None = None,
|
| 139 |
+
):
|
| 140 |
+
self._state = _State.CREATED
|
| 141 |
+
self._debug = debug
|
| 142 |
+
self._loop_factory = loop_factory
|
| 143 |
+
self._loop: AbstractEventLoop | None = None
|
| 144 |
+
self._context = None
|
| 145 |
+
self._interrupt_count = 0
|
| 146 |
+
self._set_event_loop = False
|
| 147 |
+
|
| 148 |
+
def __enter__(self) -> Runner:
|
| 149 |
+
self._lazy_init()
|
| 150 |
+
return self
|
| 151 |
+
|
| 152 |
+
def __exit__(
|
| 153 |
+
self,
|
| 154 |
+
exc_type: type[BaseException],
|
| 155 |
+
exc_val: BaseException,
|
| 156 |
+
exc_tb: TracebackType,
|
| 157 |
+
) -> None:
|
| 158 |
+
self.close()
|
| 159 |
+
|
| 160 |
+
def close(self) -> None:
|
| 161 |
+
"""Shutdown and close event loop."""
|
| 162 |
+
if self._state is not _State.INITIALIZED:
|
| 163 |
+
return
|
| 164 |
+
try:
|
| 165 |
+
loop = self._loop
|
| 166 |
+
_cancel_all_tasks(loop)
|
| 167 |
+
loop.run_until_complete(loop.shutdown_asyncgens())
|
| 168 |
+
if hasattr(loop, "shutdown_default_executor"):
|
| 169 |
+
loop.run_until_complete(loop.shutdown_default_executor())
|
| 170 |
+
else:
|
| 171 |
+
loop.run_until_complete(_shutdown_default_executor(loop))
|
| 172 |
+
finally:
|
| 173 |
+
if self._set_event_loop:
|
| 174 |
+
events.set_event_loop(None)
|
| 175 |
+
loop.close()
|
| 176 |
+
self._loop = None
|
| 177 |
+
self._state = _State.CLOSED
|
| 178 |
+
|
| 179 |
+
def get_loop(self) -> AbstractEventLoop:
|
| 180 |
+
"""Return embedded event loop."""
|
| 181 |
+
self._lazy_init()
|
| 182 |
+
return self._loop
|
| 183 |
+
|
| 184 |
+
def run(self, coro: Coroutine[T_Retval], *, context=None) -> T_Retval:
|
| 185 |
+
"""Run a coroutine inside the embedded event loop."""
|
| 186 |
+
if not coroutines.iscoroutine(coro):
|
| 187 |
+
raise ValueError(f"a coroutine was expected, got {coro!r}")
|
| 188 |
+
|
| 189 |
+
if events._get_running_loop() is not None:
|
| 190 |
+
# fail fast with short traceback
|
| 191 |
+
raise RuntimeError(
|
| 192 |
+
"Runner.run() cannot be called from a running event loop"
|
| 193 |
+
)
|
| 194 |
+
|
| 195 |
+
self._lazy_init()
|
| 196 |
+
|
| 197 |
+
if context is None:
|
| 198 |
+
context = self._context
|
| 199 |
+
task = context.run(self._loop.create_task, coro)
|
| 200 |
+
|
| 201 |
+
if (
|
| 202 |
+
threading.current_thread() is threading.main_thread()
|
| 203 |
+
and signal.getsignal(signal.SIGINT) is signal.default_int_handler
|
| 204 |
+
):
|
| 205 |
+
sigint_handler = partial(self._on_sigint, main_task=task)
|
| 206 |
+
try:
|
| 207 |
+
signal.signal(signal.SIGINT, sigint_handler)
|
| 208 |
+
except ValueError:
|
| 209 |
+
# `signal.signal` may throw if `threading.main_thread` does
|
| 210 |
+
# not support signals (e.g. embedded interpreter with signals
|
| 211 |
+
# not registered - see gh-91880)
|
| 212 |
+
sigint_handler = None
|
| 213 |
+
else:
|
| 214 |
+
sigint_handler = None
|
| 215 |
+
|
| 216 |
+
self._interrupt_count = 0
|
| 217 |
+
try:
|
| 218 |
+
return self._loop.run_until_complete(task)
|
| 219 |
+
except exceptions.CancelledError:
|
| 220 |
+
if self._interrupt_count > 0:
|
| 221 |
+
uncancel = getattr(task, "uncancel", None)
|
| 222 |
+
if uncancel is not None and uncancel() == 0:
|
| 223 |
+
raise KeyboardInterrupt()
|
| 224 |
+
raise # CancelledError
|
| 225 |
+
finally:
|
| 226 |
+
if (
|
| 227 |
+
sigint_handler is not None
|
| 228 |
+
and signal.getsignal(signal.SIGINT) is sigint_handler
|
| 229 |
+
):
|
| 230 |
+
signal.signal(signal.SIGINT, signal.default_int_handler)
|
| 231 |
+
|
| 232 |
+
def _lazy_init(self) -> None:
|
| 233 |
+
if self._state is _State.CLOSED:
|
| 234 |
+
raise RuntimeError("Runner is closed")
|
| 235 |
+
if self._state is _State.INITIALIZED:
|
| 236 |
+
return
|
| 237 |
+
if self._loop_factory is None:
|
| 238 |
+
self._loop = events.new_event_loop()
|
| 239 |
+
if not self._set_event_loop:
|
| 240 |
+
# Call set_event_loop only once to avoid calling
|
| 241 |
+
# attach_loop multiple times on child watchers
|
| 242 |
+
events.set_event_loop(self._loop)
|
| 243 |
+
self._set_event_loop = True
|
| 244 |
+
else:
|
| 245 |
+
self._loop = self._loop_factory()
|
| 246 |
+
if self._debug is not None:
|
| 247 |
+
self._loop.set_debug(self._debug)
|
| 248 |
+
self._context = contextvars.copy_context()
|
| 249 |
+
self._state = _State.INITIALIZED
|
| 250 |
+
|
| 251 |
+
def _on_sigint(self, signum, frame, main_task: asyncio.Task) -> None:
|
| 252 |
+
self._interrupt_count += 1
|
| 253 |
+
if self._interrupt_count == 1 and not main_task.done():
|
| 254 |
+
main_task.cancel()
|
| 255 |
+
# wakeup loop if it is blocked by select() with long timeout
|
| 256 |
+
self._loop.call_soon_threadsafe(lambda: None)
|
| 257 |
+
return
|
| 258 |
+
raise KeyboardInterrupt()
|
| 259 |
+
|
| 260 |
+
def _cancel_all_tasks(loop: AbstractEventLoop) -> None:
|
| 261 |
+
to_cancel = tasks.all_tasks(loop)
|
| 262 |
+
if not to_cancel:
|
| 263 |
+
return
|
| 264 |
+
|
| 265 |
+
for task in to_cancel:
|
| 266 |
+
task.cancel()
|
| 267 |
+
|
| 268 |
+
loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True))
|
| 269 |
+
|
| 270 |
+
for task in to_cancel:
|
| 271 |
+
if task.cancelled():
|
| 272 |
+
continue
|
| 273 |
+
if task.exception() is not None:
|
| 274 |
+
loop.call_exception_handler(
|
| 275 |
+
{
|
| 276 |
+
"message": "unhandled exception during asyncio.run() shutdown",
|
| 277 |
+
"exception": task.exception(),
|
| 278 |
+
"task": task,
|
| 279 |
+
}
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
async def _shutdown_default_executor(loop: AbstractEventLoop) -> None:
|
| 283 |
+
"""Schedule the shutdown of the default executor."""
|
| 284 |
+
|
| 285 |
+
def _do_shutdown(future: asyncio.futures.Future) -> None:
|
| 286 |
+
try:
|
| 287 |
+
loop._default_executor.shutdown(wait=True) # type: ignore[attr-defined]
|
| 288 |
+
loop.call_soon_threadsafe(future.set_result, None)
|
| 289 |
+
except Exception as ex:
|
| 290 |
+
loop.call_soon_threadsafe(future.set_exception, ex)
|
| 291 |
+
|
| 292 |
+
loop._executor_shutdown_called = True
|
| 293 |
+
if loop._default_executor is None:
|
| 294 |
+
return
|
| 295 |
+
future = loop.create_future()
|
| 296 |
+
thread = threading.Thread(target=_do_shutdown, args=(future,))
|
| 297 |
+
thread.start()
|
| 298 |
+
try:
|
| 299 |
+
await future
|
| 300 |
+
finally:
|
| 301 |
+
thread.join()
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
T_Retval = TypeVar("T_Retval")
|
| 305 |
+
T_contra = TypeVar("T_contra", contravariant=True)
|
| 306 |
+
PosArgsT = TypeVarTuple("PosArgsT")
|
| 307 |
+
P = ParamSpec("P")
|
| 308 |
+
|
| 309 |
+
_root_task: RunVar[asyncio.Task | None] = RunVar("_root_task")
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def find_root_task() -> asyncio.Task:
|
| 313 |
+
root_task = _root_task.get(None)
|
| 314 |
+
if root_task is not None and not root_task.done():
|
| 315 |
+
return root_task
|
| 316 |
+
|
| 317 |
+
# Look for a task that has been started via run_until_complete()
|
| 318 |
+
for task in all_tasks():
|
| 319 |
+
if task._callbacks and not task.done():
|
| 320 |
+
callbacks = [cb for cb, context in task._callbacks]
|
| 321 |
+
for cb in callbacks:
|
| 322 |
+
if (
|
| 323 |
+
cb is _run_until_complete_cb
|
| 324 |
+
or getattr(cb, "__module__", None) == "uvloop.loop"
|
| 325 |
+
):
|
| 326 |
+
_root_task.set(task)
|
| 327 |
+
return task
|
| 328 |
+
|
| 329 |
+
# Look up the topmost task in the AnyIO task tree, if possible
|
| 330 |
+
task = cast(asyncio.Task, current_task())
|
| 331 |
+
state = _task_states.get(task)
|
| 332 |
+
if state:
|
| 333 |
+
cancel_scope = state.cancel_scope
|
| 334 |
+
while cancel_scope and cancel_scope._parent_scope is not None:
|
| 335 |
+
cancel_scope = cancel_scope._parent_scope
|
| 336 |
+
|
| 337 |
+
if cancel_scope is not None:
|
| 338 |
+
return cast(asyncio.Task, cancel_scope._host_task)
|
| 339 |
+
|
| 340 |
+
return task
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def get_callable_name(func: Callable) -> str:
|
| 344 |
+
module = getattr(func, "__module__", None)
|
| 345 |
+
qualname = getattr(func, "__qualname__", None)
|
| 346 |
+
return ".".join([x for x in (module, qualname) if x])
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
#
|
| 350 |
+
# Event loop
|
| 351 |
+
#
|
| 352 |
+
|
| 353 |
+
_run_vars: WeakKeyDictionary[asyncio.AbstractEventLoop, Any] = WeakKeyDictionary()
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def _task_started(task: asyncio.Task) -> bool:
|
| 357 |
+
"""Return ``True`` if the task has been started and has not finished."""
|
| 358 |
+
# The task coro should never be None here, as we never add finished tasks to the
|
| 359 |
+
# task list
|
| 360 |
+
coro = task.get_coro()
|
| 361 |
+
assert coro is not None
|
| 362 |
+
try:
|
| 363 |
+
return getcoroutinestate(coro) in (CORO_RUNNING, CORO_SUSPENDED)
|
| 364 |
+
except AttributeError:
|
| 365 |
+
# task coro is async_genenerator_asend https://bugs.python.org/issue37771
|
| 366 |
+
raise Exception(f"Cannot determine if task {task} has started or not") from None
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
#
|
| 370 |
+
# Timeouts and cancellation
|
| 371 |
+
#
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def is_anyio_cancellation(exc: CancelledError) -> bool:
|
| 375 |
+
# Sometimes third party frameworks catch a CancelledError and raise a new one, so as
|
| 376 |
+
# a workaround we have to look at the previous ones in __context__ too for a
|
| 377 |
+
# matching cancel message
|
| 378 |
+
while True:
|
| 379 |
+
if (
|
| 380 |
+
exc.args
|
| 381 |
+
and isinstance(exc.args[0], str)
|
| 382 |
+
and exc.args[0].startswith("Cancelled by cancel scope ")
|
| 383 |
+
):
|
| 384 |
+
return True
|
| 385 |
+
|
| 386 |
+
if isinstance(exc.__context__, CancelledError):
|
| 387 |
+
exc = exc.__context__
|
| 388 |
+
continue
|
| 389 |
+
|
| 390 |
+
return False
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
class CancelScope(BaseCancelScope):
|
| 394 |
+
def __new__(
|
| 395 |
+
cls, *, deadline: float = math.inf, shield: bool = False
|
| 396 |
+
) -> CancelScope:
|
| 397 |
+
return object.__new__(cls)
|
| 398 |
+
|
| 399 |
+
def __init__(self, deadline: float = math.inf, shield: bool = False):
|
| 400 |
+
self._deadline = deadline
|
| 401 |
+
self._shield = shield
|
| 402 |
+
self._parent_scope: CancelScope | None = None
|
| 403 |
+
self._child_scopes: set[CancelScope] = set()
|
| 404 |
+
self._cancel_called = False
|
| 405 |
+
self._cancelled_caught = False
|
| 406 |
+
self._active = False
|
| 407 |
+
self._timeout_handle: asyncio.TimerHandle | None = None
|
| 408 |
+
self._cancel_handle: asyncio.Handle | None = None
|
| 409 |
+
self._tasks: set[asyncio.Task] = set()
|
| 410 |
+
self._host_task: asyncio.Task | None = None
|
| 411 |
+
if sys.version_info >= (3, 11):
|
| 412 |
+
self._pending_uncancellations: int | None = 0
|
| 413 |
+
else:
|
| 414 |
+
self._pending_uncancellations = None
|
| 415 |
+
|
| 416 |
+
def __enter__(self) -> CancelScope:
|
| 417 |
+
if self._active:
|
| 418 |
+
raise RuntimeError(
|
| 419 |
+
"Each CancelScope may only be used for a single 'with' block"
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
self._host_task = host_task = cast(asyncio.Task, current_task())
|
| 423 |
+
self._tasks.add(host_task)
|
| 424 |
+
try:
|
| 425 |
+
task_state = _task_states[host_task]
|
| 426 |
+
except KeyError:
|
| 427 |
+
task_state = TaskState(None, self)
|
| 428 |
+
_task_states[host_task] = task_state
|
| 429 |
+
else:
|
| 430 |
+
self._parent_scope = task_state.cancel_scope
|
| 431 |
+
task_state.cancel_scope = self
|
| 432 |
+
if self._parent_scope is not None:
|
| 433 |
+
# If using an eager task factory, the parent scope may not even contain
|
| 434 |
+
# the host task
|
| 435 |
+
self._parent_scope._child_scopes.add(self)
|
| 436 |
+
self._parent_scope._tasks.discard(host_task)
|
| 437 |
+
|
| 438 |
+
self._timeout()
|
| 439 |
+
self._active = True
|
| 440 |
+
|
| 441 |
+
# Start cancelling the host task if the scope was cancelled before entering
|
| 442 |
+
if self._cancel_called:
|
| 443 |
+
self._deliver_cancellation(self)
|
| 444 |
+
|
| 445 |
+
return self
|
| 446 |
+
|
| 447 |
+
def __exit__(
|
| 448 |
+
self,
|
| 449 |
+
exc_type: type[BaseException] | None,
|
| 450 |
+
exc_val: BaseException | None,
|
| 451 |
+
exc_tb: TracebackType | None,
|
| 452 |
+
) -> bool | None:
|
| 453 |
+
del exc_tb
|
| 454 |
+
|
| 455 |
+
if not self._active:
|
| 456 |
+
raise RuntimeError("This cancel scope is not active")
|
| 457 |
+
if current_task() is not self._host_task:
|
| 458 |
+
raise RuntimeError(
|
| 459 |
+
"Attempted to exit cancel scope in a different task than it was "
|
| 460 |
+
"entered in"
|
| 461 |
+
)
|
| 462 |
+
|
| 463 |
+
assert self._host_task is not None
|
| 464 |
+
host_task_state = _task_states.get(self._host_task)
|
| 465 |
+
if host_task_state is None or host_task_state.cancel_scope is not self:
|
| 466 |
+
raise RuntimeError(
|
| 467 |
+
"Attempted to exit a cancel scope that isn't the current tasks's "
|
| 468 |
+
"current cancel scope"
|
| 469 |
+
)
|
| 470 |
+
|
| 471 |
+
try:
|
| 472 |
+
self._active = False
|
| 473 |
+
if self._timeout_handle:
|
| 474 |
+
self._timeout_handle.cancel()
|
| 475 |
+
self._timeout_handle = None
|
| 476 |
+
|
| 477 |
+
self._tasks.remove(self._host_task)
|
| 478 |
+
if self._parent_scope is not None:
|
| 479 |
+
self._parent_scope._child_scopes.remove(self)
|
| 480 |
+
self._parent_scope._tasks.add(self._host_task)
|
| 481 |
+
|
| 482 |
+
host_task_state.cancel_scope = self._parent_scope
|
| 483 |
+
|
| 484 |
+
# Restart the cancellation effort in the closest visible, cancelled parent
|
| 485 |
+
# scope if necessary
|
| 486 |
+
self._restart_cancellation_in_parent()
|
| 487 |
+
|
| 488 |
+
# We only swallow the exception iff it was an AnyIO CancelledError, either
|
| 489 |
+
# directly as exc_val or inside an exception group and there are no cancelled
|
| 490 |
+
# parent cancel scopes visible to us here
|
| 491 |
+
if self._cancel_called and not self._parent_cancellation_is_visible_to_us:
|
| 492 |
+
# For each level-cancel() call made on the host task, call uncancel()
|
| 493 |
+
while self._pending_uncancellations:
|
| 494 |
+
self._host_task.uncancel()
|
| 495 |
+
self._pending_uncancellations -= 1
|
| 496 |
+
|
| 497 |
+
# Update cancelled_caught and check for exceptions we must not swallow
|
| 498 |
+
cannot_swallow_exc_val = False
|
| 499 |
+
if exc_val is not None:
|
| 500 |
+
for exc in iterate_exceptions(exc_val):
|
| 501 |
+
if isinstance(exc, CancelledError) and is_anyio_cancellation(
|
| 502 |
+
exc
|
| 503 |
+
):
|
| 504 |
+
self._cancelled_caught = True
|
| 505 |
+
else:
|
| 506 |
+
cannot_swallow_exc_val = True
|
| 507 |
+
|
| 508 |
+
return self._cancelled_caught and not cannot_swallow_exc_val
|
| 509 |
+
else:
|
| 510 |
+
if self._pending_uncancellations:
|
| 511 |
+
assert self._parent_scope is not None
|
| 512 |
+
assert self._parent_scope._pending_uncancellations is not None
|
| 513 |
+
self._parent_scope._pending_uncancellations += (
|
| 514 |
+
self._pending_uncancellations
|
| 515 |
+
)
|
| 516 |
+
self._pending_uncancellations = 0
|
| 517 |
+
|
| 518 |
+
return False
|
| 519 |
+
finally:
|
| 520 |
+
self._host_task = None
|
| 521 |
+
del exc_val
|
| 522 |
+
|
| 523 |
+
@property
|
| 524 |
+
def _effectively_cancelled(self) -> bool:
|
| 525 |
+
cancel_scope: CancelScope | None = self
|
| 526 |
+
while cancel_scope is not None:
|
| 527 |
+
if cancel_scope._cancel_called:
|
| 528 |
+
return True
|
| 529 |
+
|
| 530 |
+
if cancel_scope.shield:
|
| 531 |
+
return False
|
| 532 |
+
|
| 533 |
+
cancel_scope = cancel_scope._parent_scope
|
| 534 |
+
|
| 535 |
+
return False
|
| 536 |
+
|
| 537 |
+
@property
|
| 538 |
+
def _parent_cancellation_is_visible_to_us(self) -> bool:
|
| 539 |
+
return (
|
| 540 |
+
self._parent_scope is not None
|
| 541 |
+
and not self.shield
|
| 542 |
+
and self._parent_scope._effectively_cancelled
|
| 543 |
+
)
|
| 544 |
+
|
| 545 |
+
def _timeout(self) -> None:
|
| 546 |
+
if self._deadline != math.inf:
|
| 547 |
+
loop = get_running_loop()
|
| 548 |
+
if loop.time() >= self._deadline:
|
| 549 |
+
self.cancel()
|
| 550 |
+
else:
|
| 551 |
+
self._timeout_handle = loop.call_at(self._deadline, self._timeout)
|
| 552 |
+
|
| 553 |
+
def _deliver_cancellation(self, origin: CancelScope) -> bool:
|
| 554 |
+
"""
|
| 555 |
+
Deliver cancellation to directly contained tasks and nested cancel scopes.
|
| 556 |
+
|
| 557 |
+
Schedule another run at the end if we still have tasks eligible for
|
| 558 |
+
cancellation.
|
| 559 |
+
|
| 560 |
+
:param origin: the cancel scope that originated the cancellation
|
| 561 |
+
:return: ``True`` if the delivery needs to be retried on the next cycle
|
| 562 |
+
|
| 563 |
+
"""
|
| 564 |
+
should_retry = False
|
| 565 |
+
current = current_task()
|
| 566 |
+
for task in self._tasks:
|
| 567 |
+
should_retry = True
|
| 568 |
+
if task._must_cancel: # type: ignore[attr-defined]
|
| 569 |
+
continue
|
| 570 |
+
|
| 571 |
+
# The task is eligible for cancellation if it has started
|
| 572 |
+
if task is not current and (task is self._host_task or _task_started(task)):
|
| 573 |
+
waiter = task._fut_waiter # type: ignore[attr-defined]
|
| 574 |
+
if not isinstance(waiter, asyncio.Future) or not waiter.done():
|
| 575 |
+
task.cancel(f"Cancelled by cancel scope {id(origin):x}")
|
| 576 |
+
if (
|
| 577 |
+
task is origin._host_task
|
| 578 |
+
and origin._pending_uncancellations is not None
|
| 579 |
+
):
|
| 580 |
+
origin._pending_uncancellations += 1
|
| 581 |
+
|
| 582 |
+
# Deliver cancellation to child scopes that aren't shielded or running their own
|
| 583 |
+
# cancellation callbacks
|
| 584 |
+
for scope in self._child_scopes:
|
| 585 |
+
if not scope._shield and not scope.cancel_called:
|
| 586 |
+
should_retry = scope._deliver_cancellation(origin) or should_retry
|
| 587 |
+
|
| 588 |
+
# Schedule another callback if there are still tasks left
|
| 589 |
+
if origin is self:
|
| 590 |
+
if should_retry:
|
| 591 |
+
self._cancel_handle = get_running_loop().call_soon(
|
| 592 |
+
self._deliver_cancellation, origin
|
| 593 |
+
)
|
| 594 |
+
else:
|
| 595 |
+
self._cancel_handle = None
|
| 596 |
+
|
| 597 |
+
return should_retry
|
| 598 |
+
|
| 599 |
+
def _restart_cancellation_in_parent(self) -> None:
|
| 600 |
+
"""
|
| 601 |
+
Restart the cancellation effort in the closest directly cancelled parent scope.
|
| 602 |
+
|
| 603 |
+
"""
|
| 604 |
+
scope = self._parent_scope
|
| 605 |
+
while scope is not None:
|
| 606 |
+
if scope._cancel_called:
|
| 607 |
+
if scope._cancel_handle is None:
|
| 608 |
+
scope._deliver_cancellation(scope)
|
| 609 |
+
|
| 610 |
+
break
|
| 611 |
+
|
| 612 |
+
# No point in looking beyond any shielded scope
|
| 613 |
+
if scope._shield:
|
| 614 |
+
break
|
| 615 |
+
|
| 616 |
+
scope = scope._parent_scope
|
| 617 |
+
|
| 618 |
+
def cancel(self) -> None:
|
| 619 |
+
if not self._cancel_called:
|
| 620 |
+
if self._timeout_handle:
|
| 621 |
+
self._timeout_handle.cancel()
|
| 622 |
+
self._timeout_handle = None
|
| 623 |
+
|
| 624 |
+
self._cancel_called = True
|
| 625 |
+
if self._host_task is not None:
|
| 626 |
+
self._deliver_cancellation(self)
|
| 627 |
+
|
| 628 |
+
@property
|
| 629 |
+
def deadline(self) -> float:
|
| 630 |
+
return self._deadline
|
| 631 |
+
|
| 632 |
+
@deadline.setter
|
| 633 |
+
def deadline(self, value: float) -> None:
|
| 634 |
+
self._deadline = float(value)
|
| 635 |
+
if self._timeout_handle is not None:
|
| 636 |
+
self._timeout_handle.cancel()
|
| 637 |
+
self._timeout_handle = None
|
| 638 |
+
|
| 639 |
+
if self._active and not self._cancel_called:
|
| 640 |
+
self._timeout()
|
| 641 |
+
|
| 642 |
+
@property
|
| 643 |
+
def cancel_called(self) -> bool:
|
| 644 |
+
return self._cancel_called
|
| 645 |
+
|
| 646 |
+
@property
|
| 647 |
+
def cancelled_caught(self) -> bool:
|
| 648 |
+
return self._cancelled_caught
|
| 649 |
+
|
| 650 |
+
@property
|
| 651 |
+
def shield(self) -> bool:
|
| 652 |
+
return self._shield
|
| 653 |
+
|
| 654 |
+
@shield.setter
|
| 655 |
+
def shield(self, value: bool) -> None:
|
| 656 |
+
if self._shield != value:
|
| 657 |
+
self._shield = value
|
| 658 |
+
if not value:
|
| 659 |
+
self._restart_cancellation_in_parent()
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
#
|
| 663 |
+
# Task states
|
| 664 |
+
#
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
class TaskState:
|
| 668 |
+
"""
|
| 669 |
+
Encapsulates auxiliary task information that cannot be added to the Task instance
|
| 670 |
+
itself because there are no guarantees about its implementation.
|
| 671 |
+
"""
|
| 672 |
+
|
| 673 |
+
__slots__ = "parent_id", "cancel_scope", "__weakref__"
|
| 674 |
+
|
| 675 |
+
def __init__(self, parent_id: int | None, cancel_scope: CancelScope | None):
|
| 676 |
+
self.parent_id = parent_id
|
| 677 |
+
self.cancel_scope = cancel_scope
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
class TaskStateStore(MutableMapping["Awaitable[Any] | asyncio.Task", TaskState]):
|
| 681 |
+
def __init__(self) -> None:
|
| 682 |
+
self._task_states = WeakKeyDictionary[asyncio.Task, TaskState]()
|
| 683 |
+
self._preliminary_task_states: dict[Awaitable[Any], TaskState] = {}
|
| 684 |
+
|
| 685 |
+
def __getitem__(self, key: Awaitable[Any] | asyncio.Task, /) -> TaskState:
|
| 686 |
+
assert isinstance(key, asyncio.Task)
|
| 687 |
+
try:
|
| 688 |
+
return self._task_states[key]
|
| 689 |
+
except KeyError:
|
| 690 |
+
if coro := key.get_coro():
|
| 691 |
+
if state := self._preliminary_task_states.get(coro):
|
| 692 |
+
return state
|
| 693 |
+
|
| 694 |
+
raise KeyError(key)
|
| 695 |
+
|
| 696 |
+
def __setitem__(
|
| 697 |
+
self, key: asyncio.Task | Awaitable[Any], value: TaskState, /
|
| 698 |
+
) -> None:
|
| 699 |
+
if isinstance(key, asyncio.Task):
|
| 700 |
+
self._task_states[key] = value
|
| 701 |
+
else:
|
| 702 |
+
self._preliminary_task_states[key] = value
|
| 703 |
+
|
| 704 |
+
def __delitem__(self, key: asyncio.Task | Awaitable[Any], /) -> None:
|
| 705 |
+
if isinstance(key, asyncio.Task):
|
| 706 |
+
del self._task_states[key]
|
| 707 |
+
else:
|
| 708 |
+
del self._preliminary_task_states[key]
|
| 709 |
+
|
| 710 |
+
def __len__(self) -> int:
|
| 711 |
+
return len(self._task_states) + len(self._preliminary_task_states)
|
| 712 |
+
|
| 713 |
+
def __iter__(self) -> Iterator[Awaitable[Any] | asyncio.Task]:
|
| 714 |
+
yield from self._task_states
|
| 715 |
+
yield from self._preliminary_task_states
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
_task_states = TaskStateStore()
|
| 719 |
+
|
| 720 |
+
|
| 721 |
+
#
|
| 722 |
+
# Task groups
|
| 723 |
+
#
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
class _AsyncioTaskStatus(abc.TaskStatus):
|
| 727 |
+
def __init__(self, future: asyncio.Future, parent_id: int):
|
| 728 |
+
self._future = future
|
| 729 |
+
self._parent_id = parent_id
|
| 730 |
+
|
| 731 |
+
def started(self, value: T_contra | None = None) -> None:
|
| 732 |
+
try:
|
| 733 |
+
self._future.set_result(value)
|
| 734 |
+
except asyncio.InvalidStateError:
|
| 735 |
+
if not self._future.cancelled():
|
| 736 |
+
raise RuntimeError(
|
| 737 |
+
"called 'started' twice on the same task status"
|
| 738 |
+
) from None
|
| 739 |
+
|
| 740 |
+
task = cast(asyncio.Task, current_task())
|
| 741 |
+
_task_states[task].parent_id = self._parent_id
|
| 742 |
+
|
| 743 |
+
|
| 744 |
+
async def _wait(tasks: Iterable[asyncio.Task[object]]) -> None:
|
| 745 |
+
tasks = set(tasks)
|
| 746 |
+
waiter = get_running_loop().create_future()
|
| 747 |
+
|
| 748 |
+
def on_completion(task: asyncio.Task[object]) -> None:
|
| 749 |
+
tasks.discard(task)
|
| 750 |
+
if not tasks and not waiter.done():
|
| 751 |
+
waiter.set_result(None)
|
| 752 |
+
|
| 753 |
+
for task in tasks:
|
| 754 |
+
task.add_done_callback(on_completion)
|
| 755 |
+
del task
|
| 756 |
+
|
| 757 |
+
try:
|
| 758 |
+
await waiter
|
| 759 |
+
finally:
|
| 760 |
+
while tasks:
|
| 761 |
+
tasks.pop().remove_done_callback(on_completion)
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
class TaskGroup(abc.TaskGroup):
|
| 765 |
+
def __init__(self) -> None:
|
| 766 |
+
self.cancel_scope: CancelScope = CancelScope()
|
| 767 |
+
self._active = False
|
| 768 |
+
self._exceptions: list[BaseException] = []
|
| 769 |
+
self._tasks: set[asyncio.Task] = set()
|
| 770 |
+
|
| 771 |
+
async def __aenter__(self) -> TaskGroup:
|
| 772 |
+
self.cancel_scope.__enter__()
|
| 773 |
+
self._active = True
|
| 774 |
+
return self
|
| 775 |
+
|
| 776 |
+
async def __aexit__(
|
| 777 |
+
self,
|
| 778 |
+
exc_type: type[BaseException] | None,
|
| 779 |
+
exc_val: BaseException | None,
|
| 780 |
+
exc_tb: TracebackType | None,
|
| 781 |
+
) -> bool | None:
|
| 782 |
+
try:
|
| 783 |
+
if exc_val is not None:
|
| 784 |
+
self.cancel_scope.cancel()
|
| 785 |
+
if not isinstance(exc_val, CancelledError):
|
| 786 |
+
self._exceptions.append(exc_val)
|
| 787 |
+
|
| 788 |
+
try:
|
| 789 |
+
if self._tasks:
|
| 790 |
+
with CancelScope() as wait_scope:
|
| 791 |
+
while self._tasks:
|
| 792 |
+
try:
|
| 793 |
+
await _wait(self._tasks)
|
| 794 |
+
except CancelledError as exc:
|
| 795 |
+
# Shield the scope against further cancellation attempts,
|
| 796 |
+
# as they're not productive (#695)
|
| 797 |
+
wait_scope.shield = True
|
| 798 |
+
self.cancel_scope.cancel()
|
| 799 |
+
|
| 800 |
+
# Set exc_val from the cancellation exception if it was
|
| 801 |
+
# previously unset. However, we should not replace a native
|
| 802 |
+
# cancellation exception with one raise by a cancel scope.
|
| 803 |
+
if exc_val is None or (
|
| 804 |
+
isinstance(exc_val, CancelledError)
|
| 805 |
+
and not is_anyio_cancellation(exc)
|
| 806 |
+
):
|
| 807 |
+
exc_val = exc
|
| 808 |
+
else:
|
| 809 |
+
# If there are no child tasks to wait on, run at least one checkpoint
|
| 810 |
+
# anyway
|
| 811 |
+
await AsyncIOBackend.cancel_shielded_checkpoint()
|
| 812 |
+
|
| 813 |
+
self._active = False
|
| 814 |
+
if self._exceptions:
|
| 815 |
+
raise BaseExceptionGroup(
|
| 816 |
+
"unhandled errors in a TaskGroup", self._exceptions
|
| 817 |
+
)
|
| 818 |
+
elif exc_val:
|
| 819 |
+
raise exc_val
|
| 820 |
+
except BaseException as exc:
|
| 821 |
+
if self.cancel_scope.__exit__(type(exc), exc, exc.__traceback__):
|
| 822 |
+
return True
|
| 823 |
+
|
| 824 |
+
raise
|
| 825 |
+
|
| 826 |
+
return self.cancel_scope.__exit__(exc_type, exc_val, exc_tb)
|
| 827 |
+
finally:
|
| 828 |
+
del exc_val, exc_tb, self._exceptions
|
| 829 |
+
|
| 830 |
+
def _spawn(
|
| 831 |
+
self,
|
| 832 |
+
func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
|
| 833 |
+
args: tuple[Unpack[PosArgsT]],
|
| 834 |
+
name: object,
|
| 835 |
+
task_status_future: asyncio.Future | None = None,
|
| 836 |
+
) -> asyncio.Task:
|
| 837 |
+
def task_done(_task: asyncio.Task) -> None:
|
| 838 |
+
# task_state = _task_states[_task]
|
| 839 |
+
assert task_state.cancel_scope is not None
|
| 840 |
+
assert _task in task_state.cancel_scope._tasks
|
| 841 |
+
task_state.cancel_scope._tasks.remove(_task)
|
| 842 |
+
self._tasks.remove(task)
|
| 843 |
+
del _task_states[_task]
|
| 844 |
+
|
| 845 |
+
try:
|
| 846 |
+
exc = _task.exception()
|
| 847 |
+
except CancelledError as e:
|
| 848 |
+
while isinstance(e.__context__, CancelledError):
|
| 849 |
+
e = e.__context__
|
| 850 |
+
|
| 851 |
+
exc = e
|
| 852 |
+
|
| 853 |
+
if exc is not None:
|
| 854 |
+
# The future can only be in the cancelled state if the host task was
|
| 855 |
+
# cancelled, so return immediately instead of adding one more
|
| 856 |
+
# CancelledError to the exceptions list
|
| 857 |
+
if task_status_future is not None and task_status_future.cancelled():
|
| 858 |
+
return
|
| 859 |
+
|
| 860 |
+
if task_status_future is None or task_status_future.done():
|
| 861 |
+
if not isinstance(exc, CancelledError):
|
| 862 |
+
self._exceptions.append(exc)
|
| 863 |
+
|
| 864 |
+
if not self.cancel_scope._effectively_cancelled:
|
| 865 |
+
self.cancel_scope.cancel()
|
| 866 |
+
else:
|
| 867 |
+
task_status_future.set_exception(exc)
|
| 868 |
+
elif task_status_future is not None and not task_status_future.done():
|
| 869 |
+
task_status_future.set_exception(
|
| 870 |
+
RuntimeError("Child exited without calling task_status.started()")
|
| 871 |
+
)
|
| 872 |
+
|
| 873 |
+
if not self._active:
|
| 874 |
+
raise RuntimeError(
|
| 875 |
+
"This task group is not active; no new tasks can be started."
|
| 876 |
+
)
|
| 877 |
+
|
| 878 |
+
kwargs = {}
|
| 879 |
+
if task_status_future:
|
| 880 |
+
parent_id = id(current_task())
|
| 881 |
+
kwargs["task_status"] = _AsyncioTaskStatus(
|
| 882 |
+
task_status_future, id(self.cancel_scope._host_task)
|
| 883 |
+
)
|
| 884 |
+
else:
|
| 885 |
+
parent_id = id(self.cancel_scope._host_task)
|
| 886 |
+
|
| 887 |
+
coro = func(*args, **kwargs)
|
| 888 |
+
if not iscoroutine(coro):
|
| 889 |
+
prefix = f"{func.__module__}." if hasattr(func, "__module__") else ""
|
| 890 |
+
raise TypeError(
|
| 891 |
+
f"Expected {prefix}{func.__qualname__}() to return a coroutine, but "
|
| 892 |
+
f"the return value ({coro!r}) is not a coroutine object"
|
| 893 |
+
)
|
| 894 |
+
|
| 895 |
+
# Make the spawned task inherit the task group's cancel scope
|
| 896 |
+
_task_states[coro] = task_state = TaskState(
|
| 897 |
+
parent_id=parent_id, cancel_scope=self.cancel_scope
|
| 898 |
+
)
|
| 899 |
+
name = get_callable_name(func) if name is None else str(name)
|
| 900 |
+
try:
|
| 901 |
+
task = create_task(coro, name=name)
|
| 902 |
+
finally:
|
| 903 |
+
del _task_states[coro]
|
| 904 |
+
|
| 905 |
+
_task_states[task] = task_state
|
| 906 |
+
self.cancel_scope._tasks.add(task)
|
| 907 |
+
self._tasks.add(task)
|
| 908 |
+
|
| 909 |
+
if task.done():
|
| 910 |
+
# This can happen with eager task factories
|
| 911 |
+
task_done(task)
|
| 912 |
+
else:
|
| 913 |
+
task.add_done_callback(task_done)
|
| 914 |
+
|
| 915 |
+
return task
|
| 916 |
+
|
| 917 |
+
    def start_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> None:
        """Start a child task in this task group without waiting for it.

        :param func: coroutine function to run as the new task
        :param args: positional arguments passed through to ``func``
        :param name: optional name for the task (stringified if not ``None``)
        """
        self._spawn(func, args, name)
|
| 924 |
+
|
| 925 |
+
    async def start(
        self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
    ) -> Any:
        """Start a child task and wait until it signals readiness.

        Returns the value the child passed to ``task_status.started()``.
        If this coroutine is cancelled while waiting, the child task is
        cancelled and awaited (under a shield) before the cancellation is
        re-raised, so the child never outlives the ``start()`` call.
        """
        future: asyncio.Future = asyncio.Future()
        task = self._spawn(func, args, name, future)

        # If the task raises an exception after sending a start value without a switch
        # point between, the task group is cancelled and this method never proceeds to
        # process the completed future. That's why we have to have a shielded cancel
        # scope here.
        try:
            return await future
        except CancelledError:
            # Cancel the task and wait for it to exit before returning
            task.cancel()
            with CancelScope(shield=True), suppress(CancelledError):
                await task

            raise
|
| 944 |
+
|
| 945 |
+
|
| 946 |
+
#
|
| 947 |
+
# Threads
|
| 948 |
+
#
|
| 949 |
+
|
| 950 |
+
# (result, exception) pair used to hand a worker outcome back; exactly one of
# the two elements is expected to be non-None.
_Retval_Queue_Type = tuple[Optional[T_Retval], Optional[BaseException]]
|
| 951 |
+
|
| 952 |
+
|
| 953 |
+
class WorkerThread(Thread):
    """A reusable worker thread that runs synchronous callables on behalf of an
    asyncio event loop and reports results back via ``loop.call_soon_threadsafe``.
    """

    # Seconds a worker may sit idle before being eligible for retirement
    # (enforced elsewhere; only declared here).
    MAX_IDLE_TIME = 10  # seconds

    def __init__(
        self,
        root_task: asyncio.Task,
        workers: set[WorkerThread],
        idle_workers: deque[WorkerThread],
    ):
        super().__init__(name="AnyIO worker thread")
        self.root_task = root_task
        self.workers = workers
        self.idle_workers = idle_workers
        self.loop = root_task._loop
        # Work items: (context, func, args, future, cancel_scope); None is the
        # shutdown sentinel. maxsize=2 keeps the producer from racing far ahead.
        self.queue: Queue[
            tuple[Context, Callable, tuple, asyncio.Future, CancelScope] | None
        ] = Queue(2)
        self.idle_since = AsyncIOBackend.current_time()
        self.stopping = False

    def _report_result(
        self, future: asyncio.Future, result: Any, exc: BaseException | None
    ) -> None:
        """Deliver a finished job's outcome to its future.

        Runs on the event loop thread (scheduled via call_soon_threadsafe),
        and re-registers this worker as idle unless it is stopping.
        """
        self.idle_since = AsyncIOBackend.current_time()
        if not self.stopping:
            self.idle_workers.append(self)

        if not future.cancelled():
            if exc is not None:
                # StopIteration cannot propagate out of a Future/coroutine
                # machinery cleanly, so wrap it in a RuntimeError.
                if isinstance(exc, StopIteration):
                    new_exc = RuntimeError("coroutine raised StopIteration")
                    new_exc.__cause__ = exc
                    exc = new_exc

                future.set_exception(exc)
            else:
                future.set_result(result)

    def run(self) -> None:
        """Thread main loop: execute queued jobs until the shutdown sentinel."""
        with claim_worker_thread(AsyncIOBackend, self.loop):
            while True:
                item = self.queue.get()
                if item is None:
                    # Shutdown command received
                    return

                context, func, args, future, cancel_scope = item
                # Skip work whose future was already cancelled before we got to it.
                if not future.cancelled():
                    result = None
                    exception: BaseException | None = None
                    threadlocals.current_cancel_scope = cancel_scope
                    try:
                        result = context.run(func, *args)
                    except BaseException as exc:
                        exception = exc
                    finally:
                        del threadlocals.current_cancel_scope

                    # The loop may have been closed while we were working;
                    # reporting to a closed loop would raise.
                    if not self.loop.is_closed():
                        self.loop.call_soon_threadsafe(
                            self._report_result, future, result, exception
                        )

                self.queue.task_done()

    def stop(self, f: asyncio.Task | None = None) -> None:
        """Request shutdown and deregister this worker from both registries.

        The unused ``f`` parameter allows this to be used directly as a task
        done-callback.
        """
        self.stopping = True
        self.queue.put_nowait(None)
        self.workers.discard(self)
        try:
            self.idle_workers.remove(self)
        except ValueError:
            # Not idle at the moment; nothing to remove
            pass
|
| 1026 |
+
|
| 1027 |
+
|
| 1028 |
+
# Per-event-loop registries of worker threads: the deque tracks currently idle
# workers available for reuse, the set tracks all live workers.
_threadpool_idle_workers: RunVar[deque[WorkerThread]] = RunVar(
    "_threadpool_idle_workers"
)
_threadpool_workers: RunVar[set[WorkerThread]] = RunVar("_threadpool_workers")
|
| 1032 |
+
|
| 1033 |
+
|
| 1034 |
+
class BlockingPortal(abc.BlockingPortal):
    """asyncio implementation of the blocking portal."""

    def __new__(cls) -> BlockingPortal:
        # Bypass any __new__ checks on the abstract base; construction is
        # done directly here.
        return object.__new__(cls)

    def __init__(self) -> None:
        super().__init__()
        # Captured at construction time so external threads can target it.
        self._loop = get_running_loop()

    def _spawn_task_from_thread(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        name: object,
        future: Future[T_Retval],
    ) -> None:
        """Schedule ``func`` as a task in the portal's task group.

        Called from a foreign thread; hops onto the portal's event loop via
        ``run_sync_from_thread`` and delegates execution to ``_call_func``,
        which completes ``future`` with the eventual outcome.
        """
        AsyncIOBackend.run_sync_from_thread(
            partial(self._task_group.start_soon, name=name),
            (self._call_func, func, args, kwargs, future),
            self._loop,
        )
|
| 1055 |
+
|
| 1056 |
+
|
| 1057 |
+
#
|
| 1058 |
+
# Subprocesses
|
| 1059 |
+
#
|
| 1060 |
+
|
| 1061 |
+
|
| 1062 |
+
@dataclass(eq=False)
class StreamReaderWrapper(abc.ByteReceiveStream):
    """Adapts an :class:`asyncio.StreamReader` to the byte-receive-stream API."""

    _stream: asyncio.StreamReader

    async def receive(self, max_bytes: int = 65536) -> bytes:
        # An empty read from StreamReader signals EOF.
        chunk = await self._stream.read(max_bytes)
        if not chunk:
            raise EndOfStream

        return chunk

    async def aclose(self) -> None:
        # Wake up any pending reader with a "closed" error, then yield once.
        self._stream.set_exception(ClosedResourceError())
        await AsyncIOBackend.checkpoint()
|
| 1076 |
+
|
| 1077 |
+
|
| 1078 |
+
@dataclass(eq=False)
class StreamWriterWrapper(abc.ByteSendStream):
    """Adapts an :class:`asyncio.StreamWriter` to the byte-send-stream API."""

    _stream: asyncio.StreamWriter

    async def send(self, item: bytes) -> None:
        # Buffer the bytes, then apply flow control by waiting for the drain.
        self._stream.write(item)
        await self._stream.drain()

    async def aclose(self) -> None:
        self._stream.close()
        # Yield to the event loop so the close can start taking effect.
        await AsyncIOBackend.checkpoint()
|
| 1089 |
+
|
| 1090 |
+
|
| 1091 |
+
@dataclass(eq=False)
class Process(abc.Process):
    """asyncio implementation of the abstract subprocess wrapper."""

    _process: asyncio.subprocess.Process
    _stdin: StreamWriterWrapper | None
    _stdout: StreamReaderWrapper | None
    _stderr: StreamReaderWrapper | None

    async def aclose(self) -> None:
        """Close the standard streams and wait for the process to exit.

        Stream closing is shielded from cancellation; the shield is then
        dropped so the wait itself is cancellable. If the wait is interrupted
        by any exception, the process is killed and reaped (shielded again)
        before the exception propagates.
        """
        with CancelScope(shield=True) as scope:
            if self._stdin:
                await self._stdin.aclose()
            if self._stdout:
                await self._stdout.aclose()
            if self._stderr:
                await self._stderr.aclose()

            scope.shield = False
            try:
                await self.wait()
            except BaseException:
                scope.shield = True
                self.kill()
                await self.wait()
                raise

    async def wait(self) -> int:
        # Returns the process exit code.
        return await self._process.wait()

    def terminate(self) -> None:
        self._process.terminate()

    def kill(self) -> None:
        self._process.kill()

    def send_signal(self, signal: int) -> None:
        self._process.send_signal(signal)

    @property
    def pid(self) -> int:
        return self._process.pid

    @property
    def returncode(self) -> int | None:
        # None while the process is still running.
        return self._process.returncode

    @property
    def stdin(self) -> abc.ByteSendStream | None:
        return self._stdin

    @property
    def stdout(self) -> abc.ByteReceiveStream | None:
        return self._stdout

    @property
    def stderr(self) -> abc.ByteReceiveStream | None:
        return self._stderr
|
| 1147 |
+
|
| 1148 |
+
|
| 1149 |
+
def _forcibly_shutdown_process_pool_on_exit(
    workers: set[Process], _task: object
) -> None:
    """
    Forcibly shuts down worker processes belonging to this event loop."""
    # Child watchers were removed in Python 3.12; only look one up on older
    # versions, and tolerate policies that don't implement them.
    child_watcher: asyncio.AbstractChildWatcher | None = None
    if sys.version_info < (3, 12):
        try:
            child_watcher = asyncio.get_event_loop_policy().get_child_watcher()
        except NotImplementedError:
            pass

    # Close as much as possible (w/o async/await) to avoid warnings
    for process in workers:
        # NOTE(review): this skips processes that are still running
        # (returncode is None) and tears down the ones that have already
        # exited — that looks inverted for a "forcible shutdown"; confirm
        # against upstream intent before changing.
        if process.returncode is None:
            continue

        process._stdin._stream._transport.close()  # type: ignore[union-attr]
        process._stdout._stream._transport.close()  # type: ignore[union-attr]
        process._stderr._stream._transport.close()  # type: ignore[union-attr]
        process.kill()
        if child_watcher:
            child_watcher.remove_child_handler(process.pid)
|
| 1172 |
+
|
| 1173 |
+
|
| 1174 |
+
async def _shutdown_process_pool_on_exit(workers: set[abc.Process]) -> None:
    """
    Shuts down worker processes belonging to this event loop.

    NOTE: this only works when the event loop was started using asyncio.run() or
    anyio.run().

    """
    process: abc.Process
    try:
        # Sleep forever; cancellation of this task (at loop shutdown) is the
        # signal to clean up.
        await sleep(math.inf)
    except asyncio.CancelledError:
        # Kill anything still running, then close every process's resources.
        for process in workers:
            if process.returncode is None:
                process.kill()

        for process in workers:
            await process.aclose()
|
| 1192 |
+
|
| 1193 |
+
|
| 1194 |
+
#
|
| 1195 |
+
# Sockets and networking
|
| 1196 |
+
#
|
| 1197 |
+
|
| 1198 |
+
|
| 1199 |
+
class StreamProtocol(asyncio.Protocol):
    """Transport protocol backing :class:`SocketStream`.

    Buffers incoming chunks in ``read_queue`` and signals readers/writers via
    the two events.
    """

    read_queue: deque[bytes]
    read_event: asyncio.Event
    write_event: asyncio.Event
    exception: Exception | None = None
    is_at_eof: bool = False

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque()
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()
        # Zero buffer limits so pause_writing() fires as soon as anything is
        # buffered, giving send() precise backpressure.
        cast(asyncio.Transport, transport).set_write_buffer_limits(0)

    def connection_lost(self, exc: Exception | None) -> None:
        # Record the failure (if any) and wake up any pending reader/writer.
        if exc:
            self.exception = BrokenResourceError()
            self.exception.__cause__ = exc

        self.read_event.set()
        self.write_event.set()

    def data_received(self, data: bytes) -> None:
        # ProactorEventloop sometimes sends bytearray instead of bytes
        self.read_queue.append(bytes(data))
        self.read_event.set()

    def eof_received(self) -> bool | None:
        self.is_at_eof = True
        self.read_event.set()
        # Returning True keeps the transport open for writing (half-close).
        return True

    def pause_writing(self) -> None:
        # Replace (rather than clear) the event so waiters already past the
        # old event are unaffected.
        self.write_event = asyncio.Event()

    def resume_writing(self) -> None:
        self.write_event.set()
|
| 1236 |
+
|
| 1237 |
+
|
| 1238 |
+
class DatagramProtocol(asyncio.DatagramProtocol):
    """Transport protocol backing the UDP socket classes.

    Buffers incoming datagrams (with sender address) in a bounded queue.
    """

    read_queue: deque[tuple[bytes, IPSockAddrType]]
    read_event: asyncio.Event
    write_event: asyncio.Event
    exception: Exception | None = None

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        # Bounded so a flood of datagrams can't grow memory without limit;
        # oldest packets are silently dropped once full.
        self.read_queue = deque(maxlen=100)  # arbitrary value
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()

    def connection_lost(self, exc: Exception | None) -> None:
        # Wake any pending reader/writer so they can observe the closure.
        self.read_event.set()
        self.write_event.set()

    def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
        # Normalize 4-tuple IPv6 addresses before queueing.
        addr = convert_ipv6_sockaddr(addr)
        self.read_queue.append((data, addr))
        self.read_event.set()

    def error_received(self, exc: Exception) -> None:
        self.exception = exc

    def pause_writing(self) -> None:
        self.write_event.clear()

    def resume_writing(self) -> None:
        self.write_event.set()
|
| 1267 |
+
|
| 1268 |
+
|
| 1269 |
+
class SocketStream(abc.SocketStream):
    """Transport/protocol based TCP stream implementation."""

    def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
        self._transport = transport
        self._protocol = protocol
        # Guards enforce single-consumer / single-producer access.
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def receive(self, max_bytes: int = 65536) -> bytes:
        """Receive up to ``max_bytes`` from the stream.

        :raises ClosedResourceError: if the stream was closed locally
        :raises EndOfStream: on EOF from the peer
        """
        with self._receive_guard:
            if (
                not self._protocol.read_event.is_set()
                and not self._transport.is_closing()
                and not self._protocol.is_at_eof
            ):
                # Reading stays paused except while actively waiting for data,
                # so the protocol's queue cannot grow unbounded.
                self._transport.resume_reading()
                await self._protocol.read_event.wait()
                self._transport.pause_reading()
            else:
                # Data (or EOF/closure) is already available; still provide a
                # cancellation/scheduling checkpoint.
                await AsyncIOBackend.checkpoint()

            try:
                chunk = self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                elif self._protocol.exception:
                    raise self._protocol.exception from None
                else:
                    raise EndOfStream from None

            if len(chunk) > max_bytes:
                # Split the oversized chunk
                chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
                self._protocol.read_queue.appendleft(leftover)

            # If the read queue is empty, clear the flag so that the next call will
            # block until data is available
            if not self._protocol.read_queue:
                self._protocol.read_event.clear()

            return chunk

    async def send(self, item: bytes) -> None:
        """Send ``item`` and wait for the write buffer to drain."""
        with self._send_guard:
            await AsyncIOBackend.checkpoint()

            if self._closed:
                raise ClosedResourceError
            elif self._protocol.exception is not None:
                raise self._protocol.exception

            try:
                self._transport.write(item)
            except RuntimeError as exc:
                # Writing to a closing transport raises RuntimeError; report
                # it as a broken resource. Re-raise anything else unchanged.
                if self._transport.is_closing():
                    raise BrokenResourceError from exc
                else:
                    raise

            # Backpressure: wait until the protocol reports writability.
            await self._protocol.write_event.wait()

    async def send_eof(self) -> None:
        # Half-close the write side; OSError here means the transport doesn't
        # support or no longer permits it, which is treated as best-effort.
        try:
            self._transport.write_eof()
        except OSError:
            pass

    async def aclose(self) -> None:
        """Close the stream, attempting a graceful shutdown first."""
        if not self._transport.is_closing():
            self._closed = True
            try:
                self._transport.write_eof()
            except OSError:
                pass

            self._transport.close()
            # Give the loop one cycle to flush; then force-abort whatever is
            # left so close cannot hang.
            await sleep(0)
            self._transport.abort()
|
| 1352 |
+
|
| 1353 |
+
|
| 1354 |
+
class _RawSocketMixin:
    """Shared plumbing for socket classes that do non-blocking I/O directly on
    a raw socket, using loop reader/writer callbacks for readiness waits.
    """

    # Pending readiness futures; deleted by their done-callbacks.
    _receive_future: asyncio.Future | None = None
    _send_future: asyncio.Future | None = None
    _closing = False

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        """Return a future that completes when the socket becomes readable."""
        def callback(f: object) -> None:
            # Clean up regardless of how the future finished (result or
            # cancellation).
            del self._receive_future
            loop.remove_reader(self.__raw_socket)

        f = self._receive_future = asyncio.Future()
        loop.add_reader(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
        """Return a future that completes when the socket becomes writable."""
        def callback(f: object) -> None:
            del self._send_future
            loop.remove_writer(self.__raw_socket)

        f = self._send_future = asyncio.Future()
        loop.add_writer(self.__raw_socket, f.set_result, None)
        f.add_done_callback(callback)
        return f

    async def aclose(self) -> None:
        """Close the socket and release anyone blocked on readiness waits."""
        if not self._closing:
            self._closing = True
            if self.__raw_socket.fileno() != -1:
                self.__raw_socket.close()

            # Wake pending waiters; their subsequent socket call will fail and
            # be reported as ClosedResourceError (via _closing).
            if self._receive_future:
                self._receive_future.set_result(None)
            if self._send_future:
                self._send_future.set_result(None)
|
| 1398 |
+
|
| 1399 |
+
|
| 1400 |
+
class UNIXSocketStream(_RawSocketMixin, abc.UNIXSocketStream):
    """UNIX domain stream socket, implemented with direct non-blocking socket
    calls plus loop readiness waits (no transport/protocol pair).
    """

    async def send_eof(self) -> None:
        # Half-close the write direction.
        with self._send_guard:
            self._raw_socket.shutdown(socket.SHUT_WR)

    async def receive(self, max_bytes: int = 65536) -> bytes:
        """Receive up to ``max_bytes``; EAFP loop retrying on EWOULDBLOCK."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._receive_guard:
            while True:
                try:
                    data = self._raw_socket.recv(max_bytes)
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    # Distinguish "we closed it" from a genuine failure.
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    if not data:
                        raise EndOfStream

                    return data

    async def send(self, item: bytes) -> None:
        """Send all of ``item``, retrying partial writes until done."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._send_guard:
            view = memoryview(item)
            while view:
                try:
                    bytes_sent = self._raw_socket.send(view)
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    # Advance past the bytes actually accepted by the kernel.
                    view = view[bytes_sent:]

    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        """Receive a message plus up to ``maxfds`` file descriptors (SCM_RIGHTS).

        :raises ValueError: on invalid ``msglen``/``maxfds``
        :raises RuntimeError: if unexpected ancillary data arrives
        """
        if not isinstance(msglen, int) or msglen < 0:
            raise ValueError("msglen must be a non-negative integer")
        if not isinstance(maxfds, int) or maxfds < 1:
            raise ValueError("maxfds must be a positive integer")

        loop = get_running_loop()
        fds = array.array("i")
        await AsyncIOBackend.checkpoint()
        with self._receive_guard:
            while True:
                try:
                    message, ancdata, flags, addr = self._raw_socket.recvmsg(
                        msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
                    )
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    if not message and not ancdata:
                        raise EndOfStream

                    break

        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
                raise RuntimeError(
                    f"Received unexpected ancillary data; message = {message!r}, "
                    f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
                )

            # Truncate to whole ints in case the payload was cut mid-descriptor.
            fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])

        return message, list(fds)

    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        """Send ``message`` together with file descriptors via SCM_RIGHTS.

        ``fds`` may mix raw integer descriptors and file-like objects.
        """
        if not message:
            raise ValueError("message must not be empty")
        if not fds:
            raise ValueError("fds must not be empty")

        loop = get_running_loop()
        filenos: list[int] = []
        for fd in fds:
            if isinstance(fd, int):
                filenos.append(fd)
            elif isinstance(fd, IOBase):
                filenos.append(fd.fileno())

        fdarray = array.array("i", filenos)
        await AsyncIOBackend.checkpoint()
        with self._send_guard:
            while True:
                try:
                    # The ignore can be removed after mypy picks up
                    # https://github.com/python/typeshed/pull/5545
                    self._raw_socket.sendmsg(
                        [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)]
                    )
                    break
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
|
| 1514 |
+
|
| 1515 |
+
|
| 1516 |
+
class TCPSocketListener(abc.SocketListener):
    """Accepts inbound TCP connections and wraps them in SocketStream."""

    # Cancel scope of the accept() currently in flight, if any; lets aclose()
    # interrupt a pending accept.
    _accept_scope: CancelScope | None = None
    _closed = False

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
        self._accept_guard = ResourceGuard("accepting connections from")

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    async def accept(self) -> abc.SocketStream:
        """Accept one connection; raises ClosedResourceError if closed."""
        if self._closed:
            raise ClosedResourceError

        with self._accept_guard:
            await AsyncIOBackend.checkpoint()
            with CancelScope() as self._accept_scope:
                try:
                    client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
                except asyncio.CancelledError:
                    # Workaround for https://bugs.python.org/issue41317
                    try:
                        self._loop.remove_reader(self._raw_socket)
                    except (ValueError, NotImplementedError):
                        pass

                    # A cancellation triggered by aclose() surfaces as closure.
                    if self._closed:
                        raise ClosedResourceError from None

                    raise
                finally:
                    self._accept_scope = None

        # Disable Nagle's algorithm on the accepted connection.
        client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        transport, protocol = await self._loop.connect_accepted_socket(
            StreamProtocol, client_sock
        )
        return SocketStream(transport, protocol)

    async def aclose(self) -> None:
        """Close the listener, interrupting any accept() in progress."""
        if self._closed:
            return

        self._closed = True
        if self._accept_scope:
            # Workaround for https://bugs.python.org/issue41317
            try:
                self._loop.remove_reader(self._raw_socket)
            except (ValueError, NotImplementedError):
                pass

            self._accept_scope.cancel()
            # Yield once so the cancelled accept() can unwind first.
            await sleep(0)

        self._raw_socket.close()
|
| 1574 |
+
|
| 1575 |
+
|
| 1576 |
+
class UNIXSocketListener(abc.SocketListener):
    """Accepts inbound UNIX domain connections as UNIXSocketStream objects."""

    def __init__(self, raw_socket: socket.socket):
        self.__raw_socket = raw_socket
        self._loop = get_running_loop()
        self._accept_guard = ResourceGuard("accepting connections from")
        self._closed = False

    async def accept(self) -> abc.SocketStream:
        """Accept one connection, waiting via a loop reader when none pending."""
        await AsyncIOBackend.checkpoint()
        with self._accept_guard:
            while True:
                try:
                    client_sock, _ = self.__raw_socket.accept()
                    client_sock.setblocking(False)
                    return UNIXSocketStream(client_sock)
                except BlockingIOError:
                    # Nothing pending: register a one-shot readability wait.
                    f: asyncio.Future = asyncio.Future()
                    self._loop.add_reader(self.__raw_socket, f.set_result, None)
                    f.add_done_callback(
                        lambda _: self._loop.remove_reader(self.__raw_socket)
                    )
                    await f
                except OSError as exc:
                    if self._closed:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc

    async def aclose(self) -> None:
        self._closed = True
        self.__raw_socket.close()

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket
|
| 1611 |
+
|
| 1612 |
+
|
| 1613 |
+
class UDPSocket(abc.UDPSocket):
    """Unconnected UDP socket; datagrams carry an explicit peer address."""

    def __init__(
        self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
    ):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> tuple[bytes, IPSockAddrType]:
        """Receive one datagram as ``(payload, sender_address)``."""
        with self._receive_guard:
            await AsyncIOBackend.checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                return self._protocol.read_queue.popleft()
            except IndexError:
                # Woken with nothing queued: the transport is gone.
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

    async def send(self, item: UDPPacketType) -> None:
        """Send one ``(payload, destination_address)`` datagram."""
        with self._send_guard:
            await AsyncIOBackend.checkpoint()
            # Honor transport flow control before sending.
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(*item)
|
| 1659 |
+
|
| 1660 |
+
|
| 1661 |
+
class ConnectedUDPSocket(abc.ConnectedUDPSocket):
    """UDP socket connected to a fixed peer; datagrams are bare payloads."""

    def __init__(
        self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
    ):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info("socket")

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> bytes:
        """Receive one datagram payload from the connected peer."""
        with self._receive_guard:
            await AsyncIOBackend.checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                packet = self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

            # Queue entries are (payload, addr); only the payload is returned.
            return packet[0]

    async def send(self, item: bytes) -> None:
        """Send one datagram to the connected peer."""
        with self._send_guard:
            await AsyncIOBackend.checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(item)
|
| 1709 |
+
|
| 1710 |
+
|
| 1711 |
+
class UNIXDatagramSocket(_RawSocketMixin, abc.UNIXDatagramSocket):
    """Unconnected UNIX datagram socket using direct non-blocking I/O."""

    async def receive(self) -> UNIXDatagramPacketType:
        """Receive one ``(payload, sender_path)`` datagram."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._receive_guard:
            while True:
                try:
                    data = self._raw_socket.recvfrom(65536)
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    return data

    async def send(self, item: UNIXDatagramPacketType) -> None:
        """Send one ``(payload, destination_path)`` datagram."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._send_guard:
            while True:
                try:
                    self._raw_socket.sendto(*item)
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    return
|
| 1745 |
+
|
| 1746 |
+
|
| 1747 |
+
class ConnectedUNIXDatagramSocket(_RawSocketMixin, abc.ConnectedUNIXDatagramSocket):
    """UNIX datagram socket connected to a fixed peer path."""

    async def receive(self) -> bytes:
        """Receive one datagram payload from the connected peer."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._receive_guard:
            while True:
                try:
                    data = self._raw_socket.recv(65536)
                except BlockingIOError:
                    await self._wait_until_readable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    return data

    async def send(self, item: bytes) -> None:
        """Send one datagram to the connected peer."""
        loop = get_running_loop()
        await AsyncIOBackend.checkpoint()
        with self._send_guard:
            while True:
                try:
                    self._raw_socket.send(item)
                except BlockingIOError:
                    await self._wait_until_writable(loop)
                except OSError as exc:
                    if self._closing:
                        raise ClosedResourceError from None
                    else:
                        raise BrokenResourceError from exc
                else:
                    return
|
| 1781 |
+
|
| 1782 |
+
|
| 1783 |
+
# Per-event-loop maps of file descriptor -> wakeup event, used by the
# readable/writable wait helpers above.
_read_events: RunVar[dict[int, asyncio.Event]] = RunVar("read_events")
_write_events: RunVar[dict[int, asyncio.Event]] = RunVar("write_events")
|
| 1785 |
+
|
| 1786 |
+
|
| 1787 |
+
#
|
| 1788 |
+
# Synchronization
|
| 1789 |
+
#
|
| 1790 |
+
|
| 1791 |
+
|
| 1792 |
+
class Event(BaseEvent):
    """AnyIO event implemented as a thin wrapper around :class:`asyncio.Event`."""

    def __new__(cls) -> Event:
        # Bypass BaseEvent.__new__ (which would dispatch to the backend again)
        return object.__new__(cls)

    def __init__(self) -> None:
        self._event = asyncio.Event()

    def set(self) -> None:
        """Mark the event as set, waking all current waiters."""
        self._event.set()

    def is_set(self) -> bool:
        """Return ``True`` if the event has been set."""
        return self._event.is_set()

    async def wait(self) -> None:
        """Wait until the event is set; always yields to the event loop."""
        if not self.is_set():
            await self._event.wait()
            return

        # Already set: still honor checkpoint semantics before returning
        await AsyncIOBackend.checkpoint()

    def statistics(self) -> EventStatistics:
        """Return statistics about tasks currently waiting on this event."""
        waiter_count = len(self._event._waiters)
        return EventStatistics(waiter_count)
|
| 1813 |
+
|
| 1814 |
+
|
| 1815 |
+
class Lock(BaseLock):
    """Non-reentrant task lock with FIFO waiter hand-off.

    Ownership is transferred directly to the next non-cancelled waiter on
    release, so the lock is never observed as unowned while waiters exist.
    """

    def __new__(cls, *, fast_acquire: bool = False) -> Lock:
        # Bypass BaseLock.__new__ (backend dispatch)
        return object.__new__(cls)

    def __init__(self, *, fast_acquire: bool = False) -> None:
        # fast_acquire skips the post-acquire checkpoint on the uncontended path
        self._fast_acquire = fast_acquire
        self._owner_task: asyncio.Task | None = None
        # FIFO queue of (waiting task, future resolved on hand-off)
        self._waiters: deque[tuple[asyncio.Task, asyncio.Future]] = deque()

    async def acquire(self) -> None:
        """Acquire the lock, blocking until it is available."""
        task = cast(asyncio.Task, current_task())
        if self._owner_task is None and not self._waiters:
            await AsyncIOBackend.checkpoint_if_cancelled()
            self._owner_task = task

            # Unless on the "fast path", yield control of the event loop so that other
            # tasks can run too
            if not self._fast_acquire:
                try:
                    await AsyncIOBackend.cancel_shielded_checkpoint()
                except CancelledError:
                    self.release()
                    raise

            return

        if self._owner_task == task:
            raise RuntimeError("Attempted to acquire an already held Lock")

        fut: asyncio.Future[None] = asyncio.Future()
        item = task, fut
        self._waiters.append(item)
        try:
            await fut
        except CancelledError:
            self._waiters.remove(item)
            # Ownership may already have been handed to us before cancellation
            # was delivered; give it back so the lock doesn't deadlock
            if self._owner_task is task:
                self.release()

            raise

        self._waiters.remove(item)

    def acquire_nowait(self) -> None:
        """Acquire the lock without blocking; raise WouldBlock if unavailable."""
        task = cast(asyncio.Task, current_task())
        if self._owner_task is None and not self._waiters:
            self._owner_task = task
            return

        if self._owner_task is task:
            raise RuntimeError("Attempted to acquire an already held Lock")

        raise WouldBlock

    def locked(self) -> bool:
        """Return ``True`` if any task currently holds the lock."""
        return self._owner_task is not None

    def release(self) -> None:
        """Release the lock, handing it to the first non-cancelled waiter."""
        if self._owner_task != current_task():
            raise RuntimeError("The current task is not holding this lock")

        for task, fut in self._waiters:
            if not fut.cancelled():
                # Hand ownership directly to the waiter before waking it
                self._owner_task = task
                fut.set_result(None)
                return

        self._owner_task = None

    def statistics(self) -> LockStatistics:
        """Return owner and waiter statistics for this lock."""
        task_info = AsyncIOTaskInfo(self._owner_task) if self._owner_task else None
        return LockStatistics(self.locked(), task_info, len(self._waiters))
|
| 1887 |
+
|
| 1888 |
+
|
| 1889 |
+
class Semaphore(BaseSemaphore):
    """Counting semaphore with FIFO waiter wake-up."""

    def __new__(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> Semaphore:
        # Bypass BaseSemaphore.__new__ (backend dispatch)
        return object.__new__(cls)

    def __init__(
        self,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ):
        super().__init__(initial_value, max_value=max_value)
        self._value = initial_value
        self._max_value = max_value
        # fast_acquire skips the post-acquire checkpoint on the uncontended path
        self._fast_acquire = fast_acquire
        # FIFO queue of futures, one per waiting task
        self._waiters: deque[asyncio.Future[None]] = deque()

    async def acquire(self) -> None:
        """Decrement the semaphore, blocking while the value is zero."""
        if self._value > 0 and not self._waiters:
            await AsyncIOBackend.checkpoint_if_cancelled()
            self._value -= 1

            # Unless on the "fast path", yield control of the event loop so that other
            # tasks can run too
            if not self._fast_acquire:
                try:
                    await AsyncIOBackend.cancel_shielded_checkpoint()
                except CancelledError:
                    self.release()
                    raise

            return

        fut: asyncio.Future[None] = asyncio.Future()
        self._waiters.append(fut)
        try:
            await fut
        except CancelledError:
            try:
                self._waiters.remove(fut)
            except ValueError:
                # release() already removed us and handed over a token;
                # return it so the count stays consistent
                self.release()

            raise

    def acquire_nowait(self) -> None:
        """Decrement the semaphore without blocking; raise WouldBlock at zero."""
        if self._value == 0:
            raise WouldBlock

        self._value -= 1

    def release(self) -> None:
        """Increment the semaphore or hand the token to the next waiter."""
        if self._max_value is not None and self._value == self._max_value:
            raise ValueError("semaphore released too many times")

        for fut in self._waiters:
            if not fut.cancelled():
                # Hand the token directly to the waiter instead of bumping the
                # counter, so no third task can steal it in between
                fut.set_result(None)
                self._waiters.remove(fut)
                return

        self._value += 1

    @property
    def value(self) -> int:
        # Current number of available tokens
        return self._value

    @property
    def max_value(self) -> int | None:
        # Upper bound on the value, or None if unbounded
        return self._max_value

    def statistics(self) -> SemaphoreStatistics:
        """Return the number of tasks waiting on this semaphore."""
        return SemaphoreStatistics(len(self._waiters))
|
| 1968 |
+
|
| 1969 |
+
|
| 1970 |
+
class CapacityLimiter(BaseCapacityLimiter):
    """Limits how many concurrent borrowers may hold a token at once.

    Waiters are queued in an ``OrderedDict`` keyed by borrower so they can be
    woken in FIFO order and removed by key on cancellation.
    """

    _total_tokens: float = 0

    def __new__(cls, total_tokens: float) -> CapacityLimiter:
        # Bypass BaseCapacityLimiter.__new__ (backend dispatch)
        return object.__new__(cls)

    def __init__(self, total_tokens: float):
        self._borrowers: set[Any] = set()
        self._wait_queue: OrderedDict[Any, asyncio.Event] = OrderedDict()
        # Goes through the validating property setter
        self.total_tokens = total_tokens

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    @property
    def total_tokens(self) -> float:
        # Total capacity (int, or math.inf for unlimited)
        return self._total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        if not isinstance(value, int) and not math.isinf(value):
            raise TypeError("total_tokens must be an int or math.inf")
        if value < 1:
            raise ValueError("total_tokens must be >= 1")

        waiters_to_notify = max(value - self._total_tokens, 0)
        self._total_tokens = value

        # Notify waiting tasks that they have acquired the limiter
        while self._wait_queue and waiters_to_notify:
            event = self._wait_queue.popitem(last=False)[1]
            event.set()
            waiters_to_notify -= 1

    @property
    def borrowed_tokens(self) -> int:
        # Number of tokens currently borrowed
        return len(self._borrowers)

    @property
    def available_tokens(self) -> float:
        # Remaining capacity
        return self._total_tokens - len(self._borrowers)

    def acquire_nowait(self) -> None:
        """Acquire a token for the current task without blocking."""
        self.acquire_on_behalf_of_nowait(current_task())

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        """Acquire a token for *borrower* without blocking.

        Raises WouldBlock if at capacity or if other tasks are already queued
        (preserves FIFO fairness).
        """
        if borrower in self._borrowers:
            raise RuntimeError(
                "this borrower is already holding one of this CapacityLimiter's "
                "tokens"
            )

        if self._wait_queue or len(self._borrowers) >= self._total_tokens:
            raise WouldBlock

        self._borrowers.add(borrower)

    async def acquire(self) -> None:
        """Acquire a token for the current task, blocking if necessary."""
        return await self.acquire_on_behalf_of(current_task())

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """Acquire a token for *borrower*, blocking if necessary."""
        await AsyncIOBackend.checkpoint_if_cancelled()
        try:
            self.acquire_on_behalf_of_nowait(borrower)
        except WouldBlock:
            event = asyncio.Event()
            self._wait_queue[borrower] = event
            try:
                await event.wait()
            except BaseException:
                # Cancelled while queued: remove ourselves from the queue
                self._wait_queue.pop(borrower, None)
                raise

            self._borrowers.add(borrower)
        else:
            # Uncontended path: still yield to the event loop once
            try:
                await AsyncIOBackend.cancel_shielded_checkpoint()
            except BaseException:
                self.release()
                raise

    def release(self) -> None:
        """Release the token held by the current task."""
        self.release_on_behalf_of(current_task())

    def release_on_behalf_of(self, borrower: object) -> None:
        """Release the token held by *borrower*."""
        try:
            self._borrowers.remove(borrower)
        except KeyError:
            raise RuntimeError(
                "this borrower isn't holding any of this CapacityLimiter's tokens"
            ) from None

        # Notify the next task in line if this limiter has free capacity now
        if self._wait_queue and len(self._borrowers) < self._total_tokens:
            event = self._wait_queue.popitem(last=False)[1]
            event.set()

    def statistics(self) -> CapacityLimiterStatistics:
        """Return borrow/wait statistics for this limiter."""
        return CapacityLimiterStatistics(
            self.borrowed_tokens,
            self.total_tokens,
            tuple(self._borrowers),
            len(self._wait_queue),
        )
|
| 2082 |
+
|
| 2083 |
+
|
| 2084 |
+
# Per-event-loop default limiter for worker threads (lazily initialized)
_default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter")
|
| 2085 |
+
|
| 2086 |
+
|
| 2087 |
+
#
|
| 2088 |
+
# Operating system signals
|
| 2089 |
+
#
|
| 2090 |
+
|
| 2091 |
+
|
| 2092 |
+
class _SignalReceiver:
    """Context manager / async iterator that delivers OS signals to a task.

    While the ``with`` block is active, the given signals are routed through
    the event loop's signal handlers into an internal queue; iterating yields
    them one at a time.
    """

    def __init__(self, signals: tuple[Signals, ...]):
        self._signals = signals
        self._loop = get_running_loop()
        # Signals received but not yet consumed by __anext__
        self._signal_queue: deque[Signals] = deque()
        # Resolved whenever a new signal arrives
        self._future: asyncio.Future = asyncio.Future()
        self._handled_signals: set[Signals] = set()

    def _deliver(self, signum: Signals) -> None:
        # Runs as the loop's signal handler: queue the signal and wake the reader
        self._signal_queue.append(signum)
        if not self._future.done():
            self._future.set_result(None)

    def __enter__(self) -> _SignalReceiver:
        for sig in set(self._signals):
            self._loop.add_signal_handler(sig, self._deliver, sig)
            self._handled_signals.add(sig)

        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        for sig in self._handled_signals:
            self._loop.remove_signal_handler(sig)
        return None

    def __aiter__(self) -> _SignalReceiver:
        return self

    async def __anext__(self) -> Signals:
        """Wait for and return the next received signal."""
        await AsyncIOBackend.checkpoint()
        if not self._signal_queue:
            # Fresh future per wait so _deliver can wake us again
            self._future = asyncio.Future()
            await self._future

        return self._signal_queue.popleft()
|
| 2132 |
+
|
| 2133 |
+
|
| 2134 |
+
#
|
| 2135 |
+
# Testing and debugging
|
| 2136 |
+
#
|
| 2137 |
+
|
| 2138 |
+
|
| 2139 |
+
class AsyncIOTaskInfo(TaskInfo):
    """TaskInfo implementation for asyncio tasks.

    Holds only a weak reference to the task so TaskInfo objects don't keep
    finished tasks alive.
    """

    def __init__(self, task: asyncio.Task):
        # Parent id comes from anyio's own task-state bookkeeping, if present
        task_state = _task_states.get(task)
        if task_state is None:
            parent_id = None
        else:
            parent_id = task_state.parent_id

        coro = task.get_coro()
        assert coro is not None, "created TaskInfo from a completed Task"
        super().__init__(id(task), parent_id, task.get_name(), coro)
        self._task = weakref.ref(task)

    def has_pending_cancellation(self) -> bool:
        """Return ``True`` if the task has a cancellation pending delivery."""
        if not (task := self._task()):
            # If the task isn't around anymore, it won't have a pending cancellation
            return False

        # NOTE: relies on asyncio.Task private attributes (_must_cancel,
        # _fut_waiter) to detect an undelivered cancellation
        if task._must_cancel:  # type: ignore[attr-defined]
            return True
        elif (
            isinstance(task._fut_waiter, asyncio.Future)  # type: ignore[attr-defined]
            and task._fut_waiter.cancelled()  # type: ignore[attr-defined]
        ):
            return True

        # Fall back to anyio's cancel scope state
        if task_state := _task_states.get(task):
            if cancel_scope := task_state.cancel_scope:
                return cancel_scope._effectively_cancelled

        return False
|
| 2170 |
+
|
| 2171 |
+
|
| 2172 |
+
class TestRunner(abc.TestRunner):
    """asyncio-based test runner for anyio's pytest plugin.

    Tests and fixtures run as coroutines dispatched through a single
    long-lived runner task, so async fixtures and tests share one task
    context across a test's lifetime.
    """

    # Stream used to dispatch (coroutine, result future) pairs to the runner task
    _send_stream: MemoryObjectSendStream[tuple[Awaitable[Any], asyncio.Future[Any]]]

    def __init__(
        self,
        *,
        debug: bool | None = None,
        use_uvloop: bool = False,
        loop_factory: Callable[[], AbstractEventLoop] | None = None,
    ) -> None:
        if use_uvloop and loop_factory is None:
            import uvloop

            loop_factory = uvloop.new_event_loop

        self._runner = Runner(debug=debug, loop_factory=loop_factory)
        # Exceptions captured by the loop's exception handler, re-raised later
        self._exceptions: list[BaseException] = []
        self._runner_task: asyncio.Task | None = None

    def __enter__(self) -> TestRunner:
        self._runner.__enter__()
        self.get_loop().set_exception_handler(self._exception_handler)
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self._runner.__exit__(exc_type, exc_val, exc_tb)

    def get_loop(self) -> AbstractEventLoop:
        """Return the event loop managed by this runner."""
        return self._runner.get_loop()

    def _exception_handler(
        self, loop: asyncio.AbstractEventLoop, context: dict[str, Any]
    ) -> None:
        # Collect ordinary exceptions for later re-raising; defer everything
        # else to asyncio's default handler
        if isinstance(context.get("exception"), Exception):
            self._exceptions.append(context["exception"])
        else:
            loop.default_exception_handler(context)

    def _raise_async_exceptions(self) -> None:
        # Re-raise any exceptions raised in asynchronous callbacks
        if self._exceptions:
            exceptions, self._exceptions = self._exceptions, []
            if len(exceptions) == 1:
                raise exceptions[0]
            elif exceptions:
                raise BaseExceptionGroup(
                    "Multiple exceptions occurred in asynchronous callbacks", exceptions
                )

    async def _run_tests_and_fixtures(
        self,
        receive_stream: MemoryObjectReceiveStream[
            tuple[Awaitable[T_Retval], asyncio.Future[T_Retval]]
        ],
    ) -> None:
        """Runner-task body: await dispatched coroutines and resolve their futures."""
        from _pytest.outcomes import OutcomeException

        with receive_stream, self._send_stream:
            async for coro, future in receive_stream:
                try:
                    retval = await coro
                except CancelledError as exc:
                    if not future.cancelled():
                        future.cancel(*exc.args)

                    raise
                except BaseException as exc:
                    if not future.cancelled():
                        future.set_exception(exc)

                    # Test-level outcomes are reported via the future; anything
                    # more severe also tears down the runner task
                    if not isinstance(exc, (Exception, OutcomeException)):
                        raise
                else:
                    if not future.cancelled():
                        future.set_result(retval)

    async def _call_in_runner_task(
        self,
        func: Callable[P, Awaitable[T_Retval]],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> T_Retval:
        """Dispatch ``func(*args, **kwargs)`` to the runner task and await its result."""
        if not self._runner_task:
            # Lazily start the runner task on first use
            self._send_stream, receive_stream = create_memory_object_stream[
                tuple[Awaitable[Any], asyncio.Future]
            ](1)
            self._runner_task = self.get_loop().create_task(
                self._run_tests_and_fixtures(receive_stream)
            )

        coro = func(*args, **kwargs)
        future: asyncio.Future[T_Retval] = self.get_loop().create_future()
        self._send_stream.send_nowait((coro, future))
        return await future

    def run_asyncgen_fixture(
        self,
        fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
        kwargs: dict[str, Any],
    ) -> Iterable[T_Retval]:
        """Drive an async generator fixture: yield its value, then finalize it."""
        asyncgen = fixture_func(**kwargs)
        fixturevalue: T_Retval = self.get_loop().run_until_complete(
            self._call_in_runner_task(asyncgen.asend, None)
        )
        self._raise_async_exceptions()

        yield fixturevalue

        try:
            self.get_loop().run_until_complete(
                self._call_in_runner_task(asyncgen.asend, None)
            )
        except StopAsyncIteration:
            self._raise_async_exceptions()
        else:
            # The fixture yielded more than once, which is a usage error
            self.get_loop().run_until_complete(asyncgen.aclose())
            raise RuntimeError("Async generator fixture did not stop")

    def run_fixture(
        self,
        fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
        kwargs: dict[str, Any],
    ) -> T_Retval:
        """Run a coroutine fixture to completion and return its value."""
        retval = self.get_loop().run_until_complete(
            self._call_in_runner_task(fixture_func, **kwargs)
        )
        self._raise_async_exceptions()
        return retval

    def run_test(
        self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
    ) -> None:
        """Run a test coroutine, funneling its failure through the async-exception path."""
        try:
            self.get_loop().run_until_complete(
                self._call_in_runner_task(test_func, **kwargs)
            )
        except Exception as exc:
            self._exceptions.append(exc)

        self._raise_async_exceptions()
|
| 2317 |
+
|
| 2318 |
+
|
| 2319 |
+
class AsyncIOBackend(AsyncBackend):
|
| 2320 |
+
    @classmethod
    def run(
        cls,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        options: dict[str, Any],
    ) -> T_Retval:
        """Entry point for ``anyio.run()``: run *func* in a fresh event loop.

        Recognized *options*: ``debug``, ``loop_factory``, ``use_uvloop``.
        """

        @wraps(func)
        async def wrapper() -> T_Retval:
            task = cast(asyncio.Task, current_task())
            task.set_name(get_callable_name(func))
            # Register the root task with anyio's task-state bookkeeping
            _task_states[task] = TaskState(None, None)

            try:
                return await func(*args)
            finally:
                del _task_states[task]

        debug = options.get("debug", None)
        loop_factory = options.get("loop_factory", None)
        # use_uvloop is honored only when no explicit loop_factory was given
        if loop_factory is None and options.get("use_uvloop", False):
            import uvloop

            loop_factory = uvloop.new_event_loop

        with Runner(debug=debug, loop_factory=loop_factory) as runner:
            return runner.run(wrapper())
|
| 2348 |
+
|
| 2349 |
+
    @classmethod
    def current_token(cls) -> object:
        """Return the running event loop, which serves as the backend token."""
        return get_running_loop()
|
| 2352 |
+
|
| 2353 |
+
    @classmethod
    def current_time(cls) -> float:
        """Return the event loop's monotonic clock value."""
        return get_running_loop().time()
|
| 2356 |
+
|
| 2357 |
+
    @classmethod
    def cancelled_exception_class(cls) -> type[BaseException]:
        """Return the exception class asyncio raises on cancellation."""
        return CancelledError
|
| 2360 |
+
|
| 2361 |
+
    @classmethod
    async def checkpoint(cls) -> None:
        """Yield to the event loop once, allowing cancellation delivery."""
        await sleep(0)
|
| 2364 |
+
|
| 2365 |
+
    @classmethod
    async def checkpoint_if_cancelled(cls) -> None:
        """Yield to the event loop only if a pending cancellation would apply.

        Walks the cancel scope chain outward; a shielded scope stops the walk
        since outer cancellations cannot reach past it.
        """
        task = current_task()
        if task is None:
            return

        try:
            cancel_scope = _task_states[task].cancel_scope
        except KeyError:
            return

        while cancel_scope:
            if cancel_scope.cancel_called:
                # Yield so the pending cancellation can be delivered
                await sleep(0)
            elif cancel_scope.shield:
                break
            else:
                cancel_scope = cancel_scope._parent_scope
|
| 2383 |
+
|
| 2384 |
+
    @classmethod
    async def cancel_shielded_checkpoint(cls) -> None:
        """Yield to the event loop while shielded from cancellation."""
        with CancelScope(shield=True):
            await sleep(0)
|
| 2388 |
+
|
| 2389 |
+
    @classmethod
    async def sleep(cls, delay: float) -> None:
        """Sleep for *delay* seconds using asyncio's sleep."""
        await sleep(delay)
|
| 2392 |
+
|
| 2393 |
+
@classmethod
|
| 2394 |
+
def create_cancel_scope(
|
| 2395 |
+
cls, *, deadline: float = math.inf, shield: bool = False
|
| 2396 |
+
) -> CancelScope:
|
| 2397 |
+
return CancelScope(deadline=deadline, shield=shield)
|
| 2398 |
+
|
| 2399 |
+
    @classmethod
    def current_effective_deadline(cls) -> float:
        """Return the nearest deadline among the task's enclosing cancel scopes.

        Returns ``-inf`` if a scope has already been cancelled and ``inf`` if
        no deadline applies; a shielded scope stops the outward walk.
        """
        if (task := current_task()) is None:
            return math.inf

        try:
            cancel_scope = _task_states[task].cancel_scope
        except KeyError:
            return math.inf

        deadline = math.inf
        while cancel_scope:
            deadline = min(deadline, cancel_scope.deadline)
            if cancel_scope._cancel_called:
                # An already-cancelled scope makes the effective deadline
                # "in the past"
                deadline = -math.inf
                break
            elif cancel_scope.shield:
                break
            else:
                cancel_scope = cancel_scope._parent_scope

        return deadline
|
| 2421 |
+
|
| 2422 |
+
    @classmethod
    def create_task_group(cls) -> abc.TaskGroup:
        """Create a new asyncio-backed task group."""
        return TaskGroup()
|
| 2425 |
+
|
| 2426 |
+
    @classmethod
    def create_event(cls) -> abc.Event:
        """Create a new asyncio-backed event."""
        return Event()
|
| 2429 |
+
|
| 2430 |
+
    @classmethod
    def create_lock(cls, *, fast_acquire: bool) -> abc.Lock:
        """Create a new asyncio-backed lock."""
        return Lock(fast_acquire=fast_acquire)
|
| 2433 |
+
|
| 2434 |
+
@classmethod
|
| 2435 |
+
def create_semaphore(
|
| 2436 |
+
cls,
|
| 2437 |
+
initial_value: int,
|
| 2438 |
+
*,
|
| 2439 |
+
max_value: int | None = None,
|
| 2440 |
+
fast_acquire: bool = False,
|
| 2441 |
+
) -> abc.Semaphore:
|
| 2442 |
+
return Semaphore(initial_value, max_value=max_value, fast_acquire=fast_acquire)
|
| 2443 |
+
|
| 2444 |
+
    @classmethod
    def create_capacity_limiter(cls, total_tokens: float) -> abc.CapacityLimiter:
        """Create a new asyncio-backed capacity limiter."""
        return CapacityLimiter(total_tokens)
|
| 2447 |
+
|
| 2448 |
+
    @classmethod
    async def run_sync_in_worker_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        abandon_on_cancel: bool = False,
        limiter: abc.CapacityLimiter | None = None,
    ) -> T_Retval:
        """Run ``func(*args)`` in a pooled worker thread and return its result.

        Idle workers are reused and pruned after ``WorkerThread.MAX_IDLE_TIME``
        seconds. Unless *abandon_on_cancel* is true, the wait is shielded so
        the thread's result is not lost to cancellation.
        """
        await cls.checkpoint()

        # If this is the first run in this event loop thread, set up the necessary
        # variables
        try:
            idle_workers = _threadpool_idle_workers.get()
            workers = _threadpool_workers.get()
        except LookupError:
            idle_workers = deque()
            workers = set()
            _threadpool_idle_workers.set(idle_workers)
            _threadpool_workers.set(workers)

        async with limiter or cls.current_default_thread_limiter():
            with CancelScope(shield=not abandon_on_cancel) as scope:
                future: asyncio.Future = asyncio.Future()
                root_task = find_root_task()
                if not idle_workers:
                    # No idle worker available: spawn one tied to the root
                    # task's lifetime
                    worker = WorkerThread(root_task, workers, idle_workers)
                    worker.start()
                    workers.add(worker)
                    root_task.add_done_callback(worker.stop)
                else:
                    worker = idle_workers.pop()

                    # Prune any other workers that have been idle for MAX_IDLE_TIME
                    # seconds or longer
                    now = cls.current_time()
                    while idle_workers:
                        if (
                            now - idle_workers[0].idle_since
                            < WorkerThread.MAX_IDLE_TIME
                        ):
                            break

                        expired_worker = idle_workers.popleft()
                        expired_worker.root_task.remove_done_callback(
                            expired_worker.stop
                        )
                        expired_worker.stop()

                # Run the target in a copied context with sniffio neutralized
                # (the thread is not running in any async library)
                context = copy_context()
                context.run(sniffio.current_async_library_cvar.set, None)
                if abandon_on_cancel or scope._parent_scope is None:
                    worker_scope = scope
                else:
                    # Cancellation checks in the thread should observe the
                    # caller's (unshielded) scope
                    worker_scope = scope._parent_scope

                worker.queue.put_nowait((context, func, args, future, worker_scope))
                return await future
|
| 2506 |
+
|
| 2507 |
+
    @classmethod
    def check_cancelled(cls) -> None:
        """Raise CancelledError if the calling worker thread's host task was cancelled.

        Called from inside a worker thread; walks the cancel scope chain
        recorded in thread-local state, stopping at a shielded scope.
        """
        scope: CancelScope | None = threadlocals.current_cancel_scope
        while scope is not None:
            if scope.cancel_called:
                raise CancelledError(f"Cancelled by cancel scope {id(scope):x}")

            if scope.shield:
                return

            scope = scope._parent_scope
|
| 2518 |
+
|
| 2519 |
+
    @classmethod
    def run_async_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        """Run an async function in the event loop from a worker thread and block
        for its result.

        *token* is the target event loop (see :meth:`current_token`).
        """

        async def task_wrapper(scope: CancelScope) -> T_Retval:
            __tracebackhide__ = True
            task = cast(asyncio.Task, current_task())
            # Attach the spawned task to the worker's cancel scope so host-side
            # cancellation propagates to it
            _task_states[task] = TaskState(None, scope)
            scope._tasks.add(task)
            try:
                return await func(*args)
            except CancelledError as exc:
                # Translate to the concurrent.futures flavor expected by the
                # thread-side Future
                raise concurrent.futures.CancelledError(str(exc)) from None
            finally:
                scope._tasks.discard(task)

        loop = cast(AbstractEventLoop, token)
        context = copy_context()
        context.run(sniffio.current_async_library_cvar.set, "asyncio")
        wrapper = task_wrapper(threadlocals.current_cancel_scope)
        f: concurrent.futures.Future[T_Retval] = context.run(
            asyncio.run_coroutine_threadsafe, wrapper, loop
        )
        return f.result()
|
| 2546 |
+
|
| 2547 |
+
    @classmethod
    def run_sync_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        """Run a sync function on the event loop thread and block for its result.

        *token* is the target event loop (see :meth:`current_token`).
        """

        @wraps(func)
        def wrapper() -> None:
            try:
                sniffio.current_async_library_cvar.set("asyncio")
                f.set_result(func(*args))
            except BaseException as exc:
                f.set_exception(exc)
                # BaseExceptions (e.g. KeyboardInterrupt) must not be swallowed
                # by the callback; re-raise into the loop as well
                if not isinstance(exc, Exception):
                    raise

        f: concurrent.futures.Future[T_Retval] = Future()
        loop = cast(AbstractEventLoop, token)
        loop.call_soon_threadsafe(wrapper)
        return f.result()
|
| 2568 |
+
|
| 2569 |
+
    @classmethod
    def create_blocking_portal(cls) -> abc.BlockingPortal:
        """Create a new blocking portal bound to this backend."""
        return BlockingPortal()
|
| 2572 |
+
|
| 2573 |
+
    @classmethod
    async def open_process(
        cls,
        command: StrOrBytesPath | Sequence[StrOrBytesPath],
        *,
        stdin: int | IO[Any] | None,
        stdout: int | IO[Any] | None,
        stderr: int | IO[Any] | None,
        **kwargs: Any,
    ) -> Process:
        """Start a subprocess and wrap it in an anyio :class:`Process`.

        A str/bytes *command* runs through the shell; a sequence is executed
        directly. Extra keyword arguments pass through to asyncio.
        """
        await cls.checkpoint()
        if isinstance(command, PathLike):
            command = os.fspath(command)

        if isinstance(command, (str, bytes)):
            process = await asyncio.create_subprocess_shell(
                command,
                stdin=stdin,
                stdout=stdout,
                stderr=stderr,
                **kwargs,
            )
        else:
            process = await asyncio.create_subprocess_exec(
                *command,
                stdin=stdin,
                stdout=stdout,
                stderr=stderr,
                **kwargs,
            )

        # Wrap whichever pipes were requested in anyio stream adapters
        stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
        stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
        stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
        return Process(process, stdin_stream, stdout_stream, stderr_stream)
|
| 2608 |
+
|
| 2609 |
+
    @classmethod
    def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None:
        """Arrange for pooled worker processes to be shut down at loop exit.

        A background task performs graceful shutdown; a done-callback on the
        root task forcibly kills anything that remains.
        """
        create_task(
            _shutdown_process_pool_on_exit(workers),
            name="AnyIO process pool shutdown task",
        )
        find_root_task().add_done_callback(
            partial(_forcibly_shutdown_process_pool_on_exit, workers)  # type:ignore[arg-type]
        )
|
| 2618 |
+
|
| 2619 |
+
    @classmethod
    async def connect_tcp(
        cls, host: str, port: int, local_address: IPSockAddrType | None = None
    ) -> abc.SocketStream:
        """Open a TCP connection and return it as an anyio socket stream."""
        transport, protocol = cast(
            tuple[asyncio.Transport, StreamProtocol],
            await get_running_loop().create_connection(
                StreamProtocol, host, port, local_addr=local_address
            ),
        )
        # Start paused; the stream wrapper resumes reading on demand
        transport.pause_reading()
        return SocketStream(transport, protocol)
|
| 2631 |
+
|
| 2632 |
+
    @classmethod
    async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream:
        """Connect to a UNIX domain socket at *path* and return a stream.

        Performs a manual non-blocking connect, waiting for writability when
        the connect would block; the socket is closed if anything fails.
        """
        await cls.checkpoint()
        loop = get_running_loop()
        raw_socket = socket.socket(socket.AF_UNIX)
        raw_socket.setblocking(False)
        while True:
            try:
                raw_socket.connect(path)
            except BlockingIOError:
                # Connect in progress: wait until the socket becomes writable
                f: asyncio.Future = asyncio.Future()
                loop.add_writer(raw_socket, f.set_result, None)
                f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
                await f
            except BaseException:
                raw_socket.close()
                raise
            else:
                return UNIXSocketStream(raw_socket)
|
| 2651 |
+
|
| 2652 |
+
@classmethod
def create_tcp_listener(cls, sock: socket.socket) -> SocketListener:
    """Wrap an already-bound, listening TCP socket in a SocketListener."""
    return TCPSocketListener(sock)
|
| 2655 |
+
|
| 2656 |
+
@classmethod
def create_unix_listener(cls, sock: socket.socket) -> SocketListener:
    """Wrap an already-bound, listening UNIX socket in a SocketListener."""
    return UNIXSocketListener(sock)
|
| 2659 |
+
|
| 2660 |
+
@classmethod
async def create_udp_socket(
    cls,
    family: AddressFamily,
    local_address: IPSockAddrType | None,
    remote_address: IPSockAddrType | None,
    reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
    """Create a UDP socket via the event loop's datagram endpoint API.

    Returns a connected socket wrapper when *remote_address* is given,
    otherwise an unconnected one.
    """
    transport, protocol = await get_running_loop().create_datagram_endpoint(
        DatagramProtocol,
        local_addr=local_address,
        remote_addr=remote_address,
        family=family,
        reuse_port=reuse_port,
    )
    # The protocol records any connection-time error; surface it instead of
    # handing back a broken transport.
    if protocol.exception:
        transport.close()
        raise protocol.exception

    if not remote_address:
        return UDPSocket(transport, protocol)
    else:
        return ConnectedUDPSocket(transport, protocol)
|
| 2683 |
+
|
| 2684 |
+
@classmethod
async def create_unix_datagram_socket(  # type: ignore[override]
    cls, raw_socket: socket.socket, remote_path: str | bytes | None
) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket:
    """Wrap *raw_socket* as a UNIX datagram socket.

    If *remote_path* is given, connect to it first (retrying while the
    non-blocking connect would block) and return a connected wrapper;
    otherwise return an unconnected one. The raw socket is closed on any
    failure during the connect phase.
    """
    await cls.checkpoint()
    loop = get_running_loop()

    if remote_path:
        while True:
            try:
                raw_socket.connect(remote_path)
            except BlockingIOError:
                # Wait for writability, then retry the connect call.
                f: asyncio.Future = asyncio.Future()
                loop.add_writer(raw_socket, f.set_result, None)
                f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
                await f
            except BaseException:
                # Includes cancellation: never leak the raw socket.
                raw_socket.close()
                raise
            else:
                return ConnectedUNIXDatagramSocket(raw_socket)
    else:
        return UNIXDatagramSocket(raw_socket)
|
| 2707 |
+
|
| 2708 |
+
@classmethod
async def getaddrinfo(
    cls,
    host: bytes | str | None,
    port: str | int | None,
    *,
    family: int | AddressFamily = 0,
    type: int | SocketKind = 0,
    proto: int = 0,
    flags: int = 0,
) -> list[
    tuple[
        AddressFamily,
        SocketKind,
        int,
        str,
        tuple[str, int] | tuple[str, int, int, int],
    ]
]:
    """Resolve *host*/*port* using the event loop's (thread-pooled)
    ``getaddrinfo``; parameters mirror :func:`socket.getaddrinfo`.
    """
    return await get_running_loop().getaddrinfo(
        host, port, family=family, type=type, proto=proto, flags=flags
    )
|
| 2730 |
+
|
| 2731 |
+
@classmethod
async def getnameinfo(
    cls, sockaddr: IPSockAddrType, flags: int = 0
) -> tuple[str, str]:
    """Reverse-resolve *sockaddr* via the event loop's ``getnameinfo``."""
    return await get_running_loop().getnameinfo(sockaddr, flags)
|
| 2736 |
+
|
| 2737 |
+
@classmethod
async def wait_readable(cls, obj: FileDescriptorLike) -> None:
    """Suspend the calling task until *obj* (a file descriptor or object with
    ``fileno()``) is reported readable.

    :raises BusyResourceError: if another task is already waiting for the
        same descriptor to become readable
    """
    await cls.checkpoint()
    try:
        read_events = _read_events.get()
    except LookupError:
        # First waiter in this context: create the fd -> Event registry.
        read_events = {}
        _read_events.set(read_events)

    if not isinstance(obj, int):
        obj = obj.fileno()

    # Only one task may wait on readability of a given fd at a time.
    if read_events.get(obj):
        raise BusyResourceError("reading from")

    loop = get_running_loop()
    event = asyncio.Event()
    try:
        loop.add_reader(obj, event.set)
    except NotImplementedError:
        # Some event loops (e.g. Proactor) lack add_reader; fall back to the
        # shared selector thread.
        from anyio._core._asyncio_selector_thread import get_selector

        selector = get_selector()
        selector.add_reader(obj, event.set)
        remove_reader = selector.remove_reader
    else:
        remove_reader = loop.remove_reader

    read_events[obj] = event
    try:
        await event.wait()
    finally:
        # Always unregister the callback and release the fd slot, even on
        # cancellation.
        remove_reader(obj)
        del read_events[obj]
|
| 2771 |
+
|
| 2772 |
+
@classmethod
async def wait_writable(cls, obj: FileDescriptorLike) -> None:
    """Suspend the calling task until *obj* (a file descriptor or object with
    ``fileno()``) is reported writable.

    :raises BusyResourceError: if another task is already waiting for the
        same descriptor to become writable
    """
    await cls.checkpoint()
    try:
        write_events = _write_events.get()
    except LookupError:
        # First waiter in this context: create the fd -> Event registry.
        write_events = {}
        _write_events.set(write_events)

    if not isinstance(obj, int):
        obj = obj.fileno()

    # Only one task may wait on writability of a given fd at a time.
    if write_events.get(obj):
        raise BusyResourceError("writing to")

    loop = get_running_loop()
    event = asyncio.Event()
    try:
        loop.add_writer(obj, event.set)
    except NotImplementedError:
        # Some event loops (e.g. Proactor) lack add_writer; fall back to the
        # shared selector thread.
        from anyio._core._asyncio_selector_thread import get_selector

        selector = get_selector()
        selector.add_writer(obj, event.set)
        remove_writer = selector.remove_writer
    else:
        remove_writer = loop.remove_writer

    write_events[obj] = event
    try:
        await event.wait()
    finally:
        # Consistency fix: unregister the callback *before* dropping the
        # bookkeeping entry, mirroring wait_readable()'s cleanup order.
        remove_writer(obj)
        del write_events[obj]
|
| 2806 |
+
|
| 2807 |
+
@classmethod
def current_default_thread_limiter(cls) -> CapacityLimiter:
    """Return the default thread limiter, lazily creating one (capacity 40)
    on first use in this context.
    """
    try:
        return _default_thread_limiter.get()
    except LookupError:
        limiter = CapacityLimiter(40)
        _default_thread_limiter.set(limiter)
        return limiter
|
| 2815 |
+
|
| 2816 |
+
@classmethod
def open_signal_receiver(
    cls, *signals: Signals
) -> AbstractContextManager[AsyncIterator[Signals]]:
    """Return a context manager yielding an async iterator of the given
    signals as they are delivered.
    """
    return _SignalReceiver(signals)
|
| 2821 |
+
|
| 2822 |
+
@classmethod
def get_current_task(cls) -> TaskInfo:
    """Return a TaskInfo describing the currently running asyncio task."""
    return AsyncIOTaskInfo(current_task())  # type: ignore[arg-type]
|
| 2825 |
+
|
| 2826 |
+
@classmethod
def get_running_tasks(cls) -> Sequence[TaskInfo]:
    """Return TaskInfo objects for every asyncio task that has not yet
    finished.
    """
    infos: list[TaskInfo] = []
    for task in all_tasks():
        if not task.done():
            infos.append(AsyncIOTaskInfo(task))
    return infos
|
| 2829 |
+
|
| 2830 |
+
@classmethod
async def wait_all_tasks_blocked(cls) -> None:
    """Wait until every other task in the event loop is blocked on a future.

    Polls the task set; whenever some other task is runnable (no waiter, or
    its waiter already finished), sleeps briefly and rescans.
    """
    await cls.checkpoint()
    this_task = current_task()
    while True:
        for task in all_tasks():
            if task is this_task:
                continue

            # NOTE: relies on asyncio's private Task._fut_waiter to detect
            # whether the task is blocked.
            waiter = task._fut_waiter  # type: ignore[attr-defined]
            if waiter is None or waiter.done():
                await sleep(0.1)
                break
        else:
            # Full scan found every other task blocked; we're done.
            return
|
| 2845 |
+
|
| 2846 |
+
@classmethod
def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
    """Create a TestRunner, passing *options* through as keyword arguments."""
    return TestRunner(**options)
|
| 2849 |
+
|
| 2850 |
+
|
| 2851 |
+
# Module-level alias; presumably the name AnyIO's backend loader looks up
# in this module — confirm against the backend discovery code.
backend_class = AsyncIOBackend
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_backends/_trio.py
ADDED
|
@@ -0,0 +1,1334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import array
|
| 4 |
+
import math
|
| 5 |
+
import os
|
| 6 |
+
import socket
|
| 7 |
+
import sys
|
| 8 |
+
import types
|
| 9 |
+
import weakref
|
| 10 |
+
from collections.abc import (
|
| 11 |
+
AsyncGenerator,
|
| 12 |
+
AsyncIterator,
|
| 13 |
+
Awaitable,
|
| 14 |
+
Callable,
|
| 15 |
+
Collection,
|
| 16 |
+
Coroutine,
|
| 17 |
+
Iterable,
|
| 18 |
+
Sequence,
|
| 19 |
+
)
|
| 20 |
+
from concurrent.futures import Future
|
| 21 |
+
from contextlib import AbstractContextManager
|
| 22 |
+
from dataclasses import dataclass
|
| 23 |
+
from functools import partial
|
| 24 |
+
from io import IOBase
|
| 25 |
+
from os import PathLike
|
| 26 |
+
from signal import Signals
|
| 27 |
+
from socket import AddressFamily, SocketKind
|
| 28 |
+
from types import TracebackType
|
| 29 |
+
from typing import (
|
| 30 |
+
IO,
|
| 31 |
+
TYPE_CHECKING,
|
| 32 |
+
Any,
|
| 33 |
+
Generic,
|
| 34 |
+
NoReturn,
|
| 35 |
+
TypeVar,
|
| 36 |
+
cast,
|
| 37 |
+
overload,
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
import trio.from_thread
|
| 41 |
+
import trio.lowlevel
|
| 42 |
+
from outcome import Error, Outcome, Value
|
| 43 |
+
from trio.lowlevel import (
|
| 44 |
+
current_root_task,
|
| 45 |
+
current_task,
|
| 46 |
+
wait_readable,
|
| 47 |
+
wait_writable,
|
| 48 |
+
)
|
| 49 |
+
from trio.socket import SocketType as TrioSocketType
|
| 50 |
+
from trio.to_thread import run_sync
|
| 51 |
+
|
| 52 |
+
from .. import (
|
| 53 |
+
CapacityLimiterStatistics,
|
| 54 |
+
EventStatistics,
|
| 55 |
+
LockStatistics,
|
| 56 |
+
TaskInfo,
|
| 57 |
+
WouldBlock,
|
| 58 |
+
abc,
|
| 59 |
+
)
|
| 60 |
+
from .._core._eventloop import claim_worker_thread
|
| 61 |
+
from .._core._exceptions import (
|
| 62 |
+
BrokenResourceError,
|
| 63 |
+
BusyResourceError,
|
| 64 |
+
ClosedResourceError,
|
| 65 |
+
EndOfStream,
|
| 66 |
+
)
|
| 67 |
+
from .._core._sockets import convert_ipv6_sockaddr
|
| 68 |
+
from .._core._streams import create_memory_object_stream
|
| 69 |
+
from .._core._synchronization import (
|
| 70 |
+
CapacityLimiter as BaseCapacityLimiter,
|
| 71 |
+
)
|
| 72 |
+
from .._core._synchronization import Event as BaseEvent
|
| 73 |
+
from .._core._synchronization import Lock as BaseLock
|
| 74 |
+
from .._core._synchronization import (
|
| 75 |
+
ResourceGuard,
|
| 76 |
+
SemaphoreStatistics,
|
| 77 |
+
)
|
| 78 |
+
from .._core._synchronization import Semaphore as BaseSemaphore
|
| 79 |
+
from .._core._tasks import CancelScope as BaseCancelScope
|
| 80 |
+
from ..abc import IPSockAddrType, UDPPacketType, UNIXDatagramPacketType
|
| 81 |
+
from ..abc._eventloop import AsyncBackend, StrOrBytesPath
|
| 82 |
+
from ..streams.memory import MemoryObjectSendStream
|
| 83 |
+
|
| 84 |
+
if TYPE_CHECKING:
|
| 85 |
+
from _typeshed import HasFileno
|
| 86 |
+
|
| 87 |
+
if sys.version_info >= (3, 10):
|
| 88 |
+
from typing import ParamSpec
|
| 89 |
+
else:
|
| 90 |
+
from typing_extensions import ParamSpec
|
| 91 |
+
|
| 92 |
+
if sys.version_info >= (3, 11):
|
| 93 |
+
from typing import TypeVarTuple, Unpack
|
| 94 |
+
else:
|
| 95 |
+
from exceptiongroup import BaseExceptionGroup
|
| 96 |
+
from typing_extensions import TypeVarTuple, Unpack
|
| 97 |
+
|
| 98 |
+
T = TypeVar("T")
|
| 99 |
+
T_Retval = TypeVar("T_Retval")
|
| 100 |
+
T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType)
|
| 101 |
+
PosArgsT = TypeVarTuple("PosArgsT")
|
| 102 |
+
P = ParamSpec("P")
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
#
|
| 106 |
+
# Event loop
|
| 107 |
+
#
|
| 108 |
+
|
| 109 |
+
# Use trio's own run-scoped variable type directly on this backend.
RunVar = trio.lowlevel.RunVar
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
#
|
| 113 |
+
# Timeouts and cancellation
|
| 114 |
+
#
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class CancelScope(BaseCancelScope):
    """AnyIO cancel scope backed by a :class:`trio.CancelScope`.

    Every operation and property delegates to the wrapped trio scope; an
    existing trio scope may be adopted via the *original* argument.
    """

    def __new__(
        cls, original: trio.CancelScope | None = None, **kwargs: object
    ) -> CancelScope:
        # Bypass BaseCancelScope.__new__; construction happens in __init__.
        return object.__new__(cls)

    def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None:
        # Adopt the given trio scope, or create a fresh one from kwargs.
        self.__original = original or trio.CancelScope(**kwargs)

    def __enter__(self) -> CancelScope:
        self.__original.__enter__()
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        # https://github.com/python-trio/trio-typing/pull/79
        return self.__original.__exit__(exc_type, exc_val, exc_tb)

    def cancel(self) -> None:
        self.__original.cancel()

    @property
    def deadline(self) -> float:
        return self.__original.deadline

    @deadline.setter
    def deadline(self, value: float) -> None:
        self.__original.deadline = value

    @property
    def cancel_called(self) -> bool:
        return self.__original.cancel_called

    @property
    def cancelled_caught(self) -> bool:
        return self.__original.cancelled_caught

    @property
    def shield(self) -> bool:
        return self.__original.shield

    @shield.setter
    def shield(self, value: bool) -> None:
        self.__original.shield = value
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
#
|
| 168 |
+
# Task groups
|
| 169 |
+
#
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
class TaskGroup(abc.TaskGroup):
    """AnyIO task group implemented on top of a trio nursery."""

    def __init__(self) -> None:
        # Tasks may only be started between __aenter__ and __aexit__.
        self._active = False
        self._nursery_manager = trio.open_nursery(strict_exception_groups=True)
        self.cancel_scope = None  # type: ignore[assignment]

    async def __aenter__(self) -> TaskGroup:
        self._active = True
        self._nursery = await self._nursery_manager.__aenter__()
        self.cancel_scope = CancelScope(self._nursery.cancel_scope)
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        try:
            return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb)
        except BaseExceptionGroup as exc:
            # A group containing only Cancelled exceptions is collapsed into
            # a single Cancelled so trio's cancellation machinery sees it.
            if not exc.split(trio.Cancelled)[1]:
                raise trio.Cancelled._create() from exc

            raise
        finally:
            # Drop local references to the traceback to break reference
            # cycles, then deactivate the group.
            del exc_val, exc_tb
            self._active = False

    def start_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> None:
        """Start a task without waiting for it to initialize."""
        if not self._active:
            raise RuntimeError(
                "This task group is not active; no new tasks can be started."
            )

        self._nursery.start_soon(func, *args, name=name)

    async def start(
        self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
    ) -> Any:
        """Start a task and wait for it to signal readiness via task_status."""
        if not self._active:
            raise RuntimeError(
                "This task group is not active; no new tasks can be started."
            )

        return await self._nursery.start(func, *args, name=name)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
#
|
| 226 |
+
# Threads
|
| 227 |
+
#
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
class BlockingPortal(abc.BlockingPortal):
    """Blocking portal for calling into a trio event loop from other threads."""

    def __new__(cls) -> BlockingPortal:
        return object.__new__(cls)

    def __init__(self) -> None:
        super().__init__()
        # Captured at construction so foreign threads can target this loop.
        self._token = trio.lowlevel.current_trio_token()

    def _spawn_task_from_thread(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        name: object,
        future: Future[T_Retval],
    ) -> None:
        # Hop into the trio thread and schedule _call_func there; the result
        # is delivered through *future*.
        trio.from_thread.run_sync(
            partial(self._task_group.start_soon, name=name),
            self._call_func,
            func,
            args,
            kwargs,
            future,
            trio_token=self._token,
        )
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
#
|
| 258 |
+
# Subprocesses
|
| 259 |
+
#
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
@dataclass(eq=False)
class ReceiveStreamWrapper(abc.ByteReceiveStream):
    """Adapt a trio ReceiveStream to AnyIO's byte receive stream interface."""

    # The wrapped trio stream.
    _stream: trio.abc.ReceiveStream

    async def receive(self, max_bytes: int | None = None) -> bytes:
        try:
            data = await self._stream.receive_some(max_bytes)
        except trio.ClosedResourceError as exc:
            # Re-raise as the AnyIO equivalent, preserving the root cause.
            raise ClosedResourceError from exc.__cause__
        except trio.BrokenResourceError as exc:
            raise BrokenResourceError from exc.__cause__

        if data:
            return data
        else:
            # trio signals EOF with an empty read.
            raise EndOfStream

    async def aclose(self) -> None:
        await self._stream.aclose()
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
@dataclass(eq=False)
class SendStreamWrapper(abc.ByteSendStream):
    """Adapt a trio SendStream to AnyIO's byte send stream interface."""

    # The wrapped trio stream.
    _stream: trio.abc.SendStream

    async def send(self, item: bytes) -> None:
        try:
            await self._stream.send_all(item)
        except trio.ClosedResourceError as exc:
            # Re-raise as the AnyIO equivalent, preserving the root cause.
            raise ClosedResourceError from exc.__cause__
        except trio.BrokenResourceError as exc:
            raise BrokenResourceError from exc.__cause__

    async def aclose(self) -> None:
        await self._stream.aclose()
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
@dataclass(eq=False)
class Process(abc.Process):
    """AnyIO process wrapper around a :class:`trio.Process`."""

    # The wrapped trio process and its wrapped standard streams.
    _process: trio.Process
    _stdin: abc.ByteSendStream | None
    _stdout: abc.ByteReceiveStream | None
    _stderr: abc.ByteReceiveStream | None

    async def aclose(self) -> None:
        """Close the standard streams and wait for the process to exit.

        Stream closing is shielded from cancellation; if waiting is
        interrupted, the process is killed and awaited under a shield before
        the exception propagates.
        """
        with CancelScope(shield=True):
            if self._stdin:
                await self._stdin.aclose()
            if self._stdout:
                await self._stdout.aclose()
            if self._stderr:
                await self._stderr.aclose()

        try:
            await self.wait()
        except BaseException:
            self.kill()
            with CancelScope(shield=True):
                await self.wait()
            raise

    async def wait(self) -> int:
        return await self._process.wait()

    def terminate(self) -> None:
        self._process.terminate()

    def kill(self) -> None:
        self._process.kill()

    def send_signal(self, signal: Signals) -> None:
        self._process.send_signal(signal)

    @property
    def pid(self) -> int:
        return self._process.pid

    @property
    def returncode(self) -> int | None:
        return self._process.returncode

    @property
    def stdin(self) -> abc.ByteSendStream | None:
        return self._stdin

    @property
    def stdout(self) -> abc.ByteReceiveStream | None:
        return self._stdout

    @property
    def stderr(self) -> abc.ByteReceiveStream | None:
        return self._stderr
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
class _ProcessPoolShutdownInstrument(trio.abc.Instrument):
    """Trio instrument hooked on run end; the override currently just
    delegates to the base implementation.
    """

    def after_run(self) -> None:
        super().after_run()
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
# Run-scoped holder for the default capacity limiter used by worker processes.
current_default_worker_process_limiter: trio.lowlevel.RunVar = RunVar(
    "current_default_worker_process_limiter"
)
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
async def _shutdown_process_pool(workers: set[abc.Process]) -> None:
    """Sleep until cancelled, then kill and close all pool worker processes.

    Intended to run as a background task; the cancellation that ends the run
    triggers the cleanup.
    """
    try:
        await trio.sleep(math.inf)
    except trio.Cancelled:
        for process in workers:
            if process.returncode is None:
                process.kill()

        # Shield the close phase so the pending cancellation cannot
        # interrupt it.
        with CancelScope(shield=True):
            for process in workers:
                await process.aclose()
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
#
|
| 380 |
+
# Sockets and networking
|
| 381 |
+
#
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
class _TrioSocketMixin(Generic[T_SockAddr]):
    """Shared plumbing for socket wrappers backed by a trio socket:
    close tracking and translation of trio/OS errors to AnyIO exceptions.
    """

    def __init__(self, trio_socket: TrioSocketType) -> None:
        self._trio_socket = trio_socket
        # True once aclose() has closed the socket through this wrapper.
        self._closed = False

    def _check_closed(self) -> None:
        """Raise if the socket was closed by us or torn down externally."""
        if self._closed:
            raise ClosedResourceError
        if self._trio_socket.fileno() < 0:
            raise BrokenResourceError

    @property
    def _raw_socket(self) -> socket.socket:
        # Reaches into trio's private attribute for the stdlib socket.
        return self._trio_socket._sock  # type: ignore[attr-defined]

    async def aclose(self) -> None:
        if self._trio_socket.fileno() >= 0:
            self._closed = True
            self._trio_socket.close()

    def _convert_socket_error(self, exc: BaseException) -> NoReturn:
        """Translate an exception from a trio socket call into the matching
        AnyIO exception; anything unrecognized is re-raised as-is.
        """
        if isinstance(exc, trio.ClosedResourceError):
            raise ClosedResourceError from exc
        elif self._trio_socket.fileno() < 0 and self._closed:
            # Closed through this wrapper while the call was in flight.
            raise ClosedResourceError from None
        elif isinstance(exc, OSError):
            raise BrokenResourceError from exc
        else:
            raise exc
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
class SocketStream(_TrioSocketMixin, abc.SocketStream):
    """Byte stream over a connected trio socket, with per-direction guards
    against concurrent use by multiple tasks.
    """

    def __init__(self, trio_socket: TrioSocketType) -> None:
        super().__init__(trio_socket)
        self._receive_guard = ResourceGuard("reading from")
        self._send_guard = ResourceGuard("writing to")

    async def receive(self, max_bytes: int = 65536) -> bytes:
        with self._receive_guard:
            try:
                data = await self._trio_socket.recv(max_bytes)
            except BaseException as exc:
                self._convert_socket_error(exc)

            if data:
                return data
            else:
                # An empty recv means the peer closed the connection.
                raise EndOfStream

    async def send(self, item: bytes) -> None:
        with self._send_guard:
            # Loop over a memoryview so partial sends advance without copying.
            view = memoryview(item)
            while view:
                try:
                    bytes_sent = await self._trio_socket.send(view)
                except BaseException as exc:
                    self._convert_socket_error(exc)

                view = view[bytes_sent:]

    async def send_eof(self) -> None:
        # Half-close: shut down the write side only.
        self._trio_socket.shutdown(socket.SHUT_WR)
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
class UNIXSocketStream(SocketStream, abc.UNIXSocketStream):
    """UNIX domain socket stream with file descriptor passing (SCM_RIGHTS)."""

    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        """Receive up to *msglen* bytes and up to *maxfds* file descriptors.

        :return: the message bytes and the list of received descriptors
        :raises EndOfStream: if the peer closed without sending anything
        """
        if not isinstance(msglen, int) or msglen < 0:
            raise ValueError("msglen must be a non-negative integer")
        if not isinstance(maxfds, int) or maxfds < 1:
            raise ValueError("maxfds must be a positive integer")

        fds = array.array("i")
        await trio.lowlevel.checkpoint()
        with self._receive_guard:
            while True:
                try:
                    message, ancdata, flags, addr = await self._trio_socket.recvmsg(
                        msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
                    )
                except BaseException as exc:
                    self._convert_socket_error(exc)
                else:
                    if not message and not ancdata:
                        raise EndOfStream

                    break

        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            # Only SCM_RIGHTS (fd passing) ancillary data is expected.
            if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
                raise RuntimeError(
                    f"Received unexpected ancillary data; message = {message!r}, "
                    f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
                )

            # Truncate any partial trailing item before decoding the fd array.
            fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])

        return message, list(fds)

    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        """Send *message* together with the given file descriptors.

        *fds* may mix raw integer descriptors and file-like objects (their
        ``fileno()`` is used).
        """
        if not message:
            raise ValueError("message must not be empty")
        if not fds:
            raise ValueError("fds must not be empty")

        filenos: list[int] = []
        for fd in fds:
            if isinstance(fd, int):
                filenos.append(fd)
            elif isinstance(fd, IOBase):
                filenos.append(fd.fileno())

        fdarray = array.array("i", filenos)
        await trio.lowlevel.checkpoint()
        with self._send_guard:
            while True:
                try:
                    await self._trio_socket.sendmsg(
                        [message],
                        [
                            (
                                socket.SOL_SOCKET,
                                socket.SCM_RIGHTS,
                                fdarray,
                            )
                        ],
                    )
                    break
                except BaseException as exc:
                    self._convert_socket_error(exc)
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
class TCPSocketListener(_TrioSocketMixin, abc.SocketListener):
|
| 516 |
+
def __init__(self, raw_socket: socket.socket):
|
| 517 |
+
super().__init__(trio.socket.from_stdlib_socket(raw_socket))
|
| 518 |
+
self._accept_guard = ResourceGuard("accepting connections from")
|
| 519 |
+
|
| 520 |
+
async def accept(self) -> SocketStream:
|
| 521 |
+
with self._accept_guard:
|
| 522 |
+
try:
|
| 523 |
+
trio_socket, _addr = await self._trio_socket.accept()
|
| 524 |
+
except BaseException as exc:
|
| 525 |
+
self._convert_socket_error(exc)
|
| 526 |
+
|
| 527 |
+
trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
| 528 |
+
return SocketStream(trio_socket)
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener):
|
| 532 |
+
def __init__(self, raw_socket: socket.socket):
|
| 533 |
+
super().__init__(trio.socket.from_stdlib_socket(raw_socket))
|
| 534 |
+
self._accept_guard = ResourceGuard("accepting connections from")
|
| 535 |
+
|
| 536 |
+
async def accept(self) -> UNIXSocketStream:
|
| 537 |
+
with self._accept_guard:
|
| 538 |
+
try:
|
| 539 |
+
trio_socket, _addr = await self._trio_socket.accept()
|
| 540 |
+
except BaseException as exc:
|
| 541 |
+
self._convert_socket_error(exc)
|
| 542 |
+
|
| 543 |
+
return UNIXSocketStream(trio_socket)
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket):
|
| 547 |
+
def __init__(self, trio_socket: TrioSocketType) -> None:
|
| 548 |
+
super().__init__(trio_socket)
|
| 549 |
+
self._receive_guard = ResourceGuard("reading from")
|
| 550 |
+
self._send_guard = ResourceGuard("writing to")
|
| 551 |
+
|
| 552 |
+
async def receive(self) -> tuple[bytes, IPSockAddrType]:
|
| 553 |
+
with self._receive_guard:
|
| 554 |
+
try:
|
| 555 |
+
data, addr = await self._trio_socket.recvfrom(65536)
|
| 556 |
+
return data, convert_ipv6_sockaddr(addr)
|
| 557 |
+
except BaseException as exc:
|
| 558 |
+
self._convert_socket_error(exc)
|
| 559 |
+
|
| 560 |
+
async def send(self, item: UDPPacketType) -> None:
|
| 561 |
+
with self._send_guard:
|
| 562 |
+
try:
|
| 563 |
+
await self._trio_socket.sendto(*item)
|
| 564 |
+
except BaseException as exc:
|
| 565 |
+
self._convert_socket_error(exc)
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket):
|
| 569 |
+
def __init__(self, trio_socket: TrioSocketType) -> None:
|
| 570 |
+
super().__init__(trio_socket)
|
| 571 |
+
self._receive_guard = ResourceGuard("reading from")
|
| 572 |
+
self._send_guard = ResourceGuard("writing to")
|
| 573 |
+
|
| 574 |
+
async def receive(self) -> bytes:
|
| 575 |
+
with self._receive_guard:
|
| 576 |
+
try:
|
| 577 |
+
return await self._trio_socket.recv(65536)
|
| 578 |
+
except BaseException as exc:
|
| 579 |
+
self._convert_socket_error(exc)
|
| 580 |
+
|
| 581 |
+
async def send(self, item: bytes) -> None:
|
| 582 |
+
with self._send_guard:
|
| 583 |
+
try:
|
| 584 |
+
await self._trio_socket.send(item)
|
| 585 |
+
except BaseException as exc:
|
| 586 |
+
self._convert_socket_error(exc)
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
class UNIXDatagramSocket(_TrioSocketMixin[str], abc.UNIXDatagramSocket):
|
| 590 |
+
def __init__(self, trio_socket: TrioSocketType) -> None:
|
| 591 |
+
super().__init__(trio_socket)
|
| 592 |
+
self._receive_guard = ResourceGuard("reading from")
|
| 593 |
+
self._send_guard = ResourceGuard("writing to")
|
| 594 |
+
|
| 595 |
+
async def receive(self) -> UNIXDatagramPacketType:
|
| 596 |
+
with self._receive_guard:
|
| 597 |
+
try:
|
| 598 |
+
data, addr = await self._trio_socket.recvfrom(65536)
|
| 599 |
+
return data, addr
|
| 600 |
+
except BaseException as exc:
|
| 601 |
+
self._convert_socket_error(exc)
|
| 602 |
+
|
| 603 |
+
async def send(self, item: UNIXDatagramPacketType) -> None:
|
| 604 |
+
with self._send_guard:
|
| 605 |
+
try:
|
| 606 |
+
await self._trio_socket.sendto(*item)
|
| 607 |
+
except BaseException as exc:
|
| 608 |
+
self._convert_socket_error(exc)
|
| 609 |
+
|
| 610 |
+
|
| 611 |
+
class ConnectedUNIXDatagramSocket(
|
| 612 |
+
_TrioSocketMixin[str], abc.ConnectedUNIXDatagramSocket
|
| 613 |
+
):
|
| 614 |
+
def __init__(self, trio_socket: TrioSocketType) -> None:
|
| 615 |
+
super().__init__(trio_socket)
|
| 616 |
+
self._receive_guard = ResourceGuard("reading from")
|
| 617 |
+
self._send_guard = ResourceGuard("writing to")
|
| 618 |
+
|
| 619 |
+
async def receive(self) -> bytes:
|
| 620 |
+
with self._receive_guard:
|
| 621 |
+
try:
|
| 622 |
+
return await self._trio_socket.recv(65536)
|
| 623 |
+
except BaseException as exc:
|
| 624 |
+
self._convert_socket_error(exc)
|
| 625 |
+
|
| 626 |
+
async def send(self, item: bytes) -> None:
|
| 627 |
+
with self._send_guard:
|
| 628 |
+
try:
|
| 629 |
+
await self._trio_socket.send(item)
|
| 630 |
+
except BaseException as exc:
|
| 631 |
+
self._convert_socket_error(exc)
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
#
|
| 635 |
+
# Synchronization
|
| 636 |
+
#
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
class Event(BaseEvent):
|
| 640 |
+
def __new__(cls) -> Event:
|
| 641 |
+
return object.__new__(cls)
|
| 642 |
+
|
| 643 |
+
def __init__(self) -> None:
|
| 644 |
+
self.__original = trio.Event()
|
| 645 |
+
|
| 646 |
+
def is_set(self) -> bool:
|
| 647 |
+
return self.__original.is_set()
|
| 648 |
+
|
| 649 |
+
async def wait(self) -> None:
|
| 650 |
+
return await self.__original.wait()
|
| 651 |
+
|
| 652 |
+
def statistics(self) -> EventStatistics:
|
| 653 |
+
orig_statistics = self.__original.statistics()
|
| 654 |
+
return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting)
|
| 655 |
+
|
| 656 |
+
def set(self) -> None:
|
| 657 |
+
self.__original.set()
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
class Lock(BaseLock):
|
| 661 |
+
def __new__(cls, *, fast_acquire: bool = False) -> Lock:
|
| 662 |
+
return object.__new__(cls)
|
| 663 |
+
|
| 664 |
+
def __init__(self, *, fast_acquire: bool = False) -> None:
|
| 665 |
+
self._fast_acquire = fast_acquire
|
| 666 |
+
self.__original = trio.Lock()
|
| 667 |
+
|
| 668 |
+
@staticmethod
|
| 669 |
+
def _convert_runtime_error_msg(exc: RuntimeError) -> None:
|
| 670 |
+
if exc.args == ("attempt to re-acquire an already held Lock",):
|
| 671 |
+
exc.args = ("Attempted to acquire an already held Lock",)
|
| 672 |
+
|
| 673 |
+
async def acquire(self) -> None:
|
| 674 |
+
if not self._fast_acquire:
|
| 675 |
+
try:
|
| 676 |
+
await self.__original.acquire()
|
| 677 |
+
except RuntimeError as exc:
|
| 678 |
+
self._convert_runtime_error_msg(exc)
|
| 679 |
+
raise
|
| 680 |
+
|
| 681 |
+
return
|
| 682 |
+
|
| 683 |
+
# This is the "fast path" where we don't let other tasks run
|
| 684 |
+
await trio.lowlevel.checkpoint_if_cancelled()
|
| 685 |
+
try:
|
| 686 |
+
self.__original.acquire_nowait()
|
| 687 |
+
except trio.WouldBlock:
|
| 688 |
+
await self.__original._lot.park()
|
| 689 |
+
except RuntimeError as exc:
|
| 690 |
+
self._convert_runtime_error_msg(exc)
|
| 691 |
+
raise
|
| 692 |
+
|
| 693 |
+
def acquire_nowait(self) -> None:
|
| 694 |
+
try:
|
| 695 |
+
self.__original.acquire_nowait()
|
| 696 |
+
except trio.WouldBlock:
|
| 697 |
+
raise WouldBlock from None
|
| 698 |
+
except RuntimeError as exc:
|
| 699 |
+
self._convert_runtime_error_msg(exc)
|
| 700 |
+
raise
|
| 701 |
+
|
| 702 |
+
def locked(self) -> bool:
|
| 703 |
+
return self.__original.locked()
|
| 704 |
+
|
| 705 |
+
def release(self) -> None:
|
| 706 |
+
self.__original.release()
|
| 707 |
+
|
| 708 |
+
def statistics(self) -> LockStatistics:
|
| 709 |
+
orig_statistics = self.__original.statistics()
|
| 710 |
+
owner = TrioTaskInfo(orig_statistics.owner) if orig_statistics.owner else None
|
| 711 |
+
return LockStatistics(
|
| 712 |
+
orig_statistics.locked, owner, orig_statistics.tasks_waiting
|
| 713 |
+
)
|
| 714 |
+
|
| 715 |
+
|
| 716 |
+
class Semaphore(BaseSemaphore):
|
| 717 |
+
def __new__(
|
| 718 |
+
cls,
|
| 719 |
+
initial_value: int,
|
| 720 |
+
*,
|
| 721 |
+
max_value: int | None = None,
|
| 722 |
+
fast_acquire: bool = False,
|
| 723 |
+
) -> Semaphore:
|
| 724 |
+
return object.__new__(cls)
|
| 725 |
+
|
| 726 |
+
def __init__(
|
| 727 |
+
self,
|
| 728 |
+
initial_value: int,
|
| 729 |
+
*,
|
| 730 |
+
max_value: int | None = None,
|
| 731 |
+
fast_acquire: bool = False,
|
| 732 |
+
) -> None:
|
| 733 |
+
super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)
|
| 734 |
+
self.__original = trio.Semaphore(initial_value, max_value=max_value)
|
| 735 |
+
|
| 736 |
+
async def acquire(self) -> None:
|
| 737 |
+
if not self._fast_acquire:
|
| 738 |
+
await self.__original.acquire()
|
| 739 |
+
return
|
| 740 |
+
|
| 741 |
+
# This is the "fast path" where we don't let other tasks run
|
| 742 |
+
await trio.lowlevel.checkpoint_if_cancelled()
|
| 743 |
+
try:
|
| 744 |
+
self.__original.acquire_nowait()
|
| 745 |
+
except trio.WouldBlock:
|
| 746 |
+
await self.__original._lot.park()
|
| 747 |
+
|
| 748 |
+
def acquire_nowait(self) -> None:
|
| 749 |
+
try:
|
| 750 |
+
self.__original.acquire_nowait()
|
| 751 |
+
except trio.WouldBlock:
|
| 752 |
+
raise WouldBlock from None
|
| 753 |
+
|
| 754 |
+
@property
|
| 755 |
+
def max_value(self) -> int | None:
|
| 756 |
+
return self.__original.max_value
|
| 757 |
+
|
| 758 |
+
@property
|
| 759 |
+
def value(self) -> int:
|
| 760 |
+
return self.__original.value
|
| 761 |
+
|
| 762 |
+
def release(self) -> None:
|
| 763 |
+
self.__original.release()
|
| 764 |
+
|
| 765 |
+
def statistics(self) -> SemaphoreStatistics:
|
| 766 |
+
orig_statistics = self.__original.statistics()
|
| 767 |
+
return SemaphoreStatistics(orig_statistics.tasks_waiting)
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
class CapacityLimiter(BaseCapacityLimiter):
|
| 771 |
+
def __new__(
|
| 772 |
+
cls,
|
| 773 |
+
total_tokens: float | None = None,
|
| 774 |
+
*,
|
| 775 |
+
original: trio.CapacityLimiter | None = None,
|
| 776 |
+
) -> CapacityLimiter:
|
| 777 |
+
return object.__new__(cls)
|
| 778 |
+
|
| 779 |
+
def __init__(
|
| 780 |
+
self,
|
| 781 |
+
total_tokens: float | None = None,
|
| 782 |
+
*,
|
| 783 |
+
original: trio.CapacityLimiter | None = None,
|
| 784 |
+
) -> None:
|
| 785 |
+
if original is not None:
|
| 786 |
+
self.__original = original
|
| 787 |
+
else:
|
| 788 |
+
assert total_tokens is not None
|
| 789 |
+
self.__original = trio.CapacityLimiter(total_tokens)
|
| 790 |
+
|
| 791 |
+
async def __aenter__(self) -> None:
|
| 792 |
+
return await self.__original.__aenter__()
|
| 793 |
+
|
| 794 |
+
async def __aexit__(
|
| 795 |
+
self,
|
| 796 |
+
exc_type: type[BaseException] | None,
|
| 797 |
+
exc_val: BaseException | None,
|
| 798 |
+
exc_tb: TracebackType | None,
|
| 799 |
+
) -> None:
|
| 800 |
+
await self.__original.__aexit__(exc_type, exc_val, exc_tb)
|
| 801 |
+
|
| 802 |
+
@property
|
| 803 |
+
def total_tokens(self) -> float:
|
| 804 |
+
return self.__original.total_tokens
|
| 805 |
+
|
| 806 |
+
@total_tokens.setter
|
| 807 |
+
def total_tokens(self, value: float) -> None:
|
| 808 |
+
self.__original.total_tokens = value
|
| 809 |
+
|
| 810 |
+
@property
|
| 811 |
+
def borrowed_tokens(self) -> int:
|
| 812 |
+
return self.__original.borrowed_tokens
|
| 813 |
+
|
| 814 |
+
@property
|
| 815 |
+
def available_tokens(self) -> float:
|
| 816 |
+
return self.__original.available_tokens
|
| 817 |
+
|
| 818 |
+
def acquire_nowait(self) -> None:
|
| 819 |
+
self.__original.acquire_nowait()
|
| 820 |
+
|
| 821 |
+
def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
|
| 822 |
+
self.__original.acquire_on_behalf_of_nowait(borrower)
|
| 823 |
+
|
| 824 |
+
async def acquire(self) -> None:
|
| 825 |
+
await self.__original.acquire()
|
| 826 |
+
|
| 827 |
+
async def acquire_on_behalf_of(self, borrower: object) -> None:
|
| 828 |
+
await self.__original.acquire_on_behalf_of(borrower)
|
| 829 |
+
|
| 830 |
+
def release(self) -> None:
|
| 831 |
+
return self.__original.release()
|
| 832 |
+
|
| 833 |
+
def release_on_behalf_of(self, borrower: object) -> None:
|
| 834 |
+
return self.__original.release_on_behalf_of(borrower)
|
| 835 |
+
|
| 836 |
+
def statistics(self) -> CapacityLimiterStatistics:
|
| 837 |
+
orig = self.__original.statistics()
|
| 838 |
+
return CapacityLimiterStatistics(
|
| 839 |
+
borrowed_tokens=orig.borrowed_tokens,
|
| 840 |
+
total_tokens=orig.total_tokens,
|
| 841 |
+
borrowers=tuple(orig.borrowers),
|
| 842 |
+
tasks_waiting=orig.tasks_waiting,
|
| 843 |
+
)
|
| 844 |
+
|
| 845 |
+
|
| 846 |
+
_capacity_limiter_wrapper: trio.lowlevel.RunVar = RunVar("_capacity_limiter_wrapper")
|
| 847 |
+
|
| 848 |
+
|
| 849 |
+
#
|
| 850 |
+
# Signal handling
|
| 851 |
+
#
|
| 852 |
+
|
| 853 |
+
|
| 854 |
+
class _SignalReceiver:
|
| 855 |
+
_iterator: AsyncIterator[int]
|
| 856 |
+
|
| 857 |
+
def __init__(self, signals: tuple[Signals, ...]):
|
| 858 |
+
self._signals = signals
|
| 859 |
+
|
| 860 |
+
def __enter__(self) -> _SignalReceiver:
|
| 861 |
+
self._cm = trio.open_signal_receiver(*self._signals)
|
| 862 |
+
self._iterator = self._cm.__enter__()
|
| 863 |
+
return self
|
| 864 |
+
|
| 865 |
+
def __exit__(
|
| 866 |
+
self,
|
| 867 |
+
exc_type: type[BaseException] | None,
|
| 868 |
+
exc_val: BaseException | None,
|
| 869 |
+
exc_tb: TracebackType | None,
|
| 870 |
+
) -> bool | None:
|
| 871 |
+
return self._cm.__exit__(exc_type, exc_val, exc_tb)
|
| 872 |
+
|
| 873 |
+
def __aiter__(self) -> _SignalReceiver:
|
| 874 |
+
return self
|
| 875 |
+
|
| 876 |
+
async def __anext__(self) -> Signals:
|
| 877 |
+
signum = await self._iterator.__anext__()
|
| 878 |
+
return Signals(signum)
|
| 879 |
+
|
| 880 |
+
|
| 881 |
+
#
|
| 882 |
+
# Testing and debugging
|
| 883 |
+
#
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
class TestRunner(abc.TestRunner):
|
| 887 |
+
def __init__(self, **options: Any) -> None:
|
| 888 |
+
from queue import Queue
|
| 889 |
+
|
| 890 |
+
self._call_queue: Queue[Callable[[], object]] = Queue()
|
| 891 |
+
self._send_stream: MemoryObjectSendStream | None = None
|
| 892 |
+
self._options = options
|
| 893 |
+
|
| 894 |
+
def __exit__(
|
| 895 |
+
self,
|
| 896 |
+
exc_type: type[BaseException] | None,
|
| 897 |
+
exc_val: BaseException | None,
|
| 898 |
+
exc_tb: types.TracebackType | None,
|
| 899 |
+
) -> None:
|
| 900 |
+
if self._send_stream:
|
| 901 |
+
self._send_stream.close()
|
| 902 |
+
while self._send_stream is not None:
|
| 903 |
+
self._call_queue.get()()
|
| 904 |
+
|
| 905 |
+
async def _run_tests_and_fixtures(self) -> None:
|
| 906 |
+
self._send_stream, receive_stream = create_memory_object_stream(1)
|
| 907 |
+
with receive_stream:
|
| 908 |
+
async for coro, outcome_holder in receive_stream:
|
| 909 |
+
try:
|
| 910 |
+
retval = await coro
|
| 911 |
+
except BaseException as exc:
|
| 912 |
+
outcome_holder.append(Error(exc))
|
| 913 |
+
else:
|
| 914 |
+
outcome_holder.append(Value(retval))
|
| 915 |
+
|
| 916 |
+
def _main_task_finished(self, outcome: object) -> None:
|
| 917 |
+
self._send_stream = None
|
| 918 |
+
|
| 919 |
+
def _call_in_runner_task(
|
| 920 |
+
self,
|
| 921 |
+
func: Callable[P, Awaitable[T_Retval]],
|
| 922 |
+
*args: P.args,
|
| 923 |
+
**kwargs: P.kwargs,
|
| 924 |
+
) -> T_Retval:
|
| 925 |
+
if self._send_stream is None:
|
| 926 |
+
trio.lowlevel.start_guest_run(
|
| 927 |
+
self._run_tests_and_fixtures,
|
| 928 |
+
run_sync_soon_threadsafe=self._call_queue.put,
|
| 929 |
+
done_callback=self._main_task_finished,
|
| 930 |
+
**self._options,
|
| 931 |
+
)
|
| 932 |
+
while self._send_stream is None:
|
| 933 |
+
self._call_queue.get()()
|
| 934 |
+
|
| 935 |
+
outcome_holder: list[Outcome] = []
|
| 936 |
+
self._send_stream.send_nowait((func(*args, **kwargs), outcome_holder))
|
| 937 |
+
while not outcome_holder:
|
| 938 |
+
self._call_queue.get()()
|
| 939 |
+
|
| 940 |
+
return outcome_holder[0].unwrap()
|
| 941 |
+
|
| 942 |
+
def run_asyncgen_fixture(
|
| 943 |
+
self,
|
| 944 |
+
fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
|
| 945 |
+
kwargs: dict[str, Any],
|
| 946 |
+
) -> Iterable[T_Retval]:
|
| 947 |
+
asyncgen = fixture_func(**kwargs)
|
| 948 |
+
fixturevalue: T_Retval = self._call_in_runner_task(asyncgen.asend, None)
|
| 949 |
+
|
| 950 |
+
yield fixturevalue
|
| 951 |
+
|
| 952 |
+
try:
|
| 953 |
+
self._call_in_runner_task(asyncgen.asend, None)
|
| 954 |
+
except StopAsyncIteration:
|
| 955 |
+
pass
|
| 956 |
+
else:
|
| 957 |
+
self._call_in_runner_task(asyncgen.aclose)
|
| 958 |
+
raise RuntimeError("Async generator fixture did not stop")
|
| 959 |
+
|
| 960 |
+
def run_fixture(
|
| 961 |
+
self,
|
| 962 |
+
fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
|
| 963 |
+
kwargs: dict[str, Any],
|
| 964 |
+
) -> T_Retval:
|
| 965 |
+
return self._call_in_runner_task(fixture_func, **kwargs)
|
| 966 |
+
|
| 967 |
+
def run_test(
|
| 968 |
+
self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
|
| 969 |
+
) -> None:
|
| 970 |
+
self._call_in_runner_task(test_func, **kwargs)
|
| 971 |
+
|
| 972 |
+
|
| 973 |
+
class TrioTaskInfo(TaskInfo):
|
| 974 |
+
def __init__(self, task: trio.lowlevel.Task):
|
| 975 |
+
parent_id = None
|
| 976 |
+
if task.parent_nursery and task.parent_nursery.parent_task:
|
| 977 |
+
parent_id = id(task.parent_nursery.parent_task)
|
| 978 |
+
|
| 979 |
+
super().__init__(id(task), parent_id, task.name, task.coro)
|
| 980 |
+
self._task = weakref.proxy(task)
|
| 981 |
+
|
| 982 |
+
def has_pending_cancellation(self) -> bool:
|
| 983 |
+
try:
|
| 984 |
+
return self._task._cancel_status.effectively_cancelled
|
| 985 |
+
except ReferenceError:
|
| 986 |
+
# If the task is no longer around, it surely doesn't have a cancellation
|
| 987 |
+
# pending
|
| 988 |
+
return False
|
| 989 |
+
|
| 990 |
+
|
| 991 |
+
class TrioBackend(AsyncBackend):
|
| 992 |
+
@classmethod
|
| 993 |
+
def run(
|
| 994 |
+
cls,
|
| 995 |
+
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
|
| 996 |
+
args: tuple[Unpack[PosArgsT]],
|
| 997 |
+
kwargs: dict[str, Any],
|
| 998 |
+
options: dict[str, Any],
|
| 999 |
+
) -> T_Retval:
|
| 1000 |
+
return trio.run(func, *args)
|
| 1001 |
+
|
| 1002 |
+
@classmethod
|
| 1003 |
+
def current_token(cls) -> object:
|
| 1004 |
+
return trio.lowlevel.current_trio_token()
|
| 1005 |
+
|
| 1006 |
+
@classmethod
|
| 1007 |
+
def current_time(cls) -> float:
|
| 1008 |
+
return trio.current_time()
|
| 1009 |
+
|
| 1010 |
+
@classmethod
|
| 1011 |
+
def cancelled_exception_class(cls) -> type[BaseException]:
|
| 1012 |
+
return trio.Cancelled
|
| 1013 |
+
|
| 1014 |
+
@classmethod
|
| 1015 |
+
async def checkpoint(cls) -> None:
|
| 1016 |
+
await trio.lowlevel.checkpoint()
|
| 1017 |
+
|
| 1018 |
+
@classmethod
|
| 1019 |
+
async def checkpoint_if_cancelled(cls) -> None:
|
| 1020 |
+
await trio.lowlevel.checkpoint_if_cancelled()
|
| 1021 |
+
|
| 1022 |
+
@classmethod
|
| 1023 |
+
async def cancel_shielded_checkpoint(cls) -> None:
|
| 1024 |
+
await trio.lowlevel.cancel_shielded_checkpoint()
|
| 1025 |
+
|
| 1026 |
+
@classmethod
|
| 1027 |
+
async def sleep(cls, delay: float) -> None:
|
| 1028 |
+
await trio.sleep(delay)
|
| 1029 |
+
|
| 1030 |
+
@classmethod
|
| 1031 |
+
def create_cancel_scope(
|
| 1032 |
+
cls, *, deadline: float = math.inf, shield: bool = False
|
| 1033 |
+
) -> abc.CancelScope:
|
| 1034 |
+
return CancelScope(deadline=deadline, shield=shield)
|
| 1035 |
+
|
| 1036 |
+
@classmethod
|
| 1037 |
+
def current_effective_deadline(cls) -> float:
|
| 1038 |
+
return trio.current_effective_deadline()
|
| 1039 |
+
|
| 1040 |
+
@classmethod
|
| 1041 |
+
def create_task_group(cls) -> abc.TaskGroup:
|
| 1042 |
+
return TaskGroup()
|
| 1043 |
+
|
| 1044 |
+
@classmethod
|
| 1045 |
+
def create_event(cls) -> abc.Event:
|
| 1046 |
+
return Event()
|
| 1047 |
+
|
| 1048 |
+
@classmethod
|
| 1049 |
+
def create_lock(cls, *, fast_acquire: bool) -> Lock:
|
| 1050 |
+
return Lock(fast_acquire=fast_acquire)
|
| 1051 |
+
|
| 1052 |
+
@classmethod
|
| 1053 |
+
def create_semaphore(
|
| 1054 |
+
cls,
|
| 1055 |
+
initial_value: int,
|
| 1056 |
+
*,
|
| 1057 |
+
max_value: int | None = None,
|
| 1058 |
+
fast_acquire: bool = False,
|
| 1059 |
+
) -> abc.Semaphore:
|
| 1060 |
+
return Semaphore(initial_value, max_value=max_value, fast_acquire=fast_acquire)
|
| 1061 |
+
|
| 1062 |
+
@classmethod
|
| 1063 |
+
def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
|
| 1064 |
+
return CapacityLimiter(total_tokens)
|
| 1065 |
+
|
| 1066 |
+
@classmethod
|
| 1067 |
+
async def run_sync_in_worker_thread(
|
| 1068 |
+
cls,
|
| 1069 |
+
func: Callable[[Unpack[PosArgsT]], T_Retval],
|
| 1070 |
+
args: tuple[Unpack[PosArgsT]],
|
| 1071 |
+
abandon_on_cancel: bool = False,
|
| 1072 |
+
limiter: abc.CapacityLimiter | None = None,
|
| 1073 |
+
) -> T_Retval:
|
| 1074 |
+
def wrapper() -> T_Retval:
|
| 1075 |
+
with claim_worker_thread(TrioBackend, token):
|
| 1076 |
+
return func(*args)
|
| 1077 |
+
|
| 1078 |
+
token = TrioBackend.current_token()
|
| 1079 |
+
return await run_sync(
|
| 1080 |
+
wrapper,
|
| 1081 |
+
abandon_on_cancel=abandon_on_cancel,
|
| 1082 |
+
limiter=cast(trio.CapacityLimiter, limiter),
|
| 1083 |
+
)
|
| 1084 |
+
|
| 1085 |
+
@classmethod
|
| 1086 |
+
def check_cancelled(cls) -> None:
|
| 1087 |
+
trio.from_thread.check_cancelled()
|
| 1088 |
+
|
| 1089 |
+
@classmethod
|
| 1090 |
+
def run_async_from_thread(
|
| 1091 |
+
cls,
|
| 1092 |
+
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
|
| 1093 |
+
args: tuple[Unpack[PosArgsT]],
|
| 1094 |
+
token: object,
|
| 1095 |
+
) -> T_Retval:
|
| 1096 |
+
return trio.from_thread.run(func, *args)
|
| 1097 |
+
|
| 1098 |
+
@classmethod
|
| 1099 |
+
def run_sync_from_thread(
|
| 1100 |
+
cls,
|
| 1101 |
+
func: Callable[[Unpack[PosArgsT]], T_Retval],
|
| 1102 |
+
args: tuple[Unpack[PosArgsT]],
|
| 1103 |
+
token: object,
|
| 1104 |
+
) -> T_Retval:
|
| 1105 |
+
return trio.from_thread.run_sync(func, *args)
|
| 1106 |
+
|
| 1107 |
+
@classmethod
|
| 1108 |
+
def create_blocking_portal(cls) -> abc.BlockingPortal:
|
| 1109 |
+
return BlockingPortal()
|
| 1110 |
+
|
| 1111 |
+
@classmethod
|
| 1112 |
+
async def open_process(
|
| 1113 |
+
cls,
|
| 1114 |
+
command: StrOrBytesPath | Sequence[StrOrBytesPath],
|
| 1115 |
+
*,
|
| 1116 |
+
stdin: int | IO[Any] | None,
|
| 1117 |
+
stdout: int | IO[Any] | None,
|
| 1118 |
+
stderr: int | IO[Any] | None,
|
| 1119 |
+
**kwargs: Any,
|
| 1120 |
+
) -> Process:
|
| 1121 |
+
def convert_item(item: StrOrBytesPath) -> str:
|
| 1122 |
+
str_or_bytes = os.fspath(item)
|
| 1123 |
+
if isinstance(str_or_bytes, str):
|
| 1124 |
+
return str_or_bytes
|
| 1125 |
+
else:
|
| 1126 |
+
return os.fsdecode(str_or_bytes)
|
| 1127 |
+
|
| 1128 |
+
if isinstance(command, (str, bytes, PathLike)):
|
| 1129 |
+
process = await trio.lowlevel.open_process(
|
| 1130 |
+
convert_item(command),
|
| 1131 |
+
stdin=stdin,
|
| 1132 |
+
stdout=stdout,
|
| 1133 |
+
stderr=stderr,
|
| 1134 |
+
shell=True,
|
| 1135 |
+
**kwargs,
|
| 1136 |
+
)
|
| 1137 |
+
else:
|
| 1138 |
+
process = await trio.lowlevel.open_process(
|
| 1139 |
+
[convert_item(item) for item in command],
|
| 1140 |
+
stdin=stdin,
|
| 1141 |
+
stdout=stdout,
|
| 1142 |
+
stderr=stderr,
|
| 1143 |
+
shell=False,
|
| 1144 |
+
**kwargs,
|
| 1145 |
+
)
|
| 1146 |
+
|
| 1147 |
+
stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None
|
| 1148 |
+
stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None
|
| 1149 |
+
stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None
|
| 1150 |
+
return Process(process, stdin_stream, stdout_stream, stderr_stream)
|
| 1151 |
+
|
| 1152 |
+
@classmethod
|
| 1153 |
+
def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None:
|
| 1154 |
+
trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers)
|
| 1155 |
+
|
| 1156 |
+
@classmethod
|
| 1157 |
+
async def connect_tcp(
|
| 1158 |
+
cls, host: str, port: int, local_address: IPSockAddrType | None = None
|
| 1159 |
+
) -> SocketStream:
|
| 1160 |
+
family = socket.AF_INET6 if ":" in host else socket.AF_INET
|
| 1161 |
+
trio_socket = trio.socket.socket(family)
|
| 1162 |
+
trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
| 1163 |
+
if local_address:
|
| 1164 |
+
await trio_socket.bind(local_address)
|
| 1165 |
+
|
| 1166 |
+
try:
|
| 1167 |
+
await trio_socket.connect((host, port))
|
| 1168 |
+
except BaseException:
|
| 1169 |
+
trio_socket.close()
|
| 1170 |
+
raise
|
| 1171 |
+
|
| 1172 |
+
return SocketStream(trio_socket)
|
| 1173 |
+
|
| 1174 |
+
@classmethod
|
| 1175 |
+
async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream:
|
| 1176 |
+
trio_socket = trio.socket.socket(socket.AF_UNIX)
|
| 1177 |
+
try:
|
| 1178 |
+
await trio_socket.connect(path)
|
| 1179 |
+
except BaseException:
|
| 1180 |
+
trio_socket.close()
|
| 1181 |
+
raise
|
| 1182 |
+
|
| 1183 |
+
return UNIXSocketStream(trio_socket)
|
| 1184 |
+
|
| 1185 |
+
@classmethod
|
| 1186 |
+
def create_tcp_listener(cls, sock: socket.socket) -> abc.SocketListener:
|
| 1187 |
+
return TCPSocketListener(sock)
|
| 1188 |
+
|
| 1189 |
+
@classmethod
|
| 1190 |
+
def create_unix_listener(cls, sock: socket.socket) -> abc.SocketListener:
|
| 1191 |
+
return UNIXSocketListener(sock)
|
| 1192 |
+
|
| 1193 |
+
@classmethod
|
| 1194 |
+
async def create_udp_socket(
|
| 1195 |
+
cls,
|
| 1196 |
+
family: socket.AddressFamily,
|
| 1197 |
+
local_address: IPSockAddrType | None,
|
| 1198 |
+
remote_address: IPSockAddrType | None,
|
| 1199 |
+
reuse_port: bool,
|
| 1200 |
+
) -> UDPSocket | ConnectedUDPSocket:
|
| 1201 |
+
trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM)
|
| 1202 |
+
|
| 1203 |
+
if reuse_port:
|
| 1204 |
+
trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
|
| 1205 |
+
|
| 1206 |
+
if local_address:
|
| 1207 |
+
await trio_socket.bind(local_address)
|
| 1208 |
+
|
| 1209 |
+
if remote_address:
|
| 1210 |
+
await trio_socket.connect(remote_address)
|
| 1211 |
+
return ConnectedUDPSocket(trio_socket)
|
| 1212 |
+
else:
|
| 1213 |
+
return UDPSocket(trio_socket)
|
| 1214 |
+
|
| 1215 |
+
@classmethod
|
| 1216 |
+
@overload
|
| 1217 |
+
async def create_unix_datagram_socket(
|
| 1218 |
+
cls, raw_socket: socket.socket, remote_path: None
|
| 1219 |
+
) -> abc.UNIXDatagramSocket: ...
|
| 1220 |
+
|
| 1221 |
+
@classmethod
|
| 1222 |
+
@overload
|
| 1223 |
+
async def create_unix_datagram_socket(
|
| 1224 |
+
cls, raw_socket: socket.socket, remote_path: str | bytes
|
| 1225 |
+
) -> abc.ConnectedUNIXDatagramSocket: ...
|
| 1226 |
+
|
| 1227 |
+
@classmethod
|
| 1228 |
+
async def create_unix_datagram_socket(
|
| 1229 |
+
cls, raw_socket: socket.socket, remote_path: str | bytes | None
|
| 1230 |
+
) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket:
|
| 1231 |
+
trio_socket = trio.socket.from_stdlib_socket(raw_socket)
|
| 1232 |
+
|
| 1233 |
+
if remote_path:
|
| 1234 |
+
await trio_socket.connect(remote_path)
|
| 1235 |
+
return ConnectedUNIXDatagramSocket(trio_socket)
|
| 1236 |
+
else:
|
| 1237 |
+
return UNIXDatagramSocket(trio_socket)
|
| 1238 |
+
|
| 1239 |
+
@classmethod
|
| 1240 |
+
async def getaddrinfo(
|
| 1241 |
+
cls,
|
| 1242 |
+
host: bytes | str | None,
|
| 1243 |
+
port: str | int | None,
|
| 1244 |
+
*,
|
| 1245 |
+
family: int | AddressFamily = 0,
|
| 1246 |
+
type: int | SocketKind = 0,
|
| 1247 |
+
proto: int = 0,
|
| 1248 |
+
flags: int = 0,
|
| 1249 |
+
) -> list[
|
| 1250 |
+
tuple[
|
| 1251 |
+
AddressFamily,
|
| 1252 |
+
SocketKind,
|
| 1253 |
+
int,
|
| 1254 |
+
str,
|
| 1255 |
+
tuple[str, int] | tuple[str, int, int, int],
|
| 1256 |
+
]
|
| 1257 |
+
]:
|
| 1258 |
+
return await trio.socket.getaddrinfo(host, port, family, type, proto, flags)
|
| 1259 |
+
|
| 1260 |
+
@classmethod
|
| 1261 |
+
async def getnameinfo(
|
| 1262 |
+
cls, sockaddr: IPSockAddrType, flags: int = 0
|
| 1263 |
+
) -> tuple[str, str]:
|
| 1264 |
+
return await trio.socket.getnameinfo(sockaddr, flags)
|
| 1265 |
+
|
| 1266 |
+
@classmethod
|
| 1267 |
+
async def wait_readable(cls, obj: HasFileno | int) -> None:
|
| 1268 |
+
try:
|
| 1269 |
+
await wait_readable(obj)
|
| 1270 |
+
except trio.ClosedResourceError as exc:
|
| 1271 |
+
raise ClosedResourceError().with_traceback(exc.__traceback__) from None
|
| 1272 |
+
except trio.BusyResourceError:
|
| 1273 |
+
raise BusyResourceError("reading from") from None
|
| 1274 |
+
|
| 1275 |
+
@classmethod
|
| 1276 |
+
async def wait_writable(cls, obj: HasFileno | int) -> None:
|
| 1277 |
+
try:
|
| 1278 |
+
await wait_writable(obj)
|
| 1279 |
+
except trio.ClosedResourceError as exc:
|
| 1280 |
+
raise ClosedResourceError().with_traceback(exc.__traceback__) from None
|
| 1281 |
+
except trio.BusyResourceError:
|
| 1282 |
+
raise BusyResourceError("writing to") from None
|
| 1283 |
+
|
| 1284 |
+
@classmethod
|
| 1285 |
+
def current_default_thread_limiter(cls) -> CapacityLimiter:
|
| 1286 |
+
try:
|
| 1287 |
+
return _capacity_limiter_wrapper.get()
|
| 1288 |
+
except LookupError:
|
| 1289 |
+
limiter = CapacityLimiter(
|
| 1290 |
+
original=trio.to_thread.current_default_thread_limiter()
|
| 1291 |
+
)
|
| 1292 |
+
_capacity_limiter_wrapper.set(limiter)
|
| 1293 |
+
return limiter
|
| 1294 |
+
|
| 1295 |
+
@classmethod
|
| 1296 |
+
def open_signal_receiver(
|
| 1297 |
+
cls, *signals: Signals
|
| 1298 |
+
) -> AbstractContextManager[AsyncIterator[Signals]]:
|
| 1299 |
+
return _SignalReceiver(signals)
|
| 1300 |
+
|
| 1301 |
+
@classmethod
|
| 1302 |
+
def get_current_task(cls) -> TaskInfo:
|
| 1303 |
+
task = current_task()
|
| 1304 |
+
return TrioTaskInfo(task)
|
| 1305 |
+
|
| 1306 |
+
@classmethod
|
| 1307 |
+
def get_running_tasks(cls) -> Sequence[TaskInfo]:
|
| 1308 |
+
root_task = current_root_task()
|
| 1309 |
+
assert root_task
|
| 1310 |
+
task_infos = [TrioTaskInfo(root_task)]
|
| 1311 |
+
nurseries = root_task.child_nurseries
|
| 1312 |
+
while nurseries:
|
| 1313 |
+
new_nurseries: list[trio.Nursery] = []
|
| 1314 |
+
for nursery in nurseries:
|
| 1315 |
+
for task in nursery.child_tasks:
|
| 1316 |
+
task_infos.append(TrioTaskInfo(task))
|
| 1317 |
+
new_nurseries.extend(task.child_nurseries)
|
| 1318 |
+
|
| 1319 |
+
nurseries = new_nurseries
|
| 1320 |
+
|
| 1321 |
+
return task_infos
|
| 1322 |
+
|
| 1323 |
+
@classmethod
|
| 1324 |
+
async def wait_all_tasks_blocked(cls) -> None:
|
| 1325 |
+
from trio.testing import wait_all_tasks_blocked
|
| 1326 |
+
|
| 1327 |
+
await wait_all_tasks_blocked()
|
| 1328 |
+
|
| 1329 |
+
@classmethod
|
| 1330 |
+
def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
|
| 1331 |
+
return TestRunner(**options)
|
| 1332 |
+
|
| 1333 |
+
|
| 1334 |
+
backend_class = TrioBackend
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_core/__init__.py
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_core/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (169 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_core/__pycache__/_eventloop.cpython-310.pyc
ADDED
|
Binary file (4.78 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_core/__pycache__/_resources.cpython-310.pyc
ADDED
|
Binary file (748 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_fileio.py
ADDED
|
@@ -0,0 +1,674 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import pathlib
|
| 5 |
+
import sys
|
| 6 |
+
from collections.abc import AsyncIterator, Callable, Iterable, Iterator, Sequence
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from functools import partial
|
| 9 |
+
from os import PathLike
|
| 10 |
+
from typing import (
|
| 11 |
+
IO,
|
| 12 |
+
TYPE_CHECKING,
|
| 13 |
+
Any,
|
| 14 |
+
AnyStr,
|
| 15 |
+
Final,
|
| 16 |
+
Generic,
|
| 17 |
+
overload,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
from .. import to_thread
|
| 21 |
+
from ..abc import AsyncResource
|
| 22 |
+
|
| 23 |
+
if TYPE_CHECKING:
|
| 24 |
+
from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
|
| 25 |
+
else:
|
| 26 |
+
ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class AsyncFile(AsyncResource, Generic[AnyStr]):
    """
    An asynchronous file object.

    This class wraps a standard file object and provides async friendly versions of the
    following blocking methods (where available on the original file object):

    * read
    * read1
    * readline
    * readlines
    * readinto
    * readinto1
    * write
    * writelines
    * truncate
    * seek
    * tell
    * flush

    All other methods are directly passed through.

    This class supports the asynchronous context manager protocol which closes the
    underlying file at the end of the context block.

    This class also supports asynchronous iteration::

        async with await open_file(...) as f:
            async for line in f:
                print(line)
    """

    def __init__(self, fp: IO[AnyStr]) -> None:
        # Typed as Any so that arbitrary attribute/method access via
        # __getattr__ does not trip the type checker.
        self._fp: Any = fp

    def __getattr__(self, name: str) -> object:
        # Anything not explicitly wrapped below is passed through to the
        # underlying (synchronous) file object unchanged.
        return getattr(self._fp, name)

    @property
    def wrapped(self) -> IO[AnyStr]:
        """The wrapped file object."""
        return self._fp

    async def __aiter__(self) -> AsyncIterator[AnyStr]:
        # Mirrors synchronous file iteration: yield lines until readline()
        # returns an empty string/bytes at EOF.
        while True:
            line = await self.readline()
            if line:
                yield line
            else:
                break

    async def aclose(self) -> None:
        # Each wrapped call runs the blocking operation in a worker thread.
        return await to_thread.run_sync(self._fp.close)

    async def read(self, size: int = -1) -> AnyStr:
        return await to_thread.run_sync(self._fp.read, size)

    async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
        return await to_thread.run_sync(self._fp.read1, size)

    async def readline(self) -> AnyStr:
        return await to_thread.run_sync(self._fp.readline)

    async def readlines(self) -> list[AnyStr]:
        return await to_thread.run_sync(self._fp.readlines)

    async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
        return await to_thread.run_sync(self._fp.readinto, b)

    async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
        return await to_thread.run_sync(self._fp.readinto1, b)

    @overload
    async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: ...

    @overload
    async def write(self: AsyncFile[str], b: str) -> int: ...

    async def write(self, b: ReadableBuffer | str) -> int:
        return await to_thread.run_sync(self._fp.write, b)

    @overload
    async def writelines(
        self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
    ) -> None: ...

    @overload
    async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: ...

    async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
        return await to_thread.run_sync(self._fp.writelines, lines)

    async def truncate(self, size: int | None = None) -> int:
        return await to_thread.run_sync(self._fp.truncate, size)

    async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
        return await to_thread.run_sync(self._fp.seek, offset, whence)

    async def tell(self) -> int:
        return await to_thread.run_sync(self._fp.tell)

    async def flush(self) -> None:
        return await to_thread.run_sync(self._fp.flush)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# Overload: any binary mode yields an AsyncFile[bytes].
@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenBinaryMode,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[bytes]: ...


# Overload: text modes (including the default "r") yield an AsyncFile[str].
@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenTextMode = ...,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[str]: ...


async def open_file(
    file: str | PathLike[str] | int,
    mode: str = "r",
    buffering: int = -1,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
    closefd: bool = True,
    opener: Callable[[str, int], int] | None = None,
) -> AsyncFile[Any]:
    """
    Open a file asynchronously.

    The arguments are exactly the same as for the builtin :func:`open`.

    :return: an asynchronous file object

    """
    # open() itself can block (e.g. on network file systems), so it is also
    # dispatched to a worker thread.
    fp = await to_thread.run_sync(
        open, file, mode, buffering, encoding, errors, newline, closefd, opener
    )
    return AsyncFile(fp)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
    """
    Wrap an existing file as an asynchronous file.

    :param file: an existing file-like object
    :return: an asynchronous file object

    """
    return AsyncFile(file)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
@dataclass(eq=False)
class _PathIterator(AsyncIterator["Path"]):
    """Async iterator adapter over a synchronous iterator of path objects."""

    # Underlying synchronous iterator (e.g. from pathlib's glob/iterdir).
    iterator: Iterator[PathLike[str]]

    async def __anext__(self) -> Path:
        # next() is called in a worker thread with None as the exhaustion
        # sentinel, since a StopIteration raised in the thread would not
        # propagate usefully across the async boundary.
        nextval = await to_thread.run_sync(
            next, self.iterator, None, abandon_on_cancel=True
        )
        if nextval is None:
            raise StopAsyncIteration from None

        return Path(nextval)
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
class Path:
    """
    An asynchronous version of :class:`pathlib.Path`.

    This class cannot be substituted for :class:`pathlib.Path` or
    :class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike`
    interface.

    It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for
    the deprecated :meth:`~pathlib.Path.link_to` method.

    Some methods may be unavailable or have limited functionality, based on the Python
    version:

    * :meth:`~pathlib.Path.from_uri` (available on Python 3.13 or later)
    * :meth:`~pathlib.Path.full_match` (available on Python 3.13 or later)
    * :meth:`~pathlib.Path.is_junction` (available on Python 3.12 or later)
    * :meth:`~pathlib.Path.match` (the ``case_sensitive`` parameter is only available on
      Python 3.13 or later)
    * :meth:`~pathlib.Path.relative_to` (the ``walk_up`` parameter is only available on
      Python 3.12 or later)
    * :meth:`~pathlib.Path.walk` (available on Python 3.12 or later)

    Any methods that do disk I/O need to be awaited on. These methods are:

    * :meth:`~pathlib.Path.absolute`
    * :meth:`~pathlib.Path.chmod`
    * :meth:`~pathlib.Path.cwd`
    * :meth:`~pathlib.Path.exists`
    * :meth:`~pathlib.Path.expanduser`
    * :meth:`~pathlib.Path.group`
    * :meth:`~pathlib.Path.hardlink_to`
    * :meth:`~pathlib.Path.home`
    * :meth:`~pathlib.Path.is_block_device`
    * :meth:`~pathlib.Path.is_char_device`
    * :meth:`~pathlib.Path.is_dir`
    * :meth:`~pathlib.Path.is_fifo`
    * :meth:`~pathlib.Path.is_file`
    * :meth:`~pathlib.Path.is_junction`
    * :meth:`~pathlib.Path.is_mount`
    * :meth:`~pathlib.Path.is_socket`
    * :meth:`~pathlib.Path.is_symlink`
    * :meth:`~pathlib.Path.lchmod`
    * :meth:`~pathlib.Path.lstat`
    * :meth:`~pathlib.Path.mkdir`
    * :meth:`~pathlib.Path.open`
    * :meth:`~pathlib.Path.owner`
    * :meth:`~pathlib.Path.read_bytes`
    * :meth:`~pathlib.Path.read_text`
    * :meth:`~pathlib.Path.readlink`
    * :meth:`~pathlib.Path.rename`
    * :meth:`~pathlib.Path.replace`
    * :meth:`~pathlib.Path.resolve`
    * :meth:`~pathlib.Path.rmdir`
    * :meth:`~pathlib.Path.samefile`
    * :meth:`~pathlib.Path.stat`
    * :meth:`~pathlib.Path.symlink_to`
    * :meth:`~pathlib.Path.touch`
    * :meth:`~pathlib.Path.unlink`
    * :meth:`~pathlib.Path.walk`
    * :meth:`~pathlib.Path.write_bytes`
    * :meth:`~pathlib.Path.write_text`

    Additionally, the following methods return an async iterator yielding
    :class:`~.Path` objects:

    * :meth:`~pathlib.Path.glob`
    * :meth:`~pathlib.Path.iterdir`
    * :meth:`~pathlib.Path.rglob`
    """

    __slots__ = "_path", "__weakref__"

    __weakref__: Any

    def __init__(self, *args: str | PathLike[str]) -> None:
        # All pure-path operations delegate to this wrapped pathlib.Path.
        self._path: Final[pathlib.Path] = pathlib.Path(*args)

    def __fspath__(self) -> str:
        return self._path.__fspath__()

    def __str__(self) -> str:
        return self._path.__str__()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.as_posix()!r})"

    def __bytes__(self) -> bytes:
        return self._path.__bytes__()

    def __hash__(self) -> int:
        return self._path.__hash__()

    # Comparisons unwrap another anyio Path to its pathlib.Path so that
    # anyio Paths and plain pathlib paths compare consistently.
    def __eq__(self, other: object) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__eq__(target)

    def __lt__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__lt__(target)

    def __le__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__le__(target)

    def __gt__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__gt__(target)

    def __ge__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__ge__(target)

    def __truediv__(self, other: str | PathLike[str]) -> Path:
        return Path(self._path / other)

    def __rtruediv__(self, other: str | PathLike[str]) -> Path:
        return Path(other) / self

    @property
    def parts(self) -> tuple[str, ...]:
        return self._path.parts

    @property
    def drive(self) -> str:
        return self._path.drive

    @property
    def root(self) -> str:
        return self._path.root

    @property
    def anchor(self) -> str:
        return self._path.anchor

    @property
    def parents(self) -> Sequence[Path]:
        return tuple(Path(p) for p in self._path.parents)

    @property
    def parent(self) -> Path:
        return Path(self._path.parent)

    @property
    def name(self) -> str:
        return self._path.name

    @property
    def suffix(self) -> str:
        return self._path.suffix

    @property
    def suffixes(self) -> list[str]:
        return self._path.suffixes

    @property
    def stem(self) -> str:
        return self._path.stem

    async def absolute(self) -> Path:
        path = await to_thread.run_sync(self._path.absolute)
        return Path(path)

    def as_posix(self) -> str:
        return self._path.as_posix()

    def as_uri(self) -> str:
        return self._path.as_uri()

    # Python 3.13 added the parser attribute, from_uri(), full_match() and a
    # case_sensitive parameter on match(); expose them only where available.
    if sys.version_info >= (3, 13):
        parser = pathlib.Path.parser

        @classmethod
        def from_uri(cls, uri: str) -> Path:
            return Path(pathlib.Path.from_uri(uri))

        def full_match(
            self, path_pattern: str, *, case_sensitive: bool | None = None
        ) -> bool:
            return self._path.full_match(path_pattern, case_sensitive=case_sensitive)

        def match(
            self, path_pattern: str, *, case_sensitive: bool | None = None
        ) -> bool:
            return self._path.match(path_pattern, case_sensitive=case_sensitive)
    else:

        def match(self, path_pattern: str) -> bool:
            return self._path.match(path_pattern)

    def is_relative_to(self, other: str | PathLike[str]) -> bool:
        # EAFP: relative_to() raises ValueError when the path is not relative
        # to ``other``.
        try:
            self.relative_to(other)
            return True
        except ValueError:
            return False

    async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
        # partial() is needed because run_sync() only forwards positional args.
        func = partial(os.chmod, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, mode)

    @classmethod
    async def cwd(cls) -> Path:
        path = await to_thread.run_sync(pathlib.Path.cwd)
        return cls(path)

    async def exists(self) -> bool:
        # abandon_on_cancel=True: these read-only stat-style calls have no
        # side effects, so the worker thread may be abandoned on cancellation.
        return await to_thread.run_sync(self._path.exists, abandon_on_cancel=True)

    async def expanduser(self) -> Path:
        return Path(
            await to_thread.run_sync(self._path.expanduser, abandon_on_cancel=True)
        )

    def glob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.glob(pattern)
        return _PathIterator(gen)

    async def group(self) -> str:
        return await to_thread.run_sync(self._path.group, abandon_on_cancel=True)

    async def hardlink_to(
        self, target: str | bytes | PathLike[str] | PathLike[bytes]
    ) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(os.link, target, self)

    @classmethod
    async def home(cls) -> Path:
        home_path = await to_thread.run_sync(pathlib.Path.home)
        return cls(home_path)

    def is_absolute(self) -> bool:
        return self._path.is_absolute()

    async def is_block_device(self) -> bool:
        return await to_thread.run_sync(
            self._path.is_block_device, abandon_on_cancel=True
        )

    async def is_char_device(self) -> bool:
        return await to_thread.run_sync(
            self._path.is_char_device, abandon_on_cancel=True
        )

    async def is_dir(self) -> bool:
        return await to_thread.run_sync(self._path.is_dir, abandon_on_cancel=True)

    async def is_fifo(self) -> bool:
        return await to_thread.run_sync(self._path.is_fifo, abandon_on_cancel=True)

    async def is_file(self) -> bool:
        return await to_thread.run_sync(self._path.is_file, abandon_on_cancel=True)

    # pathlib.Path.is_junction() only exists on Python 3.12+.
    if sys.version_info >= (3, 12):

        async def is_junction(self) -> bool:
            return await to_thread.run_sync(self._path.is_junction)

    async def is_mount(self) -> bool:
        # Delegates to os.path.ismount rather than pathlib; presumably for
        # compatibility across supported Python versions — TODO confirm.
        return await to_thread.run_sync(
            os.path.ismount, self._path, abandon_on_cancel=True
        )

    def is_reserved(self) -> bool:
        return self._path.is_reserved()

    async def is_socket(self) -> bool:
        return await to_thread.run_sync(self._path.is_socket, abandon_on_cancel=True)

    async def is_symlink(self) -> bool:
        return await to_thread.run_sync(self._path.is_symlink, abandon_on_cancel=True)

    def iterdir(self) -> AsyncIterator[Path]:
        gen = self._path.iterdir()
        return _PathIterator(gen)

    def joinpath(self, *args: str | PathLike[str]) -> Path:
        return Path(self._path.joinpath(*args))

    async def lchmod(self, mode: int) -> None:
        await to_thread.run_sync(self._path.lchmod, mode)

    async def lstat(self) -> os.stat_result:
        return await to_thread.run_sync(self._path.lstat, abandon_on_cancel=True)

    async def mkdir(
        self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
    ) -> None:
        await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)

    @overload
    async def open(
        self,
        mode: OpenBinaryMode,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[bytes]: ...

    @overload
    async def open(
        self,
        mode: OpenTextMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
    ) -> AsyncFile[str]: ...

    async def open(
        self,
        mode: str = "r",
        buffering: int = -1,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> AsyncFile[Any]:
        fp = await to_thread.run_sync(
            self._path.open, mode, buffering, encoding, errors, newline
        )
        return AsyncFile(fp)

    async def owner(self) -> str:
        return await to_thread.run_sync(self._path.owner, abandon_on_cancel=True)

    async def read_bytes(self) -> bytes:
        return await to_thread.run_sync(self._path.read_bytes)

    async def read_text(
        self, encoding: str | None = None, errors: str | None = None
    ) -> str:
        return await to_thread.run_sync(self._path.read_text, encoding, errors)

    # The ``walk_up`` parameter of relative_to() only exists on Python 3.12+.
    if sys.version_info >= (3, 12):

        def relative_to(
            self, *other: str | PathLike[str], walk_up: bool = False
        ) -> Path:
            return Path(self._path.relative_to(*other, walk_up=walk_up))

    else:

        def relative_to(self, *other: str | PathLike[str]) -> Path:
            return Path(self._path.relative_to(*other))

    async def readlink(self) -> Path:
        target = await to_thread.run_sync(os.readlink, self._path)
        return Path(target)

    async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.rename, target)
        return Path(target)

    async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.replace, target)
        return Path(target)

    async def resolve(self, strict: bool = False) -> Path:
        func = partial(self._path.resolve, strict=strict)
        return Path(await to_thread.run_sync(func, abandon_on_cancel=True))

    def rglob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.rglob(pattern)
        return _PathIterator(gen)

    async def rmdir(self) -> None:
        await to_thread.run_sync(self._path.rmdir)

    async def samefile(self, other_path: str | PathLike[str]) -> bool:
        if isinstance(other_path, Path):
            other_path = other_path._path

        return await to_thread.run_sync(
            self._path.samefile, other_path, abandon_on_cancel=True
        )

    async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
        func = partial(os.stat, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, abandon_on_cancel=True)

    async def symlink_to(
        self,
        target: str | bytes | PathLike[str] | PathLike[bytes],
        target_is_directory: bool = False,
    ) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)

    async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
        await to_thread.run_sync(self._path.touch, mode, exist_ok)

    async def unlink(self, missing_ok: bool = False) -> None:
        # ``missing_ok`` is emulated here rather than forwarded; presumably to
        # support Pythons where unlink() lacked the parameter — TODO confirm.
        try:
            await to_thread.run_sync(self._path.unlink)
        except FileNotFoundError:
            if not missing_ok:
                raise

    # pathlib.Path.walk() only exists on Python 3.12+.
    if sys.version_info >= (3, 12):

        async def walk(
            self,
            top_down: bool = True,
            on_error: Callable[[OSError], object] | None = None,
            follow_symlinks: bool = False,
        ) -> AsyncIterator[tuple[Path, list[str], list[str]]]:
            # Each step of the synchronous walk generator is advanced in a
            # worker thread; None signals exhaustion (StopIteration cannot
            # cross the thread boundary usefully).
            def get_next_value() -> tuple[pathlib.Path, list[str], list[str]] | None:
                try:
                    return next(gen)
                except StopIteration:
                    return None

            gen = self._path.walk(top_down, on_error, follow_symlinks)
            while True:
                value = await to_thread.run_sync(get_next_value)
                if value is None:
                    return

                root, dirs, paths = value
                yield Path(root), dirs, paths

    def with_name(self, name: str) -> Path:
        return Path(self._path.with_name(name))

    def with_stem(self, stem: str) -> Path:
        # Implemented via with_name() so it also works on Pythons that
        # predate pathlib's with_stem().
        return Path(self._path.with_name(stem + self._path.suffix))

    def with_suffix(self, suffix: str) -> Path:
        return Path(self._path.with_suffix(suffix))

    def with_segments(self, *pathsegments: str | PathLike[str]) -> Path:
        return Path(*pathsegments)

    async def write_bytes(self, data: bytes) -> int:
        return await to_thread.run_sync(self._path.write_bytes, data)

    async def write_text(
        self,
        data: str,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> int:
        # Path.write_text() does not support the "newline" parameter before
        # Python 3.10, so open the file manually instead.
        def sync_write_text() -> int:
            with self._path.open(
                "w", encoding=encoding, errors=errors, newline=newline
            ) as fp:
                return fp.write(data)

        return await to_thread.run_sync(sync_write_text)
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
# Register Path as a virtual subclass of os.PathLike so that
# isinstance(p, os.PathLike) checks succeed; the actual protocol support
# comes from Path.__fspath__().
PathLike.register(Path)
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_resources.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from ..abc import AsyncResource
|
| 4 |
+
from ._tasks import CancelScope
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
async def aclose_forcefully(resource: AsyncResource) -> None:
    """
    Close an asynchronous resource in a cancelled scope.

    Doing this closes the resource without waiting on anything.

    :param resource: the resource to close

    """
    # Cancelling the scope *before* awaiting means aclose() runs inside an
    # already-cancelled scope, so any checkpoints inside it return immediately
    # instead of waiting for e.g. graceful shutdown.
    with CancelScope() as scope:
        scope.cancel()
        await resource.aclose()
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_signals.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections.abc import AsyncIterator
|
| 4 |
+
from contextlib import AbstractContextManager
|
| 5 |
+
from signal import Signals
|
| 6 |
+
|
| 7 |
+
from ._eventloop import get_async_backend
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def open_signal_receiver(
    *signals: Signals,
) -> AbstractContextManager[AsyncIterator[Signals]]:
    """
    Start receiving operating system signals.

    :param signals: signals to receive (e.g. ``signal.SIGINT``)
    :return: an asynchronous context manager for an asynchronous iterator which yields
        signal numbers

    .. warning:: Windows does not support signals natively so it is best to avoid
        relying on this in cross-platform applications.

    .. warning:: On asyncio, this permanently replaces any previous signal handler for
        the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.

    """
    # Delegates to the running event loop backend (asyncio or trio).
    return get_async_backend().open_signal_receiver(*signals)
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_subprocesses.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
from collections.abc import AsyncIterable, Iterable, Mapping, Sequence
|
| 5 |
+
from io import BytesIO
|
| 6 |
+
from os import PathLike
|
| 7 |
+
from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess
|
| 8 |
+
from typing import IO, Any, Union, cast
|
| 9 |
+
|
| 10 |
+
from ..abc import Process
|
| 11 |
+
from ._eventloop import get_async_backend
|
| 12 |
+
from ._tasks import create_task_group
|
| 13 |
+
|
| 14 |
+
if sys.version_info >= (3, 10):
|
| 15 |
+
from typing import TypeAlias
|
| 16 |
+
else:
|
| 17 |
+
from typing_extensions import TypeAlias
|
| 18 |
+
|
| 19 |
+
StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
async def run_process(
|
| 23 |
+
command: StrOrBytesPath | Sequence[StrOrBytesPath],
|
| 24 |
+
*,
|
| 25 |
+
input: bytes | None = None,
|
| 26 |
+
stdout: int | IO[Any] | None = PIPE,
|
| 27 |
+
stderr: int | IO[Any] | None = PIPE,
|
| 28 |
+
check: bool = True,
|
| 29 |
+
cwd: StrOrBytesPath | None = None,
|
| 30 |
+
env: Mapping[str, str] | None = None,
|
| 31 |
+
startupinfo: Any = None,
|
| 32 |
+
creationflags: int = 0,
|
| 33 |
+
start_new_session: bool = False,
|
| 34 |
+
pass_fds: Sequence[int] = (),
|
| 35 |
+
user: str | int | None = None,
|
| 36 |
+
group: str | int | None = None,
|
| 37 |
+
extra_groups: Iterable[str | int] | None = None,
|
| 38 |
+
umask: int = -1,
|
| 39 |
+
) -> CompletedProcess[bytes]:
|
| 40 |
+
"""
|
| 41 |
+
Run an external command in a subprocess and wait until it completes.
|
| 42 |
+
|
| 43 |
+
.. seealso:: :func:`subprocess.run`
|
| 44 |
+
|
| 45 |
+
:param command: either a string to pass to the shell, or an iterable of strings
|
| 46 |
+
containing the executable name or path and its arguments
|
| 47 |
+
:param input: bytes passed to the standard input of the subprocess
|
| 48 |
+
:param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
|
| 49 |
+
a file-like object, or `None`
|
| 50 |
+
:param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
|
| 51 |
+
:data:`subprocess.STDOUT`, a file-like object, or `None`
|
| 52 |
+
:param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the
|
| 53 |
+
process terminates with a return code other than 0
|
| 54 |
+
:param cwd: If not ``None``, change the working directory to this before running the
|
| 55 |
+
command
|
| 56 |
+
:param env: if not ``None``, this mapping replaces the inherited environment
|
| 57 |
+
variables from the parent process
|
| 58 |
+
:param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
|
| 59 |
+
to specify process startup parameters (Windows only)
|
| 60 |
+
:param creationflags: flags that can be used to control the creation of the
|
| 61 |
+
subprocess (see :class:`subprocess.Popen` for the specifics)
|
| 62 |
+
:param start_new_session: if ``true`` the setsid() system call will be made in the
|
| 63 |
+
child process prior to the execution of the subprocess. (POSIX only)
|
| 64 |
+
:param pass_fds: sequence of file descriptors to keep open between the parent and
|
| 65 |
+
child processes. (POSIX only)
|
| 66 |
+
:param user: effective user to run the process as (Python >= 3.9, POSIX only)
|
| 67 |
+
:param group: effective group to run the process as (Python >= 3.9, POSIX only)
|
| 68 |
+
:param extra_groups: supplementary groups to set in the subprocess (Python >= 3.9,
|
| 69 |
+
POSIX only)
|
| 70 |
+
:param umask: if not negative, this umask is applied in the child process before
|
| 71 |
+
running the given command (Python >= 3.9, POSIX only)
|
| 72 |
+
:return: an object representing the completed process
|
| 73 |
+
:raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process
|
| 74 |
+
exits with a nonzero return code
|
| 75 |
+
|
| 76 |
+
"""
|
| 77 |
+
|
| 78 |
+
async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
|
| 79 |
+
buffer = BytesIO()
|
| 80 |
+
async for chunk in stream:
|
| 81 |
+
buffer.write(chunk)
|
| 82 |
+
|
| 83 |
+
stream_contents[index] = buffer.getvalue()
|
| 84 |
+
|
| 85 |
+
async with await open_process(
|
| 86 |
+
command,
|
| 87 |
+
stdin=PIPE if input else DEVNULL,
|
| 88 |
+
stdout=stdout,
|
| 89 |
+
stderr=stderr,
|
| 90 |
+
cwd=cwd,
|
| 91 |
+
env=env,
|
| 92 |
+
startupinfo=startupinfo,
|
| 93 |
+
creationflags=creationflags,
|
| 94 |
+
start_new_session=start_new_session,
|
| 95 |
+
pass_fds=pass_fds,
|
| 96 |
+
user=user,
|
| 97 |
+
group=group,
|
| 98 |
+
extra_groups=extra_groups,
|
| 99 |
+
umask=umask,
|
| 100 |
+
) as process:
|
| 101 |
+
stream_contents: list[bytes | None] = [None, None]
|
| 102 |
+
async with create_task_group() as tg:
|
| 103 |
+
if process.stdout:
|
| 104 |
+
tg.start_soon(drain_stream, process.stdout, 0)
|
| 105 |
+
|
| 106 |
+
if process.stderr:
|
| 107 |
+
tg.start_soon(drain_stream, process.stderr, 1)
|
| 108 |
+
|
| 109 |
+
if process.stdin and input:
|
| 110 |
+
await process.stdin.send(input)
|
| 111 |
+
await process.stdin.aclose()
|
| 112 |
+
|
| 113 |
+
await process.wait()
|
| 114 |
+
|
| 115 |
+
output, errors = stream_contents
|
| 116 |
+
if check and process.returncode != 0:
|
| 117 |
+
raise CalledProcessError(cast(int, process.returncode), command, output, errors)
|
| 118 |
+
|
| 119 |
+
return CompletedProcess(command, cast(int, process.returncode), output, errors)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
async def open_process(
|
| 123 |
+
command: StrOrBytesPath | Sequence[StrOrBytesPath],
|
| 124 |
+
*,
|
| 125 |
+
stdin: int | IO[Any] | None = PIPE,
|
| 126 |
+
stdout: int | IO[Any] | None = PIPE,
|
| 127 |
+
stderr: int | IO[Any] | None = PIPE,
|
| 128 |
+
cwd: StrOrBytesPath | None = None,
|
| 129 |
+
env: Mapping[str, str] | None = None,
|
| 130 |
+
startupinfo: Any = None,
|
| 131 |
+
creationflags: int = 0,
|
| 132 |
+
start_new_session: bool = False,
|
| 133 |
+
pass_fds: Sequence[int] = (),
|
| 134 |
+
user: str | int | None = None,
|
| 135 |
+
group: str | int | None = None,
|
| 136 |
+
extra_groups: Iterable[str | int] | None = None,
|
| 137 |
+
umask: int = -1,
|
| 138 |
+
) -> Process:
|
| 139 |
+
"""
|
| 140 |
+
Start an external command in a subprocess.
|
| 141 |
+
|
| 142 |
+
.. seealso:: :class:`subprocess.Popen`
|
| 143 |
+
|
| 144 |
+
:param command: either a string to pass to the shell, or an iterable of strings
|
| 145 |
+
containing the executable name or path and its arguments
|
| 146 |
+
:param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
|
| 147 |
+
file-like object, or ``None``
|
| 148 |
+
:param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
|
| 149 |
+
a file-like object, or ``None``
|
| 150 |
+
:param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
|
| 151 |
+
:data:`subprocess.STDOUT`, a file-like object, or ``None``
|
| 152 |
+
:param cwd: If not ``None``, the working directory is changed before executing
|
| 153 |
+
:param env: If env is not ``None``, it must be a mapping that defines the
|
| 154 |
+
environment variables for the new process
|
| 155 |
+
:param creationflags: flags that can be used to control the creation of the
|
| 156 |
+
subprocess (see :class:`subprocess.Popen` for the specifics)
|
| 157 |
+
:param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
|
| 158 |
+
to specify process startup parameters (Windows only)
|
| 159 |
+
:param start_new_session: if ``true`` the setsid() system call will be made in the
|
| 160 |
+
child process prior to the execution of the subprocess. (POSIX only)
|
| 161 |
+
:param pass_fds: sequence of file descriptors to keep open between the parent and
|
| 162 |
+
child processes. (POSIX only)
|
| 163 |
+
:param user: effective user to run the process as (POSIX only)
|
| 164 |
+
:param group: effective group to run the process as (POSIX only)
|
| 165 |
+
:param extra_groups: supplementary groups to set in the subprocess (POSIX only)
|
| 166 |
+
:param umask: if not negative, this umask is applied in the child process before
|
| 167 |
+
running the given command (POSIX only)
|
| 168 |
+
:return: an asynchronous process object
|
| 169 |
+
|
| 170 |
+
"""
|
| 171 |
+
kwargs: dict[str, Any] = {}
|
| 172 |
+
if user is not None:
|
| 173 |
+
kwargs["user"] = user
|
| 174 |
+
|
| 175 |
+
if group is not None:
|
| 176 |
+
kwargs["group"] = group
|
| 177 |
+
|
| 178 |
+
if extra_groups is not None:
|
| 179 |
+
kwargs["extra_groups"] = group
|
| 180 |
+
|
| 181 |
+
if umask >= 0:
|
| 182 |
+
kwargs["umask"] = umask
|
| 183 |
+
|
| 184 |
+
return await get_async_backend().open_process(
|
| 185 |
+
command,
|
| 186 |
+
stdin=stdin,
|
| 187 |
+
stdout=stdout,
|
| 188 |
+
stderr=stderr,
|
| 189 |
+
cwd=cwd,
|
| 190 |
+
env=env,
|
| 191 |
+
startupinfo=startupinfo,
|
| 192 |
+
creationflags=creationflags,
|
| 193 |
+
start_new_session=start_new_session,
|
| 194 |
+
pass_fds=pass_fds,
|
| 195 |
+
**kwargs,
|
| 196 |
+
)
|
infer_4_37_2/lib/python3.10/site-packages/anyio/_core/_testing.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections.abc import Awaitable, Generator
|
| 4 |
+
from typing import Any, cast
|
| 5 |
+
|
| 6 |
+
from ._eventloop import get_async_backend
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TaskInfo:
|
| 10 |
+
"""
|
| 11 |
+
Represents an asynchronous task.
|
| 12 |
+
|
| 13 |
+
:ivar int id: the unique identifier of the task
|
| 14 |
+
:ivar parent_id: the identifier of the parent task, if any
|
| 15 |
+
:vartype parent_id: Optional[int]
|
| 16 |
+
:ivar str name: the description of the task (if any)
|
| 17 |
+
:ivar ~collections.abc.Coroutine coro: the coroutine object of the task
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
__slots__ = "_name", "id", "parent_id", "name", "coro"
|
| 21 |
+
|
| 22 |
+
def __init__(
|
| 23 |
+
self,
|
| 24 |
+
id: int,
|
| 25 |
+
parent_id: int | None,
|
| 26 |
+
name: str | None,
|
| 27 |
+
coro: Generator[Any, Any, Any] | Awaitable[Any],
|
| 28 |
+
):
|
| 29 |
+
func = get_current_task
|
| 30 |
+
self._name = f"{func.__module__}.{func.__qualname__}"
|
| 31 |
+
self.id: int = id
|
| 32 |
+
self.parent_id: int | None = parent_id
|
| 33 |
+
self.name: str | None = name
|
| 34 |
+
self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro
|
| 35 |
+
|
| 36 |
+
def __eq__(self, other: object) -> bool:
|
| 37 |
+
if isinstance(other, TaskInfo):
|
| 38 |
+
return self.id == other.id
|
| 39 |
+
|
| 40 |
+
return NotImplemented
|
| 41 |
+
|
| 42 |
+
def __hash__(self) -> int:
|
| 43 |
+
return hash(self.id)
|
| 44 |
+
|
| 45 |
+
def __repr__(self) -> str:
|
| 46 |
+
return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"
|
| 47 |
+
|
| 48 |
+
def has_pending_cancellation(self) -> bool:
|
| 49 |
+
"""
|
| 50 |
+
Return ``True`` if the task has a cancellation pending, ``False`` otherwise.
|
| 51 |
+
|
| 52 |
+
"""
|
| 53 |
+
return False
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def get_current_task() -> TaskInfo:
|
| 57 |
+
"""
|
| 58 |
+
Return the current task.
|
| 59 |
+
|
| 60 |
+
:return: a representation of the current task
|
| 61 |
+
|
| 62 |
+
"""
|
| 63 |
+
return get_async_backend().get_current_task()
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def get_running_tasks() -> list[TaskInfo]:
|
| 67 |
+
"""
|
| 68 |
+
Return a list of running tasks in the current event loop.
|
| 69 |
+
|
| 70 |
+
:return: a list of task info objects
|
| 71 |
+
|
| 72 |
+
"""
|
| 73 |
+
return cast("list[TaskInfo]", get_async_backend().get_running_tasks())
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
async def wait_all_tasks_blocked() -> None:
|
| 77 |
+
"""Wait until all other tasks are waiting for something."""
|
| 78 |
+
await get_async_backend().wait_all_tasks_blocked()
|
infer_4_37_2/lib/python3.10/site-packages/anyio/abc/__init__.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from ._eventloop import AsyncBackend as AsyncBackend
|
| 4 |
+
from ._resources import AsyncResource as AsyncResource
|
| 5 |
+
from ._sockets import ConnectedUDPSocket as ConnectedUDPSocket
|
| 6 |
+
from ._sockets import ConnectedUNIXDatagramSocket as ConnectedUNIXDatagramSocket
|
| 7 |
+
from ._sockets import IPAddressType as IPAddressType
|
| 8 |
+
from ._sockets import IPSockAddrType as IPSockAddrType
|
| 9 |
+
from ._sockets import SocketAttribute as SocketAttribute
|
| 10 |
+
from ._sockets import SocketListener as SocketListener
|
| 11 |
+
from ._sockets import SocketStream as SocketStream
|
| 12 |
+
from ._sockets import UDPPacketType as UDPPacketType
|
| 13 |
+
from ._sockets import UDPSocket as UDPSocket
|
| 14 |
+
from ._sockets import UNIXDatagramPacketType as UNIXDatagramPacketType
|
| 15 |
+
from ._sockets import UNIXDatagramSocket as UNIXDatagramSocket
|
| 16 |
+
from ._sockets import UNIXSocketStream as UNIXSocketStream
|
| 17 |
+
from ._streams import AnyByteReceiveStream as AnyByteReceiveStream
|
| 18 |
+
from ._streams import AnyByteSendStream as AnyByteSendStream
|
| 19 |
+
from ._streams import AnyByteStream as AnyByteStream
|
| 20 |
+
from ._streams import AnyUnreliableByteReceiveStream as AnyUnreliableByteReceiveStream
|
| 21 |
+
from ._streams import AnyUnreliableByteSendStream as AnyUnreliableByteSendStream
|
| 22 |
+
from ._streams import AnyUnreliableByteStream as AnyUnreliableByteStream
|
| 23 |
+
from ._streams import ByteReceiveStream as ByteReceiveStream
|
| 24 |
+
from ._streams import ByteSendStream as ByteSendStream
|
| 25 |
+
from ._streams import ByteStream as ByteStream
|
| 26 |
+
from ._streams import Listener as Listener
|
| 27 |
+
from ._streams import ObjectReceiveStream as ObjectReceiveStream
|
| 28 |
+
from ._streams import ObjectSendStream as ObjectSendStream
|
| 29 |
+
from ._streams import ObjectStream as ObjectStream
|
| 30 |
+
from ._streams import UnreliableObjectReceiveStream as UnreliableObjectReceiveStream
|
| 31 |
+
from ._streams import UnreliableObjectSendStream as UnreliableObjectSendStream
|
| 32 |
+
from ._streams import UnreliableObjectStream as UnreliableObjectStream
|
| 33 |
+
from ._subprocesses import Process as Process
|
| 34 |
+
from ._tasks import TaskGroup as TaskGroup
|
| 35 |
+
from ._tasks import TaskStatus as TaskStatus
|
| 36 |
+
from ._testing import TestRunner as TestRunner
|
| 37 |
+
|
| 38 |
+
# Re-exported here, for backwards compatibility
|
| 39 |
+
# isort: off
|
| 40 |
+
from .._core._synchronization import (
|
| 41 |
+
CapacityLimiter as CapacityLimiter,
|
| 42 |
+
Condition as Condition,
|
| 43 |
+
Event as Event,
|
| 44 |
+
Lock as Lock,
|
| 45 |
+
Semaphore as Semaphore,
|
| 46 |
+
)
|
| 47 |
+
from .._core._tasks import CancelScope as CancelScope
|
| 48 |
+
from ..from_thread import BlockingPortal as BlockingPortal
|
| 49 |
+
|
| 50 |
+
# Re-export imports so they look like they live directly in this package
|
| 51 |
+
for __value in list(locals().values()):
|
| 52 |
+
if getattr(__value, "__module__", "").startswith("anyio.abc."):
|
| 53 |
+
__value.__module__ = __name__
|
| 54 |
+
|
| 55 |
+
del __value
|
infer_4_37_2/lib/python3.10/site-packages/anyio/abc/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.02 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/abc/__pycache__/_eventloop.cpython-310.pyc
ADDED
|
Binary file (12.4 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/abc/__pycache__/_tasks.cpython-310.pyc
ADDED
|
Binary file (4.1 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/anyio/abc/_streams.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from abc import abstractmethod
|
| 4 |
+
from collections.abc import Callable
|
| 5 |
+
from typing import Any, Generic, TypeVar, Union
|
| 6 |
+
|
| 7 |
+
from .._core._exceptions import EndOfStream
|
| 8 |
+
from .._core._typedattr import TypedAttributeProvider
|
| 9 |
+
from ._resources import AsyncResource
|
| 10 |
+
from ._tasks import TaskGroup
|
| 11 |
+
|
| 12 |
+
T_Item = TypeVar("T_Item")
|
| 13 |
+
T_co = TypeVar("T_co", covariant=True)
|
| 14 |
+
T_contra = TypeVar("T_contra", contravariant=True)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class UnreliableObjectReceiveStream(
|
| 18 |
+
Generic[T_co], AsyncResource, TypedAttributeProvider
|
| 19 |
+
):
|
| 20 |
+
"""
|
| 21 |
+
An interface for receiving objects.
|
| 22 |
+
|
| 23 |
+
This interface makes no guarantees that the received messages arrive in the order in
|
| 24 |
+
which they were sent, or that no messages are missed.
|
| 25 |
+
|
| 26 |
+
Asynchronously iterating over objects of this type will yield objects matching the
|
| 27 |
+
given type parameter.
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]:
|
| 31 |
+
return self
|
| 32 |
+
|
| 33 |
+
async def __anext__(self) -> T_co:
|
| 34 |
+
try:
|
| 35 |
+
return await self.receive()
|
| 36 |
+
except EndOfStream:
|
| 37 |
+
raise StopAsyncIteration
|
| 38 |
+
|
| 39 |
+
@abstractmethod
|
| 40 |
+
async def receive(self) -> T_co:
|
| 41 |
+
"""
|
| 42 |
+
Receive the next item.
|
| 43 |
+
|
| 44 |
+
:raises ~anyio.ClosedResourceError: if the receive stream has been explicitly
|
| 45 |
+
closed
|
| 46 |
+
:raises ~anyio.EndOfStream: if this stream has been closed from the other end
|
| 47 |
+
:raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
|
| 48 |
+
due to external causes
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class UnreliableObjectSendStream(
|
| 53 |
+
Generic[T_contra], AsyncResource, TypedAttributeProvider
|
| 54 |
+
):
|
| 55 |
+
"""
|
| 56 |
+
An interface for sending objects.
|
| 57 |
+
|
| 58 |
+
This interface makes no guarantees that the messages sent will reach the
|
| 59 |
+
recipient(s) in the same order in which they were sent, or at all.
|
| 60 |
+
"""
|
| 61 |
+
|
| 62 |
+
@abstractmethod
|
| 63 |
+
async def send(self, item: T_contra) -> None:
|
| 64 |
+
"""
|
| 65 |
+
Send an item to the peer(s).
|
| 66 |
+
|
| 67 |
+
:param item: the item to send
|
| 68 |
+
:raises ~anyio.ClosedResourceError: if the send stream has been explicitly
|
| 69 |
+
closed
|
| 70 |
+
:raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
|
| 71 |
+
due to external causes
|
| 72 |
+
"""
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class UnreliableObjectStream(
|
| 76 |
+
UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item]
|
| 77 |
+
):
|
| 78 |
+
"""
|
| 79 |
+
A bidirectional message stream which does not guarantee the order or reliability of
|
| 80 |
+
message delivery.
|
| 81 |
+
"""
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]):
|
| 85 |
+
"""
|
| 86 |
+
A receive message stream which guarantees that messages are received in the same
|
| 87 |
+
order in which they were sent, and that no messages are missed.
|
| 88 |
+
"""
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class ObjectSendStream(UnreliableObjectSendStream[T_contra]):
|
| 92 |
+
"""
|
| 93 |
+
A send message stream which guarantees that messages are delivered in the same order
|
| 94 |
+
in which they were sent, without missing any messages in the middle.
|
| 95 |
+
"""
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class ObjectStream(
|
| 99 |
+
ObjectReceiveStream[T_Item],
|
| 100 |
+
ObjectSendStream[T_Item],
|
| 101 |
+
UnreliableObjectStream[T_Item],
|
| 102 |
+
):
|
| 103 |
+
"""
|
| 104 |
+
A bidirectional message stream which guarantees the order and reliability of message
|
| 105 |
+
delivery.
|
| 106 |
+
"""
|
| 107 |
+
|
| 108 |
+
@abstractmethod
|
| 109 |
+
async def send_eof(self) -> None:
|
| 110 |
+
"""
|
| 111 |
+
Send an end-of-file indication to the peer.
|
| 112 |
+
|
| 113 |
+
You should not try to send any further data to this stream after calling this
|
| 114 |
+
method. This method is idempotent (does nothing on successive calls).
|
| 115 |
+
"""
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class ByteReceiveStream(AsyncResource, TypedAttributeProvider):
|
| 119 |
+
"""
|
| 120 |
+
An interface for receiving bytes from a single peer.
|
| 121 |
+
|
| 122 |
+
Iterating this byte stream will yield a byte string of arbitrary length, but no more
|
| 123 |
+
than 65536 bytes.
|
| 124 |
+
"""
|
| 125 |
+
|
| 126 |
+
def __aiter__(self) -> ByteReceiveStream:
|
| 127 |
+
return self
|
| 128 |
+
|
| 129 |
+
async def __anext__(self) -> bytes:
|
| 130 |
+
try:
|
| 131 |
+
return await self.receive()
|
| 132 |
+
except EndOfStream:
|
| 133 |
+
raise StopAsyncIteration
|
| 134 |
+
|
| 135 |
+
@abstractmethod
|
| 136 |
+
async def receive(self, max_bytes: int = 65536) -> bytes:
|
| 137 |
+
"""
|
| 138 |
+
Receive at most ``max_bytes`` bytes from the peer.
|
| 139 |
+
|
| 140 |
+
.. note:: Implementors of this interface should not return an empty
|
| 141 |
+
:class:`bytes` object, and users should ignore them.
|
| 142 |
+
|
| 143 |
+
:param max_bytes: maximum number of bytes to receive
|
| 144 |
+
:return: the received bytes
|
| 145 |
+
:raises ~anyio.EndOfStream: if this stream has been closed from the other end
|
| 146 |
+
"""
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
class ByteSendStream(AsyncResource, TypedAttributeProvider):
|
| 150 |
+
"""An interface for sending bytes to a single peer."""
|
| 151 |
+
|
| 152 |
+
@abstractmethod
|
| 153 |
+
async def send(self, item: bytes) -> None:
|
| 154 |
+
"""
|
| 155 |
+
Send the given bytes to the peer.
|
| 156 |
+
|
| 157 |
+
:param item: the bytes to send
|
| 158 |
+
"""
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
class ByteStream(ByteReceiveStream, ByteSendStream):
|
| 162 |
+
"""A bidirectional byte stream."""
|
| 163 |
+
|
| 164 |
+
@abstractmethod
|
| 165 |
+
async def send_eof(self) -> None:
|
| 166 |
+
"""
|
| 167 |
+
Send an end-of-file indication to the peer.
|
| 168 |
+
|
| 169 |
+
You should not try to send any further data to this stream after calling this
|
| 170 |
+
method. This method is idempotent (does nothing on successive calls).
|
| 171 |
+
"""
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
#: Type alias for all unreliable bytes-oriented receive streams.
|
| 175 |
+
AnyUnreliableByteReceiveStream = Union[
|
| 176 |
+
UnreliableObjectReceiveStream[bytes], ByteReceiveStream
|
| 177 |
+
]
|
| 178 |
+
#: Type alias for all unreliable bytes-oriented send streams.
|
| 179 |
+
AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream]
|
| 180 |
+
#: Type alias for all unreliable bytes-oriented streams.
|
| 181 |
+
AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream]
|
| 182 |
+
#: Type alias for all bytes-oriented receive streams.
|
| 183 |
+
AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream]
|
| 184 |
+
#: Type alias for all bytes-oriented send streams.
|
| 185 |
+
AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream]
|
| 186 |
+
#: Type alias for all bytes-oriented streams.
|
| 187 |
+
AnyByteStream = Union[ObjectStream[bytes], ByteStream]
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider):
|
| 191 |
+
"""An interface for objects that let you accept incoming connections."""
|
| 192 |
+
|
| 193 |
+
@abstractmethod
|
| 194 |
+
async def serve(
|
| 195 |
+
self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None
|
| 196 |
+
) -> None:
|
| 197 |
+
"""
|
| 198 |
+
Accept incoming connections as they come in and start tasks to handle them.
|
| 199 |
+
|
| 200 |
+
:param handler: a callable that will be used to handle each accepted connection
|
| 201 |
+
:param task_group: the task group that will be used to start tasks for handling
|
| 202 |
+
each accepted connection (if omitted, an ad-hoc task group will be created)
|
| 203 |
+
"""
|
infer_4_37_2/lib/python3.10/site-packages/anyio/abc/_subprocesses.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from abc import abstractmethod
|
| 4 |
+
from signal import Signals
|
| 5 |
+
|
| 6 |
+
from ._resources import AsyncResource
|
| 7 |
+
from ._streams import ByteReceiveStream, ByteSendStream
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Process(AsyncResource):
|
| 11 |
+
"""An asynchronous version of :class:`subprocess.Popen`."""
|
| 12 |
+
|
| 13 |
+
@abstractmethod
|
| 14 |
+
async def wait(self) -> int:
|
| 15 |
+
"""
|
| 16 |
+
Wait until the process exits.
|
| 17 |
+
|
| 18 |
+
:return: the exit code of the process
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
@abstractmethod
|
| 22 |
+
def terminate(self) -> None:
|
| 23 |
+
"""
|
| 24 |
+
Terminates the process, gracefully if possible.
|
| 25 |
+
|
| 26 |
+
On Windows, this calls ``TerminateProcess()``.
|
| 27 |
+
On POSIX systems, this sends ``SIGTERM`` to the process.
|
| 28 |
+
|
| 29 |
+
.. seealso:: :meth:`subprocess.Popen.terminate`
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
@abstractmethod
|
| 33 |
+
def kill(self) -> None:
|
| 34 |
+
"""
|
| 35 |
+
Kills the process.
|
| 36 |
+
|
| 37 |
+
On Windows, this calls ``TerminateProcess()``.
|
| 38 |
+
On POSIX systems, this sends ``SIGKILL`` to the process.
|
| 39 |
+
|
| 40 |
+
.. seealso:: :meth:`subprocess.Popen.kill`
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
@abstractmethod
|
| 44 |
+
def send_signal(self, signal: Signals) -> None:
|
| 45 |
+
"""
|
| 46 |
+
Send a signal to the subprocess.
|
| 47 |
+
|
| 48 |
+
.. seealso:: :meth:`subprocess.Popen.send_signal`
|
| 49 |
+
|
| 50 |
+
:param signal: the signal number (e.g. :data:`signal.SIGHUP`)
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
@property
|
| 54 |
+
@abstractmethod
|
| 55 |
+
def pid(self) -> int:
|
| 56 |
+
"""The process ID of the process."""
|
| 57 |
+
|
| 58 |
+
@property
|
| 59 |
+
@abstractmethod
|
| 60 |
+
def returncode(self) -> int | None:
|
| 61 |
+
"""
|
| 62 |
+
The return code of the process. If the process has not yet terminated, this will
|
| 63 |
+
be ``None``.
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
@property
|
| 67 |
+
@abstractmethod
|
| 68 |
+
def stdin(self) -> ByteSendStream | None:
|
| 69 |
+
"""The stream for the standard input of the process."""
|
| 70 |
+
|
| 71 |
+
@property
|
| 72 |
+
@abstractmethod
|
| 73 |
+
def stdout(self) -> ByteReceiveStream | None:
|
| 74 |
+
"""The stream for the standard output of the process."""
|
| 75 |
+
|
| 76 |
+
@property
|
| 77 |
+
@abstractmethod
|
| 78 |
+
def stderr(self) -> ByteReceiveStream | None:
|
| 79 |
+
"""The stream for the standard error output of the process."""
|
infer_4_37_2/lib/python3.10/site-packages/anyio/from_thread.py
ADDED
|
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
from collections.abc import Awaitable, Callable, Generator
|
| 5 |
+
from concurrent.futures import Future
|
| 6 |
+
from contextlib import (
|
| 7 |
+
AbstractAsyncContextManager,
|
| 8 |
+
AbstractContextManager,
|
| 9 |
+
contextmanager,
|
| 10 |
+
)
|
| 11 |
+
from dataclasses import dataclass, field
|
| 12 |
+
from inspect import isawaitable
|
| 13 |
+
from threading import Lock, Thread, get_ident
|
| 14 |
+
from types import TracebackType
|
| 15 |
+
from typing import (
|
| 16 |
+
Any,
|
| 17 |
+
Generic,
|
| 18 |
+
TypeVar,
|
| 19 |
+
cast,
|
| 20 |
+
overload,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
from ._core import _eventloop
|
| 24 |
+
from ._core._eventloop import get_async_backend, get_cancelled_exc_class, threadlocals
|
| 25 |
+
from ._core._synchronization import Event
|
| 26 |
+
from ._core._tasks import CancelScope, create_task_group
|
| 27 |
+
from .abc import AsyncBackend
|
| 28 |
+
from .abc._tasks import TaskStatus
|
| 29 |
+
|
| 30 |
+
if sys.version_info >= (3, 11):
|
| 31 |
+
from typing import TypeVarTuple, Unpack
|
| 32 |
+
else:
|
| 33 |
+
from typing_extensions import TypeVarTuple, Unpack
|
| 34 |
+
|
| 35 |
+
T_Retval = TypeVar("T_Retval")
|
| 36 |
+
T_co = TypeVar("T_co", covariant=True)
|
| 37 |
+
PosArgsT = TypeVarTuple("PosArgsT")
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def run(
    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a coroutine function from a worker thread.

    :param func: a coroutine function
    :param args: positional arguments for the callable
    :return: the return value of the coroutine function

    """
    # Both attributes exist only on threads that anyio itself spawned; a missing
    # attribute therefore means we were invoked from a foreign thread.
    try:
        backend = threadlocals.current_async_backend
        backend_token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return backend.run_async_from_thread(func, args, token=backend_token)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a function in the event loop thread from a worker thread.

    :param func: a callable
    :param args: positional arguments for the callable
    :return: the return value of the callable

    """
    # Both attributes exist only on threads that anyio itself spawned; a missing
    # attribute therefore means we were invoked from a foreign thread.
    try:
        backend = threadlocals.current_async_backend
        backend_token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return backend.run_sync_from_thread(func, args, token=backend_token)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager):
    """
    Wraps an async context manager so it can be used as a synchronous one from a
    foreign thread, by running both ``__aenter__()`` and ``__aexit__()`` inside a
    single portal task that pauses in between.
    """

    # Future resolved with the value returned by the async CM's __aenter__()
    _enter_future: Future[T_co]
    # Future resolved with the return value of the async CM's __aexit__()
    _exit_future: Future[bool | None]
    # Set from the sync side (via the portal) to let run_async_cm() proceed to exit
    _exit_event: Event
    # Exception info captured in __exit__(); forwarded to the async __aexit__()
    _exit_exc_info: tuple[
        type[BaseException] | None, BaseException | None, TracebackType | None
    ] = (None, None, None)

    def __init__(
        self, async_cm: AbstractAsyncContextManager[T_co], portal: BlockingPortal
    ):
        self._async_cm = async_cm
        self._portal = portal

    async def run_async_cm(self) -> bool | None:
        # Runs as a single portal task spanning the entire lifetime of the sync
        # context manager.
        try:
            self._exit_event = Event()
            value = await self._async_cm.__aenter__()
        except BaseException as exc:
            # Propagate the entry failure to the thread blocked in __enter__()
            self._enter_future.set_exception(exc)
            raise
        else:
            self._enter_future.set_result(value)

        try:
            # Wait for the sync context manager to exit.
            # This next statement can raise `get_cancelled_exc_class()` if
            # something went wrong in a task group in this async context
            # manager.
            await self._exit_event.wait()
        finally:
            # In case of cancellation, it could be that we end up here before
            # `_BlockingAsyncContextManager.__exit__` is called, and an
            # `_exit_exc_info` has been set.
            result = await self._async_cm.__aexit__(*self._exit_exc_info)
            return result

    def __enter__(self) -> T_co:
        self._enter_future = Future()
        self._exit_future = self._portal.start_task_soon(self.run_async_cm)
        # Blocks until run_async_cm() has completed __aenter__()
        return self._enter_future.result()

    def __exit__(
        self,
        __exc_type: type[BaseException] | None,
        __exc_value: BaseException | None,
        __traceback: TracebackType | None,
    ) -> bool | None:
        self._exit_exc_info = __exc_type, __exc_value, __traceback
        # Wake up run_async_cm() in the event loop thread, then block until it
        # has finished running __aexit__()
        self._portal.call(self._exit_event.set)
        return self._exit_future.result()
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class _BlockingPortalTaskStatus(TaskStatus):
    # Adapter bridging a task's readiness signal (task_status.started()) to a
    # concurrent.futures.Future that BlockingPortal.start_task() blocks on.

    def __init__(self, future: Future):
        self._future = future

    def started(self, value: object = None) -> None:
        # Resolving the future unblocks the thread waiting in start_task()
        self._future.set_result(value)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class BlockingPortal:
    """An object that lets external threads run code in an asynchronous event loop."""

    def __new__(cls) -> BlockingPortal:
        # Delegates instantiation to the current backend's concrete portal class
        return get_async_backend().create_blocking_portal()

    def __init__(self) -> None:
        # Thread ID of the event loop thread; None once the portal is stopped
        self._event_loop_thread_id: int | None = get_ident()
        self._stop_event = Event()
        self._task_group = create_task_group()
        self._cancelled_exc_class = get_cancelled_exc_class()

    async def __aenter__(self) -> BlockingPortal:
        await self._task_group.__aenter__()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        await self.stop()
        return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)

    def _check_running(self) -> None:
        # Guard for the thread-side entry points: the portal must be running and
        # must not be called from its own event loop thread (that would deadlock).
        if self._event_loop_thread_id is None:
            raise RuntimeError("This portal is not running")
        if self._event_loop_thread_id == get_ident():
            raise RuntimeError(
                "This method cannot be called from the event loop thread"
            )

    async def sleep_until_stopped(self) -> None:
        """Sleep until :meth:`stop` is called."""
        await self._stop_event.wait()

    async def stop(self, cancel_remaining: bool = False) -> None:
        """
        Signal the portal to shut down.

        This marks the portal as no longer accepting new calls and exits from
        :meth:`sleep_until_stopped`.

        :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False``
            to let them finish before returning

        """
        self._event_loop_thread_id = None
        self._stop_event.set()
        if cancel_remaining:
            self._task_group.cancel_scope.cancel()

    async def _call_func(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        future: Future[T_Retval],
    ) -> None:
        # Runs in the event loop thread; mirrors the outcome of ``func`` into
        # ``future`` so the calling thread can observe it.
        def callback(f: Future[T_Retval]) -> None:
            # Invoked when the thread-side future is cancelled; propagate the
            # cancellation to the task's cancel scope via the portal (skipped if
            # the portal has stopped or we're already on the event loop thread).
            if f.cancelled() and self._event_loop_thread_id not in (
                None,
                get_ident(),
            ):
                self.call(scope.cancel)

        try:
            retval_or_awaitable = func(*args, **kwargs)
            if isawaitable(retval_or_awaitable):
                # Wrap the await in a cancel scope so a future.cancel() from the
                # calling thread can abort it
                with CancelScope() as scope:
                    if future.cancelled():
                        scope.cancel()
                    else:
                        future.add_done_callback(callback)

                    retval = await retval_or_awaitable
            else:
                retval = retval_or_awaitable
        except self._cancelled_exc_class:
            # Mark the future as cancelled (set_running_or_notify_cancel()
            # finalizes the CANCELLED -> CANCELLED_AND_NOTIFIED transition)
            future.cancel()
            future.set_running_or_notify_cancel()
        except BaseException as exc:
            if not future.cancelled():
                future.set_exception(exc)

            # Let base exceptions fall through
            if not isinstance(exc, Exception):
                raise
        else:
            if not future.cancelled():
                future.set_result(retval)
        finally:
            # Break the reference cycle through the closure's captured scope
            scope = None  # type: ignore[assignment]

    def _spawn_task_from_thread(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        name: object,
        future: Future[T_Retval],
    ) -> None:
        """
        Spawn a new task using the given callable.

        Implementors must ensure that the future is resolved when the task finishes.

        :param func: a callable
        :param args: positional arguments to be passed to the callable
        :param kwargs: keyword arguments to be passed to the callable
        :param name: name of the task (will be coerced to a string if not ``None``)
        :param future: a future that will resolve to the return value of the callable,
            or the exception raised during its execution

        """
        raise NotImplementedError

    @overload
    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
    ) -> T_Retval: ...

    @overload
    def call(
        self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
    ) -> T_Retval: ...

    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
    ) -> T_Retval:
        """
        Call the given function in the event loop thread.

        If the callable returns a coroutine object, it is awaited on.

        :param func: any callable
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread

        """
        # Blocks the calling thread until the task has finished
        return cast(T_Retval, self.start_task_soon(func, *args).result())

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]:
        """
        Start a task in the portal's task group.

        The task will be run inside a cancel scope which can be cancelled by cancelling
        the returned future.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a future that resolves with the return value of the callable if the
            task completes successfully, or with the exception raised in the task
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread
        :rtype: concurrent.futures.Future[T_Retval]

        .. versionadded:: 3.0

        """
        self._check_running()
        f: Future[T_Retval] = Future()
        self._spawn_task_from_thread(func, args, {}, name, f)
        return f

    def start_task(
        self,
        func: Callable[..., Awaitable[T_Retval]],
        *args: object,
        name: object = None,
    ) -> tuple[Future[T_Retval], Any]:
        """
        Start a task in the portal's task group and wait until it signals for readiness.

        This method works the same way as :meth:`.abc.TaskGroup.start`.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a tuple of (future, task_status_value) where the ``task_status_value``
            is the value passed to ``task_status.started()`` from within the target
            function
        :rtype: tuple[concurrent.futures.Future[T_Retval], Any]

        .. versionadded:: 3.0

        """

        def task_done(future: Future[T_Retval]) -> None:
            # If the task finished without ever calling task_status.started(),
            # make sure the thread waiting on task_status_future is unblocked.
            if not task_status_future.done():
                if future.cancelled():
                    task_status_future.cancel()
                elif future.exception():
                    task_status_future.set_exception(future.exception())
                else:
                    exc = RuntimeError(
                        "Task exited without calling task_status.started()"
                    )
                    task_status_future.set_exception(exc)

        self._check_running()
        task_status_future: Future = Future()
        task_status = _BlockingPortalTaskStatus(task_status_future)
        f: Future = Future()
        f.add_done_callback(task_done)
        self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f)
        # Blocks until the task signals readiness (or terminates early)
        return f, task_status_future.result()

    def wrap_async_context_manager(
        self, cm: AbstractAsyncContextManager[T_co]
    ) -> AbstractContextManager[T_co]:
        """
        Wrap an async context manager as a synchronous context manager via this portal.

        Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping
        in the middle until the synchronous context manager exits.

        :param cm: an asynchronous context manager
        :return: a synchronous context manager

        .. versionadded:: 2.1

        """
        return _BlockingAsyncContextManager(cm, self)
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
@dataclass
class BlockingPortalProvider:
    """
    A manager for a blocking portal. Used as a context manager. The first thread to
    enter this context manager causes a blocking portal to be started with the specific
    parameters, and the last thread to exit causes the portal to be shut down. Thus,
    there will be exactly one blocking portal running in this context as long as at
    least one thread has entered this context manager.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options

    .. versionadded:: 4.4
    """

    backend: str = "asyncio"
    backend_options: dict[str, Any] | None = None
    # Serializes lease bookkeeping across threads
    _lock: Lock = field(init=False, default_factory=Lock)
    # Number of threads currently inside this context manager
    _leases: int = field(init=False, default=0)
    _portal: BlockingPortal = field(init=False)
    # Context manager returned by start_blocking_portal(); None when not running
    _portal_cm: AbstractContextManager[BlockingPortal] | None = field(
        init=False, default=None
    )

    def __enter__(self) -> BlockingPortal:
        with self._lock:
            # First thread in: start the portal
            if self._portal_cm is None:
                self._portal_cm = start_blocking_portal(
                    self.backend, self.backend_options
                )
                self._portal = self._portal_cm.__enter__()

            self._leases += 1
            return self._portal

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        portal_cm: AbstractContextManager[BlockingPortal] | None = None
        with self._lock:
            assert self._portal_cm
            assert self._leases > 0
            self._leases -= 1
            # Last thread out: detach the portal state while holding the lock...
            if not self._leases:
                portal_cm = self._portal_cm
                self._portal_cm = None
                del self._portal

        # ...but shut it down outside the lock so we don't block other threads
        if portal_cm:
            portal_cm.__exit__(None, None, None)
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
@contextmanager
def start_blocking_portal(
    backend: str = "asyncio", backend_options: dict[str, Any] | None = None
) -> Generator[BlockingPortal, Any, None]:
    """
    Start a new event loop in a new thread and run a blocking portal in its main task.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options
    :return: a context manager that yields a blocking portal

    .. versionchanged:: 3.0
        Usage as a context manager is now required.

    """

    async def run_portal() -> None:
        # Main task of the new event loop: publish the portal to the spawning
        # thread, then idle until the portal is stopped.
        async with BlockingPortal() as portal_:
            future.set_result(portal_)
            await portal_.sleep_until_stopped()

    def run_blocking_portal() -> None:
        # Thread target; set_running_or_notify_cancel() returns False if the
        # future was cancelled before the thread got going.
        if future.set_running_or_notify_cancel():
            try:
                _eventloop.run(
                    run_portal, backend=backend, backend_options=backend_options
                )
            except BaseException as exc:
                # Deliver startup failures to the thread waiting in future.result()
                if not future.done():
                    future.set_exception(exc)

    future: Future[BlockingPortal] = Future()
    thread = Thread(target=run_blocking_portal, daemon=True)
    thread.start()
    try:
        cancel_remaining_tasks = False
        # Blocks until run_portal() has published the portal (or raised)
        portal = future.result()
        try:
            yield portal
        except BaseException:
            # An exception in the with-block cancels any leftover portal tasks
            cancel_remaining_tasks = True
            raise
        finally:
            try:
                portal.call(portal.stop, cancel_remaining_tasks)
            except RuntimeError:
                # The portal already stopped on its own
                pass
    finally:
        thread.join()
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
def check_cancelled() -> None:
    """
    Check if the cancel scope of the host task's running the current worker thread has
    been cancelled.

    If the host task's current cancel scope has indeed been cancelled, the
    backend-specific cancellation exception will be raised.

    :raises RuntimeError: if the current thread was not spawned by
        :func:`.to_thread.run_sync`

    """
    # The attribute is only present on threads spawned by anyio itself
    try:
        backend: AsyncBackend = threadlocals.current_async_backend
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    backend.check_cancelled()
|
infer_4_37_2/lib/python3.10/site-packages/anyio/lowlevel.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import enum
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Any, Generic, Literal, TypeVar, overload
|
| 6 |
+
from weakref import WeakKeyDictionary
|
| 7 |
+
|
| 8 |
+
from ._core._eventloop import get_async_backend
|
| 9 |
+
|
| 10 |
+
T = TypeVar("T")
|
| 11 |
+
D = TypeVar("D")
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
async def checkpoint() -> None:
    """
    Check for cancellation and allow the scheduler to switch to another task.

    Equivalent to (but more efficient than)::

        await checkpoint_if_cancelled()
        await cancel_shielded_checkpoint()


    .. versionadded:: 3.0

    """
    backend = get_async_backend()
    await backend.checkpoint()
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
async def checkpoint_if_cancelled() -> None:
    """
    Enter a checkpoint if the enclosing cancel scope has been cancelled.

    This does not allow the scheduler to switch to a different task.

    .. versionadded:: 3.0

    """
    backend = get_async_backend()
    await backend.checkpoint_if_cancelled()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
async def cancel_shielded_checkpoint() -> None:
    """
    Allow the scheduler to switch to another task but without checking for cancellation.

    Equivalent to (but potentially more efficient than)::

        with CancelScope(shield=True):
            await checkpoint()


    .. versionadded:: 3.0

    """
    backend = get_async_backend()
    await backend.cancel_shielded_checkpoint()
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def current_token() -> object:
    """
    Return a backend specific token object that can be used to get back to the event
    loop.

    """
    backend = get_async_backend()
    return backend.current_token()
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary()
|
| 68 |
+
_token_wrappers: dict[Any, _TokenWrapper] = {}
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@dataclass(frozen=True)
class _TokenWrapper:
    # Hashable, weak-referenceable wrapper around a backend event loop token.
    # frozen=True supplies value-based __eq__/__hash__; __weakref__ in __slots__
    # allows these wrappers to serve as WeakKeyDictionary keys.
    __slots__ = "_token", "__weakref__"
    _token: object
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class _NoValueSet(enum.Enum):
    # Sentinel used to distinguish "no value was set" from a legitimate value of
    # None in RunVar/RunvarToken (an enum member is a typed singleton).
    NO_VALUE_SET = enum.auto()
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class RunvarToken(Generic[T]):
    # Token returned by RunVar.set(), recording the previous value so that
    # RunVar.reset() can restore it (mirrors contextvars.Token).

    __slots__ = "_var", "_value", "_redeemed"

    def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
        self._var = var  # the RunVar this token belongs to
        # Previous value, or the NO_VALUE_SET sentinel if there was none
        self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
        self._redeemed = False  # becomes True once used in reset()
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class RunVar(Generic[T]):
    """
    Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
    """

    __slots__ = "_name", "_default"

    # Sentinel meaning "no value/default set"
    NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET

    _token_wrappers: set[_TokenWrapper] = set()

    def __init__(
        self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ):
        self._name = name
        self._default = default

    @property
    def _current_vars(self) -> dict[str, T]:
        # Per-event-loop storage, keyed by the backend's loop token; created
        # lazily on first access for each loop.
        token = current_token()
        try:
            return _run_vars[token]
        except KeyError:
            run_vars = _run_vars[token] = {}
            return run_vars

    @overload
    def get(self, default: D) -> T | D: ...

    @overload
    def get(self) -> T: ...

    def get(
        self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ) -> T | D:
        # Lookup precedence: stored value, then call-site default, then the
        # default given at construction; otherwise LookupError.
        try:
            return self._current_vars[self._name]
        except KeyError:
            if default is not RunVar.NO_VALUE_SET:
                return default
            elif self._default is not RunVar.NO_VALUE_SET:
                return self._default

        raise LookupError(
            f'Run variable "{self._name}" has no value and no default set'
        )

    def set(self, value: T) -> RunvarToken[T]:
        # The token captures the previous value (or the sentinel) for reset()
        current_vars = self._current_vars
        token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
        current_vars[self._name] = value
        return token

    def reset(self, token: RunvarToken[T]) -> None:
        if token._var is not self:
            raise ValueError("This token does not belong to this RunVar")

        if token._redeemed:
            raise ValueError("This token has already been used")

        if token._value is _NoValueSet.NO_VALUE_SET:
            # There was no previous value: remove the entry entirely
            try:
                del self._current_vars[self._name]
            except KeyError:
                pass
        else:
            self._current_vars[self._name] = token._value

        # Tokens are single-use
        token._redeemed = True

    def __repr__(self) -> str:
        return f"<RunVar name={self._name!r}>"
|
infer_4_37_2/lib/python3.10/site-packages/anyio/py.typed
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/anyio/pytest_plugin.py
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
from collections.abc import Generator, Iterator
|
| 5 |
+
from contextlib import ExitStack, contextmanager
|
| 6 |
+
from inspect import isasyncgenfunction, iscoroutinefunction, ismethod
|
| 7 |
+
from typing import Any, cast
|
| 8 |
+
|
| 9 |
+
import pytest
|
| 10 |
+
import sniffio
|
| 11 |
+
from _pytest.fixtures import SubRequest
|
| 12 |
+
from _pytest.outcomes import Exit
|
| 13 |
+
|
| 14 |
+
from ._core._eventloop import get_all_backends, get_async_backend
|
| 15 |
+
from ._core._exceptions import iterate_exceptions
|
| 16 |
+
from .abc import TestRunner
|
| 17 |
+
|
| 18 |
+
if sys.version_info < (3, 11):
|
| 19 |
+
from exceptiongroup import ExceptionGroup
|
| 20 |
+
|
| 21 |
+
_current_runner: TestRunner | None = None
|
| 22 |
+
_runner_stack: ExitStack | None = None
|
| 23 |
+
_runner_leases = 0
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
    """
    Normalize an ``anyio_backend`` fixture value into a ``(name, options)`` pair.

    :param backend: either a backend name, or a 2-tuple of (backend name, options dict)
    :return: the backend name and its (possibly empty) options mapping
    :raises TypeError: if ``backend`` is neither form

    """
    if isinstance(backend, str):
        return backend, {}

    if (
        isinstance(backend, tuple)
        and len(backend) == 2
        and isinstance(backend[0], str)
        and isinstance(backend[1], dict)
    ):
        return cast(tuple[str, dict[str, Any]], backend)

    raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@contextmanager
def get_runner(
    backend_name: str, backend_options: dict[str, Any]
) -> Iterator[TestRunner]:
    """
    Yield the shared test runner for the given backend, creating it on first use.

    The runner is reference counted via module-level state: the first caller
    creates it and the last caller to exit closes it.
    """
    global _current_runner, _runner_leases, _runner_stack
    if _current_runner is None:
        # First lease: set up the runner and its cleanup stack
        asynclib = get_async_backend(backend_name)
        _runner_stack = ExitStack()
        if sniffio.current_async_library_cvar.get(None) is None:
            # Since we're in control of the event loop, we can cache the name of the
            # async library
            token = sniffio.current_async_library_cvar.set(backend_name)
            _runner_stack.callback(sniffio.current_async_library_cvar.reset, token)

        backend_options = backend_options or {}
        _current_runner = _runner_stack.enter_context(
            asynclib.create_test_runner(backend_options)
        )

    _runner_leases += 1
    try:
        yield _current_runner
    finally:
        _runner_leases -= 1
        if not _runner_leases:
            # Last lease released: tear down the runner and reset the globals
            assert _runner_stack is not None
            _runner_stack.close()
            _runner_stack = _current_runner = None
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def pytest_configure(config: Any) -> None:
    """Register the ``anyio`` marker with pytest so it shows up in ``--markers``."""
    marker_description = (
        "anyio: mark the (coroutine function) test to be run "
        "asynchronously via anyio."
    )
    config.addinivalue_line("markers", marker_description)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef: Any, request: Any) -> Generator[Any]:
    # Hook wrapper that transparently rewrites async fixtures (coroutine or async
    # generator functions) so they run on the anyio test runner. The original
    # fixturedef is restored after setup completes.
    def wrapper(
        *args: Any, anyio_backend: Any, request: SubRequest, **kwargs: Any
    ) -> Any:
        # Rebind any fixture methods to the request instance
        if (
            request.instance
            and ismethod(func)
            and type(func.__self__) is type(request.instance)
        ):
            local_func = func.__func__.__get__(request.instance)
        else:
            local_func = func

        backend_name, backend_options = extract_backend_and_options(anyio_backend)
        # Only forward the injected arguments if the original fixture asked for them
        if has_backend_arg:
            kwargs["anyio_backend"] = anyio_backend

        if has_request_arg:
            kwargs["request"] = request

        with get_runner(backend_name, backend_options) as runner:
            if isasyncgenfunction(local_func):
                yield from runner.run_asyncgen_fixture(local_func, kwargs)
            else:
                yield runner.run_fixture(local_func, kwargs)

    # Only apply this to coroutine functions and async generator functions in requests
    # that involve the anyio_backend fixture
    func = fixturedef.func
    if isasyncgenfunction(func) or iscoroutinefunction(func):
        if "anyio_backend" in request.fixturenames:
            fixturedef.func = wrapper
            original_argname = fixturedef.argnames

            # Ensure the wrapper receives anyio_backend and request, remembering
            # whether the original fixture already declared them
            if not (has_backend_arg := "anyio_backend" in fixturedef.argnames):
                fixturedef.argnames += ("anyio_backend",)

            if not (has_request_arg := "request" in fixturedef.argnames):
                fixturedef.argnames += ("request",)

            try:
                return (yield)
            finally:
                # Undo the monkeypatching so the fixturedef stays clean
                fixturedef.func = func
                fixturedef.argnames = original_argname

    return (yield)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None:
    """
    Apply the ``anyio_backend`` fixture to coroutine test functions that carry
    the ``anyio`` marker (either directly or via the closest marker lookup).
    """
    if not collector.istestfunction(obj, name):
        return

    # Hypothesis wraps the real test; inspect the inner function in that case
    inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj
    if not iscoroutinefunction(inner_func):
        return

    own_markers = getattr(obj, "pytestmark", ())
    has_anyio_marker = collector.get_closest_marker("anyio") or any(
        m.name == "anyio" for m in own_markers
    )
    if has_anyio_marker:
        pytest.mark.usefixtures("anyio_backend")(obj)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None:
    """
    Run coroutine test functions inside an anyio test runner.

    Returns ``True`` when this hook handled the call itself, or ``None`` to let
    pytest's default calling convention proceed.
    """

    def run_with_hypothesis(**kwargs: Any) -> None:
        # Replacement inner test for hypothesis-wrapped coroutine tests;
        # backend_name/backend_options are bound in the enclosing scope below
        with get_runner(backend_name, backend_options) as runner:
            runner.run_test(original_func, kwargs)

    backend = pyfuncitem.funcargs.get("anyio_backend")
    if backend:
        backend_name, backend_options = extract_backend_and_options(backend)

        if hasattr(pyfuncitem.obj, "hypothesis"):
            # Wrap the inner test function unless it's already wrapped
            original_func = pyfuncitem.obj.hypothesis.inner_test
            if original_func.__qualname__ != run_with_hypothesis.__qualname__:
                if iscoroutinefunction(original_func):
                    pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis

            return None

        if iscoroutinefunction(pyfuncitem.obj):
            funcargs = pyfuncitem.funcargs
            testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
            with get_runner(backend_name, backend_options) as runner:
                try:
                    runner.run_test(pyfuncitem.obj, testargs)
                except ExceptionGroup as excgrp:
                    # Re-raise control-flow exceptions (test session exit,
                    # Ctrl-C) directly instead of leaving them inside the group
                    for exc in iterate_exceptions(excgrp):
                        if isinstance(exc, (Exit, KeyboardInterrupt, SystemExit)):
                            raise exc from excgrp

                    raise

            return True

    return None
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
@pytest.fixture(scope="module", params=get_all_backends())
def anyio_backend(request: Any) -> Any:
    """Module-scoped fixture parametrized over all available anyio backends."""
    return request.param
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
@pytest.fixture
def anyio_backend_name(anyio_backend: Any) -> str:
    """Return just the backend name from the ``anyio_backend`` fixture value."""
    # The backend value is either a plain name or a (name, options) pair
    return anyio_backend if isinstance(anyio_backend, str) else anyio_backend[0]
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@pytest.fixture
def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]:
    """Return just the backend options from the ``anyio_backend`` fixture value."""
    # A bare backend name carries no options
    return {} if isinstance(anyio_backend, str) else anyio_backend[1]
|
infer_4_37_2/lib/python3.10/site-packages/anyio/streams/__init__.py
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/anyio/streams/buffered.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections.abc import Callable, Mapping
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from typing import Any
|
| 6 |
+
|
| 7 |
+
from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead
|
| 8 |
+
from ..abc import AnyByteReceiveStream, ByteReceiveStream
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass(eq=False)
class BufferedByteReceiveStream(ByteReceiveStream):
    """
    Wraps any bytes-based receive stream and uses a buffer to provide sophisticated
    receiving capabilities in the form of a byte stream.
    """

    # The wrapped stream; may be a byte stream or a bytes-object stream
    receive_stream: AnyByteReceiveStream
    # Bytes received but not yet handed out to the caller
    _buffer: bytearray = field(init=False, default_factory=bytearray)
    # Set once aclose() has run; further receives raise ClosedResourceError
    _closed: bool = field(init=False, default=False)

    async def aclose(self) -> None:
        await self.receive_stream.aclose()
        self._closed = True

    @property
    def buffer(self) -> bytes:
        """The bytes currently in the buffer."""
        return bytes(self._buffer)

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.receive_stream.extra_attributes

    async def receive(self, max_bytes: int = 65536) -> bytes:
        """
        Return up to ``max_bytes`` bytes, serving from the buffer first.

        :raises ~anyio.ClosedResourceError: if this stream has been closed
        """
        if self._closed:
            raise ClosedResourceError

        if self._buffer:
            # Serve buffered bytes before touching the underlying stream
            chunk = bytes(self._buffer[:max_bytes])
            del self._buffer[:max_bytes]
            return chunk
        elif isinstance(self.receive_stream, ByteReceiveStream):
            return await self.receive_stream.receive(max_bytes)
        else:
            # With a bytes-oriented object stream, we need to handle any surplus bytes
            # we get from the receive() call
            chunk = await self.receive_stream.receive()
            if len(chunk) > max_bytes:
                # Save the surplus bytes in the buffer
                self._buffer.extend(chunk[max_bytes:])
                return chunk[:max_bytes]
            else:
                return chunk

    async def receive_exactly(self, nbytes: int) -> bytes:
        """
        Read exactly the given amount of bytes from the stream.

        :param nbytes: the number of bytes to read
        :return: the bytes read
        :raises ~anyio.IncompleteRead: if the stream was closed before the requested
            amount of bytes could be read from the stream

        """
        while True:
            remaining = nbytes - len(self._buffer)
            if remaining <= 0:
                # Enough data buffered; hand out exactly nbytes
                retval = self._buffer[:nbytes]
                del self._buffer[:nbytes]
                return bytes(retval)

            try:
                if isinstance(self.receive_stream, ByteReceiveStream):
                    chunk = await self.receive_stream.receive(remaining)
                else:
                    chunk = await self.receive_stream.receive()
            except EndOfStream as exc:
                raise IncompleteRead from exc

            self._buffer.extend(chunk)

    async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes:
        """
        Read from the stream until the delimiter is found or max_bytes have been read.

        :param delimiter: the marker to look for in the stream
        :param max_bytes: maximum number of bytes that will be read before raising
            :exc:`~anyio.DelimiterNotFound`
        :return: the bytes read (not including the delimiter)
        :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter
            was found
        :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the
            bytes read up to the maximum allowed

        """
        delimiter_size = len(delimiter)
        offset = 0
        while True:
            # Check if the delimiter can be found in the current buffer
            index = self._buffer.find(delimiter, offset)
            if index >= 0:
                # Remove the returned bytes plus the delimiter from the buffer
                found = self._buffer[:index]
                del self._buffer[: index + len(delimiter) :]
                return bytes(found)

            # Check if the buffer is already at or over the limit
            if len(self._buffer) >= max_bytes:
                raise DelimiterNotFound(max_bytes)

            # Read more data into the buffer from the socket
            try:
                data = await self.receive_stream.receive()
            except EndOfStream as exc:
                raise IncompleteRead from exc

            # Move the offset forward and add the new data to the buffer;
            # backing up by (delimiter_size - 1) lets the next find() catch a
            # delimiter that straddles the old/new data boundary
            offset = max(len(self._buffer) - delimiter_size + 1, 0)
            self._buffer.extend(data)
|
infer_4_37_2/lib/python3.10/site-packages/anyio/streams/file.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections.abc import Callable, Mapping
|
| 4 |
+
from io import SEEK_SET, UnsupportedOperation
|
| 5 |
+
from os import PathLike
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Any, BinaryIO, cast
|
| 8 |
+
|
| 9 |
+
from .. import (
|
| 10 |
+
BrokenResourceError,
|
| 11 |
+
ClosedResourceError,
|
| 12 |
+
EndOfStream,
|
| 13 |
+
TypedAttributeSet,
|
| 14 |
+
to_thread,
|
| 15 |
+
typed_attribute,
|
| 16 |
+
)
|
| 17 |
+
from ..abc import ByteReceiveStream, ByteSendStream
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class FileStreamAttribute(TypedAttributeSet):
    """Extra attributes exposed by the file stream classes in this module."""

    #: the open file descriptor
    file: BinaryIO = typed_attribute()
    #: the path of the file on the file system, if available (file must be a real file)
    path: Path = typed_attribute()
    #: the file number, if available (file must be a real file or a TTY)
    fileno: int = typed_attribute()
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class _BaseFileStream:
    """Shared plumbing for the file-based read and write streams."""

    def __init__(self, file: BinaryIO):
        self._file = file

    async def aclose(self) -> None:
        # close() can block on flushing, so run it in a worker thread
        await to_thread.run_sync(self._file.close)

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        attrs: dict[Any, Callable[[], Any]] = {}
        attrs[FileStreamAttribute.file] = lambda: self._file

        # The path is only known for real files (which have a "name")
        if hasattr(self._file, "name"):
            attrs[FileStreamAttribute.path] = lambda: Path(self._file.name)

        # Expose fileno only if the underlying object supports it
        try:
            self._file.fileno()
        except UnsupportedOperation:
            pass
        else:
            attrs[FileStreamAttribute.fileno] = lambda: self._file.fileno()

        return attrs
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class FileReadStream(_BaseFileStream, ByteReceiveStream):
    """
    A byte stream that reads from a file in the file system.

    :param file: a file that has been opened for reading in binary mode

    .. versionadded:: 3.0
    """

    @classmethod
    async def from_path(cls, path: str | PathLike[str]) -> FileReadStream:
        """
        Create a file read stream by opening the given file.

        :param path: path of the file to read from

        """
        # Opening can block on disk I/O, so do it in a worker thread
        file = await to_thread.run_sync(Path(path).open, "rb")
        return cls(cast(BinaryIO, file))

    async def receive(self, max_bytes: int = 65536) -> bytes:
        """
        Read up to ``max_bytes`` bytes from the file.

        :raises ~anyio.ClosedResourceError: if the file has been closed
        :raises ~anyio.BrokenResourceError: if reading fails with an OS error
        :raises ~anyio.EndOfStream: if the end of the file has been reached
        """
        try:
            data = await to_thread.run_sync(self._file.read, max_bytes)
        except ValueError:
            # Reading from a closed file object raises ValueError
            raise ClosedResourceError from None
        except OSError as exc:
            raise BrokenResourceError from exc

        # An empty read means EOF for a blocking file
        if data:
            return data
        else:
            raise EndOfStream

    async def seek(self, position: int, whence: int = SEEK_SET) -> int:
        """
        Seek the file to the given position.

        .. seealso:: :meth:`io.IOBase.seek`

        .. note:: Not all file descriptors are seekable.

        :param position: position to seek the file to
        :param whence: controls how ``position`` is interpreted
        :return: the new absolute position
        :raises OSError: if the file is not seekable

        """
        return await to_thread.run_sync(self._file.seek, position, whence)

    async def tell(self) -> int:
        """
        Return the current stream position.

        .. note:: Not all file descriptors are seekable.

        :return: the current absolute position
        :raises OSError: if the file is not seekable

        """
        return await to_thread.run_sync(self._file.tell)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class FileWriteStream(_BaseFileStream, ByteSendStream):
    """
    A byte stream that writes to a file in the file system.

    :param file: a file that has been opened for writing in binary mode

    .. versionadded:: 3.0
    """

    @classmethod
    async def from_path(
        cls, path: str | PathLike[str], append: bool = False
    ) -> FileWriteStream:
        """
        Create a file write stream by opening the given file for writing.

        :param path: path of the file to write to
        :param append: if ``True``, open the file for appending; if ``False``, any
            existing file at the given path will be truncated

        """
        if append:
            mode = "ab"
        else:
            mode = "wb"
        # Opening can block on disk I/O, so do it in a worker thread
        opened = await to_thread.run_sync(Path(path).open, mode)
        return cls(cast(BinaryIO, opened))

    async def send(self, item: bytes) -> None:
        """
        Write the given bytes to the file.

        :raises ~anyio.ClosedResourceError: if the file has been closed
        :raises ~anyio.BrokenResourceError: if writing fails with an OS error
        """
        try:
            await to_thread.run_sync(self._file.write, item)
        except ValueError:
            # Writing to a closed file object raises ValueError
            raise ClosedResourceError from None
        except OSError as exc:
            raise BrokenResourceError from exc
|
infer_4_37_2/lib/python3.10/site-packages/anyio/streams/memory.py
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import warnings
|
| 4 |
+
from collections import OrderedDict, deque
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from types import TracebackType
|
| 7 |
+
from typing import Generic, NamedTuple, TypeVar
|
| 8 |
+
|
| 9 |
+
from .. import (
|
| 10 |
+
BrokenResourceError,
|
| 11 |
+
ClosedResourceError,
|
| 12 |
+
EndOfStream,
|
| 13 |
+
WouldBlock,
|
| 14 |
+
)
|
| 15 |
+
from .._core._testing import TaskInfo, get_current_task
|
| 16 |
+
from ..abc import Event, ObjectReceiveStream, ObjectSendStream
|
| 17 |
+
from ..lowlevel import checkpoint
|
| 18 |
+
|
| 19 |
+
T_Item = TypeVar("T_Item")
|
| 20 |
+
T_co = TypeVar("T_co", covariant=True)
|
| 21 |
+
T_contra = TypeVar("T_contra", contravariant=True)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class MemoryObjectStreamStatistics(NamedTuple):
    """A snapshot of a memory object stream's state, as returned by ``statistics()``."""

    current_buffer_used: int  #: number of items stored in the buffer
    #: maximum number of items that can be stored on this stream (or :data:`math.inf`)
    max_buffer_size: float
    open_send_streams: int  #: number of unclosed clones of the send stream
    open_receive_streams: int  #: number of unclosed clones of the receive stream
    #: number of tasks blocked on :meth:`MemoryObjectSendStream.send`
    tasks_waiting_send: int
    #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive`
    tasks_waiting_receive: int
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass(eq=False)
class MemoryObjectItemReceiver(Generic[T_Item]):
    """Bookkeeping record for a task waiting on ``MemoryObjectReceiveStream.receive()``."""

    # The task that is waiting to receive; captured at creation time
    task_info: TaskInfo = field(init=False, default_factory=get_current_task)
    # Set by the sender when an item is delivered; left unset otherwise
    item: T_Item = field(init=False)

    def __repr__(self) -> str:
        # When item is not defined, we get following error with default __repr__:
        # AttributeError: 'MemoryObjectItemReceiver' object has no attribute 'item'
        item = getattr(self, "item", None)
        return f"{self.__class__.__name__}(task_info={self.task_info}, item={item!r})"
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@dataclass(eq=False)
class MemoryObjectStreamState(Generic[T_Item]):
    """Shared state between the send and receive halves of a memory object stream."""

    # Maximum number of buffered items (may be math.inf)
    max_buffer_size: float = field()
    # Items sent but not yet received
    buffer: deque[T_Item] = field(init=False, default_factory=deque)
    # Reference counts of unclosed send/receive stream clones
    open_send_channels: int = field(init=False, default=0)
    open_receive_channels: int = field(init=False, default=0)
    # Tasks blocked in receive(), in FIFO order
    waiting_receivers: OrderedDict[Event, MemoryObjectItemReceiver[T_Item]] = field(
        init=False, default_factory=OrderedDict
    )
    # Tasks blocked in send(), mapped to the item they are trying to send
    waiting_senders: OrderedDict[Event, T_Item] = field(
        init=False, default_factory=OrderedDict
    )

    def statistics(self) -> MemoryObjectStreamStatistics:
        """Return a snapshot of the current stream state."""
        return MemoryObjectStreamStatistics(
            len(self.buffer),
            self.max_buffer_size,
            self.open_send_channels,
            self.open_receive_channels,
            len(self.waiting_senders),
            len(self.waiting_receivers),
        )
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@dataclass(eq=False)
class MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]):
    """The receiving half of a memory object stream."""

    # Shared state with the send stream and any clones
    _state: MemoryObjectStreamState[T_co]
    _closed: bool = field(init=False, default=False)

    def __post_init__(self) -> None:
        # Register this clone in the shared state
        self._state.open_receive_channels += 1

    def receive_nowait(self) -> T_co:
        """
        Receive the next item if it can be done without waiting.

        :return: the received item
        :raises ~anyio.ClosedResourceError: if this send stream has been closed
        :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been
            closed from the sending end
        :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks
            waiting to send

        """
        if self._closed:
            raise ClosedResourceError

        if self._state.waiting_senders:
            # Get the item from the next sender
            send_event, item = self._state.waiting_senders.popitem(last=False)
            self._state.buffer.append(item)
            send_event.set()

        if self._state.buffer:
            return self._state.buffer.popleft()
        elif not self._state.open_send_channels:
            raise EndOfStream

        raise WouldBlock

    async def receive(self) -> T_co:
        """
        Receive the next item, waiting if necessary.

        :raises ~anyio.EndOfStream: if the stream was closed from the sending end
            while waiting
        """
        await checkpoint()
        try:
            return self.receive_nowait()
        except WouldBlock:
            # Add ourselves in the queue
            receive_event = Event()
            receiver = MemoryObjectItemReceiver[T_co]()
            self._state.waiting_receivers[receive_event] = receiver

            try:
                await receive_event.wait()
            finally:
                self._state.waiting_receivers.pop(receive_event, None)

            try:
                # receiver.item is only set if a sender delivered an item;
                # otherwise the wakeup means the sending end was closed
                return receiver.item
            except AttributeError:
                raise EndOfStream

    def clone(self) -> MemoryObjectReceiveStream[T_co]:
        """
        Create a clone of this receive stream.

        Each clone can be closed separately. Only when all clones have been closed will
        the receiving end of the memory stream be considered closed by the sending ends.

        :return: the cloned stream

        """
        if self._closed:
            raise ClosedResourceError

        return MemoryObjectReceiveStream(_state=self._state)

    def close(self) -> None:
        """
        Close the stream.

        This works the exact same way as :meth:`aclose`, but is provided as a special
        case for the benefit of synchronous callbacks.

        """
        if not self._closed:
            self._closed = True
            self._state.open_receive_channels -= 1
            if self._state.open_receive_channels == 0:
                # Last receive clone closed: wake all blocked senders so they
                # can raise BrokenResourceError
                send_events = list(self._state.waiting_senders.keys())
                for event in send_events:
                    event.set()

    async def aclose(self) -> None:
        self.close()

    def statistics(self) -> MemoryObjectStreamStatistics:
        """
        Return statistics about the current state of this stream.

        .. versionadded:: 3.0
        """
        return self._state.statistics()

    def __enter__(self) -> MemoryObjectReceiveStream[T_co]:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def __del__(self) -> None:
        # Warn about streams that were garbage collected without being closed
        if not self._closed:
            warnings.warn(
                f"Unclosed <{self.__class__.__name__} at {id(self):x}>",
                ResourceWarning,
                source=self,
            )
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
@dataclass(eq=False)
class MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]):
    """The sending half of a memory object stream."""

    # Shared state with the receive stream and any clones
    _state: MemoryObjectStreamState[T_contra]
    _closed: bool = field(init=False, default=False)

    def __post_init__(self) -> None:
        # Register this clone in the shared state
        self._state.open_send_channels += 1

    def send_nowait(self, item: T_contra) -> None:
        """
        Send an item immediately if it can be done without waiting.

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if this send stream has been closed
        :raises ~anyio.BrokenResourceError: if the stream has been closed from the
            receiving end
        :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting
            to receive

        """
        if self._closed:
            raise ClosedResourceError
        if not self._state.open_receive_channels:
            raise BrokenResourceError

        # Hand the item directly to a waiting receiver, skipping any whose
        # task has a pending cancellation (they would drop the item)
        while self._state.waiting_receivers:
            receive_event, receiver = self._state.waiting_receivers.popitem(last=False)
            if not receiver.task_info.has_pending_cancellation():
                receiver.item = item
                receive_event.set()
                return

        if len(self._state.buffer) < self._state.max_buffer_size:
            self._state.buffer.append(item)
        else:
            raise WouldBlock

    async def send(self, item: T_contra) -> None:
        """
        Send an item to the stream.

        If the buffer is full, this method blocks until there is again room in the
        buffer or the item can be sent directly to a receiver.

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if this send stream has been closed
        :raises ~anyio.BrokenResourceError: if the stream has been closed from the
            receiving end

        """
        await checkpoint()
        try:
            self.send_nowait(item)
        except WouldBlock:
            # Wait until there's someone on the receiving end
            send_event = Event()
            self._state.waiting_senders[send_event] = item
            try:
                await send_event.wait()
            except BaseException:
                self._state.waiting_senders.pop(send_event, None)
                raise

            # If our entry is still present, the wakeup came from the receive
            # side closing rather than an item being taken
            if send_event in self._state.waiting_senders:
                del self._state.waiting_senders[send_event]
                raise BrokenResourceError from None

    def clone(self) -> MemoryObjectSendStream[T_contra]:
        """
        Create a clone of this send stream.

        Each clone can be closed separately. Only when all clones have been closed will
        the sending end of the memory stream be considered closed by the receiving ends.

        :return: the cloned stream

        """
        if self._closed:
            raise ClosedResourceError

        return MemoryObjectSendStream(_state=self._state)

    def close(self) -> None:
        """
        Close the stream.

        This works the exact same way as :meth:`aclose`, but is provided as a special
        case for the benefit of synchronous callbacks.

        """
        if not self._closed:
            self._closed = True
            self._state.open_send_channels -= 1
            if self._state.open_send_channels == 0:
                # Last send clone closed: wake all blocked receivers so they
                # can raise EndOfStream
                receive_events = list(self._state.waiting_receivers.keys())
                self._state.waiting_receivers.clear()
                for event in receive_events:
                    event.set()

    async def aclose(self) -> None:
        self.close()

    def statistics(self) -> MemoryObjectStreamStatistics:
        """
        Return statistics about the current state of this stream.

        .. versionadded:: 3.0
        """
        return self._state.statistics()

    def __enter__(self) -> MemoryObjectSendStream[T_contra]:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def __del__(self) -> None:
        # Warn about streams that were garbage collected without being closed
        if not self._closed:
            warnings.warn(
                f"Unclosed <{self.__class__.__name__} at {id(self):x}>",
                ResourceWarning,
                source=self,
            )
|
infer_4_37_2/lib/python3.10/site-packages/anyio/streams/stapled.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from collections.abc import Callable, Mapping, Sequence
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Any, Generic, TypeVar
|
| 6 |
+
|
| 7 |
+
from ..abc import (
|
| 8 |
+
ByteReceiveStream,
|
| 9 |
+
ByteSendStream,
|
| 10 |
+
ByteStream,
|
| 11 |
+
Listener,
|
| 12 |
+
ObjectReceiveStream,
|
| 13 |
+
ObjectSendStream,
|
| 14 |
+
ObjectStream,
|
| 15 |
+
TaskGroup,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
T_Item = TypeVar("T_Item")
|
| 19 |
+
T_Stream = TypeVar("T_Stream")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@dataclass(eq=False)
class StapledByteStream(ByteStream):
    """
    Combines two byte streams into a single, bidirectional byte stream.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param ByteSendStream send_stream: the sending byte stream
    :param ByteReceiveStream receive_stream: the receiving byte stream
    """

    send_stream: ByteSendStream
    receive_stream: ByteReceiveStream

    async def receive(self, max_bytes: int = 65536) -> bytes:
        # Delegate straight to the receiving half
        return await self.receive_stream.receive(max_bytes)

    async def send(self, item: bytes) -> None:
        # Delegate straight to the sending half
        await self.send_stream.send(item)

    async def send_eof(self) -> None:
        # Signalling EOF means closing the sending half entirely
        await self.send_stream.aclose()

    async def aclose(self) -> None:
        await self.send_stream.aclose()
        await self.receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # Receive-stream attributes overwrite send-stream ones on conflict
        merged = dict(self.send_stream.extra_attributes)
        merged.update(self.receive_stream.extra_attributes)
        return merged
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@dataclass(eq=False)
class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]):
    """
    Combines two object streams into a single, bidirectional object stream.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param ObjectSendStream send_stream: the sending object stream
    :param ObjectReceiveStream receive_stream: the receiving object stream
    """

    send_stream: ObjectSendStream[T_Item]
    receive_stream: ObjectReceiveStream[T_Item]

    async def receive(self) -> T_Item:
        return await self.receive_stream.receive()

    async def send(self, item: T_Item) -> None:
        await self.send_stream.send(item)

    async def send_eof(self) -> None:
        # Half-close: shut down only the sending side
        await self.send_stream.aclose()

    async def aclose(self) -> None:
        # Close the receive stream even if closing the send stream raises, so
        # that neither underlying stream is leaked on error.
        try:
            await self.send_stream.aclose()
        finally:
            await self.receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # The receive stream is merged last so its attributes win on conflicts
        return {
            **self.send_stream.extra_attributes,
            **self.receive_stream.extra_attributes,
        }
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@dataclass(eq=False)
class MultiListener(Generic[T_Stream], Listener[T_Stream]):
    """
    Combines multiple listeners into one, serving connections from all of them at once.

    Any MultiListeners in the given collection of listeners will have their listeners
    moved into this one.

    Extra attributes are provided from each listener, with each successive listener
    overriding any conflicting attributes from the previous one.

    :param listeners: listeners to serve
    :type listeners: Sequence[Listener[T_Stream]]
    """

    listeners: Sequence[Listener[T_Stream]]

    def __post_init__(self) -> None:
        # Flatten nested MultiListeners: absorb their listeners into this one
        # and empty the donor so connections are not served twice.
        flattened: list[Listener[T_Stream]] = []
        for candidate in self.listeners:
            if isinstance(candidate, MultiListener):
                flattened.extend(candidate.listeners)
                del candidate.listeners[:]  # type: ignore[attr-defined]
            else:
                flattened.append(candidate)

        self.listeners = flattened

    async def serve(
        self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None
    ) -> None:
        # Imported lazily to avoid a circular import at module load time
        from .. import create_task_group

        async with create_task_group() as tg:
            # Serve every wrapped listener concurrently with the same handler
            for member in self.listeners:
                tg.start_soon(member.serve, handler, task_group)

    async def aclose(self) -> None:
        for member in self.listeners:
            await member.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # Later listeners override earlier ones on conflicting keys
        merged: dict = {}
        for member in self.listeners:
            merged.update(member.extra_attributes)

        return merged
|
infer_4_37_2/lib/python3.10/site-packages/anyio/streams/text.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import codecs
|
| 4 |
+
from collections.abc import Callable, Mapping
|
| 5 |
+
from dataclasses import InitVar, dataclass, field
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
from ..abc import (
|
| 9 |
+
AnyByteReceiveStream,
|
| 10 |
+
AnyByteSendStream,
|
| 11 |
+
AnyByteStream,
|
| 12 |
+
ObjectReceiveStream,
|
| 13 |
+
ObjectSendStream,
|
| 14 |
+
ObjectStream,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclass(eq=False)
class TextReceiveStream(ObjectReceiveStream[str]):
    """
    Stream wrapper that decodes bytes to strings using the given encoding.

    Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any
    completely received unicode characters as soon as they come in.

    :param transport_stream: any bytes-based receive stream
    :param encoding: character encoding to use for decoding bytes to strings (defaults
        to ``utf-8``)
    :param errors: handling scheme for decoding errors (defaults to ``strict``; see the
        `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteReceiveStream
    encoding: InitVar[str] = "utf-8"
    errors: InitVar[str] = "strict"
    _decoder: codecs.IncrementalDecoder = field(init=False)

    def __post_init__(self, encoding: str, errors: str) -> None:
        # An incremental decoder keeps partial multibyte sequences buffered
        # between receive() calls
        self._decoder = codecs.getincrementaldecoder(encoding)(errors=errors)

    async def receive(self) -> str:
        # A chunk may end mid-character and decode to nothing; keep reading
        # until at least one complete character is available.
        while True:
            raw = await self.transport_stream.receive()
            text = self._decoder.decode(raw)
            if text:
                return text

    async def aclose(self) -> None:
        await self.transport_stream.aclose()
        # Discard any buffered partial character state
        self._decoder.reset()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.transport_stream.extra_attributes
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@dataclass(eq=False)
class TextSendStream(ObjectSendStream[str]):
    """
    Sends strings to the wrapped stream as bytes using the given encoding.

    :param AnyByteSendStream transport_stream: any bytes-based send stream
    :param str encoding: character encoding to use for encoding strings to bytes
        (defaults to ``utf-8``)
    :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
        the `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteSendStream
    encoding: InitVar[str] = "utf-8"
    errors: str = "strict"
    _encoder: Callable[..., tuple[bytes, int]] = field(init=False)

    def __post_init__(self, encoding: str) -> None:
        # Resolve the codec's encoder once up front instead of per send() call
        self._encoder = codecs.getencoder(encoding)

    async def send(self, item: str) -> None:
        # The encoder returns (encoded bytes, number of characters consumed);
        # only the bytes are forwarded
        payload, _ = self._encoder(item, self.errors)
        await self.transport_stream.send(payload)

    async def aclose(self) -> None:
        await self.transport_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.transport_stream.extra_attributes
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@dataclass(eq=False)
class TextStream(ObjectStream[str]):
    """
    A bidirectional stream that decodes bytes to strings on receive and encodes strings
    to bytes on send.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param AnyByteStream transport_stream: any bytes-based stream
    :param str encoding: character encoding to use for encoding/decoding strings to/from
        bytes (defaults to ``utf-8``)
    :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
        the `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteStream
    encoding: InitVar[str] = "utf-8"
    errors: InitVar[str] = "strict"
    _receive_stream: TextReceiveStream = field(init=False)
    _send_stream: TextSendStream = field(init=False)

    def __post_init__(self, encoding: str, errors: str) -> None:
        # Delegate each direction to a dedicated one-way text wrapper over the
        # same underlying transport
        self._receive_stream = TextReceiveStream(
            self.transport_stream, encoding=encoding, errors=errors
        )
        self._send_stream = TextSendStream(
            self.transport_stream, encoding=encoding, errors=errors
        )

    async def receive(self) -> str:
        return await self._receive_stream.receive()

    async def send(self, item: str) -> None:
        await self._send_stream.send(item)

    async def send_eof(self) -> None:
        # EOF goes straight to the transport; the text wrappers have no EOF notion
        await self.transport_stream.send_eof()

    async def aclose(self) -> None:
        await self._send_stream.aclose()
        await self._receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # Receive-side attributes merged last so they win on conflicts
        return {
            **self._send_stream.extra_attributes,
            **self._receive_stream.extra_attributes,
        }
|
infer_4_37_2/lib/python3.10/site-packages/anyio/streams/tls.py
ADDED
|
@@ -0,0 +1,337 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import re
|
| 5 |
+
import ssl
|
| 6 |
+
import sys
|
| 7 |
+
from collections.abc import Callable, Mapping
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from functools import wraps
|
| 10 |
+
from typing import Any, TypeVar
|
| 11 |
+
|
| 12 |
+
from .. import (
|
| 13 |
+
BrokenResourceError,
|
| 14 |
+
EndOfStream,
|
| 15 |
+
aclose_forcefully,
|
| 16 |
+
get_cancelled_exc_class,
|
| 17 |
+
)
|
| 18 |
+
from .._core._typedattr import TypedAttributeSet, typed_attribute
|
| 19 |
+
from ..abc import AnyByteStream, ByteStream, Listener, TaskGroup
|
| 20 |
+
|
| 21 |
+
if sys.version_info >= (3, 11):
|
| 22 |
+
from typing import TypeVarTuple, Unpack
|
| 23 |
+
else:
|
| 24 |
+
from typing_extensions import TypeVarTuple, Unpack
|
| 25 |
+
|
| 26 |
+
T_Retval = TypeVar("T_Retval")
|
| 27 |
+
PosArgsT = TypeVarTuple("PosArgsT")
|
| 28 |
+
_PCTRTT = tuple[tuple[str, str], ...]
|
| 29 |
+
_PCTRTTT = tuple[_PCTRTT, ...]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class TLSAttribute(TypedAttributeSet):
    """Contains Transport Layer Security related attributes."""

    #: the selected ALPN protocol (``None`` if ALPN negotiation selected nothing)
    alpn_protocol: str | None = typed_attribute()
    #: the channel binding for type ``tls-unique``
    channel_binding_tls_unique: bytes = typed_attribute()
    #: the selected cipher, as a (name, protocol, secret bits) tuple
    cipher: tuple[str, str, int] = typed_attribute()
    #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert`
    # for more information)
    peer_certificate: None | (dict[str, str | _PCTRTTT | _PCTRTT]) = typed_attribute()
    #: the peer certificate in binary (DER) form
    peer_certificate_binary: bytes | None = typed_attribute()
    #: ``True`` if this is the server side of the connection
    server_side: bool = typed_attribute()
    #: ciphers shared by the client during the TLS handshake (``None`` if this is the
    #: client side)
    shared_ciphers: list[tuple[str, str, int]] | None = typed_attribute()
    #: the :class:`~ssl.SSLObject` used for encryption
    ssl_object: ssl.SSLObject = typed_attribute()
    #: ``True`` if this stream does (and expects) a closing TLS handshake when the
    #: stream is being closed
    standard_compatible: bool = typed_attribute()
    #: the TLS protocol version (e.g. ``TLSv1.2``)
    tls_version: str = typed_attribute()
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@dataclass(eq=False)
class TLSStream(ByteStream):
    """
    A stream wrapper that encrypts all sent data and decrypts received data.

    This class has no public initializer; use :meth:`wrap` instead.
    All extra attributes from :class:`~TLSAttribute` are supported.

    :var AnyByteStream transport_stream: the wrapped stream

    """

    transport_stream: AnyByteStream
    standard_compatible: bool
    _ssl_object: ssl.SSLObject
    _read_bio: ssl.MemoryBIO
    _write_bio: ssl.MemoryBIO

    @classmethod
    async def wrap(
        cls,
        transport_stream: AnyByteStream,
        *,
        server_side: bool | None = None,
        hostname: str | None = None,
        ssl_context: ssl.SSLContext | None = None,
        standard_compatible: bool = True,
    ) -> TLSStream:
        """
        Wrap an existing stream with Transport Layer Security.

        This performs a TLS handshake with the peer.

        :param transport_stream: a bytes-transporting stream to wrap
        :param server_side: ``True`` if this is the server side of the connection,
            ``False`` if this is the client side (if omitted, will be set to ``False``
            if ``hostname`` has been provided, ``False`` otherwise). Used only to create
            a default context when an explicit context has not been provided.
        :param hostname: host name of the peer (if host name checking is desired)
        :param ssl_context: the SSLContext object to use (if not provided, a secure
            default will be created)
        :param standard_compatible: if ``False``, skip the closing handshake when
            closing the connection, and don't raise an exception if the peer does the
            same
        :raises ~ssl.SSLError: if the TLS handshake fails

        """
        if server_side is None:
            # No explicit side given: a hostname implies we are the client
            server_side = not hostname

        if not ssl_context:
            purpose = (
                ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH
            )
            ssl_context = ssl.create_default_context(purpose)

            # Re-enable detection of unexpected EOFs if it was disabled by Python
            if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
                ssl_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF

        # Memory BIOs let us drive TLS over an arbitrary transport: the SSL
        # object reads ciphertext from bio_in and writes ciphertext to bio_out
        bio_in = ssl.MemoryBIO()
        bio_out = ssl.MemoryBIO()
        ssl_object = ssl_context.wrap_bio(
            bio_in, bio_out, server_side=server_side, server_hostname=hostname
        )
        wrapper = cls(
            transport_stream=transport_stream,
            standard_compatible=standard_compatible,
            _ssl_object=ssl_object,
            _read_bio=bio_in,
            _write_bio=bio_out,
        )
        await wrapper._call_sslobject_method(ssl_object.do_handshake)
        return wrapper

    async def _call_sslobject_method(
        self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
    ) -> T_Retval:
        # Repeatedly invoke ``func`` until it completes, servicing the memory
        # BIOs whenever the SSL layer signals it needs more I/O.
        while True:
            try:
                result = func(*args)
            except ssl.SSLWantReadError:
                # The SSL layer needs more ciphertext from the peer
                try:
                    # Flush any pending writes first
                    if self._write_bio.pending:
                        await self.transport_stream.send(self._write_bio.read())

                    data = await self.transport_stream.receive()
                except EndOfStream:
                    # Signal EOF to the SSL layer; it decides whether that is
                    # acceptable (see the SSLError handler below)
                    self._read_bio.write_eof()
                except OSError as exc:
                    self._read_bio.write_eof()
                    self._write_bio.write_eof()
                    raise BrokenResourceError from exc
                else:
                    self._read_bio.write(data)
            except ssl.SSLWantWriteError:
                # The SSL layer produced ciphertext that must be sent first
                await self.transport_stream.send(self._write_bio.read())
            except ssl.SSLSyscallError as exc:
                self._read_bio.write_eof()
                self._write_bio.write_eof()
                raise BrokenResourceError from exc
            except ssl.SSLError as exc:
                self._read_bio.write_eof()
                self._write_bio.write_eof()
                # An unexpected EOF is either a hard error (standard compatible
                # mode) or a tolerated end-of-stream (compatibility mode)
                if isinstance(exc, ssl.SSLEOFError) or (
                    exc.strerror and "UNEXPECTED_EOF_WHILE_READING" in exc.strerror
                ):
                    if self.standard_compatible:
                        raise BrokenResourceError from exc
                    else:
                        raise EndOfStream from None

                raise
            else:
                # Flush any pending writes first
                if self._write_bio.pending:
                    await self.transport_stream.send(self._write_bio.read())

                return result

    async def unwrap(self) -> tuple[AnyByteStream, bytes]:
        """
        Does the TLS closing handshake.

        :return: a tuple of (wrapped byte stream, bytes left in the read buffer)

        """
        await self._call_sslobject_method(self._ssl_object.unwrap)
        self._read_bio.write_eof()
        self._write_bio.write_eof()
        return self.transport_stream, self._read_bio.read()

    async def aclose(self) -> None:
        if self.standard_compatible:
            try:
                await self.unwrap()
            except BaseException:
                # Closing handshake failed; abandon the transport without a
                # graceful close, then let the original error propagate
                await aclose_forcefully(self.transport_stream)
                raise

        await self.transport_stream.aclose()

    async def receive(self, max_bytes: int = 65536) -> bytes:
        data = await self._call_sslobject_method(self._ssl_object.read, max_bytes)
        if not data:
            # SSLObject.read() returns b"" on a clean TLS-level EOF
            raise EndOfStream

        return data

    async def send(self, item: bytes) -> None:
        await self._call_sslobject_method(self._ssl_object.write, item)

    async def send_eof(self) -> None:
        # Half-close is only well-defined from TLS 1.3 onwards, and even then
        # it is not implemented here yet
        tls_version = self.extra(TLSAttribute.tls_version)
        match = re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version)
        if match:
            major, minor = int(match.group(1)), int(match.group(2) or 0)
            if (major, minor) < (1, 3):
                raise NotImplementedError(
                    f"send_eof() requires at least TLSv1.3; current "
                    f"session uses {tls_version}"
                )

        raise NotImplementedError(
            "send_eof() has not yet been implemented for TLS streams"
        )

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # Transport attributes first so the TLS-specific ones take precedence
        return {
            **self.transport_stream.extra_attributes,
            TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol,
            TLSAttribute.channel_binding_tls_unique: (
                self._ssl_object.get_channel_binding
            ),
            TLSAttribute.cipher: self._ssl_object.cipher,
            TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False),
            TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert(
                True
            ),
            TLSAttribute.server_side: lambda: self._ssl_object.server_side,
            TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers()
            if self._ssl_object.server_side
            else None,
            TLSAttribute.standard_compatible: lambda: self.standard_compatible,
            TLSAttribute.ssl_object: lambda: self._ssl_object,
            TLSAttribute.tls_version: self._ssl_object.version,
        }
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
@dataclass(eq=False)
class TLSListener(Listener[TLSStream]):
    """
    A convenience listener that wraps another listener and auto-negotiates a TLS session
    on every accepted connection.

    If the TLS handshake times out or raises an exception,
    :meth:`handle_handshake_error` is called to do whatever post-mortem processing is
    deemed necessary.

    Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute.

    :param Listener listener: the listener to wrap
    :param ssl_context: the SSL context object
    :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap`
    :param handshake_timeout: time limit for the TLS handshake
        (passed to :func:`~anyio.fail_after`)
    """

    listener: Listener[Any]
    ssl_context: ssl.SSLContext
    standard_compatible: bool = True
    handshake_timeout: float = 30

    @staticmethod
    async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None:
        """
        Handle an exception raised during the TLS handshake.

        This method does 3 things:

        #. Forcefully closes the original stream
        #. Logs the exception (unless it was a cancellation exception) using the
           ``anyio.streams.tls`` logger
        #. Reraises the exception if it was a base exception or a cancellation exception

        :param exc: the exception
        :param stream: the original stream

        """
        await aclose_forcefully(stream)

        # Log all except cancellation exceptions
        if not isinstance(exc, get_cancelled_exc_class()):
            # CPython (as of 3.11.5) returns incorrect `sys.exc_info()` here when using
            # any asyncio implementation, so we explicitly pass the exception to log
            # (https://github.com/python/cpython/issues/108668). Trio does not have this
            # issue because it works around the CPython bug.
            logging.getLogger(__name__).exception(
                "Error during TLS handshake", exc_info=exc
            )

        # Only reraise base exceptions and cancellation exceptions
        # NOTE: the bare ``raise`` relies on this method being called from an
        # active ``except`` block (as done in serve() below)
        if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()):
            raise

    async def serve(
        self,
        handler: Callable[[TLSStream], Any],
        task_group: TaskGroup | None = None,
    ) -> None:
        """Serve incoming connections, wrapping each one in TLS before handing
        it to ``handler``."""

        @wraps(handler)
        async def handler_wrapper(stream: AnyByteStream) -> None:
            # Imported lazily to avoid a circular import at module load time
            from .. import fail_after

            try:
                # Bound the handshake so a stalled peer cannot hold the slot forever
                with fail_after(self.handshake_timeout):
                    wrapped_stream = await TLSStream.wrap(
                        stream,
                        ssl_context=self.ssl_context,
                        standard_compatible=self.standard_compatible,
                    )
            except BaseException as exc:
                await self.handle_handshake_error(exc, stream)
            else:
                await handler(wrapped_stream)

        await self.listener.serve(handler_wrapper, task_group)

    async def aclose(self) -> None:
        # Closes only the wrapped listener; established streams are unaffected
        await self.listener.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            TLSAttribute.standard_compatible: lambda: self.standard_compatible,
        }
|
infer_4_37_2/lib/python3.10/site-packages/anyio/to_process.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import pickle
|
| 5 |
+
import subprocess
|
| 6 |
+
import sys
|
| 7 |
+
from collections import deque
|
| 8 |
+
from collections.abc import Callable
|
| 9 |
+
from importlib.util import module_from_spec, spec_from_file_location
|
| 10 |
+
from typing import TypeVar, cast
|
| 11 |
+
|
| 12 |
+
from ._core._eventloop import current_time, get_async_backend, get_cancelled_exc_class
|
| 13 |
+
from ._core._exceptions import BrokenWorkerProcess
|
| 14 |
+
from ._core._subprocesses import open_process
|
| 15 |
+
from ._core._synchronization import CapacityLimiter
|
| 16 |
+
from ._core._tasks import CancelScope, fail_after
|
| 17 |
+
from .abc import ByteReceiveStream, ByteSendStream, Process
|
| 18 |
+
from .lowlevel import RunVar, checkpoint_if_cancelled
|
| 19 |
+
from .streams.buffered import BufferedByteReceiveStream
|
| 20 |
+
|
| 21 |
+
if sys.version_info >= (3, 11):
|
| 22 |
+
from typing import TypeVarTuple, Unpack
|
| 23 |
+
else:
|
| 24 |
+
from typing_extensions import TypeVarTuple, Unpack
|
| 25 |
+
|
| 26 |
+
WORKER_MAX_IDLE_TIME = 300 # 5 minutes
|
| 27 |
+
|
| 28 |
+
T_Retval = TypeVar("T_Retval")
|
| 29 |
+
PosArgsT = TypeVarTuple("PosArgsT")
|
| 30 |
+
|
| 31 |
+
_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers")
|
| 32 |
+
_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar(
|
| 33 |
+
"_process_pool_idle_workers"
|
| 34 |
+
)
|
| 35 |
+
_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter")
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
async def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    cancellable: bool = False,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a worker process.

    If the ``cancellable`` option is enabled and the task waiting for its completion is
    cancelled, the worker process running it will be abruptly terminated using SIGKILL
    (or ``terminateProcess()`` on Windows).

    :param func: a callable
    :param args: positional arguments for the callable
    :param cancellable: ``True`` to allow cancellation of the operation while it's
        running
    :param limiter: capacity limiter to use to limit the total amount of processes
        running (if omitted, the default limiter is used)
    :return: an awaitable that yields the return value of the function.

    """

    async def send_raw_command(pickled_cmd: bytes) -> object:
        # Send one pickled command to the worker and read back its reply.  The wire
        # protocol is a status line (b"RETURN <len>\n" or b"EXCEPTION <len>\n")
        # followed by exactly <len> bytes of pickled payload.
        try:
            await stdin.send(pickled_cmd)
            # The status line is expected to fit within 50 bytes
            response = await buffered.receive_until(b"\n", 50)
            status, length = response.split(b" ")
            if status not in (b"RETURN", b"EXCEPTION"):
                raise RuntimeError(
                    f"Worker process returned unexpected response: {response!r}"
                )

            pickled_response = await buffered.receive_exactly(int(length))
        except BaseException as exc:
            # Any failure here leaves the protocol stream in an unknown state, so
            # this worker cannot be reused: drop it from the pool and kill it
            workers.discard(process)
            try:
                process.kill()
                # Shield the close so cleanup completes even if we're being cancelled
                with CancelScope(shield=True):
                    await process.aclose()
            except ProcessLookupError:
                pass

            if isinstance(exc, get_cancelled_exc_class()):
                raise
            else:
                raise BrokenWorkerProcess from exc

        retval = pickle.loads(pickled_response)
        if status == b"EXCEPTION":
            # The worker sent back the exception raised by the user function;
            # re-raise it in the caller's task
            assert isinstance(retval, BaseException)
            raise retval
        else:
            return retval

    # First pickle the request before trying to reserve a worker process
    await checkpoint_if_cancelled()
    request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL)

    # If this is the first run in this event loop thread, set up the necessary variables
    try:
        workers = _process_pool_workers.get()
        idle_workers = _process_pool_idle_workers.get()
    except LookupError:
        workers = set()
        idle_workers = deque()
        _process_pool_workers.set(workers)
        _process_pool_idle_workers.set(idle_workers)
        # Make sure pooled workers are terminated when the event loop shuts down
        get_async_backend().setup_process_pool_exit_at_shutdown(workers)

    async with limiter or current_default_process_limiter():
        # Pop processes from the pool (starting from the most recently used) until we
        # find one that hasn't exited yet
        process: Process
        while idle_workers:
            process, idle_since = idle_workers.pop()
            if process.returncode is None:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )

                # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME
                # seconds or longer
                now = current_time()
                killed_processes: list[Process] = []
                while idle_workers:
                    # The deque is ordered oldest-first, so stop at the first entry
                    # that is still fresh enough to keep
                    if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME:
                        break

                    process_to_kill, idle_since = idle_workers.popleft()
                    process_to_kill.kill()
                    workers.remove(process_to_kill)
                    killed_processes.append(process_to_kill)

                # Shield the waits so the cleanup finishes even under cancellation
                with CancelScope(shield=True):
                    for killed_process in killed_processes:
                        await killed_process.aclose()

                break

            # This pooled process has already exited; forget it and keep looking
            workers.remove(process)
        else:
            # No live idle worker was found: spawn a fresh one running this module
            # ("-u" keeps the pipes unbuffered)
            command = [sys.executable, "-u", "-m", __name__]
            process = await open_process(
                command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
            )
            try:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )
                # Give the worker 20 seconds to signal readiness
                with fail_after(20):
                    message = await buffered.receive(6)

                if message != b"READY\n":
                    raise BrokenWorkerProcess(
                        f"Worker process returned unexpected response: {message!r}"
                    )

                # Replicate this interpreter's import environment in the worker
                main_module_path = getattr(sys.modules["__main__"], "__file__", None)
                pickled = pickle.dumps(
                    ("init", sys.path, main_module_path),
                    protocol=pickle.HIGHEST_PROTOCOL,
                )
                await send_raw_command(pickled)
            except (BrokenWorkerProcess, get_cancelled_exc_class()):
                raise
            except BaseException as exc:
                process.kill()
                raise BrokenWorkerProcess(
                    "Error during worker process initialization"
                ) from exc

            workers.add(process)

        # Unless cancellable, shield the call so cancellation waits for the result
        with CancelScope(shield=not cancellable):
            try:
                return cast(T_Retval, await send_raw_command(request))
            finally:
                # Return a still-healthy worker to the idle pool for reuse;
                # send_raw_command removes broken workers from `workers` itself
                if process in workers:
                    idle_workers.append((process, current_time()))
| 181 |
+
|
| 182 |
+
def current_default_process_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of worker
    processes.

    :return: a capacity limiter object

    """
    try:
        limiter = _default_process_limiter.get()
    except LookupError:
        # Lazily create one limiter per event loop thread, sized to the CPU count
        # (falling back to 2 when the count cannot be determined)
        limiter = CapacityLimiter(os.cpu_count() or 2)
        _default_process_limiter.set(limiter)

    return limiter
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def process_worker() -> None:
    """Entry point of a worker process: serve pickled commands from the parent.

    Reads ``("run", func, args)`` and ``("init", sys.path, main_module_path)``
    tuples from the real stdin, and replies on the real stdout with a status line
    plus a pickled payload — the same wire protocol that the parent side speaks.
    """
    # Redirect standard streams to os.devnull so that user code won't interfere with the
    # parent-worker communication
    stdin = sys.stdin
    stdout = sys.stdout
    sys.stdin = open(os.devnull)
    sys.stdout = open(os.devnull, "w")

    # Tell the parent we are ready to accept commands
    stdout.buffer.write(b"READY\n")
    while True:
        retval = exception = None
        try:
            # NOTE: unpickling here is acceptable only because the peer is the
            # parent process that spawned us, not untrusted input
            command, *args = pickle.load(stdin.buffer)
        except EOFError:
            # The parent closed the pipe; shut down cleanly
            return
        except BaseException as exc:
            exception = exc
        else:
            if command == "run":
                func, args = args
                try:
                    retval = func(*args)
                except BaseException as exc:
                    exception = exc
            elif command == "init":
                main_module_path: str | None
                sys.path, main_module_path = args
                del sys.modules["__main__"]
                if main_module_path and os.path.isfile(main_module_path):
                    # Load the parent's main module but as __mp_main__ instead of
                    # __main__ (like multiprocessing does) to avoid infinite recursion
                    try:
                        spec = spec_from_file_location("__mp_main__", main_module_path)
                        if spec and spec.loader:
                            main = module_from_spec(spec)
                            spec.loader.exec_module(main)
                            sys.modules["__main__"] = main
                    except BaseException as exc:
                        exception = exc
        try:
            if exception is not None:
                status = b"EXCEPTION"
                pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL)
            else:
                status = b"RETURN"
                pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL)
        except BaseException as exc:
            # The result itself could not be pickled; report that error instead
            exception = exc
            status = b"EXCEPTION"
            pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL)

        # Reply with "<status> <payload length>\n" followed by the pickled payload
        stdout.buffer.write(b"%s %d\n" % (status, len(pickled)))
        stdout.buffer.write(pickled)

        # Respect SIGTERM
        if isinstance(exception, SystemExit):
            raise exception
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
if __name__ == "__main__":
    # Executed when this module is launched as a worker subprocess
    # (via "python -u -m <module>") by the parent's run_sync()
    process_worker()
|
infer_4_37_2/lib/python3.10/site-packages/anyio/to_thread.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
from collections.abc import Callable
|
| 5 |
+
from typing import TypeVar
|
| 6 |
+
from warnings import warn
|
| 7 |
+
|
| 8 |
+
from ._core._eventloop import get_async_backend
|
| 9 |
+
from .abc import CapacityLimiter
|
| 10 |
+
|
| 11 |
+
if sys.version_info >= (3, 11):
|
| 12 |
+
from typing import TypeVarTuple, Unpack
|
| 13 |
+
else:
|
| 14 |
+
from typing_extensions import TypeVarTuple, Unpack
|
| 15 |
+
|
| 16 |
+
T_Retval = TypeVar("T_Retval")
|
| 17 |
+
PosArgsT = TypeVarTuple("PosArgsT")
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
async def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    abandon_on_cancel: bool = False,
    cancellable: bool | None = None,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Run ``func(*args)`` in a worker thread and return its result.

    When ``abandon_on_cancel`` is true and the awaiting task is cancelled, the
    thread still runs to completion on its own, but its return value (or any
    exception it raises) is discarded.

    :param func: a callable
    :param args: positional arguments for the callable
    :param abandon_on_cancel: ``True`` to abandon the thread (leaving it to run
        unchecked on its own) if the host task is cancelled, ``False`` to ignore
        cancellations in the host task until the operation has completed in the
        worker thread
    :param cancellable: deprecated alias of ``abandon_on_cancel``; will override
        ``abandon_on_cancel`` if both parameters are passed
    :param limiter: capacity limiter to use to limit the total amount of threads
        running (if omitted, the default limiter is used)
    :return: an awaitable that yields the return value of the function.

    """
    # Honor the legacy parameter while steering callers towards the new name
    if cancellable is not None:
        warn(
            "The `cancellable=` keyword argument to `anyio.to_thread.run_sync` is "
            "deprecated since AnyIO 4.1.0; use `abandon_on_cancel=` instead",
            DeprecationWarning,
            stacklevel=2,
        )
        abandon_on_cancel = cancellable

    backend = get_async_backend()
    return await backend.run_sync_in_worker_thread(
        func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter
    )
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def current_default_thread_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of
    concurrent threads.

    :return: a capacity limiter object

    """
    # Delegate to whichever async backend is driving the current event loop
    backend = get_async_backend()
    return backend.current_default_thread_limiter()
|
infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.4
|
| 2 |
+
Name: attrs
|
| 3 |
+
Version: 24.3.0
|
| 4 |
+
Summary: Classes Without Boilerplate
|
| 5 |
+
Project-URL: Documentation, https://www.attrs.org/
|
| 6 |
+
Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html
|
| 7 |
+
Project-URL: GitHub, https://github.com/python-attrs/attrs
|
| 8 |
+
Project-URL: Funding, https://github.com/sponsors/hynek
|
| 9 |
+
Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi
|
| 10 |
+
Author-email: Hynek Schlawack <[email protected]>
|
| 11 |
+
License-Expression: MIT
|
| 12 |
+
License-File: LICENSE
|
| 13 |
+
Keywords: attribute,boilerplate,class
|
| 14 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 15 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 16 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 17 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 18 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 21 |
+
Classifier: Programming Language :: Python :: Implementation :: CPython
|
| 22 |
+
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
| 23 |
+
Classifier: Typing :: Typed
|
| 24 |
+
Requires-Python: >=3.8
|
| 25 |
+
Provides-Extra: benchmark
|
| 26 |
+
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'benchmark'
|
| 27 |
+
Requires-Dist: hypothesis; extra == 'benchmark'
|
| 28 |
+
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark'
|
| 29 |
+
Requires-Dist: pympler; extra == 'benchmark'
|
| 30 |
+
Requires-Dist: pytest-codspeed; extra == 'benchmark'
|
| 31 |
+
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark'
|
| 32 |
+
Requires-Dist: pytest-xdist[psutil]; extra == 'benchmark'
|
| 33 |
+
Requires-Dist: pytest>=4.3.0; extra == 'benchmark'
|
| 34 |
+
Provides-Extra: cov
|
| 35 |
+
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'cov'
|
| 36 |
+
Requires-Dist: coverage[toml]>=5.3; extra == 'cov'
|
| 37 |
+
Requires-Dist: hypothesis; extra == 'cov'
|
| 38 |
+
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov'
|
| 39 |
+
Requires-Dist: pympler; extra == 'cov'
|
| 40 |
+
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov'
|
| 41 |
+
Requires-Dist: pytest-xdist[psutil]; extra == 'cov'
|
| 42 |
+
Requires-Dist: pytest>=4.3.0; extra == 'cov'
|
| 43 |
+
Provides-Extra: dev
|
| 44 |
+
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'dev'
|
| 45 |
+
Requires-Dist: hypothesis; extra == 'dev'
|
| 46 |
+
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev'
|
| 47 |
+
Requires-Dist: pre-commit-uv; extra == 'dev'
|
| 48 |
+
Requires-Dist: pympler; extra == 'dev'
|
| 49 |
+
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev'
|
| 50 |
+
Requires-Dist: pytest-xdist[psutil]; extra == 'dev'
|
| 51 |
+
Requires-Dist: pytest>=4.3.0; extra == 'dev'
|
| 52 |
+
Provides-Extra: docs
|
| 53 |
+
Requires-Dist: cogapp; extra == 'docs'
|
| 54 |
+
Requires-Dist: furo; extra == 'docs'
|
| 55 |
+
Requires-Dist: myst-parser; extra == 'docs'
|
| 56 |
+
Requires-Dist: sphinx; extra == 'docs'
|
| 57 |
+
Requires-Dist: sphinx-notfound-page; extra == 'docs'
|
| 58 |
+
Requires-Dist: sphinxcontrib-towncrier; extra == 'docs'
|
| 59 |
+
Requires-Dist: towncrier<24.7; extra == 'docs'
|
| 60 |
+
Provides-Extra: tests
|
| 61 |
+
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'tests'
|
| 62 |
+
Requires-Dist: hypothesis; extra == 'tests'
|
| 63 |
+
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests'
|
| 64 |
+
Requires-Dist: pympler; extra == 'tests'
|
| 65 |
+
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests'
|
| 66 |
+
Requires-Dist: pytest-xdist[psutil]; extra == 'tests'
|
| 67 |
+
Requires-Dist: pytest>=4.3.0; extra == 'tests'
|
| 68 |
+
Provides-Extra: tests-mypy
|
| 69 |
+
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy'
|
| 70 |
+
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy'
|
| 71 |
+
Description-Content-Type: text/markdown
|
| 72 |
+
|
| 73 |
+
<p align="center">
|
| 74 |
+
<a href="https://www.attrs.org/">
|
| 75 |
+
<img src="https://raw.githubusercontent.com/python-attrs/attrs/main/docs/_static/attrs_logo.svg" width="35%" alt="attrs" />
|
| 76 |
+
</a>
|
| 77 |
+
</p>
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)).
|
| 81 |
+
[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020!
|
| 82 |
+
|
| 83 |
+
Its main goal is to help you to write **concise** and **correct** software without slowing down your code.
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
## Sponsors
|
| 87 |
+
|
| 88 |
+
*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek).
|
| 89 |
+
Especially those generously supporting us at the *The Organization* tier and higher:
|
| 90 |
+
|
| 91 |
+
<!-- sponsor-break-begin -->
|
| 92 |
+
|
| 93 |
+
<p align="center">
|
| 94 |
+
|
| 95 |
+
<!-- [[[cog
|
| 96 |
+
import pathlib, tomllib
|
| 97 |
+
|
| 98 |
+
for sponsor in tomllib.loads(pathlib.Path("pyproject.toml").read_text())["tool"]["sponcon"]["sponsors"]:
|
| 99 |
+
print(f'<a href="{sponsor["url"]}"><img title="{sponsor["title"]}" src="https://www.attrs.org/en/24.3.0/_static/sponsors/{sponsor["img"]}" width="190" /></a>')
|
| 100 |
+
]]] -->
|
| 101 |
+
<a href="https://www.variomedia.de/"><img title="Variomedia AG" src="https://www.attrs.org/en/24.3.0/_static/sponsors/Variomedia.svg" width="190" /></a>
|
| 102 |
+
<a href="https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek"><img title="Tidelift" src="https://www.attrs.org/en/24.3.0/_static/sponsors/Tidelift.svg" width="190" /></a>
|
| 103 |
+
<a href="https://klaviyo.com/"><img title="Klaviyo" src="https://www.attrs.org/en/24.3.0/_static/sponsors/Klaviyo.svg" width="190" /></a>
|
| 104 |
+
<a href="https://www.emsys-renewables.com/"><img title="emsys renewables" src="https://www.attrs.org/en/24.3.0/_static/sponsors/emsys-renewables.svg" width="190" /></a>
|
| 105 |
+
<a href="https://filepreviews.io/"><img title="FilePreviews" src="https://www.attrs.org/en/24.3.0/_static/sponsors/FilePreviews.svg" width="190" /></a>
|
| 106 |
+
<a href="https://polar.sh/"><img title="Polar" src="https://www.attrs.org/en/24.3.0/_static/sponsors/Polar.svg" width="190" /></a>
|
| 107 |
+
<!-- [[[end]]] -->
|
| 108 |
+
|
| 109 |
+
</p>
|
| 110 |
+
|
| 111 |
+
<!-- sponsor-break-end -->
|
| 112 |
+
|
| 113 |
+
<p align="center">
|
| 114 |
+
<strong>Please consider <a href="https://github.com/sponsors/hynek">joining them</a> to help make <em>attrs</em>’s maintenance more sustainable!</strong>
|
| 115 |
+
</p>
|
| 116 |
+
|
| 117 |
+
<!-- teaser-end -->
|
| 118 |
+
|
| 119 |
+
## Example
|
| 120 |
+
|
| 121 |
+
*attrs* gives you a class decorator and a way to declaratively define the attributes on that class:
|
| 122 |
+
|
| 123 |
+
<!-- code-begin -->
|
| 124 |
+
|
| 125 |
+
```pycon
|
| 126 |
+
>>> from attrs import asdict, define, make_class, Factory
|
| 127 |
+
|
| 128 |
+
>>> @define
|
| 129 |
+
... class SomeClass:
|
| 130 |
+
... a_number: int = 42
|
| 131 |
+
... list_of_numbers: list[int] = Factory(list)
|
| 132 |
+
...
|
| 133 |
+
... def hard_math(self, another_number):
|
| 134 |
+
... return self.a_number + sum(self.list_of_numbers) * another_number
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
>>> sc = SomeClass(1, [1, 2, 3])
|
| 138 |
+
>>> sc
|
| 139 |
+
SomeClass(a_number=1, list_of_numbers=[1, 2, 3])
|
| 140 |
+
|
| 141 |
+
>>> sc.hard_math(3)
|
| 142 |
+
19
|
| 143 |
+
>>> sc == SomeClass(1, [1, 2, 3])
|
| 144 |
+
True
|
| 145 |
+
>>> sc != SomeClass(2, [3, 2, 1])
|
| 146 |
+
True
|
| 147 |
+
|
| 148 |
+
>>> asdict(sc)
|
| 149 |
+
{'a_number': 1, 'list_of_numbers': [1, 2, 3]}
|
| 150 |
+
|
| 151 |
+
>>> SomeClass()
|
| 152 |
+
SomeClass(a_number=42, list_of_numbers=[])
|
| 153 |
+
|
| 154 |
+
>>> C = make_class("C", ["a", "b"])
|
| 155 |
+
>>> C("foo", "bar")
|
| 156 |
+
C(a='foo', b='bar')
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
After *declaring* your attributes, *attrs* gives you:
|
| 160 |
+
|
| 161 |
+
- a concise and explicit overview of the class's attributes,
|
| 162 |
+
- a nice human-readable `__repr__`,
|
| 163 |
+
- equality-checking methods,
|
| 164 |
+
- an initializer,
|
| 165 |
+
- and much more,
|
| 166 |
+
|
| 167 |
+
*without* writing dull boilerplate code again and again and *without* runtime performance penalties.
|
| 168 |
+
|
| 169 |
+
---
|
| 170 |
+
|
| 171 |
+
This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0.
|
| 172 |
+
The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**.
|
| 173 |
+
|
| 174 |
+
Check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for an in-depth explanation!
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
### Hate Type Annotations!?
|
| 178 |
+
|
| 179 |
+
No problem!
|
| 180 |
+
Types are entirely **optional** with *attrs*.
|
| 181 |
+
Simply assign `attrs.field()` to the attributes instead of annotating them with types:
|
| 182 |
+
|
| 183 |
+
```python
|
| 184 |
+
from attrs import define, field
|
| 185 |
+
|
| 186 |
+
@define
|
| 187 |
+
class SomeClass:
|
| 188 |
+
a_number = field(default=42)
|
| 189 |
+
list_of_numbers = field(factory=list)
|
| 190 |
+
```
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
## Data Classes
|
| 194 |
+
|
| 195 |
+
On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*).
|
| 196 |
+
In practice it does a lot more and is more flexible.
|
| 197 |
+
For instance, it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), has a replacement for `__init_subclass__`, and allows for stepping through the generated methods using a debugger.
|
| 198 |
+
|
| 199 |
+
For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes), but generally speaking, we are more likely to commit crimes against nature to make things work that one would expect to work, but that are quite complicated in practice.
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
## Project Information
|
| 203 |
+
|
| 204 |
+
- [**Changelog**](https://www.attrs.org/en/stable/changelog.html)
|
| 205 |
+
- [**Documentation**](https://www.attrs.org/)
|
| 206 |
+
- [**PyPI**](https://pypi.org/project/attrs/)
|
| 207 |
+
- [**Source Code**](https://github.com/python-attrs/attrs)
|
| 208 |
+
- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md)
|
| 209 |
+
- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs)
|
| 210 |
+
- **Get Help**: use the `python-attrs` tag on [Stack Overflow](https://stackoverflow.com/questions/tagged/python-attrs)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
### *attrs* for Enterprise
|
| 214 |
+
|
| 215 |
+
Available as part of the [Tidelift Subscription](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek).
|
| 216 |
+
|
| 217 |
+
The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications.
|
| 218 |
+
Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use.
|
| 219 |
+
|
| 220 |
+
## Release Information
|
| 221 |
+
|
| 222 |
+
### Backwards-incompatible Changes
|
| 223 |
+
|
| 224 |
+
- Python 3.7 has been dropped.
|
| 225 |
+
[#1340](https://github.com/python-attrs/attrs/issues/1340)
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
### Changes
|
| 229 |
+
|
| 230 |
+
- Introduce `attrs.NothingType`, for annotating types consistent with `attrs.NOTHING`.
|
| 231 |
+
[#1358](https://github.com/python-attrs/attrs/issues/1358)
|
| 232 |
+
- Allow mutating `__suppress_context__` and `__notes__` on frozen exceptions.
|
| 233 |
+
[#1365](https://github.com/python-attrs/attrs/issues/1365)
|
| 234 |
+
- `attrs.converters.optional()` works again when taking `attrs.converters.pipe()` or another Converter as its argument.
|
| 235 |
+
[#1372](https://github.com/python-attrs/attrs/issues/1372)
|
| 236 |
+
- *attrs* instances now support [`copy.replace()`](https://docs.python.org/3/library/copy.html#copy.replace).
|
| 237 |
+
[#1383](https://github.com/python-attrs/attrs/issues/1383)
|
| 238 |
+
- `attrs.validators.instance_of()`'s type hints now allow for union types.
|
| 239 |
+
For example: `instance_of(str | int)`
|
| 240 |
+
[#1385](https://github.com/python-attrs/attrs/issues/1385)
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
---
|
| 245 |
+
|
| 246 |
+
[Full changelog →](https://www.attrs.org/en/stable/changelog.html)
|
infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
attr/__init__.py,sha256=fOYIvt1eGSqQre4uCS3sJWKZ0mwAuC8UD6qba5OS9_U,2057
|
| 2 |
+
attr/__init__.pyi,sha256=QIXnnHPoucmDWkbpNsWTP-cgJ1bn8le7DjyRa_wYdew,11281
|
| 3 |
+
attr/__pycache__/__init__.cpython-310.pyc,,
|
| 4 |
+
attr/__pycache__/_cmp.cpython-310.pyc,,
|
| 5 |
+
attr/__pycache__/_compat.cpython-310.pyc,,
|
| 6 |
+
attr/__pycache__/_config.cpython-310.pyc,,
|
| 7 |
+
attr/__pycache__/_funcs.cpython-310.pyc,,
|
| 8 |
+
attr/__pycache__/_make.cpython-310.pyc,,
|
| 9 |
+
attr/__pycache__/_next_gen.cpython-310.pyc,,
|
| 10 |
+
attr/__pycache__/_version_info.cpython-310.pyc,,
|
| 11 |
+
attr/__pycache__/converters.cpython-310.pyc,,
|
| 12 |
+
attr/__pycache__/exceptions.cpython-310.pyc,,
|
| 13 |
+
attr/__pycache__/filters.cpython-310.pyc,,
|
| 14 |
+
attr/__pycache__/setters.cpython-310.pyc,,
|
| 15 |
+
attr/__pycache__/validators.cpython-310.pyc,,
|
| 16 |
+
attr/_cmp.py,sha256=3umHiBtgsEYtvNP_8XrQwTCdFoZIX4DEur76N-2a3X8,4123
|
| 17 |
+
attr/_cmp.pyi,sha256=U-_RU_UZOyPUEQzXE6RMYQQcjkZRY25wTH99sN0s7MM,368
|
| 18 |
+
attr/_compat.py,sha256=4hlXbWhdDjQCDK6FKF1EgnZ3POiHgtpp54qE0nxaGHg,2704
|
| 19 |
+
attr/_config.py,sha256=dGq3xR6fgZEF6UBt_L0T-eUHIB4i43kRmH0P28sJVw8,843
|
| 20 |
+
attr/_funcs.py,sha256=5-tUKJtp3h5El55EcDl6GWXFp68fT8D8U7uCRN6497I,15854
|
| 21 |
+
attr/_make.py,sha256=orKSf6C-B1eZfpat4lbAtxvmSyE_yxlG8zY9115ufWk,94157
|
| 22 |
+
attr/_next_gen.py,sha256=7FRkbtl_N017SuBhf_Vw3mw2c2pGZhtCGOzadgz7tp4,24395
|
| 23 |
+
attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469
|
| 24 |
+
attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121
|
| 25 |
+
attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209
|
| 26 |
+
attr/converters.py,sha256=GlDeOzPeTFgeBBLbj9G57Ez5lAk68uhSALRYJ_exe84,3861
|
| 27 |
+
attr/converters.pyi,sha256=orU2bff-VjQa2kMDyvnMQV73oJT2WRyQuw4ZR1ym1bE,643
|
| 28 |
+
attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977
|
| 29 |
+
attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539
|
| 30 |
+
attr/filters.py,sha256=ZBiKWLp3R0LfCZsq7X11pn9WX8NslS2wXM4jsnLOGc8,1795
|
| 31 |
+
attr/filters.pyi,sha256=3J5BG-dTxltBk1_-RuNRUHrv2qu1v8v4aDNAQ7_mifA,208
|
| 32 |
+
attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 33 |
+
attr/setters.py,sha256=5-dcT63GQK35ONEzSgfXCkbB7pPkaR-qv15mm4PVSzQ,1617
|
| 34 |
+
attr/setters.pyi,sha256=NnVkaFU1BB4JB8E4JuXyrzTUgvtMpj8p3wBdJY7uix4,584
|
| 35 |
+
attr/validators.py,sha256=WaB1HLAHHqRHWsrv_K9H-sJ7ESil3H3Cmv2d8TtVZx4,20046
|
| 36 |
+
attr/validators.pyi,sha256=s2WhKPqskxbsckJfKk8zOuuB088GfgpyxcCYSNFLqNU,2603
|
| 37 |
+
attrs-24.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 38 |
+
attrs-24.3.0.dist-info/METADATA,sha256=f9hhECeAUyS7iewHPRuMLDy1tpJ6vyy8R_TKUnCmiA8,11654
|
| 39 |
+
attrs-24.3.0.dist-info/RECORD,,
|
| 40 |
+
attrs-24.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 41 |
+
attrs-24.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
| 42 |
+
attrs-24.3.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109
|
| 43 |
+
attrs/__init__.py,sha256=qeQJZ4O08yczSn840v9bYOaZyRE81WsVi-QCrY3krCU,1107
|
| 44 |
+
attrs/__init__.pyi,sha256=nZmInocjM7tHV4AQw0vxO_fo6oJjL_PonlV9zKKW8DY,7931
|
| 45 |
+
attrs/__pycache__/__init__.cpython-310.pyc,,
|
| 46 |
+
attrs/__pycache__/converters.cpython-310.pyc,,
|
| 47 |
+
attrs/__pycache__/exceptions.cpython-310.pyc,,
|
| 48 |
+
attrs/__pycache__/filters.cpython-310.pyc,,
|
| 49 |
+
attrs/__pycache__/setters.cpython-310.pyc,,
|
| 50 |
+
attrs/__pycache__/validators.cpython-310.pyc,,
|
| 51 |
+
attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76
|
| 52 |
+
attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76
|
| 53 |
+
attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73
|
| 54 |
+
attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 55 |
+
attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73
|
| 56 |
+
attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76
|
infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/REQUESTED
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: hatchling 1.27.0
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
infer_4_37_2/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
The MIT License (MIT)
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2015 Hynek Schlawack and the attrs contributors
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
infer_4_37_2/lib/python3.10/site-packages/click/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.61 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/click/__pycache__/decorators.cpython-310.pyc
ADDED
|
Binary file (17.2 kB). View file
|
|
|